summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/stable/sysfs-driver-usb-usbtmc35
-rw-r--r--Documentation/ABI/testing/configfs-stp-policy-p_sys-t41
-rw-r--r--Documentation/ABI/testing/configfs-usb-gadget-uvc24
-rw-r--r--Documentation/ABI/testing/sysfs-bus-pci24
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-vmbus21
-rw-r--r--Documentation/ABI/testing/sysfs-class-lcd-s6e63m027
-rw-r--r--Documentation/ABI/testing/sysfs-class-net18
-rw-r--r--Documentation/PCI/endpoint/pci-test-howto.txt19
-rw-r--r--Documentation/PCI/pci-error-recovery.txt35
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt17
-rw-r--r--Documentation/admin-guide/security-bugs.rst47
-rw-r--r--Documentation/cgroup-v1/rdma.txt2
-rw-r--r--Documentation/core-api/printk-formats.rst5
-rw-r--r--Documentation/devicetree/bindings/arm/al,alpine.txt72
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-at91.txt170
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-sysregs.txt171
-rw-r--r--Documentation/devicetree/bindings/arm/coresight.txt120
-rw-r--r--Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp34
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt4
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-scfg.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt39
-rw-r--r--Documentation/devicetree/bindings/arm/secure.txt19
-rw-r--r--Documentation/devicetree/bindings/arm/zte,sysctrl.txt30
-rw-r--r--Documentation/devicetree/bindings/arm/zte.txt27
-rw-r--r--Documentation/devicetree/bindings/connector/usb-connector.txt8
-rw-r--r--Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt2
-rw-r--r--Documentation/devicetree/bindings/dma/jz4780-dma.txt14
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt1
-rw-r--r--Documentation/devicetree/bindings/fpga/fpga-region.txt4
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt85
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt36
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt5
-rw-r--r--Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt1
-rw-r--r--Documentation/devicetree/bindings/mfd/arizona.txt2
-rw-r--r--Documentation/devicetree/bindings/mfd/atmel-usart.txt (renamed from Documentation/devicetree/bindings/serial/atmel-usart.txt)25
-rw-r--r--Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt39
-rw-r--r--Documentation/devicetree/bindings/misc/lwn-bk4.txt26
-rw-r--r--Documentation/devicetree/bindings/net/can/rcar_can.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/pci-keystone.txt3
-rw-r--r--Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt1
-rw-r--r--Documentation/devicetree/bindings/pci/rcar-pci.txt2
-rw-r--r--Documentation/devicetree/bindings/pci/ti-pci.txt5
-rw-r--r--Documentation/devicetree/bindings/phy/brcm-sata-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-dp.txt30
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt43
-rw-r--r--Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt23
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt1
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt11
-rw-r--r--Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt10
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt31
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt45
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt69
-rw-r--r--Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt57
-rw-r--r--Documentation/devicetree/bindings/power/reset/qcom,pon.txt5
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq25890.txt3
-rw-r--r--Documentation/devicetree/bindings/power/supply/bq27xxx.txt1
-rw-r--r--Documentation/devicetree/bindings/power/supply/sc2731_charger.txt40
-rw-r--r--Documentation/devicetree/bindings/reset/fsl,imx7-src.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/adi,adau1977.txt54
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l51.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/maxim,max98088.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt23
-rw-r--r--Documentation/devicetree/bindings/sound/nau8822.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/pcm3060.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,q6afe.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/renesas,rsnd.txt5
-rw-r--r--Documentation/devicetree/bindings/sound/st,sta32x.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/st,stm32-sai.txt7
-rw-r--r--Documentation/devicetree/bindings/sound/sun4i-i2s.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt12
-rw-r--r--Documentation/devicetree/bindings/sound/ts3a227e.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/wm8782.txt17
-rw-r--r--Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt16
-rw-r--r--Documentation/devicetree/bindings/thermal/qoriq-thermal.txt6
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt5
-rw-r--r--Documentation/devicetree/bindings/thermal/rcar-thermal.txt6
-rw-r--r--Documentation/devicetree/bindings/thermal/stm32-thermal.txt61
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal.txt2
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,cmt.txt9
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.txt3
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/ehci-mv.txt23
-rw-r--r--Documentation/devicetree/bindings/usb/exynos-usb.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/faraday,fotg210.txt35
-rw-r--r--Documentation/devicetree/bindings/usb/fcs,fusb302.txt32
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usb3.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/renesas_usbhs.txt11
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ehci.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/usb-ohci.txt6
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt3
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas-wdt.txt1
-rw-r--r--Documentation/driver-api/fpga/fpga-bridge.rst37
-rw-r--r--Documentation/driver-api/fpga/fpga-mgr.rst126
-rw-r--r--Documentation/driver-api/fpga/fpga-programming.rst107
-rw-r--r--Documentation/driver-api/fpga/fpga-region.rst91
-rw-r--r--Documentation/driver-api/fpga/index.rst2
-rw-r--r--Documentation/driver-api/fpga/intro.rst2
-rw-r--r--Documentation/driver-api/index.rst2
-rw-r--r--Documentation/driver-api/pci/index.rst22
-rw-r--r--Documentation/driver-api/pci/p2pdma.rst145
-rw-r--r--Documentation/driver-api/pci/pci.rst (renamed from Documentation/driver-api/pci.rst)0
-rw-r--r--Documentation/driver-api/soundwire/stream.rst36
-rw-r--r--Documentation/driver-api/uio-howto.rst4
-rw-r--r--Documentation/filesystems/fscrypt.rst10
-rw-r--r--Documentation/filesystems/porting11
-rw-r--r--Documentation/input/event-codes.rst11
-rw-r--r--Documentation/ioctl/ioctl-number.txt2
-rw-r--r--Documentation/nvmem/nvmem.txt31
-rw-r--r--Documentation/s390/vfio-ap.txt837
-rw-r--r--Documentation/scsi/ufs.txt20
-rw-r--r--Documentation/sound/hd-audio/models.rst2
-rw-r--r--Documentation/sound/kernel-api/writing-an-alsa-driver.rst307
-rw-r--r--Documentation/switchtec.txt30
-rw-r--r--Documentation/trace/stm.rst38
-rw-r--r--Documentation/trace/sys-t.rst62
-rw-r--r--Documentation/virtual/kvm/api.txt135
-rw-r--r--MAINTAINERS93
-rw-r--r--Makefile37
-rw-r--r--arch/alpha/include/asm/unistd.h2
-rw-r--r--arch/alpha/kernel/entry.S53
-rw-r--r--arch/alpha/kernel/systbls.S318
-rw-r--r--arch/arc/Makefile6
-rw-r--r--arch/arc/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/Makefile20
-rw-r--r--arch/arm/boot/compressed/libfdt_env.h2
-rw-r--r--arch/arm/boot/dts/imx7d.dtsi5
-rw-r--r--arch/arm/crypto/Kconfig7
-rw-r--r--arch/arm/crypto/Makefile2
-rw-r--r--arch/arm/crypto/chacha20-neon-core.S277
-rw-r--r--arch/arm/crypto/crc32-ce-glue.c2
-rw-r--r--arch/arm/crypto/ghash-ce-core.S108
-rw-r--r--arch/arm/crypto/ghash-ce-glue.c38
-rw-r--r--arch/arm/crypto/speck-neon-core.S434
-rw-r--r--arch/arm/crypto/speck-neon-glue.c288
-rw-r--r--arch/arm/include/asm/kvm_arm.h3
-rw-r--r--arch/arm/include/asm/kvm_host.h13
-rw-r--r--arch/arm/include/asm/kvm_mmu.h15
-rw-r--r--arch/arm/include/asm/stage2_pgtable.h54
-rw-r--r--arch/arm/include/asm/unistd.h4
-rw-r--r--arch/arm/kernel/devtree.c5
-rw-r--r--arch/arm/kernel/topology.c6
-rw-r--r--arch/arm/mach-at91/pm_suspend.S8
-rw-r--r--arch/arm/mach-mmp/devices.c11
-rw-r--r--arch/arm/mach-shmobile/pm-rcar-gen2.c8
-rw-r--r--arch/arm/mach-shmobile/pm-rmobile.c2
-rw-r--r--arch/arm/mach-shmobile/timer.c10
-rw-r--r--arch/arm64/Kconfig4
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/Makefile17
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi37
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi7
-rw-r--r--arch/arm64/configs/defconfig2
-rw-r--r--arch/arm64/crypto/Kconfig11
-rw-r--r--arch/arm64/crypto/Makefile6
-rw-r--r--arch/arm64/crypto/aes-ce.S5
-rw-r--r--arch/arm64/crypto/aes-glue.c217
-rw-r--r--arch/arm64/crypto/aes-modes.S416
-rw-r--r--arch/arm64/crypto/aes-neon.S6
-rw-r--r--arch/arm64/crypto/crc32-ce-core.S287
-rw-r--r--arch/arm64/crypto/crc32-ce-glue.c244
-rw-r--r--arch/arm64/crypto/crct10dif-ce-core.S314
-rw-r--r--arch/arm64/crypto/crct10dif-ce-glue.c14
-rw-r--r--arch/arm64/crypto/speck-neon-core.S352
-rw-r--r--arch/arm64/crypto/speck-neon-glue.c282
-rw-r--r--arch/arm64/include/asm/compat.h26
-rw-r--r--arch/arm64/include/asm/cpufeature.h21
-rw-r--r--arch/arm64/include/asm/device.h1
-rw-r--r--arch/arm64/include/asm/dma-mapping.h7
-rw-r--r--arch/arm64/include/asm/kvm_arm.h155
-rw-r--r--arch/arm64/include/asm/kvm_asm.h3
-rw-r--r--arch/arm64/include/asm/kvm_host.h18
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h10
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h42
-rw-r--r--arch/arm64/include/asm/ptrace.h3
-rw-r--r--arch/arm64/include/asm/stage2_pgtable-nopmd.h42
-rw-r--r--arch/arm64/include/asm/stage2_pgtable-nopud.h39
-rw-r--r--arch/arm64/include/asm/stage2_pgtable.h236
-rw-r--r--arch/arm64/include/asm/stat.h2
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm64/kernel/pci.c5
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kvm/guest.c6
-rw-r--r--arch/arm64/kvm/handle_exit.c7
-rw-r--r--arch/arm64/kvm/hyp/Makefile1
-rw-r--r--arch/arm64/kvm/hyp/hyp-entry.S16
-rw-r--r--arch/arm64/kvm/hyp/s2-setup.c90
-rw-r--r--arch/arm64/kvm/hyp/switch.c4
-rw-r--r--arch/arm64/kvm/hyp/sysreg-sr.c19
-rw-r--r--arch/arm64/kvm/hyp/tlb.c4
-rw-r--r--arch/arm64/kvm/reset.c108
-rw-r--r--arch/arm64/mm/dma-mapping.c267
-rw-r--r--arch/c6x/Makefile2
-rw-r--r--arch/c6x/boot/dts/Makefile17
-rw-r--r--arch/c6x/boot/dts/linked_dtb.S2
-rw-r--r--arch/c6x/include/asm/sections.h1
-rw-r--r--arch/c6x/include/uapi/asm/unistd.h1
-rw-r--r--arch/c6x/kernel/setup.c15
-rw-r--r--arch/c6x/kernel/vmlinux.lds.S10
-rw-r--r--arch/h8300/Makefile11
-rw-r--r--arch/h8300/include/uapi/asm/unistd.h1
-rw-r--r--arch/hexagon/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/include/asm/unistd.h3
-rw-r--r--arch/m68k/configs/amiga_defconfig2
-rw-r--r--arch/m68k/configs/apollo_defconfig2
-rw-r--r--arch/m68k/configs/atari_defconfig2
-rw-r--r--arch/m68k/configs/bvme6000_defconfig2
-rw-r--r--arch/m68k/configs/hp300_defconfig2
-rw-r--r--arch/m68k/configs/mac_defconfig2
-rw-r--r--arch/m68k/configs/multi_defconfig2
-rw-r--r--arch/m68k/configs/mvme147_defconfig2
-rw-r--r--arch/m68k/configs/mvme16x_defconfig2
-rw-r--r--arch/m68k/configs/q40_defconfig2
-rw-r--r--arch/m68k/configs/sun3_defconfig2
-rw-r--r--arch/m68k/configs/sun3x_defconfig2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/mac/misc.c75
-rw-r--r--arch/microblaze/Makefile4
-rw-r--r--arch/microblaze/boot/dts/Makefile4
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/kernel/cpu/cpuinfo.c6
-rw-r--r--arch/mips/Kconfig42
-rw-r--r--arch/mips/Makefile26
-rw-r--r--arch/mips/bcm47xx/workarounds.c8
-rw-r--r--arch/mips/bmips/setup.c9
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi15
-rw-r--r--arch/mips/boot/dts/ingenic/jz4770.dtsi30
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi3
-rw-r--r--arch/mips/boot/dts/lantiq/danube.dtsi42
-rw-r--r--arch/mips/boot/dts/lantiq/easy50712.dts14
-rw-r--r--arch/mips/boot/dts/mscc/Makefile2
-rw-r--r--arch/mips/boot/dts/mscc/ocelot.dtsi19
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb120.dts107
-rw-r--r--arch/mips/boot/dts/mscc/ocelot_pcb123.dts6
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c16
-rw-r--r--arch/mips/cavium-octeon/setup.c9
-rw-r--r--arch/mips/cavium-octeon/smp.c7
-rw-r--r--arch/mips/configs/generic/board-ocelot.config10
-rw-r--r--arch/mips/generic/Kconfig6
-rw-r--r--arch/mips/generic/Makefile1
-rw-r--r--arch/mips/generic/Platform2
-rw-r--r--arch/mips/generic/board-ocelot.its.S (renamed from arch/mips/generic/board-ocelot_pcb123.its.S)17
-rw-r--r--arch/mips/generic/kexec.c44
-rw-r--r--arch/mips/include/asm/asm-eva.h6
-rw-r--r--arch/mips/include/asm/asm.h116
-rw-r--r--arch/mips/include/asm/compat.h28
-rw-r--r--arch/mips/include/asm/io.h129
-rw-r--r--arch/mips/include/asm/kexec.h11
-rw-r--r--arch/mips/include/asm/mach-loongson64/irq.h2
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h16
-rw-r--r--arch/mips/include/asm/mipsregs.h20
-rw-r--r--arch/mips/include/asm/processor.h2
-rw-r--r--arch/mips/include/asm/r4kcache.h73
-rw-r--r--arch/mips/include/asm/smp-ops.h3
-rw-r--r--arch/mips/include/asm/smp.h16
-rw-r--r--arch/mips/include/asm/unistd.h3
-rw-r--r--arch/mips/kernel/Makefile18
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c14
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c14
-rw-r--r--arch/mips/kernel/crash.c7
-rw-r--r--arch/mips/kernel/head.S18
-rw-r--r--arch/mips/kernel/machine_kexec.c143
-rw-r--r--arch/mips/kernel/mips-mt.c59
-rw-r--r--arch/mips/kernel/relocate.c2
-rw-r--r--arch/mips/kernel/setup.c144
-rw-r--r--arch/mips/kernel/smp-bmips.c7
-rw-r--r--arch/mips/kernel/smp-cps.c80
-rw-r--r--arch/mips/kernel/traps.c5
-rw-r--r--arch/mips/kernel/unaligned.c47
-rw-r--r--arch/mips/lib/Makefile2
-rw-r--r--arch/mips/lib/iomap-pci.c7
-rw-r--r--arch/mips/lib/iomap.c227
-rw-r--r--arch/mips/lib/memcpy.S22
-rw-r--r--arch/mips/lib/memset.S60
-rw-r--r--arch/mips/loongson64/common/Makefile1
-rw-r--r--arch/mips/loongson64/fuloong-2e/Makefile2
-rw-r--r--arch/mips/loongson64/fuloong-2e/dma.c12
-rw-r--r--arch/mips/loongson64/lemote-2f/Makefile2
-rw-r--r--arch/mips/loongson64/lemote-2f/dma.c (renamed from arch/mips/loongson64/common/dma.c)4
-rw-r--r--arch/mips/loongson64/loongson-3/irq.c56
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c34
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c14
-rw-r--r--arch/mips/mm/init.c7
-rw-r--r--arch/mips/netlogic/common/irq.c14
-rw-r--r--arch/mips/pci/ops-loongson3.c34
-rw-r--r--arch/mips/pci/pci-legacy.c4
-rw-r--r--arch/mips/pci/pci-rt2880.c2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_usb.c4
-rw-r--r--arch/mips/ralink/cevt-rt3352.c6
-rw-r--r--arch/mips/ralink/ill_acc.c2
-rw-r--r--arch/mips/ralink/rt305x.c5
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c11
-rw-r--r--arch/mips/tools/.gitignore1
-rw-r--r--arch/mips/tools/Makefile5
-rw-r--r--arch/mips/tools/elf-entry.c96
-rw-r--r--arch/mips/txx9/generic/setup.c5
-rw-r--r--arch/nds32/Makefile2
-rw-r--r--arch/nds32/include/uapi/asm/unistd.h1
-rw-r--r--arch/nios2/Makefile11
-rw-r--r--arch/nios2/boot/Makefile22
-rw-r--r--arch/nios2/boot/dts/Makefile6
-rw-r--r--arch/nios2/include/uapi/asm/unistd.h1
-rw-r--r--arch/nios2/kernel/cpuinfo.c4
-rw-r--r--arch/nios2/kernel/time.c4
-rw-r--r--arch/openrisc/include/uapi/asm/unistd.h1
-rw-r--r--arch/openrisc/kernel/setup.c3
-rw-r--r--arch/parisc/include/asm/compat.h24
-rw-r--r--arch/parisc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/Kbuild16
-rw-r--r--arch/powerpc/Kconfig19
-rw-r--r--arch/powerpc/Kconfig.debug6
-rw-r--r--arch/powerpc/Makefile94
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/Makefile66
-rw-r--r--arch/powerpc/boot/crt0.S4
-rw-r--r--arch/powerpc/boot/dts/Makefile6
-rw-r--r--arch/powerpc/boot/dts/fsl/Makefile4
-rw-r--r--arch/powerpc/boot/libfdt_env.h2
-rw-r--r--arch/powerpc/boot/opal.c8
-rw-r--r--arch/powerpc/boot/serial.c1
-rw-r--r--arch/powerpc/configs/g5_defconfig1
-rw-r--r--arch/powerpc/configs/maple_defconfig1
-rw-r--r--arch/powerpc/configs/powernv_defconfig4
-rw-r--r--arch/powerpc/configs/ppc64_defconfig4
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/skiroot_defconfig154
-rw-r--r--arch/powerpc/include/asm/accounting.h4
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h24
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h152
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash-4k.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/hugetlb.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h107
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h3
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h181
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-radix.h1
-rw-r--r--arch/powerpc/include/asm/compat.h24
-rw-r--r--arch/powerpc/include/asm/cputhreads.h2
-rw-r--r--arch/powerpc/include/asm/cputime.h1
-rw-r--r--arch/powerpc/include/asm/drmem.h5
-rw-r--r--arch/powerpc/include/asm/eeh.h24
-rw-r--r--arch/powerpc/include/asm/error-injection.h13
-rw-r--r--arch/powerpc/include/asm/exception-64s.h17
-rw-r--r--arch/powerpc/include/asm/firmware.h5
-rw-r--r--arch/powerpc/include/asm/fixmap.h2
-rw-r--r--arch/powerpc/include/asm/hvcall.h52
-rw-r--r--arch/powerpc/include/asm/io.h33
-rw-r--r--arch/powerpc/include/asm/iommu.h2
-rw-r--r--arch/powerpc/include/asm/kgdb.h5
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h45
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h118
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h3
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h4
-rw-r--r--arch/powerpc/include/asm/kvm_host.h16
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h8
-rw-r--r--arch/powerpc/include/asm/machdep.h3
-rw-r--r--arch/powerpc/include/asm/mce.h3
-rw-r--r--arch/powerpc/include/asm/mmu.h15
-rw-r--r--arch/powerpc/include/asm/mmu_context.h2
-rw-r--r--arch/powerpc/include/asm/mpic.h7
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgtable.h69
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-40x.h43
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-44x.h30
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-8xx.h87
-rw-r--r--arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h33
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgtable.h41
-rw-r--r--arch/powerpc/include/asm/nohash/pgtable.h100
-rw-r--r--arch/powerpc/include/asm/nohash/pte-book3e.h41
-rw-r--r--arch/powerpc/include/asm/opal-api.h1
-rw-r--r--arch/powerpc/include/asm/paca.h18
-rw-r--r--arch/powerpc/include/asm/pgtable.h29
-rw-r--r--arch/powerpc/include/asm/pnv-pci.h2
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h1
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h1
-rw-r--r--arch/powerpc/include/asm/processor.h7
-rw-r--r--arch/powerpc/include/asm/pte-common.h219
-rw-r--r--arch/powerpc/include/asm/ptrace.h36
-rw-r--r--arch/powerpc/include/asm/reg.h9
-rw-r--r--arch/powerpc/include/asm/rtas.h15
-rw-r--r--arch/powerpc/include/asm/slice.h1
-rw-r--r--arch/powerpc/include/asm/smp.h11
-rw-r--r--arch/powerpc/include/asm/sparsemem.h11
-rw-r--r--arch/powerpc/include/asm/stackprotector.h38
-rw-r--r--arch/powerpc/include/asm/thread_info.h17
-rw-r--r--arch/powerpc/include/asm/trace.h15
-rw-r--r--arch/powerpc/include/asm/uaccess.h6
-rw-r--r--arch/powerpc/include/asm/unistd.h3
-rw-r--r--arch/powerpc/include/asm/user.h2
-rw-r--r--arch/powerpc/include/uapi/asm/kvm.h1
-rw-r--r--arch/powerpc/include/uapi/asm/ptrace.h11
-rw-r--r--arch/powerpc/include/uapi/asm/sigcontext.h6
-rw-r--r--arch/powerpc/kernel/Makefile13
-rw-r--r--arch/powerpc/kernel/asm-offsets.c32
-rw-r--r--arch/powerpc/kernel/btext.c2
-rw-r--r--arch/powerpc/kernel/cacheinfo.c37
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S4
-rw-r--r--arch/powerpc/kernel/crash_dump.c2
-rw-r--r--arch/powerpc/kernel/dma-swiotlb.c4
-rw-r--r--arch/powerpc/kernel/eeh.c42
-rw-r--r--arch/powerpc/kernel/eeh_dev.c2
-rw-r--r--arch/powerpc/kernel/eeh_driver.c237
-rw-r--r--arch/powerpc/kernel/eeh_pe.c160
-rw-r--r--arch/powerpc/kernel/entry_32.S4
-rw-r--r--arch/powerpc/kernel/entry_64.S33
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S244
-rw-r--r--arch/powerpc/kernel/fadump.c4
-rw-r--r--arch/powerpc/kernel/head_8xx.S6
-rw-r--r--arch/powerpc/kernel/io-workarounds.c4
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/isa-bridge.c6
-rw-r--r--arch/powerpc/kernel/kgdb.c43
-rw-r--r--arch/powerpc/kernel/mce.c9
-rw-r--r--arch/powerpc/kernel/mce_power.c9
-rw-r--r--arch/powerpc/kernel/module.c8
-rw-r--r--arch/powerpc/kernel/module_64.c14
-rw-r--r--arch/powerpc/kernel/pci_32.c1
-rw-r--r--arch/powerpc/kernel/pci_64.c2
-rw-r--r--arch/powerpc/kernel/process.c90
-rw-r--r--arch/powerpc/kernel/prom_init.c223
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh16
-rw-r--r--arch/powerpc/kernel/ptrace.c68
-rw-r--r--arch/powerpc/kernel/rtas.c13
-rw-r--r--arch/powerpc/kernel/rtasd.c25
-rw-r--r--arch/powerpc/kernel/setup-common.c3
-rw-r--r--arch/powerpc/kernel/setup_64.c18
-rw-r--r--arch/powerpc/kernel/smp.c245
-rw-r--r--arch/powerpc/kernel/swsusp_asm64.S2
-rw-r--r--arch/powerpc/kernel/time.c104
-rw-r--r--arch/powerpc/kernel/tm.S75
-rw-r--r--arch/powerpc/kernel/trace/Makefile4
-rw-r--r--arch/powerpc/kernel/trace/ftrace.c261
-rw-r--r--arch/powerpc/kernel/trace/ftrace_64.S12
-rw-r--r--arch/powerpc/kernel/traps.c123
-rw-r--r--arch/powerpc/kernel/vdso32/datapage.S1
-rw-r--r--arch/powerpc/kernel/vdso32/gettimeofday.S1
-rw-r--r--arch/powerpc/kernel/vdso64/datapage.S1
-rw-r--r--arch/powerpc/kernel/vdso64/gettimeofday.S1
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S16
-rw-r--r--arch/powerpc/kvm/Makefile5
-rw-r--r--arch/powerpc/kvm/book3s.c46
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_radix.c718
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c94
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c87
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c13
-rw-r--r--arch/powerpc/kvm/book3s_hv.c873
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c92
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S95
-rw-r--r--arch/powerpc/kvm/book3s_hv_nested.c1291
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c10
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_xics.c13
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S809
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm.c6
-rw-r--r--arch/powerpc/kvm/book3s_hv_tm_builtin.c5
-rw-r--r--arch/powerpc/kvm/book3s_pr.c5
-rw-r--r--arch/powerpc/kvm/book3s_xics.c14
-rw-r--r--arch/powerpc/kvm/book3s_xive.c63
-rw-r--r--arch/powerpc/kvm/book3s_xive_template.c8
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S8
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c1
-rw-r--r--arch/powerpc/kvm/powerpc.c15
-rw-r--r--arch/powerpc/kvm/tm.S250
-rw-r--r--arch/powerpc/kvm/trace_book3s.h1
-rw-r--r--arch/powerpc/lib/Makefile4
-rw-r--r--arch/powerpc/lib/code-patching.c3
-rw-r--r--arch/powerpc/lib/error-inject.c16
-rw-r--r--arch/powerpc/lib/mem_64.S4
-rw-r--r--arch/powerpc/mm/8xx_mmu.c5
-rw-r--r--arch/powerpc/mm/Makefile13
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables-8xx.c82
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables-book3s64.c120
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables-generic.c82
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.c167
-rw-r--r--arch/powerpc/mm/dump_linuxpagetables.h19
-rw-r--r--arch/powerpc/mm/hash_native_64.c4
-rw-r--r--arch/powerpc/mm/hash_utils_64.c13
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c6
-rw-r--r--arch/powerpc/mm/hugetlbpage-hash64.c4
-rw-r--r--arch/powerpc/mm/hugetlbpage.c13
-rw-r--r--arch/powerpc/mm/mem.c13
-rw-r--r--arch/powerpc/mm/mmu_context_book3s64.c9
-rw-r--r--arch/powerpc/mm/mmu_decl.h6
-rw-r--r--arch/powerpc/mm/numa.c6
-rw-r--r--arch/powerpc/mm/pgtable-book3e.c9
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c11
-rw-r--r--arch/powerpc/mm/pgtable-hash64.c7
-rw-r--r--arch/powerpc/mm/pgtable-radix.c65
-rw-r--r--arch/powerpc/mm/pgtable.c32
-rw-r--r--arch/powerpc/mm/pgtable_32.c70
-rw-r--r--arch/powerpc/mm/pgtable_64.c57
-rw-r--r--arch/powerpc/mm/ppc_mmu_32.c2
-rw-r--r--arch/powerpc/mm/slb.c784
-rw-r--r--arch/powerpc/mm/slb_low.S335
-rw-r--r--arch/powerpc/mm/slice.c38
-rw-r--r--arch/powerpc/mm/tlb-radix.c11
-rw-r--r--arch/powerpc/mm/tlb_nohash.c3
-rw-r--r--arch/powerpc/oprofile/Makefile1
-rw-r--r--arch/powerpc/oprofile/backtrace.c2
-rw-r--r--arch/powerpc/perf/Makefile1
-rw-r--r--arch/powerpc/perf/imc-pmu.c2
-rw-r--r--arch/powerpc/perf/power7-pmu.c1
-rw-r--r--arch/powerpc/platforms/40x/Kconfig9
-rw-r--r--arch/powerpc/platforms/44x/Kconfig22
-rw-r--r--arch/powerpc/platforms/44x/fsp2.c8
-rw-r--r--arch/powerpc/platforms/4xx/ocm.c7
-rw-r--r--arch/powerpc/platforms/4xx/soc.c2
-rw-r--r--arch/powerpc/platforms/82xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/85xx/smp.c4
-rw-r--r--arch/powerpc/platforms/8xx/m8xx_setup.c5
-rw-r--r--arch/powerpc/platforms/8xx/machine_check.c4
-rw-r--r--arch/powerpc/platforms/Kconfig21
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype5
-rw-r--r--arch/powerpc/platforms/Makefile2
-rw-r--r--arch/powerpc/platforms/cell/Kconfig3
-rw-r--r--arch/powerpc/platforms/cell/spu_manage.c25
-rw-r--r--arch/powerpc/platforms/embedded6xx/wii.c2
-rw-r--r--arch/powerpc/platforms/maple/Kconfig1
-rw-r--r--arch/powerpc/platforms/pasemi/Kconfig1
-rw-r--r--arch/powerpc/platforms/pasemi/dma_lib.c2
-rw-r--r--arch/powerpc/platforms/powermac/Makefile3
-rw-r--r--arch/powerpc/platforms/powermac/feature.c51
-rw-r--r--arch/powerpc/platforms/powermac/setup.c15
-rw-r--r--arch/powerpc/platforms/powermac/time.c126
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig6
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c62
-rw-r--r--arch/powerpc/platforms/powernv/memtrace.c21
-rw-r--r--arch/powerpc/platforms/powernv/npu-dma.c198
-rw-r--r--arch/powerpc/platforms/powernv/opal-powercap.c3
-rw-r--r--arch/powerpc/platforms/powernv/opal-sensor-groups.c4
-rw-r--r--arch/powerpc/platforms/powernv/opal-sysparam.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c47
-rw-r--r--arch/powerpc/platforms/ps3/Kconfig2
-rw-r--r--arch/powerpc/platforms/ps3/os-area.c2
-rw-r--r--arch/powerpc/platforms/ps3/spu.c3
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig9
-rw-r--r--arch/powerpc/platforms/pseries/Makefile3
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c41
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c4
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c66
-rw-r--r--arch/powerpc/platforms/pseries/event_sources.c40
-rw-r--r--arch/powerpc/platforms/pseries/firmware.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c28
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-memory.c116
-rw-r--r--arch/powerpc/platforms/pseries/ibmebus.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c295
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c5
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c23
-rw-r--r--arch/powerpc/platforms/pseries/msi.c3
-rw-r--r--arch/powerpc/platforms/pseries/papr_scm.c345
-rw-r--r--arch/powerpc/platforms/pseries/pci.c1
-rw-r--r--arch/powerpc/platforms/pseries/pmem.c164
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h11
-rw-r--r--arch/powerpc/platforms/pseries/ras.c308
-rw-r--r--arch/powerpc/platforms/pseries/setup.c14
-rw-r--r--arch/powerpc/platforms/pseries/vio.c27
-rw-r--r--arch/powerpc/sysdev/Kconfig5
-rw-r--r--arch/powerpc/sysdev/Makefile3
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c8
-rw-r--r--arch/powerpc/sysdev/ipic.c2
-rw-r--r--arch/powerpc/sysdev/xics/Makefile1
-rw-r--r--arch/powerpc/sysdev/xive/Kconfig3
-rw-r--r--arch/powerpc/sysdev/xive/Makefile1
-rw-r--r--arch/powerpc/sysdev/xive/common.c7
-rw-r--r--arch/powerpc/sysdev/xive/native.c11
-rw-r--r--arch/powerpc/xmon/Makefile5
-rw-r--r--arch/powerpc/xmon/xmon.c56
-rw-r--r--arch/riscv/Kconfig52
-rw-r--r--arch/riscv/Kconfig.debug35
-rw-r--r--arch/riscv/Makefile21
-rw-r--r--arch/riscv/include/asm/Kbuild1
-rw-r--r--arch/riscv/include/asm/futex.h128
-rw-r--r--arch/riscv/include/asm/processor.h2
-rw-r--r--arch/riscv/include/asm/smp.h47
-rw-r--r--arch/riscv/include/asm/switch_to.h12
-rw-r--r--arch/riscv/include/asm/tlbflush.h16
-rw-r--r--arch/riscv/include/asm/unistd.h1
-rw-r--r--arch/riscv/include/uapi/asm/elf.h3
-rw-r--r--arch/riscv/kernel/Makefile1
-rw-r--r--arch/riscv/kernel/cacheinfo.c7
-rw-r--r--arch/riscv/kernel/cpu.c87
-rw-r--r--arch/riscv/kernel/cpufeature.c15
-rw-r--r--arch/riscv/kernel/entry.S88
-rw-r--r--arch/riscv/kernel/fpu.S106
-rw-r--r--arch/riscv/kernel/head.S4
-rw-r--r--arch/riscv/kernel/irq.c12
-rw-r--r--arch/riscv/kernel/mcount.S1
-rw-r--r--arch/riscv/kernel/process.c6
-rw-r--r--arch/riscv/kernel/ptrace.c52
-rw-r--r--arch/riscv/kernel/setup.c13
-rw-r--r--arch/riscv/kernel/signal.c75
-rw-r--r--arch/riscv/kernel/smp.c82
-rw-r--r--arch/riscv/kernel/smpboot.c46
-rw-r--r--arch/riscv/lib/Makefile3
-rw-r--r--arch/riscv/mm/ioremap.c2
-rw-r--r--arch/s390/Kconfig11
-rw-r--r--arch/s390/configs/debug_defconfig1
-rw-r--r--arch/s390/configs/performance_defconfig1
-rw-r--r--arch/s390/crypto/aes_s390.c48
-rw-r--r--arch/s390/defconfig1
-rw-r--r--arch/s390/include/asm/compat.h18
-rw-r--r--arch/s390/include/asm/kvm_host.h15
-rw-r--r--arch/s390/include/asm/unistd.h3
-rw-r--r--arch/s390/include/uapi/asm/kvm.h2
-rw-r--r--arch/s390/kvm/kvm-s390.c184
-rw-r--r--arch/s390/kvm/kvm-s390.h1
-rw-r--r--arch/s390/kvm/vsie.c210
-rw-r--r--arch/s390/mm/gmap.c10
-rw-r--r--arch/s390/tools/gen_facilities.c2
-rw-r--r--arch/sh/boards/of-generic.c2
-rw-r--r--arch/sh/include/asm/unistd.h2
-rw-r--r--arch/sparc/include/asm/cmpxchg_64.h7
-rw-r--r--arch/sparc/include/asm/compat.h25
-rw-r--r--arch/sparc/include/asm/prom.h3
-rw-r--r--arch/sparc/include/asm/switch_to_64.h3
-rw-r--r--arch/sparc/include/asm/unistd.h3
-rw-r--r--arch/sparc/include/asm/vdso.h2
-rw-r--r--arch/sparc/kernel/process_64.c25
-rw-r--r--arch/sparc/kernel/rtrap_64.S1
-rw-r--r--arch/sparc/kernel/signal32.c12
-rw-r--r--arch/sparc/kernel/signal_64.c6
-rw-r--r--arch/sparc/mm/init_64.c1
-rw-r--r--arch/sparc/vdso/vclock_gettime.c149
-rw-r--r--arch/sparc/vdso/vdso-layout.lds.S3
-rw-r--r--arch/sparc/vdso/vdso.lds.S2
-rw-r--r--arch/sparc/vdso/vdso2c.h17
-rw-r--r--arch/sparc/vdso/vdso32/vdso32.lds.S2
-rw-r--r--arch/sparc/vdso/vma.c222
-rw-r--r--arch/unicore32/include/uapi/asm/unistd.h1
-rw-r--r--arch/x86/crypto/Makefile5
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c47
-rw-r--r--arch/x86/crypto/fpu.c207
-rw-r--r--arch/x86/crypto/sha1-mb/Makefile14
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb.c1011
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_ctx.h134
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr.h110
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S287
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S304
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c64
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S209
-rw-r--r--arch/x86/crypto/sha1-mb/sha1_x8_avx2.S492
-rw-r--r--arch/x86/crypto/sha256-mb/Makefile14
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb.c1013
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_ctx.h134
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr.h108
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S304
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S307
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c65
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S214
-rw-r--r--arch/x86/crypto/sha256-mb/sha256_x8_avx2.S598
-rw-r--r--arch/x86/crypto/sha512-mb/Makefile12
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb.c1047
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_ctx.h128
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr.h104
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S281
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S297
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S224
-rw-r--r--arch/x86/crypto/sha512-mb/sha512_x4_avx2.S531
-rw-r--r--arch/x86/include/asm/compat.h19
-rw-r--r--arch/x86/include/asm/irq_remapping.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h70
-rw-r--r--arch/x86/include/asm/unistd.h3
-rw-r--r--arch/x86/include/asm/virtext.h2
-rw-r--r--arch/x86/include/asm/vmx.h13
-rw-r--r--arch/x86/include/uapi/asm/kvm.h8
-rw-r--r--arch/x86/kernel/devicetree.c2
-rw-r--r--arch/x86/kvm/hyperv.c280
-rw-r--r--arch/x86/kvm/hyperv.h4
-rw-r--r--arch/x86/kvm/lapic.c45
-rw-r--r--arch/x86/kvm/lapic.h2
-rw-r--r--arch/x86/kvm/mmu.c393
-rw-r--r--arch/x86/kvm/mmu.h13
-rw-r--r--arch/x86/kvm/mmu_audit.c12
-rw-r--r--arch/x86/kvm/paging_tmpl.h15
-rw-r--r--arch/x86/kvm/svm.c64
-rw-r--r--arch/x86/kvm/trace.h42
-rw-r--r--arch/x86/kvm/vmx.c2287
-rw-r--r--arch/x86/kvm/vmx_shadow_fields.h5
-rw-r--r--arch/x86/kvm/x86.c244
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/pci/acpi.c2
-rw-r--r--arch/x86/pci/fixup.c12
-rw-r--r--arch/xtensa/Makefile12
-rw-r--r--arch/xtensa/include/asm/unistd.h2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c6
-rw-r--r--block/bfq-wf2q.c18
-rw-r--r--block/blk-core.c1
-rw-r--r--block/blk-lib.c13
-rw-r--r--block/blk-mq-debugfs.c1
-rw-r--r--block/blk-mq.c4
-rw-r--r--block/blk-sysfs.c13
-rw-r--r--block/blk-zoned.c359
-rw-r--r--block/blk.h8
-rw-r--r--block/ioctl.c4
-rw-r--r--crypto/Kconfig101
-rw-r--r--crypto/Makefile4
-rw-r--r--crypto/aegis.h20
-rw-r--r--crypto/ahash.c25
-rw-r--r--crypto/algapi.c17
-rw-r--r--crypto/algboss.c2
-rw-r--r--crypto/algif_aead.c12
-rw-r--r--crypto/algif_hash.c2
-rw-r--r--crypto/authenc.c8
-rw-r--r--crypto/authencesn.c8
-rw-r--r--crypto/ccm.c9
-rw-r--r--crypto/chacha20_generic.c7
-rw-r--r--crypto/cryptd.c32
-rw-r--r--crypto/crypto_null.c11
-rw-r--r--crypto/crypto_user_base.c (renamed from crypto/crypto_user.c)9
-rw-r--r--crypto/crypto_user_stat.c463
-rw-r--r--crypto/echainiv.c4
-rw-r--r--crypto/gcm.c8
-rw-r--r--crypto/internal.h8
-rw-r--r--crypto/lrw.c339
-rw-r--r--crypto/mcryptd.c675
-rw-r--r--crypto/morus1280.c7
-rw-r--r--crypto/morus640.c16
-rw-r--r--crypto/ofb.c225
-rw-r--r--crypto/rng.c1
-rw-r--r--crypto/rsa-pkcs1pad.c9
-rw-r--r--crypto/seqiv.c4
-rw-r--r--crypto/shash.c33
-rw-r--r--crypto/skcipher.c24
-rw-r--r--crypto/speck.c307
-rw-r--r--crypto/tcrypt.c27
-rw-r--r--crypto/tcrypt.h1
-rw-r--r--crypto/testmgr.c42
-rw-r--r--crypto/testmgr.h863
-rw-r--r--crypto/xcbc.c8
-rw-r--r--crypto/xts.c269
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/acpi_adxl.c192
-rw-r--r--drivers/acpi/nfit/core.c297
-rw-r--r--drivers/acpi/nfit/intel.h38
-rw-r--r--drivers/acpi/nfit/nfit.h21
-rw-r--r--drivers/acpi/pci_root.c17
-rw-r--r--drivers/acpi/property.c97
-rw-r--r--drivers/acpi/x86/apple.c2
-rw-r--r--drivers/android/Kconfig2
-rw-r--r--drivers/android/binder.c489
-rw-r--r--drivers/android/binder_trace.h36
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/sata_inic162x.c2
-rw-r--r--drivers/base/component.c6
-rw-r--r--drivers/base/devres.c36
-rw-r--r--drivers/base/devtmpfs.c2
-rw-r--r--drivers/base/platform-msi.c14
-rw-r--r--drivers/block/cryptoloop.c22
-rw-r--r--drivers/block/null_blk.h11
-rw-r--r--drivers/block/null_blk_main.c30
-rw-r--r--drivers/block/null_blk_zoned.c57
-rw-r--r--drivers/block/rsxx/core.c2
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/block/xen-blkfront.c3
-rw-r--r--drivers/block/z2ram.c3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-bus.c27
-rw-r--r--drivers/bus/mvebu-mbus.c2
-rw-r--r--drivers/cdrom/gdrom.c8
-rw-r--r--drivers/char/hw_random/core.c4
-rw-r--r--drivers/char/random.c24
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c147
-rw-r--r--drivers/char/tpm/tpm-dev.c11
-rw-r--r--drivers/char/tpm/tpm-dev.h18
-rw-r--r--drivers/char/tpm/tpm-interface.c30
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm2-cmd.c4
-rw-r--r--drivers/char/tpm/tpmrm-dev.c15
-rw-r--r--drivers/char/tpm/xen-tpmfront.c2
-rw-r--r--drivers/clk/mvebu/clk-cpu.c4
-rw-r--r--drivers/clocksource/Makefile26
-rw-r--r--drivers/clocksource/asm9260_timer.c2
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c20
-rw-r--r--drivers/clocksource/pxa_timer.c6
-rw-r--r--drivers/clocksource/renesas-ostm.c11
-rw-r--r--drivers/clocksource/riscv_timer.c12
-rw-r--r--drivers/clocksource/sh_cmt.c106
-rw-r--r--drivers/clocksource/sh_mtu2.c10
-rw-r--r--drivers/clocksource/sh_tmu.c10
-rw-r--r--drivers/clocksource/timer-armada-370-xp.c (renamed from drivers/clocksource/time-armada-370-xp.c)0
-rw-r--r--drivers/clocksource/timer-cadence-ttc.c (renamed from drivers/clocksource/cadence_ttc_timer.c)2
-rw-r--r--drivers/clocksource/timer-efm32.c (renamed from drivers/clocksource/time-efm32.c)0
-rw-r--r--drivers/clocksource/timer-fsl-ftm.c (renamed from drivers/clocksource/fsl_ftm_timer.c)0
-rw-r--r--drivers/clocksource/timer-integrator-ap.c2
-rw-r--r--drivers/clocksource/timer-lpc32xx.c (renamed from drivers/clocksource/time-lpc32xx.c)0
-rw-r--r--drivers/clocksource/timer-orion.c (renamed from drivers/clocksource/time-orion.c)8
-rw-r--r--drivers/clocksource/timer-owl.c (renamed from drivers/clocksource/owl-timer.c)0
-rw-r--r--drivers/clocksource/timer-pistachio.c (renamed from drivers/clocksource/time-pistachio.c)0
-rw-r--r--drivers/clocksource/timer-qcom.c (renamed from drivers/clocksource/qcom-timer.c)0
-rw-r--r--drivers/clocksource/timer-sp804.c2
-rw-r--r--drivers/clocksource/timer-versatile.c (renamed from drivers/clocksource/versatile.c)0
-rw-r--r--drivers/clocksource/timer-vf-pit.c (renamed from drivers/clocksource/vf_pit_timer.c)0
-rw-r--r--drivers/clocksource/timer-vt8500.c (renamed from drivers/clocksource/vt8500_timer.c)0
-rw-r--r--drivers/clocksource/timer-zevio.c (renamed from drivers/clocksource/zevio-timer.c)8
-rw-r--r--drivers/crypto/Makefile2
-rw-r--r--drivers/crypto/atmel-aes.c5
-rw-r--r--drivers/crypto/atmel-authenc.h13
-rw-r--r--drivers/crypto/atmel-ecc.c11
-rw-r--r--drivers/crypto/atmel-ecc.h14
-rw-r--r--drivers/crypto/atmel-sha.c5
-rw-r--r--drivers/crypto/atmel-tdes.c5
-rw-r--r--drivers/crypto/axis/artpec6_crypto.c20
-rw-r--r--drivers/crypto/caam/Kconfig57
-rw-r--r--drivers/crypto/caam/Makefile10
-rw-r--r--drivers/crypto/caam/caamalg.c728
-rw-r--r--drivers/crypto/caam/caamalg_desc.c143
-rw-r--r--drivers/crypto/caam/caamalg_desc.h28
-rw-r--r--drivers/crypto/caam/caamalg_qi.c627
-rw-r--r--drivers/crypto/caam/caamalg_qi2.c5165
-rw-r--r--drivers/crypto/caam/caamalg_qi2.h223
-rw-r--r--drivers/crypto/caam/caamhash.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.c80
-rw-r--r--drivers/crypto/caam/caamhash_desc.h21
-rw-r--r--drivers/crypto/caam/caampkc.c1
-rw-r--r--drivers/crypto/caam/caamrng.c1
-rw-r--r--drivers/crypto/caam/compat.h2
-rw-r--r--drivers/crypto/caam/ctrl.c1
-rw-r--r--drivers/crypto/caam/dpseci.c426
-rw-r--r--drivers/crypto/caam/dpseci.h333
-rw-r--r--drivers/crypto/caam/dpseci_cmd.h149
-rw-r--r--drivers/crypto/caam/error.c79
-rw-r--r--drivers/crypto/caam/error.h6
-rw-r--r--drivers/crypto/caam/jr.c1
-rw-r--r--drivers/crypto/caam/qi.c43
-rw-r--r--drivers/crypto/caam/qi.h3
-rw-r--r--drivers/crypto/caam/regs.h30
-rw-r--r--drivers/crypto/caam/sg_sw_qm.h29
-rw-r--r--drivers/crypto/caam/sg_sw_qm2.h30
-rw-r--r--drivers/crypto/cavium/cpt/cptvf_reqmanager.c20
-rw-r--r--drivers/crypto/cavium/nitrox/Makefile3
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_common.h19
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_csr.h111
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_debugfs.c115
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_dev.h162
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.c71
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_hal.h23
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.c337
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_isr.h10
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_lib.c98
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_main.c203
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_reqmgr.c49
-rw-r--r--drivers/crypto/cavium/nitrox/nitrox_sriov.c151
-rw-r--r--drivers/crypto/ccp/ccp-crypto-aes-xts.c13
-rw-r--r--drivers/crypto/ccp/ccp-crypto.h2
-rw-r--r--drivers/crypto/ccp/psp-dev.c47
-rw-r--r--drivers/crypto/ccp/sp-platform.c53
-rw-r--r--drivers/crypto/ccree/cc_hw_queue_defs.h6
-rw-r--r--drivers/crypto/chelsio/chcr_algo.c30
-rw-r--r--drivers/crypto/chelsio/chcr_core.c2
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c7
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_main.c3
-rw-r--r--drivers/crypto/mxs-dcp.c142
-rw-r--r--drivers/crypto/omap-aes.c17
-rw-r--r--drivers/crypto/omap-aes.h2
-rw-r--r--drivers/crypto/picoxcell_crypto.c21
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c60
-rw-r--r--drivers/crypto/qce/ablkcipher.c13
-rw-r--r--drivers/crypto/qce/cipher.h2
-rw-r--r--drivers/crypto/s5p-sss.c113
-rw-r--r--drivers/crypto/sahara.c31
-rw-r--r--drivers/crypto/vmx/aes_cbc.c22
-rw-r--r--drivers/crypto/vmx/aes_ctr.c18
-rw-r--r--drivers/crypto/vmx/aes_xts.c18
-rw-r--r--drivers/dma/Kconfig13
-rw-r--r--drivers/dma/Makefile3
-rw-r--r--drivers/dma/at_hdmac.c2
-rw-r--r--drivers/dma/at_xdmac.c4
-rw-r--r--drivers/dma/bcm2835-dma.c8
-rw-r--r--drivers/dma/coh901318.c28
-rw-r--r--drivers/dma/dma-jz4740.c21
-rw-r--r--drivers/dma/dma-jz4780.c289
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c4
-rw-r--r--drivers/dma/dw/core.c5
-rw-r--r--drivers/dma/dw/platform.c2
-rw-r--r--drivers/dma/ep93xx_dma.c21
-rw-r--r--drivers/dma/fsl-edma-common.c626
-rw-r--r--drivers/dma/fsl-edma-common.h233
-rw-r--r--drivers/dma/fsl-edma.c729
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/hsu/hsu.c4
-rw-r--r--drivers/dma/idma64.c9
-rw-r--r--drivers/dma/imx-dma.c20
-rw-r--r--drivers/dma/ioat/init.c23
-rw-r--r--drivers/dma/k3dma.c36
-rw-r--r--drivers/dma/mcf-edma.c317
-rw-r--r--drivers/dma/mmp_tdma.c29
-rw-r--r--drivers/dma/mv_xor.c4
-rw-r--r--drivers/dma/mxs-dma.c3
-rw-r--r--drivers/dma/nbpfaxi.c9
-rw-r--r--drivers/dma/owl-dma.c283
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/dma/pxa_dma.c5
-rw-r--r--drivers/dma/sh/rcar-dmac.c3
-rw-r--r--drivers/dma/sh/shdma-arm.h5
-rw-r--r--drivers/dma/sh/shdma-base.c5
-rw-r--r--drivers/dma/sh/shdma-of.c5
-rw-r--r--drivers/dma/sh/shdma-r8a73a4.c5
-rw-r--r--drivers/dma/sh/shdma.h6
-rw-r--r--drivers/dma/sh/shdmac.c6
-rw-r--r--drivers/dma/sh/sudmac.c5
-rw-r--r--drivers/dma/sh/usb-dmac.c5
-rw-r--r--drivers/dma/sprd-dma.c81
-rw-r--r--drivers/dma/st_fdma.c7
-rw-r--r--drivers/dma/ste_dma40.c14
-rw-r--r--drivers/dma/stm32-dma.c20
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/dma/timb_dma.c2
-rw-r--r--drivers/edac/altera_edac.c667
-rw-r--r--drivers/edac/altera_edac.h73
-rw-r--r--drivers/edac/amd64_edac.c24
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/edac/cpc925_edac.c20
-rw-r--r--drivers/edac/ghes_edac.c23
-rw-r--r--drivers/edac/i3200_edac.c2
-rw-r--r--drivers/edac/i7core_edac.c5
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/sb_edac.c204
-rw-r--r--drivers/edac/skx_edac.c7
-rw-r--r--drivers/edac/thunderx_edac.c4
-rw-r--r--drivers/extcon/extcon-intel-cht-wc.c60
-rw-r--r--drivers/extcon/extcon-intel-int3496.c12
-rw-r--r--drivers/extcon/extcon-max14577.c24
-rw-r--r--drivers/extcon/extcon-max77693.c22
-rw-r--r--drivers/extcon/extcon-max77843.c19
-rw-r--r--drivers/extcon/extcon-max8997.c22
-rw-r--r--drivers/extcon/extcon.c15
-rw-r--r--drivers/firmware/google/Kconfig32
-rw-r--r--drivers/firmware/google/Makefile2
-rw-r--r--drivers/firmware/google/coreboot_table-acpi.c88
-rw-r--r--drivers/firmware/google/coreboot_table-of.c82
-rw-r--r--drivers/firmware/google/coreboot_table.c126
-rw-r--r--drivers/firmware/google/coreboot_table.h6
-rw-r--r--drivers/firmware/google/gsmi.c122
-rw-r--r--drivers/firmware/google/vpd.c2
-rw-r--r--drivers/firmware/scpi_pm_domain.c2
-rw-r--r--drivers/fpga/altera-cvp.c8
-rw-r--r--drivers/fpga/altera-fpga2sdram.c8
-rw-r--r--drivers/fpga/altera-freeze-bridge.c13
-rw-r--r--drivers/fpga/altera-hps2fpga.c7
-rw-r--r--drivers/fpga/altera-pr-ip-core.c9
-rw-r--r--drivers/fpga/altera-ps-spi.c11
-rw-r--r--drivers/fpga/dfl-afu-dma-region.c2
-rw-r--r--drivers/fpga/dfl-fme-br.c11
-rw-r--r--drivers/fpga/dfl-fme-mgr.c13
-rw-r--r--drivers/fpga/dfl-fme-region.c6
-rw-r--r--drivers/fpga/dfl.c6
-rw-r--r--drivers/fpga/fpga-bridge.c68
-rw-r--r--drivers/fpga/fpga-mgr.c64
-rw-r--r--drivers/fpga/fpga-region.c65
-rw-r--r--drivers/fpga/ice40-spi.c10
-rw-r--r--drivers/fpga/machxo2-spi.c11
-rw-r--r--drivers/fpga/of-fpga-region.c6
-rw-r--r--drivers/fpga/socfpga-a10.c5
-rw-r--r--drivers/fpga/socfpga.c10
-rw-r--r--drivers/fpga/ts73xx-fpga.c11
-rw-r--r--drivers/fpga/xilinx-pr-decoupler.c4
-rw-r--r--drivers/fpga/xilinx-spi.c12
-rw-r--r--drivers/fpga/zynq-fpga.c5
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c5
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c5
-rw-r--r--drivers/hid/Kconfig16
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-bigbenff.c414
-rw-r--r--drivers/hid/hid-core.c2
-rw-r--r--drivers/hid/hid-cougar.c66
-rw-r--r--drivers/hid/hid-elan.c2
-rw-r--r--drivers/hid/hid-google-hammer.c413
-rw-r--r--drivers/hid/hid-ids.h6
-rw-r--r--drivers/hid/hid-input.c64
-rw-r--r--drivers/hid/hid-logitech-hidpp.c309
-rw-r--r--drivers/hid/hid-magicmouse.c142
-rw-r--r--drivers/hid/hid-microsoft.c141
-rw-r--r--drivers/hid/hid-multitouch.c72
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/i2c-hid/Makefile3
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c (renamed from drivers/hid/i2c-hid/i2c-hid.c)60
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c376
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.h20
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c32
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c75
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c41
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.c52
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/bus.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client-buffers.c49
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.c24
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/client.h5
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/wacom_wac.c19
-rw-r--r--drivers/hv/channel.c300
-rw-r--r--drivers/hv/channel_mgmt.c54
-rw-r--r--drivers/hv/hv.c15
-rw-r--r--drivers/hv/hv_balloon.c2
-rw-r--r--drivers/hv/hv_kvp.c14
-rw-r--r--drivers/hv/ring_buffer.c1
-rw-r--r--drivers/hv/vmbus_drv.c118
-rw-r--r--drivers/hwtracing/coresight/coresight-catu.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c81
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c183
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c132
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.h26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c58
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c93
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c28
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c198
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c385
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c6
-rw-r--r--drivers/hwtracing/coresight/coresight.c184
-rw-r--r--drivers/hwtracing/coresight/of_coresight.c262
-rw-r--r--drivers/hwtracing/stm/Kconfig29
-rw-r--r--drivers/hwtracing/stm/Makefile6
-rw-r--r--drivers/hwtracing/stm/core.c292
-rw-r--r--drivers/hwtracing/stm/heartbeat.c2
-rw-r--r--drivers/hwtracing/stm/p_basic.c48
-rw-r--r--drivers/hwtracing/stm/p_sys-t.c382
-rw-r--r--drivers/hwtracing/stm/policy.c147
-rw-r--r--drivers/hwtracing/stm/stm.h56
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c12
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/addr.c406
-rw-r--r--drivers/infiniband/core/cache.c79
-rw-r--r--drivers/infiniband/core/cm.c9
-rw-r--r--drivers/infiniband/core/cma.c251
-rw-r--r--drivers/infiniband/core/cma_configfs.c2
-rw-r--r--drivers/infiniband/core/core_priv.h12
-rw-r--r--drivers/infiniband/core/cq.c10
-rw-r--r--drivers/infiniband/core/device.c264
-rw-r--r--drivers/infiniband/core/fmr_pool.c5
-rw-r--r--drivers/infiniband/core/iwcm.c2
-rw-r--r--drivers/infiniband/core/mad.c80
-rw-r--r--drivers/infiniband/core/mad_priv.h2
-rw-r--r--drivers/infiniband/core/netlink.c4
-rw-r--r--drivers/infiniband/core/nldev.c37
-rw-r--r--drivers/infiniband/core/rdma_core.c56
-rw-r--r--drivers/infiniband/core/rdma_core.h1
-rw-r--r--drivers/infiniband/core/restrack.c30
-rw-r--r--drivers/infiniband/core/rw.c11
-rw-r--r--drivers/infiniband/core/sa.h8
-rw-r--r--drivers/infiniband/core/sa_query.c70
-rw-r--r--drivers/infiniband/core/security.c7
-rw-r--r--drivers/infiniband/core/sysfs.c101
-rw-r--r--drivers/infiniband/core/umem.c125
-rw-r--r--drivers/infiniband/core/umem_odp.c621
-rw-r--r--drivers/infiniband/core/user_mad.c13
-rw-r--r--drivers/infiniband/core/uverbs.h15
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c43
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c140
-rw-r--r--drivers/infiniband/core/uverbs_main.c340
-rw-r--r--drivers/infiniband/core/uverbs_std_types_flow_action.c7
-rw-r--r--drivers/infiniband/core/uverbs_uapi.c12
-rw-r--r--drivers/infiniband/core/verbs.c19
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c125
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c134
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c88
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h4
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h5
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c55
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c50
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c20
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h2
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile42
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c486
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h71
-rw-r--r--drivers/infiniband/hw/hfi1/chip_registers.h4
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c4
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h48
-rw-r--r--drivers/infiniband/hw/hfi1/init.c113
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.c94
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h192
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c4
-rw-r--r--drivers/infiniband/hw/hfi1/msix.c363
-rw-r--r--drivers/infiniband/hw/hfi1/msix.h (renamed from arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c)55
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c75
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c8
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c100
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h31
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c24
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c382
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c56
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h21
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c69
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_iowait.h54
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c14
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c22
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c137
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h20
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c251
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h35
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h11
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c12
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c21
-rw-r--r--drivers/infiniband/hw/hns/Kconfig1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c6
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h45
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c629
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h96
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c123
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c212
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c41
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c73
-rw-r--r--drivers/infiniband/hw/mlx4/Kconfig1
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c20
-rw-r--r--drivers/infiniband/hw/mlx4/main.c182
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c2
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h5
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c8
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c129
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h14
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c3
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c358
-rw-r--r--drivers/infiniband/hw/mlx5/flow.c393
-rw-r--r--drivers/infiniband/hw/mlx5/ib_rep.c3
-rw-r--r--drivers/infiniband/hw/mlx5/main.c510
-rw-r--r--drivers/infiniband/hw/mlx5/mem.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h98
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c14
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c123
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c491
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c5
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c6
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c44
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c3
-rw-r--r--drivers/infiniband/hw/nes/nes.h9
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c63
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c74
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c3
-rw-r--r--drivers/infiniband/hw/qedr/main.c73
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h2
-rw-r--r--drivers/infiniband/hw/qedr/qedr_roce_cm.c4
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c342
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c101
-rw-r--r--drivers/infiniband/hw/qib/qib_uc.c12
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c17
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c47
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_debugfs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c39
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c74
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c16
-rw-r--r--drivers/infiniband/hw/usnic/usnic_transport.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c91
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h3
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c46
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/Kconfig2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c677
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h42
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c15
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c13
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c39
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h5
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c35
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c49
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c55
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_recv.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c17
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c29
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c36
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c2
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c9
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c2
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c3
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c19
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c28
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c22
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/amd_iommu_init.c2
-rw-r--r--drivers/iommu/arm-smmu-v3.c140
-rw-r--r--drivers/iommu/arm-smmu.c106
-rw-r--r--drivers/iommu/dma-iommu.c55
-rw-r--r--drivers/iommu/fsl_pamu.c2
-rw-r--r--drivers/iommu/fsl_pamu_domain.c119
-rw-r--r--drivers/iommu/intel-iommu-debugfs.c314
-rw-r--r--drivers/iommu/intel-iommu.c32
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c11
-rw-r--r--drivers/iommu/io-pgtable-arm.c23
-rw-r--r--drivers/iommu/io-pgtable.h5
-rw-r--r--drivers/iommu/iommu.c58
-rw-r--r--drivers/iommu/iova.c22
-rw-r--r--drivers/iommu/ipmmu-vmsa.c5
-rw-r--r--drivers/iommu/of_iommu.c25
-rw-r--r--drivers/irqchip/Kconfig3
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c249
-rw-r--r--drivers/irqchip/irq-gic-v3.c85
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c253
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c507
-rw-r--r--drivers/irqchip/irq-sifive-plic.c10
-rw-r--r--drivers/irqchip/qcom-pdc.c1
-rw-r--r--drivers/macintosh/adb-iop.c50
-rw-r--r--drivers/macintosh/adb.c8
-rw-r--r--drivers/macintosh/adbhid.c53
-rw-r--r--drivers/macintosh/macio_asic.c8
-rw-r--r--drivers/macintosh/macio_sysfs.c8
-rw-r--r--drivers/macintosh/via-cuda.c35
-rw-r--r--drivers/macintosh/via-macii.c352
-rw-r--r--drivers/macintosh/via-pmu.c33
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c25
-rw-r--r--drivers/md/Kconfig11
-rw-r--r--drivers/md/dm-cache-policy-smq.c2
-rw-r--r--drivers/md/dm-core.h10
-rw-r--r--drivers/md/dm-crypt.c15
-rw-r--r--drivers/md/dm-flakey.c30
-rw-r--r--drivers/md/dm-integrity.c23
-rw-r--r--drivers/md/dm-ioctl.c18
-rw-r--r--drivers/md/dm-linear.c35
-rw-r--r--drivers/md/dm-mpath.c26
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-rq.c316
-rw-r--r--drivers/md/dm-rq.h4
-rw-r--r--drivers/md/dm-sysfs.c3
-rw-r--r--drivers/md/dm-table.c56
-rw-r--r--drivers/md/dm-thin.c8
-rw-r--r--drivers/md/dm-verity-fec.c5
-rw-r--r--drivers/md/dm-writecache.c5
-rw-r--r--drivers/md/dm-zoned-metadata.c80
-rw-r--r--drivers/md/dm-zoned-target.c23
-rw-r--r--drivers/md/dm.c194
-rw-r--r--drivers/md/dm.h1
-rw-r--r--drivers/md/md-bitmap.c9
-rw-r--r--drivers/md/md-cluster.c234
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c113
-rw-r--r--drivers/md/md.h1
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/md/raid10.c109
-rw-r--r--drivers/md/raid5-cache.c2
-rw-r--r--drivers/md/raid5.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c5
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c5
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h2
-rw-r--r--drivers/message/fusion/mptbase.c12
-rw-r--r--drivers/message/fusion/mptsas.c8
-rw-r--r--drivers/mfd/Kconfig26
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/adp5520.c2
-rw-r--r--drivers/mfd/arizona-core.c10
-rw-r--r--drivers/mfd/at91-usart.c72
-rw-r--r--drivers/mfd/cros_ec.c3
-rw-r--r--drivers/mfd/cros_ec_dev.c1
-rw-r--r--drivers/mfd/intel_msic.c49
-rw-r--r--drivers/mfd/intel_soc_pmic_bxtwc.c56
-rw-r--r--drivers/mfd/intel_soc_pmic_chtdc_ti.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_chtwc.c5
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c25
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h12
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c78
-rw-r--r--drivers/mfd/madera-core.c33
-rw-r--r--drivers/mfd/max14577.c28
-rw-r--r--drivers/mfd/max77620.c2
-rw-r--r--drivers/mfd/max77686.c32
-rw-r--r--drivers/mfd/max77693.c34
-rw-r--r--drivers/mfd/max77843.c19
-rw-r--r--drivers/mfd/max8997-irq.c30
-rw-r--r--drivers/mfd/max8997.c40
-rw-r--r--drivers/mfd/max8998-irq.c18
-rw-r--r--drivers/mfd/max8998.c28
-rw-r--r--drivers/mfd/mc13xxx-core.c3
-rw-r--r--drivers/mfd/menelaus.c13
-rw-r--r--drivers/mfd/motorola-cpcap.c51
-rw-r--r--drivers/mfd/sec-core.c16
-rw-r--r--drivers/mfd/sec-irq.c24
-rw-r--r--drivers/mfd/ti-lmu.c91
-rw-r--r--drivers/mfd/ti_am335x_tscadc.c14
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c2
-rw-r--r--drivers/misc/ad525x_dpot-spi.c2
-rw-r--r--drivers/misc/ad525x_dpot.c6
-rw-r--r--drivers/misc/apds990x.c1
-rw-r--r--drivers/misc/bh1770glc.c3
-rw-r--r--drivers/misc/cxl/flash.c4
-rw-r--r--drivers/misc/cxl/guest.c2
-rw-r--r--drivers/misc/echo/echo.c2
-rw-r--r--drivers/misc/eeprom/Kconfig11
-rw-r--r--drivers/misc/eeprom/Makefile1
-rw-r--r--drivers/misc/eeprom/at25.c13
-rw-r--r--drivers/misc/eeprom/ee1004.c281
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c19
-rw-r--r--drivers/misc/genwqe/card_base.c1
-rw-r--r--drivers/misc/genwqe/card_ddcb.c1
-rw-r--r--drivers/misc/genwqe/card_utils.c15
-rw-r--r--drivers/misc/kgdbts.c16
-rw-r--r--drivers/misc/lkdtm/usercopy.c2
-rw-r--r--drivers/misc/mei/bus-fixup.c1
-rw-r--r--drivers/misc/mei/main.c4
-rw-r--r--drivers/misc/mic/scif/scif_dma.c9
-rw-r--r--drivers/misc/mic/scif/scif_fence.c2
-rw-r--r--drivers/misc/ocxl/config.c4
-rw-r--r--drivers/misc/sgi-gru/grukservices.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_channel.c6
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c3
-rw-r--r--drivers/misc/sgi-xp/xpc_sn2.c2
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/misc/sram.c6
-rw-r--r--drivers/misc/vmw_balloon.c1802
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_host.c4
-rw-r--r--drivers/misc/vmw_vmci/vmci_resource.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c20
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c1
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c9
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c12
-rw-r--r--drivers/net/ppp/ppp_mppe.c27
-rw-r--r--drivers/nfc/nfcmrvl/uart.c5
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/dimm.c6
-rw-r--r--drivers/nvdimm/dimm_devs.c60
-rw-r--r--drivers/nvdimm/label.c144
-rw-r--r--drivers/nvdimm/label.h4
-rw-r--r--drivers/nvdimm/namespace_devs.c1
-rw-r--r--drivers/nvdimm/nd-core.h1
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/pfn_devs.c61
-rw-r--r--drivers/nvdimm/pmem.c4
-rw-r--r--drivers/nvdimm/region_devs.c11
-rw-r--r--drivers/nvme/host/core.c4
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c98
-rw-r--r--drivers/nvme/target/configfs.c47
-rw-r--r--drivers/nvme/target/core.c180
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c3
-rw-r--r--drivers/nvme/target/nvmet.h17
-rw-r--r--drivers/nvme/target/rdma.c22
-rw-r--r--drivers/nvmem/core.c533
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c7
-rw-r--r--drivers/nvmem/mxs-ocotp.c4
-rw-r--r--drivers/nvmem/sunxi_sid.c22
-rw-r--r--drivers/of/base.c149
-rw-r--r--drivers/of/device.c5
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/of/of_mdio.c12
-rw-r--r--drivers/of/of_numa.c19
-rw-r--r--drivers/of/of_private.h8
-rw-r--r--drivers/of/overlay.c4
-rw-r--r--drivers/of/platform.c8
-rw-r--r--drivers/of/unittest-data/overlay_15.dts4
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi4
-rw-r--r--drivers/of/unittest.c29
-rw-r--r--drivers/pci/Kconfig20
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/access.c4
-rw-r--r--drivers/pci/controller/Kconfig4
-rw-r--r--drivers/pci/controller/dwc/Makefile2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c11
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c176
-rw-r--r--drivers/pci/controller/dwc/pci-keystone-dw.c484
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c788
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.h57
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.h4
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c56
-rw-r--r--drivers/pci/controller/pci-aardvark.c129
-rw-r--r--drivers/pci/controller/pci-host-common.c8
-rw-r--r--drivers/pci/controller/pci-mvebu.c384
-rw-r--r--drivers/pci/controller/pcie-cadence-ep.c13
-rw-r--r--drivers/pci/controller/pcie-cadence-host.c7
-rw-r--r--drivers/pci/controller/pcie-cadence.c20
-rw-r--r--drivers/pci/controller/pcie-iproc.c8
-rw-r--r--drivers/pci/controller/pcie-mediatek.c321
-rw-r--r--drivers/pci/controller/pcie-mobiveil.c7
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c9
-rw-r--r--drivers/pci/controller/pcie-xilinx.c7
-rw-r--r--drivers/pci/controller/vmd.c2
-rw-r--r--drivers/pci/hotplug/TODO74
-rw-r--r--drivers/pci/hotplug/acpiphp.h10
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c36
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpci_hotplug.h11
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_core.c105
-rw-r--r--drivers/pci/hotplug/cpci_hotplug_pci.c6
-rw-r--r--drivers/pci/hotplug/cpqphp.h9
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c61
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c31
-rw-r--r--drivers/pci/hotplug/ibmphp.h9
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c121
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c70
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c53
-rw-r--r--drivers/pci/hotplug/pciehp.h133
-rw-r--r--drivers/pci/hotplug/pciehp_core.c168
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c263
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c184
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c41
-rw-r--r--drivers/pci/hotplug/pnv_php.c40
-rw-r--r--drivers/pci/hotplug/rpaphp.h10
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c20
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c11
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c22
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c44
-rw-r--r--drivers/pci/hotplug/sgi_hotplug.c63
-rw-r--r--drivers/pci/hotplug/shpchp.h8
-rw-r--r--drivers/pci/hotplug/shpchp_core.c48
-rw-r--r--drivers/pci/hotplug/shpchp_ctrl.c21
-rw-r--r--drivers/pci/iov.c3
-rw-r--r--drivers/pci/msi.c9
-rw-r--r--drivers/pci/of.c101
-rw-r--r--drivers/pci/p2pdma.c805
-rw-r--r--drivers/pci/pci-acpi.c63
-rw-r--r--drivers/pci/pci-bridge-emul.c408
-rw-r--r--drivers/pci/pci-bridge-emul.h124
-rw-r--r--drivers/pci/pci.c112
-rw-r--r--drivers/pci/pci.h78
-rw-r--r--drivers/pci/pcie/Kconfig4
-rw-r--r--drivers/pci/pcie/aer.c239
-rw-r--r--drivers/pci/pcie/aer_inject.c96
-rw-r--r--drivers/pci/pcie/aspm.c4
-rw-r--r--drivers/pci/pcie/dpc.c72
-rw-r--r--drivers/pci/pcie/err.c281
-rw-r--r--drivers/pci/pcie/pme.c30
-rw-r--r--drivers/pci/pcie/portdrv.h32
-rw-r--r--drivers/pci/pcie/portdrv_core.c21
-rw-r--r--drivers/pci/pcie/portdrv_pci.c31
-rw-r--r--drivers/pci/probe.c24
-rw-r--r--drivers/pci/quirks.c96
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/pci/setup-bus.c28
-rw-r--r--drivers/pci/slot.c3
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/Makefile2
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-bcm-cygnus-pcie.c4
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c74
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c4
-rw-r--r--drivers/phy/cadence/Kconfig10
-rw-r--r--drivers/phy/cadence/Makefile1
-rw-r--r--drivers/phy/cadence/phy-cadence-dp.c541
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c5
-rw-r--r--drivers/phy/marvell/Kconfig11
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-berlin-sata.c6
-rw-r--r--drivers/phy/marvell/phy-pxa-usb.c345
-rw-r--r--drivers/phy/qualcomm/Kconfig17
-rw-r--r--drivers/phy/qualcomm/Makefile4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c222
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h15
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qusb2.c4
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs-i.h2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-ufs.c50
-rw-r--r--drivers/phy/renesas/Kconfig1
-rw-r--r--drivers/phy/renesas/Makefile1
-rw-r--r--drivers/phy/renesas/phy-rcar-gen2.c5
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb2.c86
-rw-r--r--drivers/phy/renesas/phy-rcar-gen3-usb3.c5
-rw-r--r--drivers/phy/rockchip/Kconfig8
-rw-r--r--drivers/phy/rockchip/Makefile1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-emmc.c4
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-hdmi.c1277
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c8
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c8
-rw-r--r--drivers/phy/rockchip/phy-rockchip-usb.c145
-rw-r--r--drivers/phy/socionext/Kconfig34
-rw-r--r--drivers/phy/socionext/Makefile8
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c240
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb2.c244
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c422
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c349
-rw-r--r--drivers/phy/tegra/xusb.c4
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c29
-rw-r--r--drivers/platform/goldfish/goldfish_pipe.c399
-rw-r--r--drivers/platform/goldfish/goldfish_pipe_qemu.h98
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/asus-wmi.c39
-rw-r--r--drivers/platform/x86/eeepc-laptop.c43
-rw-r--r--drivers/platform/x86/intel_cht_int33fe.c27
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c119
-rw-r--r--drivers/power/reset/qcom-pon.c1
-rw-r--r--drivers/power/reset/rmobile-reset.c5
-rw-r--r--drivers/power/supply/Kconfig7
-rw-r--r--drivers/power/supply/Makefile1
-rw-r--r--drivers/power/supply/ab8500_fg.c52
-rw-r--r--drivers/power/supply/bq25890_charger.c62
-rw-r--r--drivers/power/supply/bq27xxx_battery.c9
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c2
-rw-r--r--drivers/power/supply/cros_usbpd-charger.c117
-rw-r--r--drivers/power/supply/ds2780_battery.c2
-rw-r--r--drivers/power/supply/ds2781_battery.c2
-rw-r--r--drivers/power/supply/ds2782_battery.c2
-rw-r--r--drivers/power/supply/max14577_charger.c22
-rw-r--r--drivers/power/supply/max17040_battery.c18
-rw-r--r--drivers/power/supply/max17042_battery.c32
-rw-r--r--drivers/power/supply/max77693_charger.c22
-rw-r--r--drivers/power/supply/max8925_power.c1
-rw-r--r--drivers/power/supply/max8997_charger.c26
-rw-r--r--drivers/power/supply/max8998_charger.c28
-rw-r--r--drivers/power/supply/power_supply_sysfs.c3
-rw-r--r--drivers/power/supply/sc2731_charger.c504
-rw-r--r--drivers/power/supply/twl4030_charger.c35
-rw-r--r--drivers/reset/reset-imx7.c1
-rw-r--r--drivers/s390/crypto/Makefile4
-rw-r--r--drivers/s390/crypto/vfio_ap_drv.c157
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c939
-rw-r--r--drivers/s390/crypto/vfio_ap_private.h88
-rw-r--r--drivers/s390/net/ism_drv.c4
-rw-r--r--drivers/scsi/3w-9xxx.c50
-rw-r--r--drivers/scsi/3w-sas.c38
-rw-r--r--drivers/scsi/3w-xxxx.c20
-rw-r--r--drivers/scsi/3w-xxxx.h1
-rw-r--r--drivers/scsi/53c700.h2
-rw-r--r--drivers/scsi/BusLogic.c36
-rw-r--r--drivers/scsi/FlashPoint.c6
-rw-r--r--drivers/scsi/Kconfig35
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/NCR5380.c167
-rw-r--r--drivers/scsi/NCR5380.h2
-rw-r--r--drivers/scsi/a100u2w.c20
-rw-r--r--drivers/scsi/aacraid/aachba.c7
-rw-r--r--drivers/scsi/aacraid/commsup.c2
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/advansys.c4
-rw-r--r--drivers/scsi/aic7xxx/aic7770.c6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx.h6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c44
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_pci.c8
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx.h6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_93cx6.c6
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c41
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_pci.c7
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.h4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_gram.y4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_scan.l4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c4
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h4
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c9
-rw-r--r--drivers/scsi/aic94xx/aic94xx_scb.c5
-rw-r--r--drivers/scsi/aic94xx/aic94xx_task.c46
-rw-r--r--drivers/scsi/am53c974.c54
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c29
-rw-r--r--drivers/scsi/atp870u.c6
-rw-r--r--drivers/scsi/be2iscsi/be_cmds.c10
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c15
-rw-r--r--drivers/scsi/be2iscsi/be_main.c75
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c27
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h2
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.c108
-rw-r--r--drivers/scsi/bfa/bfa_fcbuild.h9
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c3
-rw-r--r--drivers/scsi/csiostor/csio_init.c8
-rw-r--r--drivers/scsi/csiostor/csio_lnode.c6
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c12
-rw-r--r--drivers/scsi/csiostor/csio_wr.c17
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c154
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h3
-rw-r--r--drivers/scsi/dc395x.c191
-rw-r--r--drivers/scsi/esp_scsi.c286
-rw-r--r--drivers/scsi/esp_scsi.h38
-rw-r--r--drivers/scsi/fnic/fnic_fcs.c41
-rw-r--r--drivers/scsi/fnic/fnic_main.c19
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c99
-rw-r--r--drivers/scsi/fnic/vnic_dev.c26
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c161
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c15
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c15
-rw-r--r--drivers/scsi/hpsa.c148
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c1
-rw-r--r--drivers/scsi/ips.c81
-rw-r--r--drivers/scsi/isci/host.c8
-rw-r--r--drivers/scsi/isci/host.h2
-rw-r--r--drivers/scsi/isci/request.c4
-rw-r--r--drivers/scsi/isci/task.c4
-rw-r--r--drivers/scsi/iscsi_tcp.c3
-rw-r--r--drivers/scsi/jazz_esp.c30
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_rport.c22
-rw-r--r--drivers/scsi/libsas/sas_ata.c2
-rw-r--r--drivers/scsi/libsas/sas_discover.c2
-rw-r--r--drivers/scsi/libsas/sas_expander.c22
-rw-r--r--drivers/scsi/lpfc/lpfc.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c344
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h36
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h45
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c111
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c19
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c20
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c34
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c310
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c14
-rw-r--r--drivers/scsi/mac_esp.c217
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c117
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.h2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c153
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c16
-rw-r--r--drivers/scsi/mesh.c8
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c1189
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h9
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_config.c89
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c527
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1488
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c355
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c101
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_warpdrive.c70
-rw-r--r--drivers/scsi/mvsas/mv_init.c21
-rw-r--r--drivers/scsi/mvsas/mv_sas.c12
-rw-r--r--drivers/scsi/mvumi.c89
-rw-r--r--drivers/scsi/myrb.c3656
-rw-r--r--drivers/scsi/myrb.h958
-rw-r--r--drivers/scsi/myrs.c3268
-rw-r--r--drivers/scsi/myrs.h1134
-rw-r--r--drivers/scsi/nsp32.c18
-rw-r--r--drivers/scsi/osd/osd_initiator.c2
-rw-r--r--drivers/scsi/pm8001/pm8001_defs.h8
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c31
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.h4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c31
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c49
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c119
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h9
-rw-r--r--drivers/scsi/qedf/qedf_main.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla1280.c26
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c587
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h32
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c536
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c412
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h23
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c84
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c52
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c319
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c542
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h8
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c51
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c10
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c26
-rw-r--r--drivers/scsi/raid_class.c4
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_lib.c5
-rw-r--r--drivers/scsi/scsi_transport_sas.c2
-rw-r--r--drivers/scsi/sd.c15
-rw-r--r--drivers/scsi/sd.h15
-rw-r--r--drivers/scsi/sd_zbc.c501
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c100
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c11
-rw-r--r--drivers/scsi/snic/snic_disc.c7
-rw-r--r--drivers/scsi/snic/snic_io.c25
-rw-r--r--drivers/scsi/snic/snic_main.c24
-rw-r--r--drivers/scsi/snic/snic_scsi.c15
-rw-r--r--drivers/scsi/snic/vnic_dev.c29
-rw-r--r--drivers/scsi/sun3x_esp.c30
-rw-r--r--drivers/scsi/sun_esp.c61
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c15
-rw-r--r--drivers/scsi/ufs/Kconfig19
-rw-r--r--drivers/scsi/ufs/Makefile3
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c82
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h5
-rw-r--r--drivers/scsi/ufs/ufs.h94
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c210
-rw-r--r--drivers/scsi/ufs/ufs_bsg.h23
-rw-r--r--drivers/scsi/ufs/ufshcd.c431
-rw-r--r--drivers/scsi/ufs/ufshcd.h12
-rw-r--r--drivers/scsi/ufs/ufshci.h25
-rw-r--r--drivers/scsi/vmw_pvscsi.c77
-rw-r--r--drivers/scsi/zorro_esp.c290
-rw-r--r--drivers/slimbus/core.c37
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c34
-rw-r--r--drivers/soc/dove/pmu.c8
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c58
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c2
-rw-r--r--drivers/soc/fsl/qe/qe_tdm.c4
-rw-r--r--drivers/soc/qcom/apr.c2
-rw-r--r--drivers/soc/rockchip/pm_domains.c44
-rw-r--r--drivers/soc/tegra/pmc.c12
-rw-r--r--drivers/soc/ti/knav_dma.c8
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c8
-rw-r--r--drivers/soundwire/bus.c6
-rw-r--r--drivers/soundwire/bus.h4
-rw-r--r--drivers/soundwire/intel.c68
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/stream.c488
-rw-r--r--drivers/spi/Kconfig8
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-at91-usart.c432
-rw-r--r--drivers/staging/erofs/namei.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c28
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c34
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c26
-rw-r--r--drivers/target/iscsi/cxgbit/cxgbit_cm.c23
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c15
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.h2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c44
-rw-r--r--drivers/target/iscsi/iscsi_target_util.h1
-rw-r--r--drivers/target/target_core_iblock.c58
-rw-r--r--drivers/target/target_core_iblock.h1
-rw-r--r--drivers/target/target_core_sbc.c23
-rw-r--r--drivers/target/target_core_transport.c19
-rw-r--r--drivers/target/target_core_xcopy.c3
-rw-r--r--drivers/tc/tc.c8
-rw-r--r--drivers/thermal/Kconfig2
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/armada_thermal.c4
-rw-r--r--drivers/thermal/da9062-thermal.c4
-rw-r--r--drivers/thermal/hisi_thermal.c249
-rw-r--r--drivers/thermal/imx_thermal.c31
-rw-r--r--drivers/thermal/of-thermal.c152
-rw-r--r--drivers/thermal/qcom-spmi-temp-alarm.c158
-rw-r--r--drivers/thermal/qcom/tsens-8916.c12
-rw-r--r--drivers/thermal/qcom/tsens-8960.c41
-rw-r--r--drivers/thermal/qcom/tsens-8974.c12
-rw-r--r--drivers/thermal/qcom/tsens-common.c62
-rw-r--r--drivers/thermal/qcom/tsens-v2.c8
-rw-r--r--drivers/thermal/qcom/tsens.c19
-rw-r--r--drivers/thermal/qcom/tsens.h23
-rw-r--r--drivers/thermal/qoriq_thermal.c5
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c11
-rw-r--r--drivers/thermal/st/Kconfig14
-rw-r--r--drivers/thermal/st/Makefile1
-rw-r--r--drivers/thermal/st/stm_thermal.c760
-rw-r--r--drivers/thunderbolt/cap.c3
-rw-r--r--drivers/thunderbolt/ctl.c12
-rw-r--r--drivers/thunderbolt/ctl.h3
-rw-r--r--drivers/thunderbolt/dma_port.c5
-rw-r--r--drivers/thunderbolt/dma_port.h5
-rw-r--r--drivers/thunderbolt/domain.c7
-rw-r--r--drivers/thunderbolt/eeprom.c5
-rw-r--r--drivers/thunderbolt/icm.c5
-rw-r--r--drivers/thunderbolt/nhi.c33
-rw-r--r--drivers/thunderbolt/nhi.h3
-rw-r--r--drivers/thunderbolt/nhi_regs.h1
-rw-r--r--drivers/thunderbolt/path.c26
-rw-r--r--drivers/thunderbolt/property.c5
-rw-r--r--drivers/thunderbolt/switch.c71
-rw-r--r--drivers/thunderbolt/tb.c10
-rw-r--r--drivers/thunderbolt/tb.h9
-rw-r--r--drivers/thunderbolt/tb_msgs.h5
-rw-r--r--drivers/thunderbolt/tb_regs.h3
-rw-r--r--drivers/thunderbolt/xdomain.c5
-rw-r--r--drivers/tty/serial/Kconfig1
-rw-r--r--drivers/tty/serial/atmel_serial.c42
-rw-r--r--drivers/uio/uio.c35
-rw-r--r--drivers/uio/uio_dmem_genirq.c3
-rw-r--r--drivers/uio/uio_fsl_elbc_gpcm.c3
-rw-r--r--drivers/uio/uio_hv_generic.c116
-rw-r--r--drivers/uio/uio_pdrv_genirq.c3
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c19
-rw-r--r--drivers/usb/chipidea/core.c19
-rw-r--r--drivers/usb/chipidea/host.c9
-rw-r--r--drivers/usb/chipidea/otg.c9
-rw-r--r--drivers/usb/chipidea/otg.h3
-rw-r--r--drivers/usb/chipidea/udc.c9
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c4
-rw-r--r--drivers/usb/class/usbtmc.c1583
-rw-r--r--drivers/usb/core/buffer.c8
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/generic.c27
-rw-r--r--drivers/usb/core/hcd.c14
-rw-r--r--drivers/usb/core/hub.c42
-rw-r--r--drivers/usb/core/phy.c7
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/dwc2/core.h29
-rw-r--r--drivers/usb/dwc2/debugfs.c1
-rw-r--r--drivers/usb/dwc2/gadget.c121
-rw-r--r--drivers/usb/dwc2/hcd.c48
-rw-r--r--drivers/usb/dwc2/hw.h15
-rw-r--r--drivers/usb/dwc2/params.c7
-rw-r--r--drivers/usb/dwc2/platform.c8
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.c2
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c212
-rw-r--r--drivers/usb/dwc3/gadget.c29
-rw-r--r--drivers/usb/early/xhci-dbc.c3
-rw-r--r--drivers/usb/gadget/function/f_uac2.c216
-rw-r--r--drivers/usb/gadget/function/f_uvc.c57
-rw-r--r--drivers/usb/gadget/function/u_uvc.h3
-rw-r--r--drivers/usb/gadget/function/uvc.h16
-rw-r--r--drivers/usb/gadget/function/uvc_configfs.c1168
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c4
-rw-r--r--drivers/usb/gadget/function/uvc_video.c48
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c8
-rw-r--r--drivers/usb/gadget/udc/core.c9
-rw-r--r--drivers/usb/gadget/udc/fotg210-udc.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c36
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c3
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c14
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c2
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-hcd.c11
-rw-r--r--drivers/usb/host/ehci-mv.c181
-rw-r--r--drivers/usb/host/ehci-q.c4
-rw-r--r--drivers/usb/host/ehci-timer.c2
-rw-r--r--drivers/usb/host/ehci.h4
-rw-r--r--drivers/usb/host/fotg210-hcd.c50
-rw-r--r--drivers/usb/host/fotg210.h7
-rw-r--r--drivers/usb/host/ohci-at91.c2
-rw-r--r--drivers/usb/host/pci-quirks.c12
-rw-r--r--drivers/usb/host/xhci-hub.c5
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c429
-rw-r--r--drivers/usb/host/xhci-mtk.h23
-rw-r--r--drivers/usb/host/xhci-pci.c24
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci-ring.c20
-rw-r--r--drivers/usb/host/xhci-tegra.c144
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/misc/appledisplay.c7
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/trancevibrator.c4
-rw-r--r--drivers/usb/mtu3/mtu3_core.c4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c22
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c8
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c113
-rw-r--r--drivers/usb/renesas_usbhs/common.h5
-rw-r--r--drivers/usb/renesas_usbhs/rcar3.c27
-rw-r--r--drivers/usb/serial/cypress_m8.c7
-rw-r--r--drivers/usb/serial/ftdi_sio.c391
-rw-r--r--drivers/usb/serial/ftdi_sio.h28
-rw-r--r--drivers/usb/storage/Kconfig23
-rw-r--r--drivers/usb/storage/isd200.c2
-rw-r--r--drivers/usb/typec/Kconfig45
-rw-r--r--drivers/usb/typec/Makefile6
-rw-r--r--drivers/usb/typec/class.c40
-rw-r--r--drivers/usb/typec/fusb302/Kconfig7
-rw-r--r--drivers/usb/typec/fusb302/Makefile2
-rw-r--r--drivers/usb/typec/tcpm/Kconfig52
-rw-r--r--drivers/usb/typec/tcpm/Makefile7
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c (renamed from drivers/usb/typec/fusb302/fusb302.c)75
-rw-r--r--drivers/usb/typec/tcpm/fusb302_reg.h (renamed from drivers/usb/typec/fusb302/fusb302_reg.h)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c (renamed from drivers/usb/typec/tcpci.c)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h (renamed from drivers/usb/typec/tcpci.h)0
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c (renamed from drivers/usb/typec/tcpci_rt1711h.c)0
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c (renamed from drivers/usb/typec/tcpm.c)17
-rw-r--r--drivers/usb/typec/tcpm/wcove.c (renamed from drivers/usb/typec/typec_wcove.c)0
-rw-r--r--drivers/usb/usbip/vudc_main.c10
-rw-r--r--drivers/usb/wusbcore/crypto.c16
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c6
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c23
-rw-r--r--drivers/video/backlight/Kconfig16
-rw-r--r--drivers/video/backlight/Makefile2
-rw-r--r--drivers/video/backlight/adp5520_bl.c2
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/ld9040.c811
-rw-r--r--drivers/video/backlight/ld9040_gamma.h202
-rw-r--r--drivers/video/backlight/lm3639_bl.c6
-rw-r--r--drivers/video/backlight/pwm_bl.c81
-rw-r--r--drivers/video/backlight/s6e63m0.c857
-rw-r--r--drivers/video/backlight/s6e63m0_gamma.h266
-rw-r--r--drivers/video/fbdev/chipsfb.c3
-rw-r--r--drivers/video/fbdev/controlfb.c5
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c2
-rw-r--r--drivers/video/fbdev/platinumfb.c5
-rw-r--r--drivers/video/fbdev/valkyriefb.c12
-rw-r--r--drivers/vme/vme.c1
-rw-r--r--drivers/w1/masters/omap_hdq.c2
-rw-r--r--drivers/w1/slaves/w1_ds2438.c66
-rw-r--r--fs/aio.c8
-rw-r--r--fs/btrfs/inode.c12
-rw-r--r--fs/cifs/cifs_debug.c17
-rw-r--r--fs/cifs/cifs_debug.h28
-rw-r--r--fs/cifs/cifs_dfs_ref.c7
-rw-r--r--fs/cifs/cifs_fs_sb.h1
-rw-r--r--fs/cifs/cifs_ioctl.h11
-rw-r--r--fs/cifs/cifsfs.c30
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/cifsglob.h14
-rw-r--r--fs/cifs/cifsproto.h2
-rw-r--r--fs/cifs/cifssmb.c23
-rw-r--r--fs/cifs/connect.c13
-rw-r--r--fs/cifs/file.c56
-rw-r--r--fs/cifs/inode.c73
-rw-r--r--fs/cifs/ioctl.c48
-rw-r--r--fs/cifs/misc.c2
-rw-r--r--fs/cifs/smb2glob.h2
-rw-r--r--fs/cifs/smb2inode.c332
-rw-r--r--fs/cifs/smb2maperror.c2
-rw-r--r--fs/cifs/smb2ops.c212
-rw-r--r--fs/cifs/smb2pdu.c260
-rw-r--r--fs/cifs/smb2pdu.h13
-rw-r--r--fs/cifs/smb2proto.h28
-rw-r--r--fs/cifs/smbdirect.c38
-rw-r--r--fs/cifs/trace.h109
-rw-r--r--fs/cifs/transport.c78
-rw-r--r--fs/compat_binfmt_elf.c2
-rw-r--r--fs/compat_ioctl.c69
-rw-r--r--fs/crypto/fscrypt_private.h4
-rw-r--r--fs/crypto/keyinfo.c10
-rw-r--r--fs/exec.c8
-rw-r--r--fs/kernfs/symlink.c5
-rw-r--r--fs/nfs/delegation.c17
-rw-r--r--fs/nfs/dir.c295
-rw-r--r--fs/nfs/filelayout/filelayout.c1
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c1
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayoutdev.c2
-rw-r--r--fs/nfs/inode.c70
-rw-r--r--fs/nfs/nfs3proc.c5
-rw-r--r--fs/nfs/nfs3xdr.c10
-rw-r--r--fs/nfs/nfs4_fs.h3
-rw-r--r--fs/nfs/nfs4client.c16
-rw-r--r--fs/nfs/nfs4proc.c53
-rw-r--r--fs/nfs/nfs4state.c254
-rw-r--r--fs/nfs/nfs4xdr.c2
-rw-r--r--fs/nfs/pagelist.c49
-rw-r--r--fs/nfs/pnfs.c16
-rw-r--r--fs/nfs/pnfs.h1
-rw-r--r--fs/nfs/read.c10
-rw-r--r--fs/read_write.c2
-rw-r--r--fs/select.c20
-rw-r--r--fs/stat.c3
-rw-r--r--fs/timerfd.c12
-rw-r--r--fs/utimes.c73
-rw-r--r--include/acpi/acpi_bus.h8
-rw-r--r--include/asm-generic/compat.h24
-rw-r--r--include/asm-generic/sections.h14
-rw-r--r--include/asm-generic/unistd.h13
-rw-r--r--include/crypto/acompress.h38
-rw-r--r--include/crypto/aead.h51
-rw-r--r--include/crypto/akcipher.h76
-rw-r--r--include/crypto/algapi.h14
-rw-r--r--include/crypto/cbc.h2
-rw-r--r--include/crypto/chacha20.h3
-rw-r--r--include/crypto/hash.h38
-rw-r--r--include/crypto/internal/cryptouser.h8
-rw-r--r--include/crypto/internal/geniv.h2
-rw-r--r--include/crypto/kpp.h51
-rw-r--r--include/crypto/mcryptd.h114
-rw-r--r--include/crypto/morus1280_glue.h2
-rw-r--r--include/crypto/morus640_glue.h2
-rw-r--r--include/crypto/null.h2
-rw-r--r--include/crypto/rng.h29
-rw-r--r--include/crypto/skcipher.h118
-rw-r--r--include/crypto/speck.h62
-rw-r--r--include/dt-bindings/clock/exynos3250.h5
-rw-r--r--include/dt-bindings/clock/exynos4.h7
-rw-r--r--include/dt-bindings/clock/exynos5250.h7
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h7
-rw-r--r--include/dt-bindings/clock/exynos5410.h7
-rw-r--r--include/dt-bindings/clock/exynos5420.h7
-rw-r--r--include/dt-bindings/clock/exynos5433.h5
-rw-r--r--include/dt-bindings/clock/exynos7-clk.h7
-rw-r--r--include/dt-bindings/clock/s3c2410.h5
-rw-r--r--include/dt-bindings/clock/s3c2412.h5
-rw-r--r--include/dt-bindings/clock/s3c2443.h5
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h2
-rw-r--r--include/dt-bindings/interrupt-controller/irq.h2
-rw-r--r--include/dt-bindings/mfd/at91-usart.h17
-rw-r--r--include/dt-bindings/reset/imx7-reset.h4
-rw-r--r--include/dt-bindings/thermal/thermal_exynos.h12
-rw-r--r--include/dt-bindings/usb/pd.h26
-rw-r--r--include/linux/acpi.h9
-rw-r--r--include/linux/adxl.h13
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/blkdev.h33
-rw-r--r--include/linux/cgroup.h15
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/compat.h101
-rw-r--r--include/linux/compat_time.h32
-rw-r--r--include/linux/compiler_types.h1
-rw-r--r--include/linux/coresight.h41
-rw-r--r--include/linux/cpufeature.h2
-rw-r--r--include/linux/crc-t10dif.h1
-rw-r--r--include/linux/crypto.h110
-rw-r--r--include/linux/cuda.h4
-rw-r--r--include/linux/device-mapper.h18
-rw-r--r--include/linux/device.h30
-rw-r--r--include/linux/dma-direct.h2
-rw-r--r--include/linux/dma/sprd-dma.h69
-rw-r--r--include/linux/edac.h5
-rw-r--r--include/linux/elfcore-compat.h8
-rw-r--r--include/linux/fpga/fpga-bridge.h4
-rw-r--r--include/linux/fpga/fpga-mgr.h4
-rw-r--r--include/linux/fpga/fpga-region.h4
-rw-r--r--include/linux/fsl/mc.h14
-rw-r--r--include/linux/hid.h28
-rw-r--r--include/linux/hw_random.h3
-rw-r--r--include/linux/hyperv.h14
-rw-r--r--include/linux/intel-iommu.h72
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/iommu.h10
-rw-r--r--include/linux/iova.h1
-rw-r--r--include/linux/irqchip/arm-gic-common.h6
-rw-r--r--include/linux/irqchip/arm-gic-v3.h9
-rw-r--r--include/linux/irqchip/arm-gic.h5
-rw-r--r--include/linux/irqdomain.h1
-rw-r--r--include/linux/kernfs.h9
-rw-r--r--include/linux/libfdt_env.h1
-rw-r--r--include/linux/memremap.h6
-rw-r--r--include/linux/mfd/cros_ec_commands.h11
-rw-r--r--include/linux/mfd/ingenic-tcu.h56
-rw-r--r--include/linux/mfd/intel_msic.h7
-rw-r--r--include/linux/mfd/intel_soc_pmic.h13
-rw-r--r--include/linux/mfd/intel_soc_pmic_bxtwc.h10
-rw-r--r--include/linux/mfd/madera/core.h2
-rw-r--r--include/linux/mfd/madera/pdata.h1
-rw-r--r--include/linux/mfd/max14577-private.h11
-rw-r--r--include/linux/mfd/max14577.h11
-rw-r--r--include/linux/mfd/max77686-private.h15
-rw-r--r--include/linux/mfd/max77686.h15
-rw-r--r--include/linux/mfd/max77693-common.h6
-rw-r--r--include/linux/mfd/max77693-private.h15
-rw-r--r--include/linux/mfd/max77693.h15
-rw-r--r--include/linux/mfd/max77843-private.h6
-rw-r--r--include/linux/mfd/max8997-private.h15
-rw-r--r--include/linux/mfd/max8997.h16
-rw-r--r--include/linux/mfd/max8998-private.h15
-rw-r--r--include/linux/mfd/max8998.h15
-rw-r--r--include/linux/mfd/mc13xxx.h1
-rw-r--r--include/linux/mfd/samsung/core.h11
-rw-r--r--include/linux/mfd/samsung/irq.h10
-rw-r--r--include/linux/mfd/samsung/rtc.h15
-rw-r--r--include/linux/mfd/samsung/s2mpa01.h7
-rw-r--r--include/linux/mfd/samsung/s2mps11.h9
-rw-r--r--include/linux/mfd/samsung/s2mps13.h14
-rw-r--r--include/linux/mfd/samsung/s2mps14.h14
-rw-r--r--include/linux/mfd/samsung/s2mps15.h11
-rw-r--r--include/linux/mfd/samsung/s2mpu02.h14
-rw-r--r--include/linux/mfd/samsung/s5m8763.h10
-rw-r--r--include/linux/mfd/samsung/s5m8767.h10
-rw-r--r--include/linux/mfd/ti-lmu.h3
-rw-r--r--include/linux/mlx5/driver.h23
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/msi.h17
-rw-r--r--include/linux/ndctl.h22
-rw-r--r--include/linux/nfs_fs.h2
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/nfs_xdr.h28
-rw-r--r--include/linux/nvmem-consumer.h100
-rw-r--r--include/linux/nvmem-provider.h50
-rw-r--r--include/linux/of.h30
-rw-r--r--include/linux/of_pci.h10
-rw-r--r--include/linux/pci-dma-compat.h18
-rw-r--r--include/linux/pci-dma.h12
-rw-r--r--include/linux/pci-p2pdma.h114
-rw-r--r--include/linux/pci.h7
-rw-r--r--include/linux/pci_hotplug.h43
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/phy/phy-qcom-ufs.h38
-rw-r--r--include/linux/platform_data/dma-ep93xx.h2
-rw-r--r--include/linux/platform_data/dma-mcf-edma.h38
-rw-r--r--include/linux/platform_data/ehci-sh.h16
-rw-r--r--include/linux/platform_data/mv_usb.h1
-rw-r--r--include/linux/pmu.h4
-rw-r--r--include/linux/power/bq27xxx_battery.h1
-rw-r--r--include/linux/qed/qed_rdma_if.h11
-rw-r--r--include/linux/restart_block.h4
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/soundwire/sdw.h12
-rw-r--r--include/linux/string.h7
-rw-r--r--include/linux/sunrpc/auth.h18
-rw-r--r--include/linux/sunrpc/auth_gss.h1
-rw-r--r--include/linux/sunrpc/bc_xprt.h1
-rw-r--r--include/linux/sunrpc/gss_krb5.h33
-rw-r--r--include/linux/sunrpc/sched.h10
-rw-r--r--include/linux/sunrpc/svc_xprt.h1
-rw-r--r--include/linux/sunrpc/xdr.h11
-rw-r--r--include/linux/sunrpc/xprt.h35
-rw-r--r--include/linux/sunrpc/xprtsock.h36
-rw-r--r--include/linux/swiotlb.h9
-rw-r--r--include/linux/syscalls.h21
-rw-r--r--include/linux/tc.h1
-rw-r--r--include/linux/thunderbolt.h5
-rw-r--r--include/linux/time32.h78
-rw-r--r--include/linux/timekeeping.h12
-rw-r--r--include/linux/timekeeping32.h53
-rw-r--r--include/linux/uio_driver.h1
-rw-r--r--include/linux/usb/chipidea.h6
-rw-r--r--include/linux/wait.h20
-rw-r--r--include/rdma/ib_addr.h11
-rw-r--r--include/rdma/ib_cm.h2
-rw-r--r--include/rdma/ib_sa.h38
-rw-r--r--include/rdma/ib_umem.h9
-rw-r--r--include/rdma/ib_umem_odp.h75
-rw-r--r--include/rdma/ib_verbs.h149
-rw-r--r--include/rdma/rdma_cm.h11
-rw-r--r--include/rdma/rdma_netlink.h4
-rw-r--r--include/rdma/rdma_vt.h51
-rw-r--r--include/rdma/rdmavt_qp.h7
-rw-r--r--include/rdma/restrack.h12
-rw-r--r--include/rdma/uverbs_ioctl.h111
-rw-r--r--include/rdma/uverbs_std_types.h51
-rw-r--r--include/soc/fsl/dpaa2-fd.h242
-rw-r--r--include/soc/fsl/dpaa2-global.h15
-rw-r--r--include/soc/fsl/dpaa2-io.h4
-rw-r--r--include/sound/hda_codec.h (renamed from sound/pci/hda/hda_codec.h)0
-rw-r--r--include/sound/memalloc.h3
-rw-r--r--include/sound/rawmidi.h1
-rw-r--r--include/sound/simple_card_utils.h27
-rw-r--r--include/sound/soc-acpi-intel-match.h6
-rw-r--r--include/sound/soc-dapm.h9
-rw-r--r--include/sound/soc-dpcm.h10
-rw-r--r--include/sound/soc.h45
-rw-r--r--include/target/iscsi/iscsi_target_core.h6
-rw-r--r--include/target/iscsi/iscsi_target_stat.h4
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/f2fs.h1
-rw-r--r--include/trace/events/rpcrdma.h18
-rw-r--r--include/trace/events/sunrpc.h37
-rw-r--r--include/uapi/asm-generic/unistd.h2
-rw-r--r--include/uapi/linux/android/binder.h10
-rw-r--r--include/uapi/linux/blkzoned.h3
-rw-r--r--include/uapi/linux/cryptouser.h52
-rw-r--r--include/uapi/linux/fs.h4
-rw-r--r--include/uapi/linux/input-event-codes.h18
-rw-r--r--include/uapi/linux/kvm.h26
-rw-r--r--include/uapi/linux/ndctl.h52
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/usb/tmc.h41
-rw-r--r--include/uapi/linux/usb/video.h304
-rw-r--r--include/uapi/linux/vfio.h2
-rw-r--r--include/uapi/rdma/ib_user_verbs.h20
-rw-r--r--include/uapi/rdma/mlx5-abi.h16
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h21
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_verbs.h12
-rw-r--r--include/uapi/rdma/rdma_netlink.h3
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h7
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h106
-rw-r--r--include/uapi/sound/asound.h2
-rw-r--r--ipc/mqueue.c8
-rw-r--r--ipc/msg.c6
-rw-r--r--ipc/sem.c10
-rw-r--r--ipc/shm.c6
-rw-r--r--ipc/syscall.c2
-rw-r--r--ipc/util.h2
-rw-r--r--kernel/compat.c8
-rw-r--r--kernel/dma/direct.c2
-rw-r--r--kernel/dma/swiotlb.c326
-rw-r--r--kernel/futex_compat.c2
-rw-r--r--kernel/irq/irqdomain.c5
-rw-r--r--kernel/irq/manage.c8
-rw-r--r--kernel/printk/printk.c86
-rw-r--r--kernel/sched/core.c8
-rw-r--r--kernel/signal.c19
-rw-r--r--kernel/softirq.c6
-rw-r--r--kernel/time/hrtimer.c8
-rw-r--r--kernel/time/posix-stubs.c18
-rw-r--r--kernel/time/posix-timers.c30
-rw-r--r--kernel/time/tick-broadcast.c2
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/time.c97
-rw-r--r--kernel/time/timekeeping.c24
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Kconfig.debug8
-rw-r--r--lib/Makefile4
-rw-r--r--lib/chacha20.c6
-rw-r--r--lib/crc-t10dif.c57
-rw-r--r--lib/memcat_p.c34
-rw-r--r--lib/string.c1
-rw-r--r--lib/test_memcat_p.c115
-rw-r--r--lib/udivmoddi4.c310
-rw-r--r--lib/umoddi3.c (renamed from arch/nios2/boot/linked_dtb.S)25
-rw-r--r--lib/vsprintf.c223
-rw-r--r--mm/util.c7
-rw-r--r--net/bluetooth/bnep/sock.c19
-rw-r--r--net/bluetooth/cmtp/sock.c19
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h2
-rw-r--r--net/bluetooth/hidp/sock.c79
-rw-r--r--net/ceph/crypto.c12
-rw-r--r--net/ceph/crypto.h2
-rw-r--r--net/compat.c10
-rw-r--r--net/core/netclassid_cgroup.c1
-rw-r--r--net/mac802154/llsec.c16
-rw-r--r--net/mac802154/llsec.h2
-rw-r--r--net/rxrpc/ar-internal.h2
-rw-r--r--net/rxrpc/rxkad.c44
-rw-r--r--net/socket.c18
-rw-r--r--net/sunrpc/auth.c310
-rw-r--r--net/sunrpc/auth_generic.c2
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c45
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c87
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_keys.c9
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c53
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c38
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seqnum.c18
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c28
-rw-r--r--net/sunrpc/auth_gss/gss_mech_switch.c28
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c1
-rw-r--r--net/sunrpc/auth_null.c6
-rw-r--r--net/sunrpc/auth_unix.c4
-rw-r--r--net/sunrpc/backchannel_rqst.c1
-rw-r--r--net/sunrpc/clnt.c174
-rw-r--r--net/sunrpc/sched.c178
-rw-r--r--net/sunrpc/socklib.c10
-rw-r--r--net/sunrpc/svc_xprt.c2
-rw-r--r--net/sunrpc/svcsock.c6
-rw-r--r--net/sunrpc/xdr.c34
-rw-r--r--net/sunrpc/xprt.c908
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c20
-rw-r--r--net/sunrpc/xprtrdma/fmr_ops.c131
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c137
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c30
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c15
-rw-r--r--net/sunrpc/xprtrdma/transport.c120
-rw-r--r--net/sunrpc/xprtrdma/verbs.c178
-rw-r--r--net/sunrpc/xprtrdma/xprt_rdma.h18
-rw-r--r--net/sunrpc/xprtsock.c1107
-rw-r--r--samples/mei/mei-amt-version.c2
-rw-r--r--scripts/Makefile3
-rw-r--r--scripts/Makefile.lib2
-rw-r--r--scripts/dtc/Makefile9
-rw-r--r--scripts/dtc/Makefile.dtc4
-rw-r--r--scripts/dtc/checks.c143
-rw-r--r--scripts/dtc/data.c4
-rw-r--r--scripts/dtc/dtc-parser.y16
-rw-r--r--scripts/dtc/dtc.c11
-rw-r--r--scripts/dtc/dtc.h13
-rw-r--r--scripts/dtc/flattree.c2
-rw-r--r--scripts/dtc/libfdt/fdt.c81
-rw-r--r--scripts/dtc/libfdt/fdt_addresses.c35
-rw-r--r--scripts/dtc/libfdt/fdt_overlay.c6
-rw-r--r--scripts/dtc/libfdt/fdt_ro.c199
-rw-r--r--scripts/dtc/libfdt/fdt_rw.c28
-rw-r--r--scripts/dtc/libfdt/fdt_sw.c109
-rw-r--r--scripts/dtc/libfdt/libfdt.h76
-rw-r--r--scripts/dtc/libfdt/libfdt_env.h1
-rw-r--r--scripts/dtc/libfdt/libfdt_internal.h5
-rw-r--r--scripts/dtc/livetree.c12
-rw-r--r--scripts/dtc/treesource.c225
-rwxr-xr-xscripts/dtc/update-dtc-source.sh2
-rw-r--r--scripts/dtc/util.c23
-rw-r--r--scripts/dtc/util.h20
-rw-r--r--scripts/dtc/version_gen.h2
-rw-r--r--scripts/dtc/yamltree.c247
-rw-r--r--security/integrity/digsig.c10
-rw-r--r--security/integrity/evm/evm_crypto.c4
-rw-r--r--security/integrity/ima/ima.h2
-rw-r--r--security/integrity/ima/ima_api.c3
-rw-r--r--security/integrity/ima/ima_crypto.c54
-rw-r--r--security/integrity/ima/ima_fs.c9
-rw-r--r--security/integrity/ima/ima_init.c2
-rw-r--r--security/integrity/ima/ima_main.c2
-rw-r--r--security/integrity/ima/ima_template.c11
-rw-r--r--security/loadpin/Kconfig4
-rw-r--r--security/loadpin/loadpin.c26
-rw-r--r--security/smack/smack_lsm.c15
-rw-r--r--security/smack/smackfs.c3
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c15
-rw-r--r--sound/arm/Kconfig1
-rw-r--r--sound/core/memalloc.c41
-rw-r--r--sound/core/oss/pcm_plugin.c4
-rw-r--r--sound/core/pcm_lib.c21
-rw-r--r--sound/core/rawmidi.c22
-rw-r--r--sound/core/seq/oss/seq_oss_timer.c2
-rw-r--r--sound/core/seq/seq_system.c22
-rw-r--r--sound/core/seq/seq_virmidi.c4
-rw-r--r--sound/core/sgbuf.c15
-rw-r--r--sound/firewire/Kconfig2
-rw-r--r--sound/firewire/amdtp-stream.c78
-rw-r--r--sound/firewire/bebob/bebob.c58
-rw-r--r--sound/firewire/bebob/bebob_maudio.c5
-rw-r--r--sound/firewire/dice/dice.c41
-rw-r--r--sound/firewire/digi00x/digi00x.c35
-rw-r--r--sound/firewire/fireface/ff.c36
-rw-r--r--sound/firewire/fireworks/fireworks.c69
-rw-r--r--sound/firewire/isight.c18
-rw-r--r--sound/firewire/motu/motu.c47
-rw-r--r--sound/firewire/oxfw/oxfw-scs1x.c5
-rw-r--r--sound/firewire/oxfw/oxfw-spkr.c5
-rw-r--r--sound/firewire/oxfw/oxfw-stream.c13
-rw-r--r--sound/firewire/oxfw/oxfw.c63
-rw-r--r--sound/firewire/tascam/tascam.c40
-rw-r--r--sound/hda/ext/hdac_ext_controller.c22
-rw-r--r--sound/i2c/cs8427.c2
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c6
-rw-r--r--sound/isa/sb/sb8_main.c10
-rw-r--r--sound/mips/hal2.c13
-rw-r--r--sound/pci/asihpi/hpios.c2
-rw-r--r--sound/pci/atiixp.c6
-rw-r--r--sound/pci/au88x0/au88x0_core.c6
-rw-r--r--sound/pci/cs46xx/cs46xx_lib.c6
-rw-r--r--sound/pci/emu10k1/emupcm.c3
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_beep.h2
-rw-r--r--sound/pci/hda/hda_bind.c14
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_controller.c36
-rw-r--r--sound/pci/hda/hda_controller.h20
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_generic.c2
-rw-r--r--sound/pci/hda/hda_hwdep.c2
-rw-r--r--sound/pci/hda/hda_intel.c112
-rw-r--r--sound/pci/hda/hda_jack.c2
-rw-r--r--sound/pci/hda/hda_proc.c2
-rw-r--r--sound/pci/hda/hda_sysfs.c2
-rw-r--r--sound/pci/hda/hda_tegra.c20
-rw-r--r--sound/pci/hda/patch_analog.c2
-rw-r--r--sound/pci/hda/patch_ca0110.c2
-rw-r--r--sound/pci/hda/patch_ca0132.c1617
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_cmedia.c2
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c29
-rw-r--r--sound/pci/hda/patch_si3054.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c22
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/intel8x0.c97
-rw-r--r--sound/pci/intel8x0m.c20
-rw-r--r--sound/pci/rme32.c22
-rw-r--r--sound/pci/rme9652/hdspm.c2
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c77
-rw-r--r--sound/soc/amd/acp-pcm-dma.c30
-rw-r--r--sound/soc/amd/acp.h3
-rw-r--r--sound/soc/atmel/Kconfig12
-rw-r--r--sound/soc/atmel/Makefile2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c13
-rw-r--r--sound/soc/atmel/mikroe-proto.c165
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c78
-rw-r--r--sound/soc/bcm/cygnus-ssp.c13
-rw-r--r--sound/soc/codecs/Kconfig36
-rw-r--r--sound/soc/codecs/Makefile10
-rw-r--r--sound/soc/codecs/adau1761.c3
-rw-r--r--sound/soc/codecs/adau17x1.c86
-rw-r--r--sound/soc/codecs/adau17x1.h4
-rw-r--r--sound/soc/codecs/cs4265.c12
-rw-r--r--sound/soc/codecs/cs42l51.c21
-rw-r--r--sound/soc/codecs/dmic.c1
-rw-r--r--sound/soc/codecs/es8328.c4
-rw-r--r--sound/soc/codecs/hdac_hda.c483
-rw-r--r--sound/soc/codecs/hdac_hda.h24
-rw-r--r--sound/soc/codecs/hdac_hdmi.c11
-rw-r--r--sound/soc/codecs/max98088.c36
-rw-r--r--sound/soc/codecs/max98373.c47
-rw-r--r--sound/soc/codecs/nau8822.c1136
-rw-r--r--sound/soc/codecs/nau8822.h204
-rw-r--r--sound/soc/codecs/pcm186x.c3
-rw-r--r--sound/soc/codecs/pcm3060-i2c.c60
-rw-r--r--sound/soc/codecs/pcm3060-spi.c59
-rw-r--r--sound/soc/codecs/pcm3060.c295
-rw-r--r--sound/soc/codecs/pcm3060.h88
-rw-r--r--sound/soc/codecs/pcm3168a.c82
-rw-r--r--sound/soc/codecs/rt274.c2
-rw-r--r--sound/soc/codecs/rt5514-spi.c14
-rw-r--r--sound/soc/codecs/rt5651.c1
-rw-r--r--sound/soc/codecs/rt5663.c7
-rw-r--r--sound/soc/codecs/rt5668.c10
-rw-r--r--sound/soc/codecs/rt5670.c12
-rw-r--r--sound/soc/codecs/rt5677-spi.c1
-rw-r--r--sound/soc/codecs/rt5682.c86
-rw-r--r--sound/soc/codecs/rt5682.h14
-rw-r--r--sound/soc/codecs/sgtl5000.c2
-rw-r--r--sound/soc/codecs/sta32x.c30
-rw-r--r--sound/soc/codecs/tas5720.c103
-rw-r--r--sound/soc/codecs/tas6424.c58
-rw-r--r--sound/soc/codecs/tas6424.h10
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c85
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h23
-rw-r--r--sound/soc/codecs/tscs454.c2
-rw-r--r--sound/soc/codecs/wm2000.c54
-rw-r--r--sound/soc/codecs/wm8782.c63
-rw-r--r--sound/soc/codecs/wm8904.c1
-rw-r--r--sound/soc/codecs/wm8974.c1
-rw-r--r--sound/soc/codecs/wm9712.c3
-rw-r--r--sound/soc/codecs/wm_adsp.c26
-rw-r--r--sound/soc/davinci/davinci-mcasp.c37
-rw-r--r--sound/soc/fsl/fsl_asrc_dma.c2
-rw-r--r--sound/soc/fsl/fsl_esai.c2
-rw-r--r--sound/soc/fsl/fsl_utils.c4
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c5
-rw-r--r--sound/soc/generic/audio-graph-card.c21
-rw-r--r--sound/soc/generic/audio-graph-scu-card.c55
-rw-r--r--sound/soc/generic/simple-card-utils.c53
-rw-r--r--sound/soc/generic/simple-card.c30
-rw-r--r--sound/soc/generic/simple-scu-card.c54
-rw-r--r--sound/soc/hisilicon/hi6210-i2s.c4
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c4
-rw-r--r--sound/soc/intel/boards/Kconfig22
-rw-r--r--sound/soc/intel/boards/Makefile4
-rw-r--r--sound/soc/intel/boards/broadwell.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c4
-rw-r--r--sound/soc/intel/boards/bytcr_rt5651.c4
-rw-r--r--sound/soc/intel/boards/cht_bsw_rt5672.c9
-rw-r--r--sound/soc/intel/boards/kbl_da7219_max98927.c983
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_max98927.c5
-rw-r--r--sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c5
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.c127
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_common.h38
-rw-r--r--sound/soc/intel/boards/skl_hda_dsp_generic.c183
-rw-r--r--sound/soc/intel/common/Makefile3
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-byt-match.c7
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-hda-match.c40
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-kbl-match.c13
-rw-r--r--sound/soc/intel/common/sst-firmware.c2
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c71
-rw-r--r--sound/soc/intel/skylake/skl-topology.c4
-rw-r--r--sound/soc/intel/skylake/skl.c96
-rw-r--r--sound/soc/intel/skylake/skl.h12
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-cs42448.c13
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-wm8960.c14
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-mt6351.c14
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c13
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c12
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c12
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c12
-rw-r--r--sound/soc/meson/Kconfig13
-rw-r--r--sound/soc/meson/Makefile2
-rw-r--r--sound/soc/meson/axg-card.c16
-rw-r--r--sound/soc/meson/axg-fifo.c2
-rw-r--r--sound/soc/meson/axg-pdm.c654
-rw-r--r--sound/soc/meson/axg-tdm-interface.c50
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c4
-rw-r--r--sound/soc/omap/omap-hdmi-audio.c4
-rw-r--r--sound/soc/pxa/Kconfig13
-rw-r--r--sound/soc/pxa/pxa-ssp.c6
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c48
-rw-r--r--sound/soc/qcom/apq8096.c7
-rw-r--r--sound/soc/qcom/qdsp6/q6adm.c17
-rw-r--r--sound/soc/qcom/qdsp6/q6asm-dai.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6asm.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6core.c9
-rw-r--r--sound/soc/qcom/sdm845.c7
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c1
-rw-r--r--sound/soc/rockchip/rockchip_pcm.c3
-rw-r--r--sound/soc/samsung/tm2_wm5110.c13
-rw-r--r--sound/soc/sh/hac.c3
-rw-r--r--sound/soc/sh/rcar/adg.c4
-rw-r--r--sound/soc/sh/rcar/core.c124
-rw-r--r--sound/soc/sh/rcar/ctu.c2
-rw-r--r--sound/soc/sh/rcar/dma.c109
-rw-r--r--sound/soc/sh/rcar/gen.c33
-rw-r--r--sound/soc/sh/rcar/rsnd.h63
-rw-r--r--sound/soc/sh/rcar/src.c2
-rw-r--r--sound/soc/sh/rcar/ssi.c112
-rw-r--r--sound/soc/sh/rcar/ssiu.c92
-rw-r--r--sound/soc/soc-compress.c4
-rw-r--r--sound/soc/soc-core.c582
-rw-r--r--sound/soc/soc-dapm.c437
-rw-r--r--sound/soc/soc-ops.c4
-rw-r--r--sound/soc/soc-pcm.c253
-rw-r--r--sound/soc/soc-topology.c15
-rw-r--r--sound/soc/soc-utils.c4
-rw-r--r--sound/soc/stm/Kconfig1
-rw-r--r--sound/soc/stm/stm32_sai.c2
-rw-r--r--sound/soc/stm/stm32_sai.h3
-rw-r--r--sound/soc/stm/stm32_sai_sub.c281
-rw-r--r--sound/soc/sunxi/Kconfig17
-rw-r--r--sound/soc/sunxi/Makefile2
-rw-r--r--sound/soc/sunxi/sun4i-i2s.c82
-rw-r--r--sound/soc/sunxi/sun50i-codec-analog.c444
-rw-r--r--sound/soc/sunxi/sun8i-adda-pr-regmap.c102
-rw-r--r--sound/soc/sunxi/sun8i-adda-pr-regmap.h7
-rw-r--r--sound/soc/sunxi/sun8i-codec-analog.c79
-rw-r--r--sound/soc/sunxi/sun8i-codec.c22
-rw-r--r--sound/soc/tegra/tegra_sgtl5000.c17
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c3
-rw-r--r--sound/usb/caiaq/device.c1
-rw-r--r--sound/usb/midi.c3
-rw-r--r--sound/usb/mixer_quirks.c381
-rw-r--r--sound/usb/quirks-table.h9
-rw-r--r--sound/x86/intel_hdmi_audio.c29
-rw-r--r--sound/xen/xen_snd_front_alsa.c46
-rw-r--r--tools/Makefile13
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h10
-rw-r--r--tools/crypto/getstat.c294
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/pci/Build1
-rw-r--r--tools/pci/Makefile53
-rw-r--r--tools/pci/pcitest.c7
-rw-r--r--tools/perf/arch/powerpc/util/book3s_hv_exits.h1
-rw-r--r--tools/testing/nvdimm/Kbuild1
-rw-r--r--tools/testing/nvdimm/acpi_nfit_test.c8
-rw-r--r--tools/testing/nvdimm/test/nfit.c4
-rw-r--r--tools/testing/nvdimm/test/nfit_test.h24
-rw-r--r--tools/testing/selftests/kvm/.gitignore14
-rw-r--r--tools/testing/selftests/kvm/Makefile39
-rw-r--r--tools/testing/selftests/kvm/dirty_log_test.c374
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h55
-rw-r--r--tools/testing/selftests/kvm/include/evmcs.h1098
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h169
-rw-r--r--tools/testing/selftests/kvm/include/sparsebit.h6
-rw-r--r--tools/testing/selftests/kvm/include/test_util.h6
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h (renamed from tools/testing/selftests/kvm/include/x86.h)28
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/vmx.h (renamed from tools/testing/selftests/kvm/include/vmx.h)35
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c311
-rw-r--r--tools/testing/selftests/kvm/lib/assert.c2
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c564
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util_internal.h33
-rw-r--r--tools/testing/selftests/kvm/lib/ucall.c144
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c (renamed from tools/testing/selftests/kvm/lib/x86.c)263
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c (renamed from tools/testing/selftests/kvm/lib/vmx.c)55
-rw-r--r--tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c (renamed from tools/testing/selftests/kvm/cr4_cpuid_sync_test.c)14
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c160
-rw-r--r--tools/testing/selftests/kvm/x86_64/platform_info_test.c (renamed from tools/testing/selftests/kvm/platform_info_test.c)14
-rw-r--r--tools/testing/selftests/kvm/x86_64/set_sregs_test.c (renamed from tools/testing/selftests/kvm/set_sregs_test.c)2
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c (renamed from tools/testing/selftests/kvm/state_test.c)47
-rw-r--r--tools/testing/selftests/kvm/x86_64/sync_regs_test.c (renamed from tools/testing/selftests/kvm/sync_regs_test.c)2
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c (renamed from tools/testing/selftests/kvm/vmx_tsc_adjust_test.c)24
-rw-r--r--tools/testing/selftests/powerpc/Makefile3
-rw-r--r--tools/testing/selftests/powerpc/include/reg.h1
-rw-r--r--tools/testing/selftests/powerpc/include/utils.h18
-rw-r--r--tools/testing/selftests/powerpc/mm/.gitignore3
-rw-r--r--tools/testing/selftests/powerpc/mm/Makefile4
-rw-r--r--tools/testing/selftests/powerpc/mm/wild_bctr.c155
-rw-r--r--tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c8
-rw-r--r--tools/testing/selftests/powerpc/ptrace/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/ptrace/ptrace-syscall.c228
-rw-r--r--tools/testing/selftests/powerpc/security/Makefile9
-rw-r--r--tools/testing/selftests/powerpc/security/rfi_flush.c132
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-tmspr.c27
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-unavailable.c9
-rw-r--r--tools/testing/selftests/powerpc/tm/tm.h9
-rw-r--r--tools/testing/selftests/powerpc/utils.c152
-rw-r--r--tools/usb/usbip/libsrc/usbip_host_common.c6
-rw-r--r--tools/usb/usbip/libsrc/vhci_driver.c2
-rw-r--r--virt/kvm/arm/arm.c26
-rw-r--r--virt/kvm/arm/mmu.c128
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c36
-rw-r--r--virt/kvm/arm/vgic/vgic-kvm-device.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio-v3.c2
-rw-r--r--virt/kvm/coalesced_mmio.c12
-rw-r--r--virt/kvm/kvm_main.c39
2611 files changed, 106849 insertions, 58831 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index e960cd027e1e..a9e123ba32cd 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -25,38 +25,3 @@ Description:
4.2.2.
The files are read only.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/TermChar
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file is the TermChar value to be sent to the USB TMC
- device as described by the document, "Universal Serial Bus Test
- and Measurement Class Specification
- (USBTMC) Revision 1.0" as published by the USB-IF.
-
- Note that the TermCharEnabled file determines if this value is
- sent to the device or not.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/TermCharEnabled
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file determines if the TermChar is to be sent to the
- device on every transaction or not. For more details about
- this, please see the document, "Universal Serial Bus Test and
- Measurement Class Specification (USBTMC) Revision 1.0" as
- published by the USB-IF.
-
-
-What: /sys/bus/usb/drivers/usbtmc/*/auto_abort
-Date: August 2008
-Contact: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Description:
- This file determines if the transaction of the USB TMC
- device is to be automatically aborted if there is any error.
- For more details about this, please see the document,
- "Universal Serial Bus Test and Measurement Class Specification
- (USBTMC) Revision 1.0" as published by the USB-IF.
diff --git a/Documentation/ABI/testing/configfs-stp-policy-p_sys-t b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t
new file mode 100644
index 000000000000..b290d1c00dcf
--- /dev/null
+++ b/Documentation/ABI/testing/configfs-stp-policy-p_sys-t
@@ -0,0 +1,41 @@
+What: /config/stp-policy/<device>:p_sys-t.<policy>/<node>/uuid
+Date: June 2018
+KernelVersion: 4.19
+Description:
+ UUID source identifier string, RW.
+	The default value is randomly generated at <node> creation (mkdir) time.
+ Data coming from trace sources that use this <node> will be
+ tagged with this UUID in the MIPI SyS-T packet stream, to
+ allow the decoder to discern between different sources
+ within the same master/channel range, and identify the
+ higher level decoders that may be needed for each source.
+
+What: /config/stp-policy/<device>:p_sys-t.<policy>/<node>/do_len
+Date: June 2018
+KernelVersion: 4.19
+Description:
+ Include payload length in the MIPI SyS-T header, boolean.
+ If enabled, the SyS-T protocol encoder will include payload
+ length in each packet's metadata. This is normally redundant
+ if the underlying transport protocol supports marking message
+ boundaries (which STP does), so this is off by default.
+
+What: /config/stp-policy/<device>:p_sys-t.<policy>/<node>/ts_interval
+Date: June 2018
+KernelVersion: 4.19
+Description:
+ Time interval in milliseconds. Include a timestamp in the
+ MIPI SyS-T packet metadata, if this many milliseconds have
+ passed since the previous packet from this source. Zero is
+ the default and stands for "never send the timestamp".
+
+What: /config/stp-policy/<device>:p_sys-t.<policy>/<node>/clocksync_interval
+Date: June 2018
+KernelVersion: 4.19
+Description:
+ Time interval in milliseconds. Send a CLOCKSYNC packet if
+ this many milliseconds have passed since the previous
+ CLOCKSYNC packet from this source. Zero is the default and
+ stands for "never send the CLOCKSYNC". It makes sense to
+ use this option with sources that generate constant and/or
+ periodic data, like stm_heartbeat.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uvc b/Documentation/ABI/testing/configfs-usb-gadget-uvc
index 9281e2aa38df..809765bd9573 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -12,6 +12,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Control descriptors
+ All attributes read only:
+ bInterfaceNumber - USB interface number for this
+ streaming interface
+
What: /config/usb-gadget/gadget/functions/uvc.name/control/class
Date: Dec 2014
KernelVersion: 4.0
@@ -109,6 +113,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Streaming descriptors
+ All attributes read only:
+ bInterfaceNumber - USB interface number for this
+ streaming interface
+
What: /config/usb-gadget/gadget/functions/uvc.name/streaming/class
Date: Dec 2014
KernelVersion: 4.0
@@ -160,6 +168,10 @@ Description: Specific MJPEG format descriptors
All attributes read only,
except bmaControls and bDefaultFrameIndex:
+ bFormatIndex - unique id for this format descriptor;
+ only defined after parent header is
+ linked into the streaming class;
+ read-only
bmaControls - this format's data for bmaControls in
the streaming header
bmInterfaceFlags - specifies interlace information,
@@ -177,6 +189,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific MJPEG frame descriptors
+		bFrameIndex	- unique id for this frame descriptor;
+ only defined after parent format is
+ linked into the streaming header;
+ read-only
dwFrameInterval - indicates how frame interval can be
programmed; a number of values
separated by newline can be specified
@@ -204,6 +220,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific uncompressed format descriptors
+ bFormatIndex - unique id for this format descriptor;
+ only defined after parent header is
+ linked into the streaming class;
+ read-only
bmaControls - this format's data for bmaControls in
the streaming header
bmInterfaceFlags - specifies interlace information,
@@ -224,6 +244,10 @@ Date: Dec 2014
KernelVersion: 4.0
Description: Specific uncompressed frame descriptors
+		bFrameIndex	- unique id for this frame descriptor;
+ only defined after parent format is
+ linked into the streaming header;
+ read-only
dwFrameInterval - indicates how frame interval can be
programmed; a number of values
separated by newline can be specified
diff --git a/Documentation/ABI/testing/sysfs-bus-pci b/Documentation/ABI/testing/sysfs-bus-pci
index 44d4b2be92fd..8bfee557e50e 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci
+++ b/Documentation/ABI/testing/sysfs-bus-pci
@@ -323,3 +323,27 @@ Description:
This is similar to /sys/bus/pci/drivers_autoprobe, but
affects only the VFs associated with a specific PF.
+
+What: /sys/bus/pci/devices/.../p2pmem/size
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains the total amount of memory that the device
+ provides (in decimal).
+
+What: /sys/bus/pci/devices/.../p2pmem/available
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains the amount of memory that has not been
+ allocated (in decimal).
+
+What: /sys/bus/pci/devices/.../p2pmem/published
+Date: November 2017
+Contact: Logan Gunthorpe <logang@deltatee.com>
+Description:
+ If the device has any Peer-to-Peer memory registered, this
+ file contains a '1' if the memory has been published for
+ use outside the driver that owns the device.
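For illustration, a small user-space reader for these attributes might look like the sketch below; the device address 0000:01:00.0 is a placeholder for a p2pmem-capable device.

    /* A sketch, assuming a p2pmem-capable device at 0000:01:00.0. */
    #include <stdio.h>

    static int read_ull(const char *attr, unsigned long long *val)
    {
            char path[128];
            FILE *f;
            int ok;

            snprintf(path, sizeof(path),
                     "/sys/bus/pci/devices/0000:01:00.0/p2pmem/%s", attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            ok = (fscanf(f, "%llu", val) == 1) ? 0 : -1;
            fclose(f);
            return ok;
    }

    int main(void)
    {
            unsigned long long size, avail;

            if (read_ull("size", &size) || read_ull("available", &avail))
                    return 1;
            printf("p2pmem: %llu of %llu bytes unallocated\n", avail, size);
            return 0;
    }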
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 08d456e07b53..559baa5c418c 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -189,6 +189,16 @@ Description:
The file will read "hotplug", "wired" and "not used" if the
information is available, and "unknown" otherwise.
+What: /sys/bus/usb/devices/.../(hub interface)/portX/location
+Date: October 2018
+Contact: Bjørn Mork <bjorn@mork.no>
+Description:
+		Some platforms report the physical location of USB ports via
+		firmware. The kernel uses this to pair up logical ports that
+		map to the same physical connector. The attribute exposes the
+		raw location value as a hex integer.
+
+
What: /sys/bus/usb/devices/.../(hub interface)/portX/quirks
Date: May 2018
Contact: Nicolas Boichat <drinkcat@chromium.org>
@@ -219,7 +229,14 @@ Description:
ports and report them to the kernel. This attribute is to expose
the number of over-current situation occurred on a specific port
to user space. This file will contain an unsigned 32 bit value
- which wraps to 0 after its maximum is reached.
+ which wraps to 0 after its maximum is reached. This file supports
+ poll() for monitoring changes to this value in user space.
+
+		Any time this value changes, the corresponding hub device will send a
+ udev event with the following attributes:
+
+ OVER_CURRENT_PORT=/sys/bus/usb/devices/.../(hub interface)/portX
+ OVER_CURRENT_COUNT=[current value of this sysfs attribute]
What: /sys/bus/usb/devices/.../(hub interface)/portX/usb3_lpm_permit
Date: November 2015
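Since over_current_count supports poll(), a monitor can sleep until the hub reports a new event instead of re-reading the file periodically. A minimal sketch follows; the root-hub port path is an example only.

    /* A sketch; substitute a real portX directory for usb1-port1. */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *attr = "/sys/bus/usb/devices/usb1/1-0:1.0/"
                               "usb1-port1/over_current_count";
            struct pollfd pfd = { .events = POLLPRI | POLLERR };
            char buf[32];
            ssize_t n;

            pfd.fd = open(attr, O_RDONLY);
            if (pfd.fd < 0) {
                    perror("open");
                    return 1;
            }
            /* an initial read arms the sysfs notification */
            read(pfd.fd, buf, sizeof(buf));

            while (poll(&pfd, 1, -1) > 0) {
                    /* rewind and re-read to fetch the new count */
                    lseek(pfd.fd, 0, SEEK_SET);
                    n = read(pfd.fd, buf, sizeof(buf) - 1);
                    if (n <= 0)
                            break;
                    buf[n] = '\0';
                    printf("over-current events: %s", buf);
            }
            close(pfd.fd);
            return 0;
    }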
diff --git a/Documentation/ABI/testing/sysfs-bus-vmbus b/Documentation/ABI/testing/sysfs-bus-vmbus
new file mode 100644
index 000000000000..91e6c065973c
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-vmbus
@@ -0,0 +1,21 @@
+What: /sys/bus/vmbus/devices/.../driver_override
+Date: August 2019
+Contact: Stephen Hemminger <sthemmin@microsoft.com>
+Description:
+		This file allows the driver for a device to be specified,
+		overriding standard static and dynamic ID matching. When
+		specified, only a driver with a name matching the value written
+		to driver_override will have an opportunity to bind to the
+		device. The override is specified by writing a string to the
+		driver_override file (echo uio_hv_generic > driver_override) and
+		may be cleared with an empty string (echo > driver_override).
+		This returns the device to the standard matching rules.
+		Writing to driver_override does not automatically unbind the
+		device from its current driver or make any attempt to
+		automatically load the specified driver. If no driver with a
+		matching name is currently loaded in the kernel, the device
+		will not bind to any driver. This also allows devices to
+		opt out of driver binding using a driver_override name such as
+		"none". Only a single driver may be specified in the override;
+		there is no support for parsing delimiters.
+
diff --git a/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0 b/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0
deleted file mode 100644
index ae0a2d3dcc07..000000000000
--- a/Documentation/ABI/testing/sysfs-class-lcd-s6e63m0
+++ /dev/null
@@ -1,27 +0,0 @@
-sysfs interface for the S6E63M0 AMOLED LCD panel driver
--------------------------------------------------------
-
-What: /sys/class/lcd/<lcd>/gamma_mode
-Date: May, 2010
-KernelVersion: v2.6.35
-Contact: dri-devel@lists.freedesktop.org
-Description:
- (RW) Read or write the gamma mode. Following three modes are
- supported:
- 0 - gamma value 2.2,
- 1 - gamma value 1.9 and
- 2 - gamma value 1.7.
-
-
-What: /sys/class/lcd/<lcd>/gamma_table
-Date: May, 2010
-KernelVersion: v2.6.35
-Contact: dri-devel@lists.freedesktop.org
-Description:
- (RO) Displays the size of the gamma table i.e. the number of
- gamma modes available.
-
-This is a backlight lcd driver. These interfaces are an extension to the API
-documented in Documentation/ABI/testing/sysfs-class-lcd and in
-Documentation/ABI/stable/sysfs-class-backlight (under
-/sys/class/backlight/<backlight>/).
diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net
index e2e0fe553ad8..664a8f6a634f 100644
--- a/Documentation/ABI/testing/sysfs-class-net
+++ b/Documentation/ABI/testing/sysfs-class-net
@@ -91,6 +91,24 @@ Description:
stacked (e.g: VLAN interfaces) but still have the same MAC
address as their parent device.
+What: /sys/class/net/<iface>/dev_port
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ Indicates the port number of this network device, formatted
+ as a decimal value. Some NICs have multiple independent ports
+ on the same PCI bus, device and function. This attribute allows
+ userspace to distinguish the respective interfaces.
+
+		Note: some device drivers began using 'dev_id' for this
+		purpose long before 3.15 and have not adopted the new
+		attribute since. To query the port number, some tools look
+		exclusively at 'dev_port', while others consult only 'dev_id'.
+		If a network device has multiple ports as described in the
+		previous paragraph and does not set this attribute to its
+		port number, that is a kernel bug.
+
What: /sys/class/net/<iface>/dormant
Date: March 2006
KernelVersion: 2.6.17
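The note above implies a lookup order for tools: prefer 'dev_port' and fall back to the legacy 'dev_id'. A hedged sketch of that order follows; the interface name "eth0" is an example, and "%li" is used because 'dev_id' is reported in hex while 'dev_port' is decimal.

    /* A sketch of the dev_port-then-dev_id lookup order. */
    #include <stdio.h>

    static int read_attr(const char *iface, const char *attr, long *val)
    {
            char path[128];
            FILE *f;
            int ok;

            snprintf(path, sizeof(path), "/sys/class/net/%s/%s", iface, attr);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            ok = (fscanf(f, "%li", val) == 1) ? 0 : -1;
            fclose(f);
            return ok;
    }

    int main(void)
    {
            long port;

            if (read_attr("eth0", "dev_port", &port) &&
                read_attr("eth0", "dev_id", &port))
                    return 1;
            printf("port number: %ld\n", port);
            return 0;
    }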
diff --git a/Documentation/PCI/endpoint/pci-test-howto.txt b/Documentation/PCI/endpoint/pci-test-howto.txt
index e40cf0fb58d7..040479f437a5 100644
--- a/Documentation/PCI/endpoint/pci-test-howto.txt
+++ b/Documentation/PCI/endpoint/pci-test-howto.txt
@@ -99,17 +99,20 @@ Note that the devices listed here correspond to the value populated in 1.4 above
2.2 Using Endpoint Test function Device
pcitest.sh added in tools/pci/ can be used to run all the default PCI endpoint
-tests. Before pcitest.sh can be used pcitest.c should be compiled using the
-following commands.
+tests. To compile this tool, run the following commands:
- cd <kernel-dir>
- make headers_install ARCH=arm
- arm-linux-gnueabihf-gcc -Iusr/include tools/pci/pcitest.c -o pcitest
- cp pcitest <rootfs>/usr/sbin/
- cp tools/pci/pcitest.sh <rootfs>
+ # cd <kernel-dir>
+ # make -C tools/pci
+
+or, to compile and install it on your system:
+
+ # cd <kernel-dir>
+ # make -C tools/pci install
+
+The tool and script will be located in <rootfs>/usr/bin/
2.2.1 pcitest.sh Output
- # ./pcitest.sh
+ # pcitest.sh
BAR tests
BAR0: OKAY
diff --git a/Documentation/PCI/pci-error-recovery.txt b/Documentation/PCI/pci-error-recovery.txt
index 688b69121e82..0b6bb3ef449e 100644
--- a/Documentation/PCI/pci-error-recovery.txt
+++ b/Documentation/PCI/pci-error-recovery.txt
@@ -110,7 +110,7 @@ The actual steps taken by a platform to recover from a PCI error
event will be platform-dependent, but will follow the general
sequence described below.
-STEP 0: Error Event: ERR_NONFATAL
+STEP 0: Error Event
-------------------
A PCI bus error is detected by the PCI hardware. On powerpc, the slot
is isolated, in that all I/O is blocked: all reads return 0xffffffff,
@@ -228,7 +228,13 @@ proceeds to either STEP3 (Link Reset) or to STEP 5 (Resume Operations).
If any driver returned PCI_ERS_RESULT_NEED_RESET, then the platform
proceeds to STEP 4 (Slot Reset)
-STEP 3: Slot Reset
+STEP 3: Link Reset
+------------------
+The platform resets the link. This is a PCI-Express specific step
+and is done whenever a fatal error has been detected that can be
+"solved" by resetting the link.
+
+STEP 4: Slot Reset
------------------
In response to a return value of PCI_ERS_RESULT_NEED_RESET, the
@@ -314,7 +320,7 @@ Failure).
>>> However, it probably should.
-STEP 4: Resume Operations
+STEP 5: Resume Operations
-------------------------
The platform will call the resume() callback on all affected device
drivers if all drivers on the segment have returned
@@ -326,7 +332,7 @@ a result code.
At this point, if a new error happens, the platform will restart
a new error recovery sequence.
-STEP 5: Permanent Failure
+STEP 6: Permanent Failure
-------------------------
A "permanent failure" has occurred, and the platform cannot recover
the device. The platform will call error_detected() with a
@@ -349,27 +355,6 @@ errors. See the discussion in powerpc/eeh-pci-error-recovery.txt
for additional detail on real-life experience of the causes of
software errors.
-STEP 0: Error Event: ERR_FATAL
--------------------
-PCI bus error is detected by the PCI hardware. On powerpc, the slot is
-isolated, in that all I/O is blocked: all reads return 0xffffffff, all
-writes are ignored.
-
-STEP 1: Remove devices
---------------------
-Platform removes the devices depending on the error agent, it could be
-this port for all subordinates or upstream component (likely downstream
-port)
-
-STEP 2: Reset link
---------------------
-The platform resets the link. This is a PCI-Express specific step and is
-done whenever a fatal error has been detected that can be "solved" by
-resetting the link.
-
-STEP 3: Re-enumerate the devices
---------------------
-Initiates the re-enumeration.
Conclusion; General Remarks
---------------------------
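For driver authors, the steps above map onto the error-handling callbacks of struct pci_error_handlers from <linux/pci.h>. The sketch below, for a hypothetical "foo" driver, is illustrative only; real recovery logic is device-specific.

    #include <linux/pci.h>

    static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                               enum pci_channel_state state)
    {
            if (state == pci_channel_io_perm_failure)
                    return PCI_ERS_RESULT_DISCONNECT;   /* STEP 6 */

            /* quiesce the device and ask for a reset (STEP 4) */
            return PCI_ERS_RESULT_NEED_RESET;
    }

    static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
    {
            /* re-initialize the device after the slot/link reset */
            return PCI_ERS_RESULT_RECOVERED;
    }

    static void foo_resume(struct pci_dev *pdev)
    {
            /* STEP 5: restart I/O */
    }

    static const struct pci_error_handlers foo_err_handlers = {
            .error_detected = foo_error_detected,
            .slot_reset     = foo_slot_reset,
            .resume         = foo_resume,
    };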
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index e129cd8a6dcc..47ca5cda0eef 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1759,6 +1759,18 @@
nobypass [PPC/POWERNV]
Disable IOMMU bypass, using IOMMU for PCI devices.
+ iommu.strict= [ARM64] Configure TLB invalidation behaviour
+ Format: { "0" | "1" }
+ 0 - Lazy mode.
+ Request that DMA unmap operations use deferred
+ invalidation of hardware TLBs, for increased
+ throughput at the cost of reduced device isolation.
+ Will fall back to strict mode if not supported by
+ the relevant IOMMU driver.
+ 1 - Strict mode (default).
+ DMA unmap operations invalidate IOMMU hardware TLBs
+ synchronously.
+
iommu.passthrough=
[ARM64] Configure DMA to bypass the IOMMU by default.
Format: { "0" | "1" }
@@ -2416,7 +2428,7 @@
seconds. Use this parameter to check at some
other rate. 0 disables periodic checking.
- memtest= [KNL,X86,ARM] Enable memtest
+ memtest= [KNL,X86,ARM,PPC] Enable memtest
Format: <integer>
default : 0 <disable>
Specifies the number of memtest passes to be
@@ -4623,7 +4635,8 @@
usbcore.old_scheme_first=
[USB] Start with the old device initialization
- scheme (default 0 = off).
+			scheme. This applies only to low and full-speed devices
+ (default 0 = off).
usbcore.usbfs_memory_mb=
[USB] Memory limit (in MB) for buffers allocated by
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst
index 30491d91e93d..164bf71149fd 100644
--- a/Documentation/admin-guide/security-bugs.rst
+++ b/Documentation/admin-guide/security-bugs.rst
@@ -26,23 +26,34 @@ information is helpful. Any exploit code is very helpful and will not
be released without consent from the reporter unless it has already been
made public.
-Disclosure
-----------
-
-The goal of the Linux kernel security team is to work with the bug
-submitter to understand and fix the bug. We prefer to publish the fix as
-soon as possible, but try to avoid public discussion of the bug itself
-and leave that to others.
-
-Publishing the fix may be delayed when the bug or the fix is not yet
-fully understood, the solution is not well-tested or for vendor
-coordination. However, we expect these delays to be short, measurable in
-days, not weeks or months. A release date is negotiated by the security
-team working with the bug submitter as well as vendors. However, the
-kernel security team holds the final say when setting a timeframe. The
-timeframe varies from immediate (esp. if it's already publicly known bug)
-to a few weeks. As a basic default policy, we expect report date to
-release date to be on the order of 7 days.
+Disclosure and embargoed information
+------------------------------------
+
+The security list is not a disclosure channel. For that, see Coordination
+below.
+
+Once a robust fix has been developed, our preference is to release the
+fix in a timely fashion, treating it no differently than any of the other
+thousands of changes and fixes the Linux kernel project releases every
+month.
+
+However, at the request of the reporter, we will postpone releasing the
+fix for up to 5 business days after the date of the report or after the
+embargo has lifted, whichever comes first. The only exception to that
+rule is if the bug is publicly known, in which case the preference is to
+release the fix as soon as it's available.
+
+Whilst embargoed information may be shared with trusted individuals in
+order to develop a fix, such information will not be published alongside
+the fix or on any other disclosure channel without the permission of the
+reporter. This includes but is not limited to the original bug report
+and followup discussions (if any), exploits, CVE information or the
+identity of the reporter.
+
+In other words, our only interest is in getting bugs fixed. All other
+information submitted to the security list and any followup discussions
+of the report are treated confidentially even after the embargo has been
+lifted, in perpetuity.
Coordination
------------
@@ -68,7 +79,7 @@ may delay the bug handling. If a reporter wishes to have a CVE identifier
assigned ahead of public disclosure, they will need to contact the private
linux-distros list, described above. When such a CVE identifier is known
before a patch is provided, it is desirable to mention it in the commit
-message, though.
+message if the reporter agrees.
Non-disclosure agreements
-------------------------
diff --git a/Documentation/cgroup-v1/rdma.txt b/Documentation/cgroup-v1/rdma.txt
index af618171e0eb..9bdb7fd03f83 100644
--- a/Documentation/cgroup-v1/rdma.txt
+++ b/Documentation/cgroup-v1/rdma.txt
@@ -27,7 +27,7 @@ cgroup.
Currently user space applications can easily take away all the rdma verb
specific resources such as AH, CQ, QP, MR etc. Due to which other applications
in other cgroup or kernel space ULPs may not even get chance to allocate any
-rdma resources. This can leads to service unavailability.
+rdma resources. This can lead to service unavailability.
Therefore RDMA controller is needed through which resource consumption
of processes can be limited. Through this controller different rdma
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 86023c33906f..ff48b55040ef 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -420,9 +420,8 @@ struct clk
%pC pll1
%pCn pll1
-For printing struct clk structures. %pC and %pCn print the name
-(Common Clock Framework) or address (legacy clock framework) of the
-structure.
+For printing struct clk structures. %pC and %pCn print the name of the clock
+(Common Clock Framework) or a unique 32-bit ID (legacy clock framework).
Passed by reference.
diff --git a/Documentation/devicetree/bindings/arm/al,alpine.txt b/Documentation/devicetree/bindings/arm/al,alpine.txt
index f404a4f9b165..d00debe2e86f 100644
--- a/Documentation/devicetree/bindings/arm/al,alpine.txt
+++ b/Documentation/devicetree/bindings/arm/al,alpine.txt
@@ -14,75 +14,3 @@ compatible: must contain "al,alpine"
...
}
-
-* CPU node:
-
-The Alpine platform includes cortex-a15 cores.
-enable-method: must be "al,alpine-smp" to allow smp [1]
-
-Example:
-
-cpus {
- #address-cells = <1>;
- #size-cells = <0>;
- enable-method = "al,alpine-smp";
-
- cpu@0 {
- compatible = "arm,cortex-a15";
- device_type = "cpu";
- reg = <0>;
- };
-
- cpu@1 {
- compatible = "arm,cortex-a15";
- device_type = "cpu";
- reg = <1>;
- };
-
- cpu@2 {
- compatible = "arm,cortex-a15";
- device_type = "cpu";
- reg = <2>;
- };
-
- cpu@3 {
- compatible = "arm,cortex-a15";
- device_type = "cpu";
- reg = <3>;
- };
-};
-
-
-* Alpine CPU resume registers
-
-The CPU resume register are used to define required resume address after
-reset.
-
-Properties:
-- compatible : Should contain "al,alpine-cpu-resume".
-- reg : Offset and length of the register set for the device
-
-Example:
-
-cpu_resume {
- compatible = "al,alpine-cpu-resume";
- reg = <0xfbff5ed0 0x30>;
-};
-
-* Alpine System-Fabric Service Registers
-
-The System-Fabric Service Registers allow various operation on CPU and
-system fabric, like powering CPUs off.
-
-Properties:
-- compatible : Should contain "al,alpine-sysfabric-service" and "syscon".
-- reg : Offset and length of the register set for the device
-
-Example:
-
-nb_service {
- compatible = "al,alpine-sysfabric-service", "syscon";
- reg = <0xfb070000 0x10000>;
-};
-
-[1] arm/cpu-enable-method/al,alpine-smp
diff --git a/Documentation/devicetree/bindings/arm/atmel-at91.txt b/Documentation/devicetree/bindings/arm/atmel-at91.txt
index 31220b54d85d..4bf1b4da7659 100644
--- a/Documentation/devicetree/bindings/arm/atmel-at91.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-at91.txt
@@ -70,173 +70,3 @@ compatible: must be one of:
- "atmel,samv71q19"
- "atmel,samv71q20"
- "atmel,samv71q21"
-
-Chipid required properties:
-- compatible: Should be "atmel,sama5d2-chipid"
-- reg : Should contain registers location and length
-
-PIT Timer required properties:
-- compatible: Should be "atmel,at91sam9260-pit"
-- reg: Should contain registers location and length
-- interrupts: Should contain interrupt for the PIT which is the IRQ line
- shared across all System Controller members.
-
-System Timer (ST) required properties:
-- compatible: Should be "atmel,at91rm9200-st", "syscon", "simple-mfd"
-- reg: Should contain registers location and length
-- interrupts: Should contain interrupt for the ST which is the IRQ line
- shared across all System Controller members.
-- clocks: phandle to input clock.
-Its subnodes can be:
-- watchdog: compatible should be "atmel,at91rm9200-wdt"
-
-RSTC Reset Controller required properties:
-- compatible: Should be "atmel,<chip>-rstc".
- <chip> can be "at91sam9260" or "at91sam9g45" or "sama5d3"
-- reg: Should contain registers location and length
-- clocks: phandle to input clock.
-
-Example:
-
- rstc@fffffd00 {
- compatible = "atmel,at91sam9260-rstc";
- reg = <0xfffffd00 0x10>;
- clocks = <&clk32k>;
- };
-
-RAMC SDRAM/DDR Controller required properties:
-- compatible: Should be "atmel,at91rm9200-sdramc", "syscon"
- "atmel,at91sam9260-sdramc",
- "atmel,at91sam9g45-ddramc",
- "atmel,sama5d3-ddramc",
-- reg: Should contain registers location and length
-
-Examples:
-
- ramc0: ramc@ffffe800 {
- compatible = "atmel,at91sam9g45-ddramc";
- reg = <0xffffe800 0x200>;
- };
-
-SHDWC Shutdown Controller
-
-required properties:
-- compatible: Should be "atmel,<chip>-shdwc".
- <chip> can be "at91sam9260", "at91sam9rl" or "at91sam9x5".
-- reg: Should contain registers location and length
-- clocks: phandle to input clock.
-
-optional properties:
-- atmel,wakeup-mode: String, operation mode of the wakeup mode.
- Supported values are: "none", "high", "low", "any".
-- atmel,wakeup-counter: Counter on Wake-up 0 (between 0x0 and 0xf).
-
-optional at91sam9260 properties:
-- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
-
-optional at91sam9rl properties:
-- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
-- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
-
-optional at91sam9x5 properties:
-- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
-
-Example:
-
- shdwc@fffffd10 {
- compatible = "atmel,at91sam9260-shdwc";
- reg = <0xfffffd10 0x10>;
- clocks = <&clk32k>;
- };
-
-SHDWC SAMA5D2-Compatible Shutdown Controller
-
-1) shdwc node
-
-required properties:
-- compatible: should be "atmel,sama5d2-shdwc".
-- reg: should contain registers location and length
-- clocks: phandle to input clock.
-- #address-cells: should be one. The cell is the wake-up input index.
-- #size-cells: should be zero.
-
-optional properties:
-
-- debounce-delay-us: minimum wake-up inputs debouncer period in
- microseconds. It's usually a board-related property.
-- atmel,wakeup-rtc-timer: boolean to enable Real-Time Clock wake-up.
-
-The node contains child nodes for each wake-up input that the platform uses.
-
-2) input nodes
-
-Wake-up input nodes are usually described in the "board" part of the Device
-Tree. Note also that input 0 is linked to the wake-up pin and is frequently
-used.
-
-Required properties:
-- reg: should contain the wake-up input index [0 - 15].
-
-Optional properties:
-- atmel,wakeup-active-high: boolean, the corresponding wake-up input described
- by the child, forces the wake-up of the core power supply on a high level.
- The default is to be active low.
-
-Example:
-
-On the SoC side:
- shdwc@f8048010 {
- compatible = "atmel,sama5d2-shdwc";
- reg = <0xf8048010 0x10>;
- clocks = <&clk32k>;
- #address-cells = <1>;
- #size-cells = <0>;
- atmel,wakeup-rtc-timer;
- };
-
-On the board side:
- shdwc@f8048010 {
- debounce-delay-us = <976>;
-
- input@0 {
- reg = <0>;
- };
-
- input@1 {
- reg = <1>;
- atmel,wakeup-active-high;
- };
- };
-
-Special Function Registers (SFR)
-
-Special Function Registers (SFR) manage specific aspects of the integrated
-memory, bridge implementations, processor and other functionality not controlled
-elsewhere.
-
-required properties:
-- compatible: Should be "atmel,<chip>-sfr", "syscon" or
- "atmel,<chip>-sfrbu", "syscon"
- <chip> can be "sama5d3", "sama5d4" or "sama5d2".
-- reg: Should contain registers location and length
-
- sfr@f0038000 {
- compatible = "atmel,sama5d3-sfr", "syscon";
- reg = <0xf0038000 0x60>;
- };
-
-Security Module (SECUMOD)
-
-The Security Module macrocell provides all necessary secure functions to avoid
-voltage, temperature, frequency and mechanical attacks on the chip. It also
-embeds secure memories that can be scrambled
-
-required properties:
-- compatible: Should be "atmel,<chip>-secumod", "syscon".
- <chip> can be "sama5d2".
-- reg: Should contain registers location and length
-
- secumod@fc040000 {
- compatible = "atmel,sama5d2-secumod", "syscon";
- reg = <0xfc040000 0x100>;
- };
diff --git a/Documentation/devicetree/bindings/arm/atmel-sysregs.txt b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
new file mode 100644
index 000000000000..4b96608ad692
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/atmel-sysregs.txt
@@ -0,0 +1,171 @@
+Atmel system registers
+
+Chipid required properties:
+- compatible: Should be "atmel,sama5d2-chipid"
+- reg : Should contain registers location and length
+
+PIT Timer required properties:
+- compatible: Should be "atmel,at91sam9260-pit"
+- reg: Should contain registers location and length
+- interrupts: Should contain interrupt for the PIT which is the IRQ line
+ shared across all System Controller members.
+
+System Timer (ST) required properties:
+- compatible: Should be "atmel,at91rm9200-st", "syscon", "simple-mfd"
+- reg: Should contain registers location and length
+- interrupts: Should contain interrupt for the ST which is the IRQ line
+ shared across all System Controller members.
+- clocks: phandle to input clock.
+Its subnodes can be:
+- watchdog: compatible should be "atmel,at91rm9200-wdt"
+
+RSTC Reset Controller required properties:
+- compatible: Should be "atmel,<chip>-rstc".
+ <chip> can be "at91sam9260" or "at91sam9g45" or "sama5d3"
+- reg: Should contain registers location and length
+- clocks: phandle to input clock.
+
+Example:
+
+ rstc@fffffd00 {
+ compatible = "atmel,at91sam9260-rstc";
+ reg = <0xfffffd00 0x10>;
+ clocks = <&clk32k>;
+ };
+
+RAMC SDRAM/DDR Controller required properties:
+- compatible: Should be "atmel,at91rm9200-sdramc", "syscon"
+ "atmel,at91sam9260-sdramc",
+ "atmel,at91sam9g45-ddramc",
+ "atmel,sama5d3-ddramc",
+- reg: Should contain registers location and length
+
+Examples:
+
+ ramc0: ramc@ffffe800 {
+ compatible = "atmel,at91sam9g45-ddramc";
+ reg = <0xffffe800 0x200>;
+ };
+
+SHDWC Shutdown Controller
+
+required properties:
+- compatible: Should be "atmel,<chip>-shdwc".
+ <chip> can be "at91sam9260", "at91sam9rl" or "at91sam9x5".
+- reg: Should contain registers location and length
+- clocks: phandle to input clock.
+
+optional properties:
+- atmel,wakeup-mode: String, operation mode of the wakeup mode.
+ Supported values are: "none", "high", "low", "any".
+- atmel,wakeup-counter: Counter on Wake-up 0 (between 0x0 and 0xf).
+
+optional at91sam9260 properties:
+- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
+
+optional at91sam9rl properties:
+- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
+- atmel,wakeup-rtt-timer: boolean to enable Real-time Timer Wake-up.
+
+optional at91sam9x5 properties:
+- atmel,wakeup-rtc-timer: boolean to enable Real-time Clock Wake-up.
+
+Example:
+
+ shdwc@fffffd10 {
+ compatible = "atmel,at91sam9260-shdwc";
+ reg = <0xfffffd10 0x10>;
+ clocks = <&clk32k>;
+ };
+
+SHDWC SAMA5D2-Compatible Shutdown Controller
+
+1) shdwc node
+
+required properties:
+- compatible: should be "atmel,sama5d2-shdwc".
+- reg: should contain registers location and length
+- clocks: phandle to input clock.
+- #address-cells: should be one. The cell is the wake-up input index.
+- #size-cells: should be zero.
+
+optional properties:
+
+- debounce-delay-us: minimum wake-up inputs debouncer period in
+ microseconds. It's usually a board-related property.
+- atmel,wakeup-rtc-timer: boolean to enable Real-Time Clock wake-up.
+
+The node contains child nodes for each wake-up input that the platform uses.
+
+2) input nodes
+
+Wake-up input nodes are usually described in the "board" part of the Device
+Tree. Note also that input 0 is linked to the wake-up pin and is frequently
+used.
+
+Required properties:
+- reg: should contain the wake-up input index [0 - 15].
+
+Optional properties:
+- atmel,wakeup-active-high: boolean, the corresponding wake-up input described
+ by the child, forces the wake-up of the core power supply on a high level.
+ The default is to be active low.
+
+Example:
+
+On the SoC side:
+ shdwc@f8048010 {
+ compatible = "atmel,sama5d2-shdwc";
+ reg = <0xf8048010 0x10>;
+ clocks = <&clk32k>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ atmel,wakeup-rtc-timer;
+ };
+
+On the board side:
+ shdwc@f8048010 {
+ debounce-delay-us = <976>;
+
+ input@0 {
+ reg = <0>;
+ };
+
+ input@1 {
+ reg = <1>;
+ atmel,wakeup-active-high;
+ };
+ };
+
+Special Function Registers (SFR)
+
+Special Function Registers (SFR) manage specific aspects of the integrated
+memory, bridge implementations, processor and other functionality not controlled
+elsewhere.
+
+required properties:
+- compatible: Should be "atmel,<chip>-sfr", "syscon" or
+ "atmel,<chip>-sfrbu", "syscon"
+ <chip> can be "sama5d3", "sama5d4" or "sama5d2".
+- reg: Should contain registers location and length
+
+ sfr@f0038000 {
+ compatible = "atmel,sama5d3-sfr", "syscon";
+ reg = <0xf0038000 0x60>;
+ };
+
+Security Module (SECUMOD)
+
+The Security Module macrocell provides all necessary secure functions to avoid
+voltage, temperature, frequency and mechanical attacks on the chip. It also
+embeds secure memories that can be scrambled.
+
+required properties:
+- compatible: Should be "atmel,<chip>-secumod", "syscon".
+ <chip> can be "sama5d2".
+- reg: Should contain registers location and length
+
+ secumod@fc040000 {
+ compatible = "atmel,sama5d2-secumod", "syscon";
+ reg = <0xfc040000 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index 5d1ad09bafb4..f8aff65ab921 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -54,9 +54,7 @@ its hardware characteristics.
clocks the core of that coresight component. The latter clock
is optional.
- * port or ports: The representation of the component's port
- layout using the generic DT graph presentation found in
- "bindings/graph.txt".
+ * port or ports: see "Graph bindings for Coresight" below.
* Additional required properties for System Trace Macrocells (STM):
* reg: along with the physical base address and length of the register
@@ -73,7 +71,7 @@ its hardware characteristics.
AMBA markee):
- "arm,coresight-replicator"
- * port or ports: same as above.
+ * port or ports: see "Graph bindings for Coresight" below.
* Optional properties for ETM/PTMs:
@@ -96,6 +94,20 @@ its hardware characteristics.
* interrupts : Exactly one SPI may be listed for reporting the address
error
+Graph bindings for Coresight
+----------------------------
+
+Coresight components are interconnected to create a data path for the flow of
+trace data from the "sources" to their collection points, the "sinks".
+Each coresight component must describe its "input" and "output" connections
+using the generic DT graph bindings described in "bindings/graph.txt", where
+each "port" along with an "endpoint" represents a hardware port and its
+connection.
+
+ * All output ports must be listed inside a child node named "out-ports".
+ * All input ports must be listed inside a child node named "in-ports".
+ * Port address must match the hardware port number.
+
Example:
1. Sinks
@@ -105,10 +117,11 @@ Example:
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- port {
- etb_in_port: endpoint@0 {
- slave-mode;
- remote-endpoint = <&replicator_out_port0>;
+ in-ports {
+ port {
+ etb_in_port: endpoint@0 {
+ remote-endpoint = <&replicator_out_port0>;
+ };
};
};
};
@@ -119,10 +132,11 @@ Example:
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- port {
- tpiu_in_port: endpoint@0 {
- slave-mode;
- remote-endpoint = <&replicator_out_port1>;
+ in-ports {
+ port {
+ tpiu_in_port: endpoint@0 {
+ remote-endpoint = <&replicator_out_port1>;
+ };
};
};
};
@@ -133,22 +147,16 @@ Example:
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* input port */
- port@0 {
- reg = <0>;
+ in-ports {
+ port {
etr_in_port: endpoint {
- slave-mode;
remote-endpoint = <&replicator2_out_port0>;
};
};
+ };
- /* CATU link represented by output port */
- port@1 {
- reg = <1>;
+ out-ports {
+ port {
etr_out_port: endpoint {
remote-endpoint = <&catu_in_port>;
};
@@ -163,7 +171,7 @@ Example:
*/
compatible = "arm,coresight-replicator";
- ports {
+ out-ports {
#address-cells = <1>;
#size-cells = <0>;
@@ -181,12 +189,11 @@ Example:
remote-endpoint = <&tpiu_in_port>;
};
};
+ };
- /* replicator input port */
- port@2 {
- reg = <0>;
+ in-ports {
+ port {
replicator_in_port0: endpoint {
- slave-mode;
remote-endpoint = <&funnel_out_port0>;
};
};
@@ -199,40 +206,36 @@ Example:
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* funnel output port */
- port@0 {
- reg = <0>;
+ out-ports {
+ port {
funnel_out_port0: endpoint {
remote-endpoint =
<&replicator_in_port0>;
};
};
+ };
- /* funnel input ports */
- port@1 {
+ in-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
reg = <0>;
funnel_in_port0: endpoint {
- slave-mode;
remote-endpoint = <&ptm0_out_port>;
};
};
- port@2 {
+ port@1 {
reg = <1>;
funnel_in_port1: endpoint {
- slave-mode;
remote-endpoint = <&ptm1_out_port>;
};
};
- port@3 {
+ port@2 {
reg = <2>;
funnel_in_port2: endpoint {
- slave-mode;
remote-endpoint = <&etm0_out_port>;
};
};
@@ -248,9 +251,11 @@ Example:
cpu = <&cpu0>;
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- port {
- ptm0_out_port: endpoint {
- remote-endpoint = <&funnel_in_port0>;
+ out-ports {
+ port {
+ ptm0_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port0>;
+ };
};
};
};
@@ -262,9 +267,11 @@ Example:
cpu = <&cpu1>;
clocks = <&oscclk6a>;
clock-names = "apb_pclk";
- port {
- ptm1_out_port: endpoint {
- remote-endpoint = <&funnel_in_port1>;
+ out-ports {
+ port {
+ ptm1_out_port: endpoint {
+ remote-endpoint = <&funnel_in_port1>;
+ };
};
};
};
@@ -278,9 +285,11 @@ Example:
clocks = <&soc_smc50mhz>;
clock-names = "apb_pclk";
- port {
- stm_out_port: endpoint {
- remote-endpoint = <&main_funnel_in_port2>;
+ out-ports {
+ port {
+ stm_out_port: endpoint {
+ remote-endpoint = <&main_funnel_in_port2>;
+ };
};
};
};
@@ -295,10 +304,11 @@ Example:
clock-names = "apb_pclk";
interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
- port {
- catu_in_port: endpoint {
- slave-mode;
- remote-endpoint = <&etr_out_port>;
+ in-ports {
+ port {
+ catu_in_port: endpoint {
+ remote-endpoint = <&etr_out_port>;
+ };
};
};
};
diff --git a/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
index c2e0cc5e4cfd..35e5afb6d9ad 100644
--- a/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
+++ b/Documentation/devicetree/bindings/arm/cpu-enable-method/al,alpine-smp
@@ -14,7 +14,28 @@ Related properties: (none)
Note:
This enable method requires valid nodes compatible with
-"al,alpine-cpu-resume" and "al,alpine-nb-service"[1].
+"al,alpine-cpu-resume" and "al,alpine-nb-service".
+
+
+* Alpine CPU resume registers
+
+The CPU resume registers are used to define the required resume address
+after reset.
+
+Properties:
+- compatible : Should contain "al,alpine-cpu-resume".
+- reg : Offset and length of the register set for the device
+
+
+* Alpine System-Fabric Service Registers
+
+The System-Fabric Service Registers allow various operations on the CPU and
+system fabric, like powering CPUs off.
+
+Properties:
+- compatible : Should contain "al,alpine-sysfabric-service" and "syscon".
+- reg : Offset and length of the register set for the device
+
Example:
@@ -48,5 +69,12 @@ cpus {
};
};
---
-[1] arm/al,alpine.txt
+cpu_resume {
+ compatible = "al,alpine-cpu-resume";
+ reg = <0xfbff5ed0 0x30>;
+};
+
+nb_service {
+ compatible = "al,alpine-sysfabric-service", "syscon";
+ reg = <0xfb070000 0x10000>;
+};
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index 96dfccc0faa8..b0198a1cf403 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -276,7 +276,7 @@ described below.
Usage: optional
Value type: <prop-encoded-array>
Definition: A u32 value that represents the running time dynamic
- power coefficient in units of mW/MHz/uV^2. The
+ power coefficient in units of uW/MHz/V^2. The
coefficient can either be calculated from power
measurements or derived by analysis.
@@ -287,7 +287,7 @@ described below.
Pdyn = dynamic-power-coefficient * V^2 * f
- where voltage is in uV, frequency is in MHz.
+ where voltage is in V, frequency is in MHz.
Example 1 (dual-cluster big.LITTLE system 32-bit):
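As a worked example under assumed numbers: a coefficient of 500 (uW/MHz/V^2) at an operating point of 1000 MHz and 0.9 V gives Pdyn = 500 * (0.9)^2 * 1000 = 405000 uW, i.e. roughly 405 mW.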
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
new file mode 100644
index 000000000000..b5cb374dc47d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-dcfg.txt
@@ -0,0 +1,19 @@
+Freescale DCFG
+
+DCFG is the device configuration unit. It provides general purpose
+configuration and status for the device, such as setting the secondary
+core start address and releasing the secondary core from holdoff.
+
+Required properties:
+ - compatible: Should contain a chip-specific compatible string.
+	Chip-specific strings are of the form "fsl,<chip>-dcfg".
+ The following <chip>s are known to be supported:
+ ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
+
+ - reg : should contain base address and length of DCFG memory-mapped registers
+
+Example:
+ dcfg: dcfg@1ee0000 {
+ compatible = "fsl,ls1021a-dcfg";
+ reg = <0x0 0x1ee0000 0x0 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-scfg.txt b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-scfg.txt
new file mode 100644
index 000000000000..0ab67b0b216d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/freescale/fsl,layerscape-scfg.txt
@@ -0,0 +1,19 @@
+Freescale SCFG
+
+SCFG is the supplemental configuration unit. It provides SoC-specific
+configuration and status registers for the chip, such as the PEX port
+status.
+
+Required properties:
+ - compatible: Should contain a chip-specific compatible string.
+	Chip-specific strings are of the form "fsl,<chip>-scfg".
+ The following <chip>s are known to be supported:
+ ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
+
+ - reg: should contain base address and length of SCFG memory-mapped registers
+
+Example:
+ scfg: scfg@1570000 {
+ compatible = "fsl,ls1021a-scfg";
+ reg = <0x0 0x1570000 0x0 0x10000>;
+ };
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index 8a1baa2b9723..1e775aaa5c5b 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -101,45 +101,6 @@ Freescale LS1021A Platform Device Tree Bindings
Required root node compatible properties:
- compatible = "fsl,ls1021a";
-Freescale SoC-specific Device Tree Bindings
--------------------------------------------
-
-Freescale SCFG
- SCFG is the supplemental configuration unit, that provides SoC specific
-configuration and status registers for the chip. Such as getting PEX port
-status.
- Required properties:
- - compatible: Should contain a chip-specific compatible string,
- Chip-specific strings are of the form "fsl,<chip>-scfg",
- The following <chip>s are known to be supported:
- ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
-
- - reg: should contain base address and length of SCFG memory-mapped registers
-
-Example:
- scfg: scfg@1570000 {
- compatible = "fsl,ls1021a-scfg";
- reg = <0x0 0x1570000 0x0 0x10000>;
- };
-
-Freescale DCFG
- DCFG is the device configuration unit, that provides general purpose
-configuration and status for the device. Such as setting the secondary
-core start address and release the secondary core from holdoff and startup.
- Required properties:
- - compatible: Should contain a chip-specific compatible string,
- Chip-specific strings are of the form "fsl,<chip>-dcfg",
- The following <chip>s are known to be supported:
- ls1012a, ls1021a, ls1043a, ls1046a, ls2080a.
-
- - reg : should contain base address and length of DCFG memory-mapped registers
-
-Example:
- dcfg: dcfg@1ee0000 {
- compatible = "fsl,ls1021a-dcfg";
- reg = <0x0 0x1ee0000 0x0 0x10000>;
- };
-
Freescale ARMv8 based Layerscape SoC family Device Tree Bindings
----------------------------------------------------------------
diff --git a/Documentation/devicetree/bindings/arm/secure.txt b/Documentation/devicetree/bindings/arm/secure.txt
index e31303fb233a..f27bbff2c780 100644
--- a/Documentation/devicetree/bindings/arm/secure.txt
+++ b/Documentation/devicetree/bindings/arm/secure.txt
@@ -32,7 +32,8 @@ describe the view of Secure world using the standard bindings. These
secure- bindings only need to be used where both the Secure and Normal
world views need to be described in a single device tree.
-Valid Secure world properties:
+Valid Secure world properties
+-----------------------------
- secure-status : specifies whether the device is present and usable
in the secure world. The combination of this with "status" allows
@@ -51,3 +52,19 @@ Valid Secure world properties:
status = "disabled"; secure-status = "okay"; /* S-only */
status = "disabled"; /* disabled in both */
status = "disabled"; secure-status = "disabled"; /* disabled in both */
+
+The secure-chosen node
+----------------------
+
+Similar to the /chosen node which serves as a place for passing data
+between firmware and the operating system, the /secure-chosen node may
+be used to pass data to the Secure OS. Only the properties defined
+below may appear in the /secure-chosen node.
+
+- stdout-path : specifies the device to be used by the Secure OS for
+ its console output. The syntax is the same as for /chosen/stdout-path.
+ If the /secure-chosen node exists but the stdout-path property is not
+ present, the Secure OS should not perform any console output. If
+ /secure-chosen does not exist, the Secure OS should use the value of
+ /chosen/stdout-path instead (that is, use the same device as the
+ Normal world OS).
diff --git a/Documentation/devicetree/bindings/arm/zte,sysctrl.txt b/Documentation/devicetree/bindings/arm/zte,sysctrl.txt
new file mode 100644
index 000000000000..7e66b7f7ba96
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/zte,sysctrl.txt
@@ -0,0 +1,30 @@
+ZTE sysctrl Registers
+
+Registers for 'zte,zx296702' SoC:
+
+System management required properties:
+ - compatible = "zte,sysctrl"
+
+Low power management required properties:
+ - compatible = "zte,zx296702-pcu"
+
+Bus matrix required properties:
+ - compatible = "zte,zx-bus-matrix"
+
+
+Registers for 'zte,zx296718' SoC:
+
+System management required properties:
+ - compatible = "zte,zx296718-aon-sysctrl"
+ - compatible = "zte,zx296718-sysctrl"
+
+Example:
+aon_sysctrl: aon-sysctrl@116000 {
+ compatible = "zte,zx296718-aon-sysctrl", "syscon";
+ reg = <0x116000 0x1000>;
+};
+
+sysctrl: sysctrl@1463000 {
+ compatible = "zte,zx296718-sysctrl", "syscon";
+ reg = <0x1463000 0x1000>;
+};
diff --git a/Documentation/devicetree/bindings/arm/zte.txt b/Documentation/devicetree/bindings/arm/zte.txt
index 83369785d29c..340612794a37 100644
--- a/Documentation/devicetree/bindings/arm/zte.txt
+++ b/Documentation/devicetree/bindings/arm/zte.txt
@@ -1,20 +1,10 @@
ZTE platforms device tree bindings
----------------------------------------
+---------------------------------------
- ZX296702 board:
Required root node properties:
- compatible = "zte,zx296702-ad1", "zte,zx296702"
-System management required properties:
- - compatible = "zte,sysctrl"
-
-Low power management required properties:
- - compatible = "zte,zx296702-pcu"
-
-Bus matrix required properties:
- - compatible = "zte,zx-bus-matrix"
-
-
---------------------------------------
- ZX296718 SoC:
Required root node properties:
@@ -22,18 +12,3 @@ Bus matrix required properties:
ZX296718 EVB board:
- "zte,zx296718-evb"
-
-System management required properties:
- - compatible = "zte,zx296718-aon-sysctrl"
- - compatible = "zte,zx296718-sysctrl"
-
-Example:
-aon_sysctrl: aon-sysctrl@116000 {
- compatible = "zte,zx296718-aon-sysctrl", "syscon";
- reg = <0x116000 0x1000>;
-};
-
-sysctrl: sysctrl@1463000 {
- compatible = "zte,zx296718-sysctrl", "syscon";
- reg = <0x1463000 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/connector/usb-connector.txt b/Documentation/devicetree/bindings/connector/usb-connector.txt
index 8855bfcfd778..d90e17e2428b 100644
--- a/Documentation/devicetree/bindings/connector/usb-connector.txt
+++ b/Documentation/devicetree/bindings/connector/usb-connector.txt
@@ -29,15 +29,15 @@ Required properties for usb-c-connector with power delivery support:
in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.2
Source_Capabilities Message, the order of each entry(PDO) should follow
the PD spec chapter 6.4.1. Required for power source and power dual role.
- User can specify the source PDO array via PDO_FIXED/BATT/VAR() defined in
- dt-bindings/usb/pd.h.
+ User can specify the source PDO array via PDO_FIXED/BATT/VAR/PPS_APDO()
+ defined in dt-bindings/usb/pd.h.
- sink-pdos: An array of u32 with each entry providing supported power
sink data object(PDO), the detailed bit definitions of PDO can be found
in "Universal Serial Bus Power Delivery Specification" chapter 6.4.1.3
Sink Capabilities Message, the order of each entry(PDO) should follow
the PD spec chapter 6.4.1. Required for power sink and power dual role.
- User can specify the sink PDO array via PDO_FIXED/BATT/VAR() defined in
- dt-bindings/usb/pd.h.
+ User can specify the sink PDO array via PDO_FIXED/BATT/VAR/PPS_APDO() defined
+ in dt-bindings/usb/pd.h.
- op-sink-microwatt: Sink required operating power in microwatt, if source
can't offer the power, Capability Mismatch is set. Required for power
sink and power dual role.
diff --git a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
index 78d2db9d4de5..d28fd1af01b4 100644
--- a/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
+++ b/Documentation/devicetree/bindings/crypto/hisilicon,hip07-sec.txt
@@ -24,7 +24,7 @@ Optional properties:
Example:
-p1_sec_a: crypto@400,d2000000 {
+p1_sec_a: crypto@400d2000000 {
compatible = "hisilicon,hip07-sec";
reg = <0x400 0xd0000000 0x0 0x10000
0x400 0xd2000000 0x0 0x10000
diff --git a/Documentation/devicetree/bindings/dma/jz4780-dma.txt b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
index 03e9cf7b42e0..636fcb26b164 100644
--- a/Documentation/devicetree/bindings/dma/jz4780-dma.txt
+++ b/Documentation/devicetree/bindings/dma/jz4780-dma.txt
@@ -2,8 +2,13 @@
Required properties:
-- compatible: Should be "ingenic,jz4780-dma"
-- reg: Should contain the DMA controller registers location and length.
+- compatible: Should be one of:
+ * ingenic,jz4740-dma
+ * ingenic,jz4725b-dma
+ * ingenic,jz4770-dma
+ * ingenic,jz4780-dma
+- reg: Should contain the DMA channel registers location and length, followed
+ by the DMA controller registers location and length.
- interrupts: Should contain the interrupt specifier of the DMA controller.
- clocks: Should contain a clock specifier for the JZ4780 PDMA clock.
- #dma-cells: Must be <2>. Number of integer cells in the dmas property of
@@ -19,9 +24,10 @@ Optional properties:
Example:
-dma: dma@13420000 {
+dma: dma-controller@13420000 {
compatible = "ingenic,jz4780-dma";
- reg = <0x13420000 0x10000>;
+ reg = <0x13420000 0x400
+ 0x13421000 0x40>;
interrupt-parent = <&intc>;
interrupts = <10>;
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
index 946229c48657..a5a7c3f5a1e3 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt
@@ -17,6 +17,7 @@ Required Properties:
- compatible: "renesas,dmac-<soctype>", "renesas,rcar-dmac" as fallback.
Examples with soctypes are:
- "renesas,dmac-r8a7743" (RZ/G1M)
+ - "renesas,dmac-r8a7744" (RZ/G1N)
- "renesas,dmac-r8a7745" (RZ/G1E)
- "renesas,dmac-r8a77470" (RZ/G1C)
- "renesas,dmac-r8a7790" (R-Car H2)
diff --git a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
index 482e54362d3e..1743017bd948 100644
--- a/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
+++ b/Documentation/devicetree/bindings/dma/renesas,usb-dmac.txt
@@ -4,6 +4,7 @@ Required Properties:
-compatible: "renesas,<soctype>-usb-dmac", "renesas,usb-dmac" as fallback.
Examples with soctypes are:
- "renesas,r8a7743-usb-dmac" (RZ/G1M)
+ - "renesas,r8a7744-usb-dmac" (RZ/G1N)
- "renesas,r8a7745-usb-dmac" (RZ/G1E)
- "renesas,r8a7790-usb-dmac" (R-Car H2)
- "renesas,r8a7791-usb-dmac" (R-Car M2-W)
diff --git a/Documentation/devicetree/bindings/fpga/fpga-region.txt b/Documentation/devicetree/bindings/fpga/fpga-region.txt
index 6db8aeda461a..90c44694a30b 100644
--- a/Documentation/devicetree/bindings/fpga/fpga-region.txt
+++ b/Documentation/devicetree/bindings/fpga/fpga-region.txt
@@ -415,7 +415,7 @@ DT Overlay contains:
firmware-name = "base.rbf";
fpga-bridge@4400 {
- compatible = "altr,freeze-bridge";
+ compatible = "altr,freeze-bridge-controller";
reg = <0x4400 0x10>;
fpga_region1: fpga-region1 {
@@ -427,7 +427,7 @@ DT Overlay contains:
};
fpga-bridge@4420 {
- compatible = "altr,freeze-bridge";
+ compatible = "altr,freeze-bridge-controller";
reg = <0x4420 0x10>;
fpga_region2: fpga-region2 {
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index 11263982470e..44efafdfd7f5 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -84,7 +84,7 @@ Binding may contain optional "interrupts" property, describing interrupts
used by the device. I2C core will assign "irq" interrupt (or the very first
interrupt if not using interrupt names) as primary interrupt for the slave.
-Alternatively, devices supporting SMbus Host Notify, and connected to
+Alternatively, devices supporting SMBus Host Notify, and connected to
adapters that support this feature, may use "host-notify" property. I2C
core will create a virtual interrupt for Host Notify and assign it as
primary interrupt for the slave.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
index aa8bf2ec8905..1c94a57a661e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,icu.txt
@@ -5,6 +5,8 @@ The Marvell ICU (Interrupt Consolidation Unit) controller is
responsible for collecting all wired-interrupt sources in the CP and
communicating them to the GIC in the AP. The unit translates interrupt
requests on input wires into MSG memory mapped transactions to the GIC.
+These messages will access a different GIC memory area depending on
+their type (NSR, SR, SEI, REI, etc).
Required properties:
@@ -12,20 +14,23 @@ Required properties:
- reg: Should contain ICU registers location and length.
-- #interrupt-cells: Specifies the number of cells needed to encode an
- interrupt source. The value shall be 3.
+Subnodes: Each group of interrupts is declared as a subnode of the ICU,
+with its own compatible.
+
+Required properties for the icu_nsr/icu_sei subnodes:
- The 1st cell is the group type of the ICU interrupt. Possible group
- types are:
+- compatible: Should be one of:
+ * "marvell,cp110-icu-nsr"
+ * "marvell,cp110-icu-sr"
+ * "marvell,cp110-icu-sei"
+ * "marvell,cp110-icu-rei"
- ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
- ICU_GRP_SR (0x1) : Shared peripheral interrupt, secure
- ICU_GRP_SEI (0x4) : System error interrupt
- ICU_GRP_REI (0x5) : RAM error interrupt
+- #interrupt-cells: Specifies the number of cells needed to encode an
+ interrupt source. The value shall be 2.
- The 2nd cell is the index of the interrupt in the ICU unit.
+ The 1st cell is the index of the interrupt in the ICU unit.
- The 3rd cell is the type of the interrupt. See arm,gic.txt for
+ The 2nd cell is the type of the interrupt. See arm,gic.txt for
details.
- interrupt-controller: Identifies the node as an interrupt
@@ -35,17 +40,73 @@ Required properties:
that allows interrupts to be triggered using MSG memory mapped
transactions.
+Note: each 'interrupts' property referring to any 'icu_xxx' node shall
+ have a different number within [0:206].
+
Example:
icu: interrupt-controller@1e0000 {
compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x10>;
+ reg = <0x1e0000 0x440>;
+
+ CP110_LABEL(icu_nsr): interrupt-controller@10 {
+ compatible = "marvell,cp110-icu-nsr";
+ reg = <0x10 0x20>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&gicp>;
+ };
+
+ CP110_LABEL(icu_sei): interrupt-controller@50 {
+ compatible = "marvell,cp110-icu-sei";
+ reg = <0x50 0x10>;
+ #interrupt-cells = <2>;
+ interrupt-controller;
+ msi-parent = <&sei>;
+ };
+};
+
+node1 {
+ interrupt-parent = <&icu_nsr>;
+ interrupts = <106 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+node2 {
+ interrupt-parent = <&icu_sei>;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+/* Would not work with the above nodes */
+node3 {
+ interrupt-parent = <&icu_nsr>;
+ interrupts = <107 IRQ_TYPE_LEVEL_HIGH>;
+};
+
+The legacy bindings were different in this way:
+
+- #interrupt-cells: The value was 3.
+ The 1st cell was the group type of the ICU interrupt. Possible
+ group types were:
+ ICU_GRP_NSR (0x0) : Shared peripheral interrupt, non-secure
+ ICU_GRP_SR (0x1) : Shared peripheral interrupt, secure
+ ICU_GRP_SEI (0x4) : System error interrupt
+ ICU_GRP_REI (0x5) : RAM error interrupt
+ The 2nd cell was the index of the interrupt in the ICU unit.
+ The 3rd cell was the type of the interrupt. See arm,gic.txt for
+ details.
+
+Example:
+
+icu: interrupt-controller@1e0000 {
+ compatible = "marvell,cp110-icu";
+ reg = <0x1e0000 0x440>;
+
#interrupt-cells = <3>;
interrupt-controller;
msi-parent = <&gicp>;
};
-usb3h0: usb3@500000 {
+node1 {
interrupt-parent = <&icu>;
interrupts = <ICU_GRP_NSR 106 IRQ_TYPE_LEVEL_HIGH>;
};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
new file mode 100644
index 000000000000..0beafed502f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,sei.txt
@@ -0,0 +1,36 @@
+Marvell SEI (System Error Interrupt) Controller
+-----------------------------------------------
+
+Marvell SEI (System Error Interrupt) controller is an interrupt
+aggregator. It receives interrupts from several sources and aggregates
+them to a single interrupt line (an SPI) on the parent interrupt
+controller.
+
+This interrupt controller can handle up to 64 SEIs: one set comes from the
+AP and is wired, while a second set comes from the CPs by means of
+MSIs.
+
+Required properties:
+
+- compatible: should be one of:
+ * "marvell,ap806-sei"
+- reg: SEI registers location and length.
+- interrupts: identifies the parent IRQ that will be triggered.
+- #interrupt-cells: number of cells to define an SEI wired interrupt
+ coming from the AP, should be 1. The cell is the IRQ
+ number.
+- interrupt-controller: identifies the node as an interrupt controller
+ for AP interrupts.
+- msi-controller: identifies the node as an MSI controller for the CPs
+ interrupts.
+
+Example:
+
+ sei: interrupt-controller@3f0200 {
+ compatible = "marvell,ap806-sei";
+ reg = <0x3f0200 0x40>;
+ interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ msi-controller;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
index a046ed374d80..8de96a4fb2d5 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.txt
@@ -2,10 +2,12 @@ DT bindings for the R-Mobile/R-Car/RZ/G interrupt controller
Required properties:
-- compatible: has to be "renesas,irqc-<soctype>", "renesas,irqc" as fallback.
+- compatible: must be "renesas,irqc-<soctype>" or "renesas,intc-ex-<soctype>",
+ and "renesas,irqc" as fallback.
Examples with soctypes are:
- "renesas,irqc-r8a73a4" (R-Mobile APE6)
- "renesas,irqc-r8a7743" (RZ/G1M)
+ - "renesas,irqc-r8a7744" (RZ/G1N)
- "renesas,irqc-r8a7745" (RZ/G1E)
- "renesas,irqc-r8a77470" (RZ/G1C)
- "renesas,irqc-r8a7790" (R-Car H2)
@@ -19,6 +21,7 @@ Required properties:
- "renesas,intc-ex-r8a77965" (R-Car M3-N)
- "renesas,intc-ex-r8a77970" (R-Car V3M)
- "renesas,intc-ex-r8a77980" (R-Car V3H)
+ - "renesas,intc-ex-r8a77990" (R-Car E3)
- "renesas,intc-ex-r8a77995" (R-Car D3)
- #interrupt-cells: has to be <2>: an interrupt index and flags, as defined in
interrupts.txt in this directory
diff --git a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
index c6e2d855fe13..377ee639d103 100644
--- a/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
+++ b/Documentation/devicetree/bindings/iommu/renesas,ipmmu-vmsa.txt
@@ -12,6 +12,7 @@ Required Properties:
- "renesas,ipmmu-r8a73a4" for the R8A73A4 (R-Mobile APE6) IPMMU.
- "renesas,ipmmu-r8a7743" for the R8A7743 (RZ/G1M) IPMMU.
+ - "renesas,ipmmu-r8a7744" for the R8A7744 (RZ/G1N) IPMMU.
- "renesas,ipmmu-r8a7745" for the R8A7745 (RZ/G1E) IPMMU.
- "renesas,ipmmu-r8a7790" for the R8A7790 (R-Car H2) IPMMU.
- "renesas,ipmmu-r8a7791" for the R8A7791 (R-Car M2-W) IPMMU.
diff --git a/Documentation/devicetree/bindings/mfd/arizona.txt b/Documentation/devicetree/bindings/mfd/arizona.txt
index 9b62831fdf3e..148ef621a5e5 100644
--- a/Documentation/devicetree/bindings/mfd/arizona.txt
+++ b/Documentation/devicetree/bindings/mfd/arizona.txt
@@ -76,7 +76,7 @@ Deprecated properties:
Also see child specific device properties:
Regulator - ../regulator/arizona-regulator.txt
Extcon - ../extcon/extcon-arizona.txt
- Sound - ../sound/arizona.txt
+ Sound - ../sound/wlf,arizona.txt
Example:
diff --git a/Documentation/devicetree/bindings/serial/atmel-usart.txt b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
index 7c0d6b2f53e4..7f0cd72f47d2 100644
--- a/Documentation/devicetree/bindings/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/mfd/atmel-usart.txt
@@ -1,6 +1,6 @@
* Atmel Universal Synchronous Asynchronous Receiver/Transmitter (USART)
-Required properties:
+Required properties for USART:
- compatible: Should be "atmel,<chip>-usart" or "atmel,<chip>-dbgu"
The compatible <chip> indicated will be the first SoC to support an
additional mode or a new USART feature.
@@ -11,7 +11,13 @@ Required properties:
Required elements: "usart"
- clocks: phandles to input clocks.
-Optional properties:
+Required properties for USART in SPI mode:
+- #size-cells : Must be <0>
+- #address-cells : Must be <1>
+- cs-gpios: chipselects (internal cs not supported)
+- atmel,usart-mode : Must be <AT91_USART_MODE_SPI> (found in dt-bindings/mfd/at91-usart.h)
+
+Optional properties in serial mode:
- atmel,use-dma-rx: use of PDC or DMA for receiving data
- atmel,use-dma-tx: use of PDC or DMA for transmitting data
- {rts,cts,dtr,dsr,rng,dcd}-gpios: specify a GPIO for RTS/CTS/DTR/DSR/RI/DCD line respectively.
@@ -62,3 +68,18 @@ Example:
dma-names = "tx", "rx";
atmel,fifo-size = <32>;
};
+
+- SPI mode:
+ #include <dt-bindings/mfd/at91-usart.h>
+
+ spi0: spi@f001c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "atmel,at91rm9200-usart", "atmel,at91sam9260-usart";
+ atmel,usart-mode = <AT91_USART_MODE_SPI>;
+ reg = <0xf001c000 0x100>;
+ interrupts = <12 IRQ_TYPE_LEVEL_HIGH 5>;
+ clocks = <&usart0_clk>;
+ clock-names = "usart";
+ cs-gpios = <&pioB 3 0>;
+ };
diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
index 6611a7c2053a..01fdc33a41d0 100644
--- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
@@ -9,6 +9,25 @@ blocks that can be used to create functional hardware objects/devices
such as network interfaces, crypto accelerator instances, L2 switches,
etc.
+For an overview of the DPAA2 architecture and fsl-mc bus see:
+Documentation/networking/dpaa2/overview.rst
+
+As described in the above overview, all DPAA2 objects in a DPRC share the
+same hardware "isolation context" and a 10-bit value called an ICID
+(isolation context id) is expressed by the hardware to identify
+the requester.
+
+The generic 'iommus' property is insufficient to describe the relationship
+between ICIDs and IOMMUs, so an iommu-map property is used to define
+the set of possible ICIDs under a root DPRC and how they map to
+an IOMMU.
+
+For generic IOMMU bindings, see
+Documentation/devicetree/bindings/iommu/iommu.txt.
+
+For arm-smmu binding, see:
+Documentation/devicetree/bindings/iommu/arm,smmu.txt.
+
Required properties:
- compatible
@@ -88,14 +107,34 @@ Sub-nodes:
Value type: <phandle>
Definition: Specifies the phandle to the PHY device node associated
with this dpmac.
+Optional properties:
+
+- iommu-map: Maps an ICID to an IOMMU and associated iommu-specifier
+ data.
+
+ The property is an arbitrary number of tuples of
+ (icid-base,iommu,iommu-base,length).
+
+ Any ICID i in the interval [icid-base, icid-base + length) is
+ associated with the listed IOMMU, with the iommu-specifier
+ (i - icid-base + iommu-base).
Example:
+ smmu: iommu@5000000 {
+ compatible = "arm,mmu-500";
+ #iommu-cells = <1>;
+ stream-match-mask = <0x7C00>;
+ ...
+ };
+
fsl_mc: fsl-mc@80c000000 {
compatible = "fsl,qoriq-mc";
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
msi-parent = <&its>;
+	/* define map for ICIDs 23-63 */
+ iommu-map = <23 &smmu 23 41>;
#address-cells = <3>;
#size-cells = <1>;
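
To make the mapping arithmetic concrete (an illustrative reading, not text
from the patch): with iommu-map = <23 &smmu 23 41>, the covered interval is
[23, 23 + 41) = [23, 64), i.e. ICIDs 23 through 63, and ICID i maps to
iommu-specifier (i - 23 + 23) = i, so e.g. ICID 30 is presented to the SMMU
as stream ID 30.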
diff --git a/Documentation/devicetree/bindings/misc/lwn-bk4.txt b/Documentation/devicetree/bindings/misc/lwn-bk4.txt
new file mode 100644
index 000000000000..d6a8c188c087
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/lwn-bk4.txt
@@ -0,0 +1,26 @@
+* Liebherr's BK4 controller external SPI
+
+A device which handles data acquisition from compatible industrial
+peripherals.
+SPI is used for data and management purposes in both master and
+slave modes.
+
+Required properties:
+
+- compatible : Should be "lwn,bk4"
+
+Required SPI properties:
+
+- reg : Should be address of the device chip select within
+ the controller.
+
+- spi-max-frequency : Maximum SPI clocking speed of the device in Hz; should be
+		       30 MHz at most for Liebherr's BK4 external bus.
+
+Example:
+
+spidev0: spi@0 {
+ compatible = "lwn,bk4";
+ spi-max-frequency = <30000000>;
+ reg = <0>;
+};
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 94a7f33ac5e9..cc4372842bf3 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -3,6 +3,7 @@ Renesas R-Car CAN controller Device Tree Bindings
Required properties:
- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
+ "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
"renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
"renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
"renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
index cb33421184a0..f37494d5a7be 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.txt
@@ -50,6 +50,7 @@ Additional required properties for imx7d-pcie:
- reset-names: Must contain the following entries:
- "pciephy"
- "apps"
+ - "turnoff"
Example:
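
As a rough sketch of how the new entry combines with the existing ones (the
src phandle and reset indices are assumptions based on the imx7 reset
bindings, not taken from this patch), an imx7d PCIe node might wire the
three resets like:

	pcie: pcie@33800000 {
		compatible = "fsl,imx7d-pcie", "snps,dw-pcie";
		/* ... other required properties elided ... */
		resets = <&src IMX7_RESET_PCIEPHY>,
			 <&src IMX7_RESET_PCIE_CTRL_APPS_EN>,
			 <&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>;
		reset-names = "pciephy", "apps", "turnoff";
	};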
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
index 4dd17de549a7..2030ee0dc4f9 100644
--- a/Documentation/devicetree/bindings/pci/pci-keystone.txt
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -19,6 +19,9 @@ pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
interrupt-cells: should be set to 1
interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+ti,syscon-pcie-id : phandle to the device control module required to set device
+ id and vendor id.
+
Example:
pcie_msi_intc: msi-interrupt-controller {
interrupt-controller;
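
The truncated example above covers only the MSI interrupt controller; a
minimal sketch of the new property in use (the &devctrl syscon label is
assumed for illustration) would be:

	pcie0: pcie@21800000 {
		compatible = "ti,keystone-pcie";
		/* ... */
		ti,syscon-pcie-id = <&devctrl>;
	};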
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 9fe7e12a7bf3..b94078f58d8e 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -7,6 +7,7 @@ OHCI and EHCI controllers.
Required properties:
- compatible: "renesas,pci-r8a7743" for the R8A7743 SoC;
+ "renesas,pci-r8a7744" for the R8A7744 SoC;
"renesas,pci-r8a7745" for the R8A7745 SoC;
"renesas,pci-r8a7790" for the R8A7790 SoC;
"renesas,pci-r8a7791" for the R8A7791 SoC;
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index a5f7fc62d10e..976ef7bfff93 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -2,6 +2,7 @@
Required properties:
compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
+ "renesas,pcie-r8a7744" for the R8A7744 SoC;
"renesas,pcie-r8a7779" for the R8A7779 SoC;
"renesas,pcie-r8a7790" for the R8A7790 SoC;
"renesas,pcie-r8a7791" for the R8A7791 SoC;
@@ -9,6 +10,7 @@ compatible: "renesas,pcie-r8a7743" for the R8A7743 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
"renesas,pcie-r8a7796" for the R8A7796 SoC;
"renesas,pcie-r8a77980" for the R8A77980 SoC;
+ "renesas,pcie-r8a77990" for the R8A77990 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 or
RZ/G1 compatible device.
"renesas,pcie-rcar-gen3" for a generic R-Car Gen3 compatible device.
diff --git a/Documentation/devicetree/bindings/pci/ti-pci.txt b/Documentation/devicetree/bindings/pci/ti-pci.txt
index 7f7af3044016..452fe48c4fdd 100644
--- a/Documentation/devicetree/bindings/pci/ti-pci.txt
+++ b/Documentation/devicetree/bindings/pci/ti-pci.txt
@@ -26,6 +26,11 @@ HOST MODE
ranges,
interrupt-map-mask,
interrupt-map : as specified in ../designware-pcie.txt
+ - ti,syscon-unaligned-access: phandle to the syscon DT node. The 1st argument
+				should contain the register offset within the
+				syscon and the 2nd argument the bit position
+				that must be set to enable unaligned access.
DEVICE MODE
===========
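
A sketch of the host-mode property described above (the syscon phandle,
register offset and bit number are invented for illustration):

	pcie@51000000 {
		compatible = "ti,dra7-pcie";
		/* ... */
		ti,syscon-unaligned-access = <&scm_conf 0x1c04 1>;
	};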
diff --git a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
index 0aced97d8092..b640845fec67 100644
--- a/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
+++ b/Documentation/devicetree/bindings/phy/brcm-sata-phy.txt
@@ -8,6 +8,7 @@ Required properties:
"brcm,iproc-nsp-sata-phy"
"brcm,phy-sata3"
"brcm,iproc-sr-sata-phy"
+ "brcm,bcm63138-sata-phy"
- address-cells: should be 1
- size-cells: should be 0
- reg: register ranges for the PHY PCB interface
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt b/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
new file mode 100644
index 000000000000..7f49fd54ebc1
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-dp.txt
@@ -0,0 +1,30 @@
+Cadence MHDP DisplayPort SD0801 PHY binding
+===========================================
+
+This binding describes the Cadence SD0801 PHY hardware included with
+the Cadence MHDP DisplayPort controller.
+
+-------------------------------------------------------------------------------
+Required properties (controller (parent) node):
+- compatible : Should be "cdns,dp-phy"
+- reg : Defines the following sets of registers in the parent
+ mhdp device:
+ - Offset of the DPTX PHY configuration registers
+ - Offset of the SD0801 PHY configuration registers
+- #phy-cells : from the generic PHY bindings, must be 0.
+
+Optional properties:
+- num_lanes : Number of DisplayPort lanes to use (1, 2 or 4)
+- max_bit_rate : Maximum DisplayPort link bit rate to use, in Mbps (2160,
+ 2430, 2700, 3240, 4320, 5400 or 8100)
+-------------------------------------------------------------------------------
+
+Example:
+ dp_phy: phy@f0fb030a00 {
+ compatible = "cdns,dp-phy";
+ reg = <0xf0 0xfb030a00 0x0 0x00000040>,
+ <0xf0 0xfb500000 0x0 0x00100000>;
+ num_lanes = <4>;
+ max_bit_rate = <8100>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt
new file mode 100644
index 000000000000..710cccd5ee56
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-hdmi.txt
@@ -0,0 +1,43 @@
+ROCKCHIP HDMI PHY WITH INNO IP BLOCK
+
+Required properties:
+ - compatible : should be one of the listed compatibles:
+ * "rockchip,rk3228-hdmi-phy",
+ * "rockchip,rk3328-hdmi-phy";
+ - reg : Address and length of the hdmi phy control register set
+ - clocks : phandle + clock specifier for the phy clocks
+ - clock-names : string, clock name, must contain "sysclk" for system
+ control and register configuration, "refoclk" for crystal-
+ oscillator reference PLL clock input and "refpclk" for pclk-
+ based reference PLL clock input.
+ - #clock-cells: should be 0.
+ - clock-output-names : shall be the name for the output clock.
+ - interrupts : phandle + interrupt specifier for the hdmiphy interrupt
+ - #phy-cells : must be 0. See ./phy-bindings.txt for details.
+
+Optional properties for rk3328-hdmi-phy:
+ - nvmem-cells : phandle + nvmem specifier for the cpu-version efuse
+ - nvmem-cell-names : "cpu-version" to read the chip version, required
+ for adjustment to some frequency settings
+
+Example:
+ hdmi_phy: hdmi-phy@12030000 {
+ compatible = "rockchip,rk3228-hdmi-phy";
+ reg = <0x12030000 0x10000>;
+ #phy-cells = <0>;
+ clocks = <&cru PCLK_HDMI_PHY>, <&xin24m>, <&cru DCLK_HDMIPHY>;
+ clock-names = "sysclk", "refoclk", "refpclk";
+ #clock-cells = <0>;
+ clock-output-names = "hdmi_phy";
+ status = "disabled";
+ };
+
+Then the PHY can be used in other nodes such as:
+
+ hdmi: hdmi@200a0000 {
+ compatible = "rockchip,rk3228-dw-hdmi";
+ ...
+ phys = <&hdmi_phy>;
+ phy-names = "hdmi";
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
index 0c7629e88bf3..adf20b2bdf71 100644
--- a/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
+++ b/Documentation/devicetree/bindings/phy/qcom-qmp-phy.txt
@@ -10,16 +10,20 @@ Required properties:
"qcom,msm8996-qmp-pcie-phy" for 14nm PCIe phy on msm8996,
"qcom,msm8996-qmp-usb3-phy" for 14nm USB3 phy on msm8996,
"qcom,sdm845-qmp-usb3-phy" for USB3 QMP V3 phy on sdm845,
- "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845.
+ "qcom,sdm845-qmp-usb3-uni-phy" for USB3 QMP V3 UNI phy on sdm845,
+ "qcom,sdm845-qmp-ufs-phy" for UFS QMP phy on sdm845.
- - reg:
- - For "qcom,sdm845-qmp-usb3-phy":
- - index 0: address and length of register set for PHY's common serdes
- block.
- - named register "dp_com" (using reg-names): address and length of the
- DP_COM control block.
- - For all others:
- - offset and length of register set for PHY's common serdes block.
+- reg:
+ - index 0: address and length of register set for PHY's common
+ serdes block.
+ - index 1: address and length of the DP_COM control block (for
+ "qcom,sdm845-qmp-usb3-phy" only).
+
+- reg-names:
+ - For "qcom,sdm845-qmp-usb3-phy":
+ - Should be: "reg-base", "dp_com"
+ - For all others:
+ - The reg-names property shouldn't be defined.
- #clock-cells: must be 1
- Phy pll outputs a bunch of clocks for Tx, Rx and Pipe
@@ -35,6 +39,7 @@ Required properties:
"aux" for phy aux clock,
"ref" for 19.2 MHz ref clk,
"com_aux" for phy common block aux clock,
+ "ref_aux" for phy reference aux clock,
For "qcom,msm8996-qmp-pcie-phy" must contain:
"aux", "cfg_ahb", "ref".
For "qcom,msm8996-qmp-usb3-phy" must contain:
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
index eeb9e1874ea6..4f0879a0ca12 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen2-phy.txt
@@ -5,6 +5,7 @@ This file provides information on what the device node for the R-Car generation
Required properties:
- compatible: "renesas,usb-phy-r8a7743" if the device is a part of R8A7743 SoC.
+ "renesas,usb-phy-r8a7744" if the device is a part of R8A7744 SoC.
"renesas,usb-phy-r8a7745" if the device is a part of R8A7745 SoC.
"renesas,usb-phy-r8a7790" if the device is a part of R8A7790 SoC.
"renesas,usb-phy-r8a7791" if the device is a part of R8A7791 SoC.
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
index fb4a204da2bf..de7b5393c163 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb2.txt
@@ -1,10 +1,12 @@
* Renesas R-Car generation 3 USB 2.0 PHY
This file provides information on what the device node for the R-Car generation
-3 USB 2.0 PHY contains.
+3 and RZ/G2 USB 2.0 PHY contain.
Required properties:
-- compatible: "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
+- compatible: "renesas,usb2-phy-r8a774a1" if the device is a part of an R8A774A1
+ SoC.
+ "renesas,usb2-phy-r8a7795" if the device is a part of an R8A7795
SoC.
"renesas,usb2-phy-r8a7796" if the device is a part of an R8A7796
SoC.
@@ -14,7 +16,8 @@ Required properties:
R8A77990 SoC.
"renesas,usb2-phy-r8a77995" if the device is a part of an
R8A77995 SoC.
- "renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 compatible device.
+ "renesas,rcar-gen3-usb2-phy" for a generic R-Car Gen3 or RZ/G2
+ compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
@@ -31,6 +34,8 @@ channel as USB OTG:
- interrupts: interrupt specifier for the PHY.
- vbus-supply: Phandle to a regulator that provides power to the VBUS. This
regulator will be managed during the PHY power on/off sequence.
+- renesas,no-otg-pins: boolean, specify when a board does not provide proper
+ otg pins.
Example (R-Car H3):
diff --git a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
index 47dd296ecead..9d9826609c2f 100644
--- a/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
+++ b/Documentation/devicetree/bindings/phy/rcar-gen3-phy-usb3.txt
@@ -1,20 +1,22 @@
* Renesas R-Car generation 3 USB 3.0 PHY
This file provides information on what the device node for the R-Car generation
-3 USB 3.0 PHY contains.
+3 and RZ/G2 USB 3.0 PHY contain.
If you want to enable spread spectrum clock (ssc), you should use USB_EXTAL
instead of USB3_CLK. However, if you don't want to these features, you don't
need this driver.
Required properties:
-- compatible: "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
+- compatible: "renesas,r8a774a1-usb3-phy" if the device is a part of an R8A774A1
+ SoC.
+ "renesas,r8a7795-usb3-phy" if the device is a part of an R8A7795
SoC.
"renesas,r8a7796-usb3-phy" if the device is a part of an R8A7796
SoC.
"renesas,r8a77965-usb3-phy" if the device is a part of an
R8A77965 SoC.
- "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 compatible
- device.
+ "renesas,rcar-gen3-usb3-phy" for a generic R-Car Gen3 or RZ/G2
+ compatible device.
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
new file mode 100644
index 000000000000..1889d3b89d68
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-pcie-phy.txt
@@ -0,0 +1,31 @@
+Socionext UniPhier PCIe PHY bindings
+
+This describes the devicetree bindings for the PHY interface built into
+the PCIe controller implemented on Socionext UniPhier SoCs.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-ld20-pcie-phy" - for LD20 PHY
+ "socionext,uniphier-pxs3-pcie-phy" - for PXs3 PHY
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Must be zero.
+- clocks: A phandle to the clock gate for PCIe glue layer including
+ this phy.
+- resets: A phandle to the reset line for PCIe glue layer including
+ this phy.
+
+Optional properties:
+- socionext,syscon: A phandle to system control to set configurations
+ for phy.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+ pcie_phy: phy@66038000 {
+ compatible = "socionext,uniphier-ld20-pcie-phy";
+ reg = <0x66038000 0x4000>;
+ #phy-cells = <0>;
+ clocks = <&sys_clk 24>;
+ resets = <&sys_rst 24>;
+ socionext,syscon = <&soc_glue>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
new file mode 100644
index 000000000000..b43b28250cc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb2-phy.txt
@@ -0,0 +1,45 @@
+Socionext UniPhier USB2 PHY
+
+This describes the devicetree bindings for the PHY interface built into
+the USB2 controller implemented on Socionext UniPhier SoCs.
+
+The Pro4 SoC has both USB2 and USB3 host controllers; however, its USB3
+controller doesn't include its own High-Speed PHY, so the USB2 PHY must be
+specified instead of a USB3 HS-PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb2-phy" - for Pro4 SoC
+ "socionext,uniphier-ld11-usb2-phy" - for LD11 SoC
+
+Sub-nodes:
+Each PHY should be represented as a sub-node.
+
+Sub-nodes required properties:
+- #phy-cells: Should be 0.
+- reg: The number of the PHY.
+
+Sub-nodes optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+ soc-glue@5f800000 {
+ ...
+ usb-phy {
+ compatible = "socionext,uniphier-ld11-usb2-phy";
+ usb_phy0: phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+ ...
+ };
+ };
+
+ usb@5a800100 {
+ compatible = "socionext,uniphier-ehci", "generic-ehci";
+ ...
+ phy-names = "usb";
+ phys = <&usb_phy0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
new file mode 100644
index 000000000000..e8d8086a7ae9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb3-hsphy.txt
@@ -0,0 +1,69 @@
+Socionext UniPhier USB3 High-Speed (HS) PHY
+
+This describes the devicetree bindings for the PHY interfaces built into
+the USB3 controller implemented on Socionext UniPhier SoCs.
+Although the controller includes both a High-Speed PHY and a Super-Speed
+PHY, this document describes the High-Speed PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb3-hsphy" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-hsphy" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-hsphy" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-hsphy" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Should be 0.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names: Should contain the following:
+ "gio", "link" - for Pro4 SoC
+ "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
+ "phy", "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names: Should contain the following:
+ "gio", "link" - for Pro4 SoC
+ "phy", "link" - for others
+
+Optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+- nvmem-cells: Phandles to the nvmem cells that contain the trimming data.
+	Available only for the HS-PHY implemented on LD20 and PXs3; if
+	unspecified, the default values are used.
+- nvmem-cell-names: Should be the following names, which correspond to
+	the nvmem-cells.
+	All 3 parameters associated with the following names are required
+	for each port; if any one is omitted, the trimming data of the
+	port will not be set at all.
+	"rterm", "sel_t", "hs_i" - cell names for the phy parameters
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulator {
+ ...
+ };
+
+ usb_hsphy0: hs-phy@200 {
+ compatible = "socionext,uniphier-ld20-usb3-hsphy";
+ reg = <0x200 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ nvmem-cell-names = "rterm", "sel_t", "hs_i";
+ nvmem-cells = <&usb_rterm0>, <&usb_sel_t0>,
+ <&usb_hs_i0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt b/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
new file mode 100644
index 000000000000..490b815445e8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/uniphier-usb3-ssphy.txt
@@ -0,0 +1,57 @@
+Socionext UniPhier USB3 Super-Speed (SS) PHY
+
+This describes the devicetree bindings for the PHY interfaces built into
+the USB3 controller implemented on Socionext UniPhier SoCs.
+Although the controller includes both a High-Speed PHY and a Super-Speed
+PHY, this document describes the Super-Speed PHY.
+
+Required properties:
+- compatible: Should contain one of the following:
+ "socionext,uniphier-pro4-usb3-ssphy" - for Pro4 SoC
+ "socionext,uniphier-pxs2-usb3-ssphy" - for PXs2 SoC
+ "socionext,uniphier-ld20-usb3-ssphy" - for LD20 SoC
+ "socionext,uniphier-pxs3-usb3-ssphy" - for PXs3 SoC
+- reg: Specifies offset and length of the register set for the device.
+- #phy-cells: Should be 0.
+- clocks: A list of phandles to the clock gate for USB3 glue layer.
+ According to the clock-names, appropriate clocks are required.
+- clock-names:
+ "gio", "link" - for Pro4 SoC
+ "phy", "phy-ext", "link" - for PXs3 SoC, "phy-ext" is optional.
+ "phy", "link" - for others
+- resets: A list of phandles to the reset control for USB3 glue layer.
+ According to the reset-names, appropriate resets are required.
+- reset-names:
+ "gio", "link" - for Pro4 SoC
+ "phy", "link" - for others
+
+Optional properties:
+- vbus-supply: A phandle to the regulator for USB VBUS.
+
+Refer to phy/phy-bindings.txt for the generic PHY binding properties.
+
+Example:
+
+ usb-glue@65b00000 {
+ compatible = "socionext,uniphier-ld20-dwc3-glue",
+ "simple-mfd";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0x65b00000 0x400>;
+
+ usb_vbus0: regulator {
+ ...
+ };
+
+ usb_ssphy0: ss-phy@300 {
+ compatible = "socionext,uniphier-ld20-usb3-ssphy";
+ reg = <0x300 0x10>;
+ #phy-cells = <0>;
+ clock-names = "link", "phy";
+ clocks = <&sys_clk 14>, <&sys_clk 16>;
+ reset-names = "link", "phy";
+ resets = <&sys_rst 14>, <&sys_rst 16>;
+ vbus-supply = <&usb_vbus0>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
index 651491bb63b7..5705f575862d 100644
--- a/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
+++ b/Documentation/devicetree/bindings/power/reset/qcom,pon.txt
@@ -6,7 +6,10 @@ and resin along with the Android reboot-mode.
This DT node has pwrkey and resin as sub nodes.
Required Properties:
--compatible: "qcom,pm8916-pon"
+-compatible: Must be one of:
+ "qcom,pm8916-pon"
+ "qcom,pms405-pon"
+
-reg: Specifies the physical address of the pon register
Optional subnode:
diff --git a/Documentation/devicetree/bindings/power/supply/bq25890.txt b/Documentation/devicetree/bindings/power/supply/bq25890.txt
index c9dd17d142ad..dc0568933359 100644
--- a/Documentation/devicetree/bindings/power/supply/bq25890.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq25890.txt
@@ -1,5 +1,8 @@
Binding for TI bq25890 Li-Ion Charger
+This driver supports the bq25890 and the bq25896. There are other ICs
+in the same family, but those have not been tested.
+
Required properties:
- compatible: Should contain one of the following:
* "ti,bq25890"
diff --git a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
index 37994fdb18ca..4fa8e08df2b6 100644
--- a/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
+++ b/Documentation/devicetree/bindings/power/supply/bq27xxx.txt
@@ -23,6 +23,7 @@ Required properties:
* "ti,bq27546" - BQ27546
* "ti,bq27742" - BQ27742
* "ti,bq27545" - BQ27545
+ * "ti,bq27411" - BQ27411
* "ti,bq27421" - BQ27421
* "ti,bq27425" - BQ27425
* "ti,bq27426" - BQ27426
diff --git a/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt b/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt
new file mode 100644
index 000000000000..5266fab16575
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/supply/sc2731_charger.txt
@@ -0,0 +1,40 @@
+Spreadtrum SC2731 PMIC battery charger binding
+
+Required properties:
+ - compatible: Should be "sprd,sc2731-charger".
+ - reg: Address offset of charger register.
+ - phys: Contains a phandle to the USB phy.
+
+Optional Properties:
+- monitored-battery: phandle of battery characteristics devicetree node.
+  The charger uses the following battery properties:
+  - charge-term-current-microamp: current for charge termination phase.
+  - constant-charge-voltage-max-microvolt: maximum constant input voltage.
+  See Documentation/devicetree/bindings/power/supply/battery.txt
+
+Example:
+
+ bat: battery {
+ compatible = "simple-battery";
+ charge-term-current-microamp = <120000>;
+ constant-charge-voltage-max-microvolt = <4350000>;
+ ......
+ };
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ charger@0 {
+ compatible = "sprd,sc2731-charger";
+ reg = <0x0>;
+ phys = <&ssphy>;
+ monitored-battery = <&bat>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
index 5e1afc3d8480..1ab1d109318e 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -5,7 +5,7 @@ Please also refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be "fsl,imx7-src", "syscon"
+- compatible: Should be "fsl,imx7d-src", "syscon"
- reg: should be register base and length as documented in the
datasheet
- interrupts: Should contain SRC interrupt
diff --git a/Documentation/devicetree/bindings/sound/adi,adau1977.txt b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
new file mode 100644
index 000000000000..e79aeef73f28
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/adi,adau1977.txt
@@ -0,0 +1,54 @@
+Analog Devices ADAU1977/ADAU1978/ADAU1979
+
+Datasheets:
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1977.pdf
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1978.pdf
+http://www.analog.com/media/en/technical-documentation/data-sheets/ADAU1979.pdf
+
+This driver supports both the I2C and SPI buses.
+
+Required properties:
+ - compatible: Should contain one of the following:
+ "adi,adau1977"
+ "adi,adau1978"
+ "adi,adau1979"
+
+ - AVDD-supply: analog power supply for the device, please consult
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+Optional properties:
+ - reset-gpio: the reset pin for the chip, for more details consult
+ Documentation/devicetree/bindings/gpio/gpio.txt
+
+ - DVDD-supply: supply voltage for the digital core, please consult
+ Documentation/devicetree/bindings/regulator/regulator.txt
+
+For required properties on SPI, please consult
+Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Required properties on I2C:
+
+ - reg: The i2c address. Value depends on the state of ADDR0
+ and ADDR1, as wired in hardware.
+
+Examples:
+
+ adau1977_spi: adau1977@0 {
+ compatible = "adi,adau1977";
+ spi-max-frequency = <600000>;
+
+ AVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator_digital>;
+
+	reset-gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ adau1977_i2c: adau1977@11 {
+ compatible = "adi,adau1977";
+ reg = <0x11>;
+
+ AVDD-supply = <&regulator>;
+ DVDD-supply = <&regulator_digital>;
+
+	reset-gpio = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
new file mode 100644
index 000000000000..5672d0bc5b16
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/amlogic,axg-pdm.txt
@@ -0,0 +1,24 @@
+* Amlogic Audio PDM input
+
+Required properties:
+- compatible: 'amlogic,axg-pdm'
+- reg: physical base address of the controller and length of memory
+ mapped region.
+- clocks: list of clock phandles, one for each entry in clock-names.
+- clock-names: should contain the following:
+ * "pclk" : peripheral clock.
+ * "dclk" : pdm digital clock
+ * "sysclk" : dsp system clock
+- #sound-dai-cells: must be 0.
+
+Example of PDM on the A113 SoC:
+
+pdm: audio-controller@ff632000 {
+ compatible = "amlogic,axg-pdm";
+ reg = <0x0 0xff632000 0x0 0x34>;
+ #sound-dai-cells = <0>;
+ clocks = <&clkc_audio AUD_CLKID_PDM>,
+ <&clkc_audio AUD_CLKID_PDM_DCLK>,
+ <&clkc_audio AUD_CLKID_PDM_SYSCLK>;
+ clock-names = "pclk", "dclk", "sysclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/cs42l51.txt b/Documentation/devicetree/bindings/sound/cs42l51.txt
new file mode 100644
index 000000000000..4b5de33ce377
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cs42l51.txt
@@ -0,0 +1,17 @@
+CS42L51 audio CODEC
+
+Optional properties:
+
+ - clocks : a list of phandles + clock-specifiers, one for each entry in
+ clock-names
+
+ - clock-names : must contain "MCLK"
+
+Example:
+
+cs42l51: cs42l51@4a {
+ compatible = "cirrus,cs42l51";
+ reg = <0x4a>;
+ clocks = <&mclk_prov>;
+ clock-names = "MCLK";
+};
diff --git a/Documentation/devicetree/bindings/sound/maxim,max98088.txt b/Documentation/devicetree/bindings/sound/maxim,max98088.txt
new file mode 100644
index 000000000000..da764d913319
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/maxim,max98088.txt
@@ -0,0 +1,23 @@
+MAX98088 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible: "maxim,max98088" or "maxim,max98089".
+- reg: The I2C address of the device.
+
+Optional properties:
+
+- clocks: the clock provider of MCLK, see ../clock/clock-bindings.txt section
+ "consumer" for more information.
+- clock-names: must be set to "mclk"
+
+Example:
+
+max98089: codec@10 {
+ compatible = "maxim,max98089";
+ reg = <0x10>;
+ clocks = <&clks IMX6QDL_CLK_CKO2>;
+ clock-names = "mclk";
+};
diff --git a/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt b/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt
new file mode 100644
index 000000000000..912f8fae11c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mikroe,mikroe-proto.txt
@@ -0,0 +1,23 @@
+Mikroe-PROTO audio board
+
+Required properties:
+ - compatible: "mikroe,mikroe-proto"
+ - dai-format: Must be "i2s".
+ - i2s-controller: The phandle of the I2S controller.
+ - audio-codec: The phandle of the WM8731 audio codec.
+Optional properties:
+ - model: The user-visible name of this sound complex.
+ - bitclock-master: Indicates dai-link bit clock master; for details see simple-card.txt (1).
+ - frame-master: Indicates dai-link frame master; for details see simple-card.txt (1).
+
+(1) : There must be the same master for both bit and frame clocks.
+
+Example:
+ sound {
+ compatible = "mikroe,mikroe-proto";
+ model = "wm8731 @ sama5d2_xplained";
+ i2s-controller = <&i2s0>;
+ audio-codec = <&wm8731>;
+ dai-format = "i2s";
+ };
+};
diff --git a/Documentation/devicetree/bindings/sound/nau8822.txt b/Documentation/devicetree/bindings/sound/nau8822.txt
new file mode 100644
index 000000000000..a471d162d4e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nau8822.txt
@@ -0,0 +1,16 @@
+NAU8822 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+ - compatible : "nuvoton,nau8822"
+
+ - reg : the I2C address of the device.
+
+Example:
+
+codec: nau8822@1a {
+ compatible = "nuvoton,nau8822";
+ reg = <0x1a>;
+};
diff --git a/Documentation/devicetree/bindings/sound/pcm3060.txt b/Documentation/devicetree/bindings/sound/pcm3060.txt
new file mode 100644
index 000000000000..90fcb8523099
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm3060.txt
@@ -0,0 +1,17 @@
+PCM3060 audio CODEC
+
+This driver supports both I2C and SPI.
+
+Required properties:
+
+- compatible: "ti,pcm3060"
+
+- reg : the I2C address of the device for I2C, the chip select
+ number for SPI.
+
+Examples:
+
+ pcm3060: pcm3060@46 {
+ compatible = "ti,pcm3060";
+ reg = <0x46>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
index a8179409c194..d74888b9f1bb 100644
--- a/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,q6afe.txt
@@ -49,7 +49,7 @@ configuration of each dai. Must contain the following properties.
Usage: required for mi2s interface
Value type: <prop-encoded-array>
Definition: Must be list of serial data lines used by this dai.
- should be one or more of the 1-4 sd lines.
+ Should be one or more of the 0-3 sd lines.
- qcom,tdm-sync-mode:
Usage: required for tdm interface
@@ -137,42 +137,42 @@ q6afe@4 {
prim-mi2s-rx@16 {
reg = <16>;
- qcom,sd-lines = <1 3>;
+ qcom,sd-lines = <0 2>;
};
prim-mi2s-tx@17 {
reg = <17>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
sec-mi2s-rx@18 {
reg = <18>;
- qcom,sd-lines = <1 4>;
+ qcom,sd-lines = <0 3>;
};
sec-mi2s-tx@19 {
reg = <19>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
tert-mi2s-rx@20 {
reg = <20>;
- qcom,sd-lines = <2 4>;
+ qcom,sd-lines = <1 3>;
};
tert-mi2s-tx@21 {
reg = <21>;
- qcom,sd-lines = <1>;
+ qcom,sd-lines = <0>;
};
quat-mi2s-rx@22 {
reg = <22>;
- qcom,sd-lines = <1>;
+ qcom,sd-lines = <0>;
};
quat-mi2s-tx@23 {
reg = <23>;
- qcom,sd-lines = <2>;
+ qcom,sd-lines = <1>;
};
};
};
diff --git a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
index 9e764270c36b..d92b705e7917 100644
--- a/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
+++ b/Documentation/devicetree/bindings/sound/renesas,rsnd.txt
@@ -340,10 +340,12 @@ Required properties:
- compatible : "renesas,rcar_sound-<soctype>", fallbacks
"renesas,rcar_sound-gen1" if generation1, and
"renesas,rcar_sound-gen2" if generation2 (or RZ/G1)
- "renesas,rcar_sound-gen3" if generation3
+ "renesas,rcar_sound-gen3" if generation3 (or RZ/G2)
Examples with soctypes are:
- "renesas,rcar_sound-r8a7743" (RZ/G1M)
+ - "renesas,rcar_sound-r8a7744" (RZ/G1N)
- "renesas,rcar_sound-r8a7745" (RZ/G1E)
+ - "renesas,rcar_sound-r8a774a1" (RZ/G2M)
- "renesas,rcar_sound-r8a7778" (R-Car M1A)
- "renesas,rcar_sound-r8a7779" (R-Car H1)
- "renesas,rcar_sound-r8a7790" (R-Car H2)
@@ -353,6 +355,7 @@ Required properties:
- "renesas,rcar_sound-r8a7795" (R-Car H3)
- "renesas,rcar_sound-r8a7796" (R-Car M3-W)
- "renesas,rcar_sound-r8a77965" (R-Car M3-N)
+ - "renesas,rcar_sound-r8a77990" (R-Car E3)
- reg : Should contain the register physical address.
required register is
SRU/ADG/SSI if generation1
diff --git a/Documentation/devicetree/bindings/sound/st,sta32x.txt b/Documentation/devicetree/bindings/sound/st,sta32x.txt
index 255de3ae5b2f..52265fb757c5 100644
--- a/Documentation/devicetree/bindings/sound/st,sta32x.txt
+++ b/Documentation/devicetree/bindings/sound/st,sta32x.txt
@@ -19,6 +19,10 @@ Required properties:
Optional properties:
+ - clocks, clock-names: Clock specifier for XTI input clock.
+ If specified, the clock will be enabled when the codec is probed,
+ and disabled when it is removed. The 'clock-names' must be set to 'xti'.
+
- st,output-conf: number, Selects the output configuration:
0: 2-channel (full-bridge) power, 2-channel data-out
1: 2 (half-bridge). 1 (full-bridge) on-board power
@@ -39,6 +43,9 @@ Optional properties:
- st,thermal-warning-recover:
If present, thermal warning recovery is enabled.
+ - st,fault-detect-recovery:
+ If present, fault detect recovery is enabled.
+
- st,thermal-warning-adjustment:
If present, thermal warning adjustment is enabled.
@@ -76,6 +83,8 @@ Example:
codec: sta32x@38 {
compatible = "st,sta32x";
reg = <0x1c>;
+ clocks = <&clock>;
+ clock-names = "xti";
reset-gpios = <&gpio1 19 0>;
power-down-gpios = <&gpio1 16 0>;
st,output-conf = /bits/ 8 <0x3>; // set output to 2-channel
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 3a3fc506e43a..3f4467ff0aa2 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -31,7 +31,11 @@ SAI subnodes required properties:
- reg: Base address and size of SAI sub-block register set.
- clocks: Must contain one phandle and clock specifier pair
for sai_ck which feeds the internal clock generator.
+	  If the SAI shares a master clock with another SAI set as MCLK
+	  clock provider, the provider SAI's phandle must also be specified here.
- clock-names: Must contain "sai_ck".
+	  Must also contain "MCLK" if the SAI shares a master clock
+	  with a SAI set as MCLK clock provider.
- dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt
- dma-names: identifier string for each DMA request line
"tx": if sai sub-block is configured as playback DAI
@@ -51,6 +55,9 @@ SAI subnodes Optional properties:
configured according to protocol defined in related DAI link node,
such as i2s, left justified, right justified, dsp and pdm protocols.
Note: ac97 protocol is not supported by SAI driver
+ - #clock-cells: should be 0. This property must be present if the SAI device
+ is a master clock provider, according to clocks bindings, described in
+ Documentation/devicetree/bindings/clock/clock-bindings.txt.
The device node should contain one 'port' child node with one child 'endpoint'
node, according to the bindings defined in Documentation/devicetree/bindings/
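
A sketch of the master-clock sharing described above (node labels, addresses
and clock indices are assumptions): sai1a exposes its master clock by
declaring #clock-cells, and sai2a lists that provider in its clocks under
the name "MCLK":

	sai1a: audio-controller@4400a004 {
		/* ... */
		clocks = <&rcc SAI1_K>;
		clock-names = "sai_ck";
		#clock-cells = <0>;	/* MCLK provider */
	};

	sai2a: audio-controller@4400b004 {
		/* ... */
		clocks = <&rcc SAI2_K>, <&sai1a>;
		clock-names = "sai_ck", "MCLK";
	};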
diff --git a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
index b9d50d6cdef3..61e71c1729e0 100644
--- a/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/sun4i-i2s.txt
@@ -10,6 +10,7 @@ Required properties:
- "allwinner,sun6i-a31-i2s"
- "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
+ - "allwinner,sun50i-a64-codec-i2s"
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: should contain the I2S interrupt.
@@ -26,6 +27,7 @@ Required properties for the following compatibles:
- "allwinner,sun6i-a31-i2s"
- "allwinner,sun8i-a83t-i2s"
- "allwinner,sun8i-h3-i2s"
+ - "allwinner,sun50i-a64-codec-i2s"
- resets: phandle to the reset line for this codec
Example:
diff --git a/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
new file mode 100644
index 000000000000..4f8ad0e04d20
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/sun50i-codec-analog.txt
@@ -0,0 +1,12 @@
+* Allwinner A64 Codec Analog Controls
+
+Required properties:
+- compatible: must be one of the following compatibles:
+ - "allwinner,sun50i-a64-codec-analog"
+- reg: must contain the registers location and length
+
+Example:
+ codec_analog: codec-analog@1f015c0 {
+ compatible = "allwinner,sun50i-a64-codec-analog";
+ reg = <0x01f015c0 0x4>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/ts3a227e.txt b/Documentation/devicetree/bindings/sound/ts3a227e.txt
index 3ed8359144d3..21ab45bc7e8f 100644
--- a/Documentation/devicetree/bindings/sound/ts3a227e.txt
+++ b/Documentation/devicetree/bindings/sound/ts3a227e.txt
@@ -14,7 +14,7 @@ Required properties:
Optional properies:
- ti,micbias: Intended MICBIAS voltage (datasheet section 9.6.7).
- Select 0/1/2/3/4/5/6/7 to specify MACBIAS voltage
+ Select 0/1/2/3/4/5/6/7 to specify MICBIAS voltage
2.1V/2.2V/2.3V/2.4V/2.5V/2.6V/2.7V/2.8V
Default value is "1" (2.2V).
diff --git a/Documentation/devicetree/bindings/sound/wm8782.txt b/Documentation/devicetree/bindings/sound/wm8782.txt
new file mode 100644
index 000000000000..256cdec6ec4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wm8782.txt
@@ -0,0 +1,17 @@
+WM8782 stereo ADC
+
+This device does not have any control interface or reset pins.
+
+Required properties:
+
+ - compatible : "wlf,wm8782"
+ - Vdda-supply : phandle to a regulator for the analog power supply (2.7V - 5.5V)
+ - Vdd-supply : phandle to a regulator for the digital power supply (2.7V - 3.6V)
+
+Example:
+
+wm8782: stereo-adc {
+ compatible = "wlf,wm8782";
+ Vdda-supply = <&vdda_supply>;
+ Vdd-supply = <&vdd_supply>;
+};
diff --git a/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt b/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt
index 290ec06fa33a..0273a92a2a84 100644
--- a/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt
+++ b/Documentation/devicetree/bindings/thermal/qcom-spmi-temp-alarm.txt
@@ -6,8 +6,7 @@ interrupt signal and status register to identify high PMIC die temperature.
Required properties:
- compatible: Should contain "qcom,spmi-temp-alarm".
-- reg: Specifies the SPMI address and length of the controller's
- registers.
+- reg: Specifies the SPMI address.
- interrupts: PMIC temperature alarm interrupt.
- #thermal-sensor-cells: Should be 0. See thermal.txt for a description.
@@ -20,7 +19,7 @@ Example:
pm8941_temp: thermal-alarm@2400 {
compatible = "qcom,spmi-temp-alarm";
- reg = <0x2400 0x100>;
+ reg = <0x2400>;
interrupts = <0 0x24 0 IRQ_TYPE_EDGE_RISING>;
#thermal-sensor-cells = <0>;
@@ -36,19 +35,14 @@ Example:
thermal-sensors = <&pm8941_temp>;
trips {
- passive {
- temperature = <1050000>;
+ stage1 {
+ temperature = <105000>;
hysteresis = <2000>;
type = "passive";
};
- alert {
+ stage2 {
temperature = <125000>;
hysteresis = <2000>;
- type = "hot";
- };
- crit {
- temperature = <145000>;
- hysteresis = <2000>;
type = "critical";
};
};
diff --git a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
index 20ca4ef9d776..04cbb90a5d3e 100644
--- a/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/qoriq-thermal.txt
@@ -1,9 +1,9 @@
* Thermal Monitoring Unit (TMU) on Freescale QorIQ SoCs
Required properties:
-- compatible : Must include "fsl,qoriq-tmu". The version of the device is
- determined by the TMU IP Block Revision Register (IPBRR0) at
- offset 0x0BF8.
+- compatible : Must include "fsl,qoriq-tmu" or "fsl,imx8mq-tmu". The
+ version of the device is determined by the TMU IP Block Revision
+ Register (IPBRR0) at offset 0x0BF8.
Table of correspondences between IPBRR0 values and example chips:
Value Device
---------- -----
diff --git a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
index cfa154bb0fa7..ad9a435afef4 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-gen3-thermal.txt
@@ -7,9 +7,11 @@ inside the LSI.
Required properties:
- compatible : "renesas,<soctype>-thermal",
Examples with soctypes are:
+ - "renesas,r8a774a1-thermal" (RZ/G2M)
- "renesas,r8a7795-thermal" (R-Car H3)
- "renesas,r8a7796-thermal" (R-Car M3-W)
- "renesas,r8a77965-thermal" (R-Car M3-N)
+ - "renesas,r8a77980-thermal" (R-Car V3H)
- reg : Address ranges of the thermal registers. Each sensor
needs one address range. Sorting must be done in
increasing order according to datasheet, i.e.
@@ -19,7 +21,8 @@ Required properties:
Optional properties:
-- interrupts : interrupts routed to the TSC (3 for H3, M3-W and M3-N)
+- interrupts : interrupts routed to the TSC (3 for H3, M3-W, M3-N,
+ and V3H)
- power-domain : Must contain a reference to the power domain. This
property is mandatory if the thermal sensor instance
is part of a controllable power domain.
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 67c563f1b4c4..73e1613d2cb0 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -4,15 +4,17 @@ Required properties:
- compatible : "renesas,thermal-<soctype>",
"renesas,rcar-gen2-thermal" (with thermal-zone) or
"renesas,rcar-thermal" (without thermal-zone) as
- fallback except R-Car D3.
+ fallback except R-Car V3M/D3.
Examples with soctypes are:
- "renesas,thermal-r8a73a4" (R-Mobile APE6)
- "renesas,thermal-r8a7743" (RZ/G1M)
+ - "renesas,thermal-r8a7744" (RZ/G1N)
- "renesas,thermal-r8a7779" (R-Car H1)
- "renesas,thermal-r8a7790" (R-Car H2)
- "renesas,thermal-r8a7791" (R-Car M2-W)
- "renesas,thermal-r8a7792" (R-Car V2H)
- "renesas,thermal-r8a7793" (R-Car M2-N)
+ - "renesas,thermal-r8a77970" (R-Car V3M)
- "renesas,thermal-r8a77995" (R-Car D3)
- reg : Address range of the thermal registers.
The 1st reg will be recognized as common register
@@ -21,7 +23,7 @@ Required properties:
Optional properties:
- interrupts : If present should contain 3 interrupts for
- R-Car D3 or 1 interrupt otherwise.
+ R-Car V3M/D3 or 1 interrupt otherwise.
Example (non interrupt support):
diff --git a/Documentation/devicetree/bindings/thermal/stm32-thermal.txt b/Documentation/devicetree/bindings/thermal/stm32-thermal.txt
new file mode 100644
index 000000000000..8c0d5a4d8031
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/stm32-thermal.txt
@@ -0,0 +1,61 @@
+Binding for Thermal Sensor for STMicroelectronics STM32 series of SoCs.
+
+On STM32 SoCs, the Digital Temperature Sensor (DTS) is in charge of managing an
+analog block which delivers a frequency depending on the internal SoC's
+temperature. By using a reference frequency, DTS is able to provide a sample
+number which can be translated into a temperature by the user.
+
+DTS provides interrupt notification mechanism by threshold. This mechanism
+offers two temperature trip points: passive and critical. The first is intended
+for passive cooling notification while the second is used for over-temperature
+reset.
+
+Required parameters:
+-------------------
+
+compatible: Should be "st,stm32-thermal"
+reg: This should be the physical base address and length of the
+ sensor's registers.
+clocks: Phandle of the clock used by the thermal sensor.
+ See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+clock-names: Should be "pclk" for register access clock and reference clock.
+ See: Documentation/devicetree/bindings/resource-names.txt
+#thermal-sensor-cells: Should be 0. See ./thermal.txt for a description.
+interrupts: Standard way to define interrupt number.
+
+Example:
+
+ thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu_alert1: cpu-alert1 {
+ temperature = <85000>;
+ hysteresis = <0>;
+ type = "passive";
+ };
+
+ cpu-crit: cpu-crit {
+ temperature = <120000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+
+ cooling-maps {
+ };
+ };
+ };
+
+ thermal: thermal@50028000 {
+ compatible = "st,stm32-thermal";
+ reg = <0x50028000 0x100>;
+ clocks = <&rcc TMPSENS>;
+ clock-names = "pclk";
+ #thermal-sensor-cells = <0>;
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt
index eb7ee91556a5..ca14ba959e0d 100644
--- a/Documentation/devicetree/bindings/thermal/thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/thermal.txt
@@ -152,7 +152,7 @@ Optional property:
  Elem size: one cell	the sensors listed in the thermal-sensors property.
  Elem type: signed	Coefficients default to 1, in case this property
			is not specified. A simple linear polynomial is used:
-			Z = c0 * x0 + c1 + x1 + ... + c(n-1) * x(n-1) + cn.
+			Z = c0 * x0 + c1 * x1 + ... + c(n-1) * x(n-1) + cn.
The coefficients are ordered and they match with sensors
by means of sensor ID. Additional coefficients are
diff --git a/Documentation/devicetree/bindings/timer/renesas,cmt.txt b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
index b40add2d9bb4..33992679a8bd 100644
--- a/Documentation/devicetree/bindings/timer/renesas,cmt.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,cmt.txt
@@ -24,6 +24,8 @@ Required Properties:
- "renesas,r8a73a4-cmt1" for the 48-bit CMT1 device included in r8a73a4.
- "renesas,r8a7743-cmt0" for the 32-bit CMT0 device included in r8a7743.
- "renesas,r8a7743-cmt1" for the 48-bit CMT1 device included in r8a7743.
+ - "renesas,r8a7744-cmt0" for the 32-bit CMT0 device included in r8a7744.
+ - "renesas,r8a7744-cmt1" for the 48-bit CMT1 device included in r8a7744.
- "renesas,r8a7745-cmt0" for the 32-bit CMT0 device included in r8a7745.
- "renesas,r8a7745-cmt1" for the 48-bit CMT1 device included in r8a7745.
- "renesas,r8a7790-cmt0" for the 32-bit CMT0 device included in r8a7790.
@@ -34,6 +36,10 @@ Required Properties:
- "renesas,r8a7793-cmt1" for the 48-bit CMT1 device included in r8a7793.
- "renesas,r8a7794-cmt0" for the 32-bit CMT0 device included in r8a7794.
- "renesas,r8a7794-cmt1" for the 48-bit CMT1 device included in r8a7794.
+ - "renesas,r8a77970-cmt0" for the 32-bit CMT0 device included in r8a77970.
+ - "renesas,r8a77970-cmt1" for the 48-bit CMT1 device included in r8a77970.
+ - "renesas,r8a77980-cmt0" for the 32-bit CMT0 device included in r8a77980.
+ - "renesas,r8a77980-cmt1" for the 48-bit CMT1 device included in r8a77980.
- "renesas,rcar-gen2-cmt0" for 32-bit CMT0 devices included in R-Car Gen2
and RZ/G1.
@@ -41,6 +47,9 @@ Required Properties:
and RZ/G1.
These are fallbacks for r8a73a4, R-Car Gen2 and RZ/G1 entries
listed above.
+ - "renesas,rcar-gen3-cmt0" for 32-bit CMT0 devices included in R-Car Gen3.
+ - "renesas,rcar-gen3-cmt1" for 48-bit CMT1 devices included in R-Car Gen3.
+ These are fallbacks for R-Car Gen3 entries listed above.
- reg: base address and length of the registers block for the timer module.
- interrupts: interrupt-specifier for the timer, one per channel.
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.txt b/Documentation/devicetree/bindings/timer/renesas,ostm.txt
index be3ae0fdf775..81a78f8bcf17 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.txt
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.txt
@@ -9,7 +9,8 @@ Channels are independent from each other.
Required Properties:
- compatible: must be one or more of the following:
- - "renesas,r7s72100-ostm" for the r7s72100 OSTM
+ - "renesas,r7s72100-ostm" for the R7S72100 (RZ/A1) OSTM
+ - "renesas,r7s9210-ostm" for the R7S9210 (RZ/A2) OSTM
- "renesas,ostm" for any OSTM
This is a fallback for the above renesas,*-ostm entries
diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt
index 763a2808a95c..69c934aec13b 100644
--- a/Documentation/devicetree/bindings/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/trivial-devices.txt
@@ -35,7 +35,6 @@ at,24c08 i2c serial eeprom (24cxx)
atmel,at97sc3204t i2c trusted platform module (TPM)
capella,cm32181 CM32181: Ambient Light Sensor
capella,cm3232 CM3232: Ambient Light Sensor
-cirrus,cs42l51 Cirrus Logic CS42L51 audio codec
dallas,ds1374 I2C, 32-Bit Binary Counter Watchdog RTC with Trickle Charger and Reset Input/Output
dallas,ds1631 High-Precision Digital Thermometer
dallas,ds1672 Dallas DS1672 Real-time Clock
diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
index 2e9318151df7..529e51879fb2 100644
--- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
+++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt
@@ -80,6 +80,8 @@ Optional properties:
controller. It's expected that a mux state of 0 indicates device mode and a
mux state of 1 indicates host mode.
- mux-control-names: Shall be "usb_switch" if mux-controls is specified.
+- pinctrl-names: Names for optional pin modes: "default", "host", "device"
+- pinctrl-n: alternate pin modes
i.mx specific properties
- fsl,usbmisc: phandle of non-core register device, with one
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 3e4c38b806ac..636630fb92d7 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -19,6 +19,7 @@ Exception for clocks:
"cavium,octeon-7130-usb-uctl"
"qcom,dwc3"
"samsung,exynos5250-dwusb3"
+ "samsung,exynos5433-dwusb3"
"samsung,exynos7-dwusb3"
"sprd,sc9860-dwc3"
"st,stih407-dwc3"
diff --git a/Documentation/devicetree/bindings/usb/ehci-mv.txt b/Documentation/devicetree/bindings/usb/ehci-mv.txt
new file mode 100644
index 000000000000..335589895763
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/ehci-mv.txt
@@ -0,0 +1,23 @@
+* Marvell PXA/MMP EHCI controller.
+
+Required properties:
+
+- compatible: must be "marvell,pxau2o-ehci"
+- reg: physical base address of the controller and length of the memory-mapped region
+- interrupts: one EHCI controller interrupt should be described here
+- clocks: phandle list of usb clocks
+- clock-names: should be "USBCLK"
+- phys: phandle for the PHY device
+- phy-names: should be "usb"
+
+Example:
+
+ ehci0: usb-ehci@d4208000 {
+ compatible = "marvell,pxau2o-ehci";
+ reg = <0xd4208000 0x200>;
+ interrupts = <44>;
+ clocks = <&soc_clocks MMP2_CLK_USB>;
+ clock-names = "USBCLK";
+ phys = <&usb_otg_phy>;
+ phy-names = "usb";
+ };
diff --git a/Documentation/devicetree/bindings/usb/exynos-usb.txt b/Documentation/devicetree/bindings/usb/exynos-usb.txt
index c97374315049..b7111f43fa59 100644
--- a/Documentation/devicetree/bindings/usb/exynos-usb.txt
+++ b/Documentation/devicetree/bindings/usb/exynos-usb.txt
@@ -83,6 +83,8 @@ Required properties:
- compatible: should be one of the following -
"samsung,exynos5250-dwusb3": for USB 3.0 DWC3 controller on
Exynos5250/5420.
+ "samsung,exynos5433-dwusb3": for USB 3.0 DWC3 controller on
+ Exynos5433.
"samsung,exynos7-dwusb3": for USB 3.0 DWC3 controller on Exynos7.
- #address-cells, #size-cells : should be '1' if the device has sub-nodes
with 'reg' property.
diff --git a/Documentation/devicetree/bindings/usb/faraday,fotg210.txt b/Documentation/devicetree/bindings/usb/faraday,fotg210.txt
new file mode 100644
index 000000000000..06a2286e2054
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/faraday,fotg210.txt
@@ -0,0 +1,35 @@
+Faraday FOTG Host controller
+
+This OTG-capable USB host controller is found in Cortina Systems
+Gemini and other SoC products.
+
+Required properties:
+- compatible: should be one of:
+ "faraday,fotg210"
+ "cortina,gemini-usb", "faraday,fotg210"
+- reg: should contain one register range i.e. start and length
+- interrupts: description of the interrupt line
+
+Optional properties:
+- clocks: should contain the IP block clock
+- clock-names: should be "PCLK" for the IP block clock
+
+Required properties for "cortina,gemini-usb" compatible:
+- syscon: a phandle to the system controller to access PHY registers
+
+Optional properties for "cortina,gemini-usb" compatible:
+- cortina,gemini-mini-b: boolean property that indicates that a Mini-B
+ OTG connector is in use
+- wakeup-source: see power/wakeup-source.txt
+
+Example for Gemini:
+
+usb@68000000 {
+ compatible = "cortina,gemini-usb", "faraday,fotg210";
+ reg = <0x68000000 0x1000>;
+ interrupts = <10 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cc 12>;
+ clock-names = "PCLK";
+ syscon = <&syscon>;
+ wakeup-source;
+};
diff --git a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
index 6087dc7f209e..a5d011d2efc8 100644
--- a/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
+++ b/Documentation/devicetree/bindings/usb/fcs,fusb302.txt
@@ -5,10 +5,19 @@ Required properties :
- reg : I2C slave address
- interrupts : Interrupt specifier
-Optional properties :
-- fcs,operating-sink-microwatt :
- Minimum amount of power accepted from a sink
- when negotiating
+Required sub-node:
+- connector : The "usb-c-connector" attached to the FUSB302 IC. The bindings
+ of the connector node are specified in:
+
+ Documentation/devicetree/bindings/connector/usb-connector.txt
+
+Deprecated properties :
+- fcs,max-sink-microvolt : Maximum sink voltage accepted by port controller
+- fcs,max-sink-microamp : Maximum sink current accepted by port controller
+- fcs,max-sink-microwatt : Maximum sink power accepted by port controller
+- fcs,operating-sink-microwatt : Minimum amount of power accepted from a sink
+ when negotiating
+
Example:
@@ -17,7 +26,16 @@ fusb302: typec-portc@54 {
reg = <0x54>;
interrupt-parent = <&nmi_intc>;
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
- fcs,max-sink-microvolt = <12000000>;
- fcs,max-sink-microamp = <3000000>;
- fcs,max-sink-microwatt = <36000000>;
+
+ usb_con: connector {
+ compatible = "usb-c-connector";
+ label = "USB-C";
+ power-role = "dual";
+ try-power-role = "sink";
+ source-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)>;
+ sink-pdos = <PDO_FIXED(5000, 3000, PDO_FIXED_USB_COMM)
+ PDO_VAR(3000, 12000, 3000)
+ PDO_PPS_APDO(3000, 11000, 3000)>;
+ op-sink-microwatt = <10000000>;
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/renesas_usb3.txt b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
index 2c071bb5801e..d366555166d0 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usb3.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usb3.txt
@@ -2,11 +2,13 @@ Renesas Electronics USB3.0 Peripheral driver
Required properties:
- compatible: Must contain one of the following:
+ - "renesas,r8a774a1-usb3-peri"
- "renesas,r8a7795-usb3-peri"
- "renesas,r8a7796-usb3-peri"
- "renesas,r8a77965-usb3-peri"
- - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 compatible
- device
+ - "renesas,r8a77990-usb3-peri"
+ - "renesas,rcar-gen3-usb3-peri" for a generic R-Car Gen3 or RZ/G2
+ compatible device
When compatible with the generic version, nodes must list the
SoC-specific version corresponding to the platform first
diff --git a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
index 43960faf5a88..90719f501852 100644
--- a/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
+++ b/Documentation/devicetree/bindings/usb/renesas_usbhs.txt
@@ -4,7 +4,9 @@ Required properties:
- compatible: Must contain one or more of the following:
- "renesas,usbhs-r8a7743" for r8a7743 (RZ/G1M) compatible device
+ - "renesas,usbhs-r8a7744" for r8a7744 (RZ/G1N) compatible device
- "renesas,usbhs-r8a7745" for r8a7745 (RZ/G1E) compatible device
+ - "renesas,usbhs-r8a774a1" for r8a774a1 (RZ/G2M) compatible device
- "renesas,usbhs-r8a7790" for r8a7790 (R-Car H2) compatible device
- "renesas,usbhs-r8a7791" for r8a7791 (R-Car M2-W) compatible device
- "renesas,usbhs-r8a7792" for r8a7792 (R-Car V2H) compatible device
@@ -13,10 +15,11 @@ Required properties:
- "renesas,usbhs-r8a7795" for r8a7795 (R-Car H3) compatible device
- "renesas,usbhs-r8a7796" for r8a7796 (R-Car M3-W) compatible device
- "renesas,usbhs-r8a77965" for r8a77965 (R-Car M3-N) compatible device
+ - "renesas,usbhs-r8a77990" for r8a77990 (R-Car E3) compatible device
- "renesas,usbhs-r8a77995" for r8a77995 (R-Car D3) compatible device
- "renesas,usbhs-r7s72100" for r7s72100 (RZ/A1) compatible device
- "renesas,rcar-gen2-usbhs" for R-Car Gen2 or RZ/G1 compatible devices
- - "renesas,rcar-gen3-usbhs" for R-Car Gen3 compatible device
+ - "renesas,rcar-gen3-usbhs" for R-Car Gen3 or RZ/G2 compatible devices
- "renesas,rza1-usbhs" for RZ/A1 compatible device
When compatible with the generic version, nodes must list the
@@ -25,7 +28,11 @@ Required properties:
- reg: Base address and length of the register for the USBHS
- interrupts: Interrupt specifier for the USBHS
- - clocks: A list of phandle + clock specifier pairs
+ - clocks: A list of phandle + clock specifier pairs.
+	  - In case of "renesas,rcar-gen3-usbhs", two clocks are required:
+	    the first clock should be the peripheral clock and the second
+	    the host clock.
+	  - Otherwise, one clock is required: the peripheral clock.
Optional properties:
- renesas,buswait: Integer to use BUSWAIT register
diff --git a/Documentation/devicetree/bindings/usb/usb-ehci.txt b/Documentation/devicetree/bindings/usb/usb-ehci.txt
index 0f1b75386207..406252d14c6b 100644
--- a/Documentation/devicetree/bindings/usb/usb-ehci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ehci.txt
@@ -15,7 +15,11 @@ Optional properties:
- needs-reset-on-resume : boolean, set this to force EHCI reset after resume
- has-transaction-translator : boolean, set this if EHCI have a Transaction
Translator built into the root hub.
- - clocks : a list of phandle + clock specifier pairs
+ - clocks : a list of phandle + clock specifier pairs. In case of Renesas
+   R-Car Gen3 SoCs:
+   - for a host-only channel: the first clock should be the host clock.
+   - for a USB DRD channel: the first clock should be the host clock and
+     the second the peripheral clock.
- phys : see usb-hcd.txt in the current directory
- resets : phandle + reset specifier pair
diff --git a/Documentation/devicetree/bindings/usb/usb-ohci.txt b/Documentation/devicetree/bindings/usb/usb-ohci.txt
index a8d2103d1f3d..aaaa5255c972 100644
--- a/Documentation/devicetree/bindings/usb/usb-ohci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-ohci.txt
@@ -12,7 +12,11 @@ Optional properties:
- no-big-frame-no : boolean, set if frame_no lives in bits [15:0] of HCCA
- remote-wakeup-connected: remote wakeup is wired on the platform
- num-ports : u32, to override the detected port count
-- clocks : a list of phandle + clock specifier pairs
+- clocks : a list of phandle + clock specifier pairs. In case of Renesas
+  R-Car Gen3 SoCs:
+  - for a host-only channel: the first clock should be the host clock.
+  - for a USB DRD channel: the first clock should be the host clock and
+    the second the peripheral clock.
- phys : see usb-hcd.txt in the current directory
- resets : a list of phandle + reset specifier pairs
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
index ac4cd0d6195a..fea8b1545751 100644
--- a/Documentation/devicetree/bindings/usb/usb-xhci.txt
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -8,6 +8,8 @@ Required properties:
- "marvell,armada-375-xhci" for Armada 375 SoCs
- "marvell,armada-380-xhci" for Armada 38x SoCs
- "renesas,xhci-r8a7743" for r8a7743 SoC
+ - "renesas,xhci-r8a7744" for r8a7744 SoC
+ - "renesas,xhci-r8a774a1" for r8a774a1 SoC
- "renesas,xhci-r8a7790" for r8a7790 SoC
- "renesas,xhci-r8a7791" for r8a7791 SoC
- "renesas,xhci-r8a7793" for r8a7793 SoC
@@ -17,7 +19,8 @@ Required properties:
- "renesas,xhci-r8a77990" for r8a77990 SoC
- "renesas,rcar-gen2-xhci" for a generic R-Car Gen2 or RZ/G1 compatible
device
- - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 compatible device
+ - "renesas,rcar-gen3-xhci" for a generic R-Car Gen3 or RZ/G2 compatible
+ device
- "xhci-platform" (deprecated)
When compatible with the generic version, nodes must list the
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2c3fc512e746..376f24484182 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -127,6 +127,7 @@ everspin Everspin Technologies, Inc.
exar Exar Corporation
excito Excito
ezchip EZchip Semiconductor
+facebook Facebook
fairphone Fairphone B.V.
faraday Faraday Technology Corporation
fastrax Fastrax Oy
@@ -235,6 +236,7 @@ micrel Micrel Inc.
microchip Microchip Technology Inc.
microcrystal Micro Crystal AG
micron Micron Technology Inc.
+mikroe MikroElektronika d.o.o.
minix MINIX Technology Ltd.
miramems MiraMEMS Sensing Technology Co., Ltd.
mitsubishi Mitsubishi Electric Corporation
@@ -274,6 +276,7 @@ nxp NXP Semiconductors
okaya Okaya Electric America, Inc.
oki Oki Electric Industry Co., Ltd.
olimex OLIMEX Ltd.
+olpc One Laptop Per Child
onion Onion Corporation
onnn ON Semiconductor Corp.
ontat On Tat Industrial Company
diff --git a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
index 9407212a85a8..d72d1181ec62 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/renesas-wdt.txt
@@ -6,6 +6,7 @@ Required properties:
version.
Examples with soctypes are:
- "renesas,r8a7743-wdt" (RZ/G1M)
+ - "renesas,r8a7744-wdt" (RZ/G1N)
- "renesas,r8a7745-wdt" (RZ/G1E)
- "renesas,r8a774a1-wdt" (RZ/G2M)
- "renesas,r8a7790-wdt" (R-Car H2)
diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
index 2c2aaca894bf..71c5a40da320 100644
--- a/Documentation/driver-api/fpga/fpga-bridge.rst
+++ b/Documentation/driver-api/fpga/fpga-bridge.rst
@@ -4,6 +4,12 @@ FPGA Bridge
API to implement a new FPGA bridge
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+* struct :c:type:`fpga_bridge` — The FPGA Bridge structure
+* struct :c:type:`fpga_bridge_ops` — Low level Bridge driver ops
+* :c:func:`devm_fpga_bridge_create()` — Allocate and init a bridge struct
+* :c:func:`fpga_bridge_register()` — Register a bridge
+* :c:func:`fpga_bridge_unregister()` — Unregister a bridge
+
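+A minimal probe for a new bridge driver might look like this (a sketch
+only; the ``my_`` names are made up for illustration)::
+
+	static int my_enable_set(struct fpga_bridge *bridge, bool enable)
+	{
+		/* gate or ungate the bus signals crossing this bridge */
+		return 0;
+	}
+
+	static const struct fpga_bridge_ops my_br_ops = {
+		.enable_set = my_enable_set,
+	};
+
+	static int my_br_probe(struct platform_device *pdev)
+	{
+		struct fpga_bridge *br;
+
+		br = devm_fpga_bridge_create(&pdev->dev, "My FPGA Bridge",
+					     &my_br_ops, NULL);
+		if (!br)
+			return -ENOMEM;
+
+		platform_set_drvdata(pdev, br);
+		return fpga_bridge_register(br);
+	}
+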
.. kernel-doc:: include/linux/fpga/fpga-bridge.h
:functions: fpga_bridge
@@ -11,39 +17,10 @@ API to implement a new FPGA bridge
:functions: fpga_bridge_ops
.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_create
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_free
+ :functions: devm_fpga_bridge_create
.. kernel-doc:: drivers/fpga/fpga-bridge.c
:functions: fpga_bridge_register
.. kernel-doc:: drivers/fpga/fpga-bridge.c
:functions: fpga_bridge_unregister
-
-API to control an FPGA bridge
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You probably won't need these directly. FPGA regions should handle this.
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: of_fpga_bridge_get
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_get
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_put
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_get_to_list
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: of_fpga_bridge_get_to_list
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_enable
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: fpga_bridge_disable
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 82b6dbbd31cd..576f1945eacd 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -49,18 +49,14 @@ probe function calls fpga_mgr_register(), such as::
* them in priv
*/
- mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int socfpga_fpga_remove(struct platform_device *pdev)
@@ -102,67 +98,19 @@ The ops include a .state function which will determine the state the FPGA is in
and return a code of type enum fpga_mgr_states. It doesn't result in a change
in state.
-How to write an image buffer to a supported FPGA
-------------------------------------------------
-
-Some sample code::
-
- #include <linux/fpga/fpga-mgr.h>
-
- struct fpga_manager *mgr;
- struct fpga_image_info *info;
- int ret;
-
- /*
- * Get a reference to FPGA manager. The manager is not locked, so you can
- * hold onto this reference without it preventing programming.
- *
- * This example uses the device node of the manager. Alternatively, use
- * fpga_mgr_get(dev) instead if you have the device.
- */
- mgr = of_fpga_mgr_get(mgr_node);
-
- /* struct with information about the FPGA image to program. */
- info = fpga_image_info_alloc(dev);
-
- /* flags indicates whether to do full or partial reconfiguration */
- info->flags = FPGA_MGR_PARTIAL_RECONFIG;
-
- /*
- * At this point, indicate where the image is. This is pseudo-code; you're
- * going to use one of these three.
- */
- if (image is in a scatter gather table) {
-
- info->sgt = [your scatter gather table]
-
- } else if (image is in a buffer) {
-
- info->buf = [your image buffer]
- info->count = [image buffer size]
-
- } else if (image is in a firmware file) {
-
- info->firmware_name = devm_kstrdup(dev, firmware_name, GFP_KERNEL);
-
- }
-
- /* Get exclusive control of FPGA manager */
- ret = fpga_mgr_lock(mgr);
-
- /* Load the buffer to the FPGA */
- ret = fpga_mgr_buf_load(mgr, &info, buf, count);
-
- /* Release the FPGA manager */
- fpga_mgr_unlock(mgr);
- fpga_mgr_put(mgr);
-
- /* Deallocate the image info if you're done with it */
- fpga_image_info_free(info);
-
API for implementing a new FPGA Manager driver
----------------------------------------------
+* ``fpga_mgr_states`` — Values for :c:member:`fpga_manager->state`.
+* struct :c:type:`fpga_manager` — the FPGA manager struct
+* struct :c:type:`fpga_manager_ops` — Low level FPGA manager driver ops
+* :c:func:`devm_fpga_mgr_create` — Allocate and init a manager struct
+* :c:func:`fpga_mgr_register` — Register an FPGA manager
+* :c:func:`fpga_mgr_unregister` — Unregister an FPGA manager
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :functions: fpga_mgr_states
+
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_manager
@@ -170,56 +118,10 @@ API for implementing a new FPGA Manager driver
:functions: fpga_manager_ops
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_create
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_free
+ :functions: devm_fpga_mgr_create
.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_register
.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_unregister
-
-API for programming an FPGA
----------------------------
-
-FPGA Manager flags
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
- :doc: FPGA Manager flags
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
- :functions: fpga_image_info
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
- :functions: fpga_mgr_states
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_image_info_alloc
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_image_info_free
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: of_fpga_mgr_get
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_get
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_put
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_lock
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_unlock
-
-.. kernel-doc:: include/linux/fpga/fpga-mgr.h
- :functions: fpga_mgr_states
-
-Note - use :c:func:`fpga_region_program_fpga()` instead of :c:func:`fpga_mgr_load()`
-
-.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: fpga_mgr_load
diff --git a/Documentation/driver-api/fpga/fpga-programming.rst b/Documentation/driver-api/fpga/fpga-programming.rst
new file mode 100644
index 000000000000..b5484df6ff0f
--- /dev/null
+++ b/Documentation/driver-api/fpga/fpga-programming.rst
@@ -0,0 +1,107 @@
+In-kernel API for FPGA Programming
+==================================
+
+Overview
+--------
+
+The in-kernel API for FPGA programming is a combination of APIs from
+FPGA manager, bridge, and regions. The actual function used to
+trigger FPGA programming is :c:func:`fpga_region_program_fpga()`.
+
+:c:func:`fpga_region_program_fpga()` uses functionality supplied by
+the FPGA manager and bridges. It will:
+
+ * lock the region's mutex
+ * lock the mutex of the region's FPGA manager
+ * build a list of FPGA bridges if a method has been specified to do so
+ * disable the bridges
+ * program the FPGA using info passed in :c:member:`fpga_region->info`.
+ * re-enable the bridges
+ * release the locks
+
+The struct fpga_image_info specifies what FPGA image to program. It is
+allocated by :c:func:`fpga_image_info_alloc()` and freed with
+:c:func:`fpga_image_info_free()`.
+
+How to program an FPGA using a region
+-------------------------------------
+
+When the FPGA region driver probes, it is given a pointer to an FPGA manager
+driver so it knows which manager to use. The region also either has a list of
+bridges to control during programming or it has a pointer to a function that
+will generate that list. Here's some sample code of what to do next::
+
+ #include <linux/fpga/fpga-mgr.h>
+ #include <linux/fpga/fpga-region.h>
+
+ struct fpga_image_info *info;
+ int ret;
+
+ /*
+ * First, alloc the struct with information about the FPGA image to
+ * program.
+ */
+ info = fpga_image_info_alloc(dev);
+ if (!info)
+ return -ENOMEM;
+
+ /* Set flags as needed, such as: */
+ info->flags = FPGA_MGR_PARTIAL_RECONFIG;
+
+ /*
+ * Indicate where the FPGA image is. This is pseudo-code; you're
+ * going to use one of these three.
+ */
+ if (image is in a scatter gather table) {
+
+ info->sgt = [your scatter gather table]
+
+ } else if (image is in a buffer) {
+
+ info->buf = [your image buffer]
+ info->count = [image buffer size]
+
+ } else if (image is in a firmware file) {
+
+ info->firmware_name = devm_kstrdup(dev, firmware_name,
+ GFP_KERNEL);
+
+ }
+
+ /* Add info to region and do the programming */
+ region->info = info;
+ ret = fpga_region_program_fpga(region);
+
+ /* Deallocate the image info if you're done with it */
+ region->info = NULL;
+ fpga_image_info_free(info);
+
+ if (ret)
+ return ret;
+
+ /* Now enumerate whatever hardware has appeared in the FPGA. */
+
+API for programming an FPGA
+---------------------------
+
+* :c:func:`fpga_region_program_fpga` — Program an FPGA
+* :c:type:`fpga_image_info` — Specifies what FPGA image to program
+* :c:func:`fpga_image_info_alloc()` — Allocate an FPGA image info struct
+* :c:func:`fpga_image_info_free()` — Free an FPGA image info struct
+
+.. kernel-doc:: drivers/fpga/fpga-region.c
+ :functions: fpga_region_program_fpga
+
+FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :doc: FPGA Manager flags
+
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :functions: fpga_image_info
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: fpga_image_info_alloc
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: fpga_image_info_free
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index f30333ce828e..0529b2d2231a 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -34,41 +34,6 @@ fpga_image_info including:
* flags indicating specifics such as whether the image is for partial
reconfiguration.
-How to program an FPGA using a region
--------------------------------------
-
-First, allocate the info struct::
-
- info = fpga_image_info_alloc(dev);
- if (!info)
- return -ENOMEM;
-
-Set flags as needed, i.e.::
-
- info->flags |= FPGA_MGR_PARTIAL_RECONFIG;
-
-Point to your FPGA image, such as::
-
- info->sgt = &sgt;
-
-Add info to region and do the programming::
-
- region->info = info;
- ret = fpga_region_program_fpga(region);
-
-:c:func:`fpga_region_program_fpga()` operates on info passed in the
-fpga_image_info (region->info). This function will attempt to:
-
- * lock the region's mutex
- * lock the region's FPGA manager
- * build a list of FPGA bridges if a method has been specified to do so
- * disable the bridges
- * program the FPGA
- * re-enable the bridges
- * release the locks
-
-Then you will want to enumerate whatever hardware has appeared in the FPGA.
-
How to add a new FPGA region
----------------------------
@@ -77,26 +42,62 @@ An example of usage can be seen in the probe function of [#f2]_.
.. [#f1] ../devicetree/bindings/fpga/fpga-region.txt
.. [#f2] ../../drivers/fpga/of-fpga-region.c
-API to program an FPGA
-----------------------
-
-.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: fpga_region_program_fpga
-
API to add a new FPGA region
----------------------------
+* struct :c:type:`fpga_region` — The FPGA region struct
+* :c:func:`devm_fpga_region_create` — Allocate and init a region struct
+* :c:func:`fpga_region_register` — Register an FPGA region
+* :c:func:`fpga_region_unregister` — Unregister an FPGA region
+
+The FPGA region driver will need to get a reference to the FPGA manager it
+will be using to do the programming. This usually happens in the region's
+probe function.
+
+* :c:func:`fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count
+* :c:func:`of_fpga_mgr_get` — Get a reference to an FPGA manager, raise ref count,
+ given a device node.
+* :c:func:`fpga_mgr_put` — Put an FPGA manager
+
+The FPGA region will need to specify which bridges to control while programming
+the FPGA. The region driver can build a list of bridges during probe time
+(:c:member:`fpga_region->bridge_list`) or it can have a function that creates
+the list of bridges to program just before programming
+(:c:member:`fpga_region->get_bridges`). The FPGA bridge framework supplies the
+following APIs to handle building or tearing down that list.
+
+* :c:func:`fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+ list
+* :c:func:`of_fpga_bridge_get_to_list` — Get a ref of an FPGA bridge, add it to a
+ list, given a device node
+* :c:func:`fpga_bridges_put` — Given a list of bridges, put them
+
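+As a sketch (``dev`` and ``mgr`` are the region device and the FPGA manager
+obtained as described above; ``my_get_bridges`` is a hypothetical callback),
+a region driver's probe might end with::
+
+	struct fpga_region *region;
+
+	region = devm_fpga_region_create(dev, mgr, my_get_bridges);
+	if (!region)
+		return -ENOMEM;
+
+	return fpga_region_register(region);
+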
.. kernel-doc:: include/linux/fpga/fpga-region.h
:functions: fpga_region
.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: fpga_region_create
-
-.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: fpga_region_free
+ :functions: devm_fpga_region_create
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_register
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_unregister
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: fpga_mgr_get
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: of_fpga_mgr_get
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: fpga_mgr_put
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+ :functions: fpga_bridge_get_to_list
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+ :functions: of_fpga_bridge_get_to_list
+
+.. kernel-doc:: drivers/fpga/fpga-bridge.c
+ :functions: fpga_bridges_put
diff --git a/Documentation/driver-api/fpga/index.rst b/Documentation/driver-api/fpga/index.rst
index c51e5ebd544a..31a4773bd2e6 100644
--- a/Documentation/driver-api/fpga/index.rst
+++ b/Documentation/driver-api/fpga/index.rst
@@ -11,3 +11,5 @@ FPGA Subsystem
fpga-mgr
fpga-bridge
fpga-region
+ fpga-programming
+
diff --git a/Documentation/driver-api/fpga/intro.rst b/Documentation/driver-api/fpga/intro.rst
index 50d1cab84950..f54c7dabcc7d 100644
--- a/Documentation/driver-api/fpga/intro.rst
+++ b/Documentation/driver-api/fpga/intro.rst
@@ -44,7 +44,7 @@ FPGA Region
-----------
If you are adding a new interface to the FPGA framework, add it on top
-of an FPGA region to allow the most reuse of your interface.
+of an FPGA region.
The FPGA Region framework (fpga-region.c) associates managers and
bridges as reconfigurable regions. A region may refer to the whole
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 1f0cdba2b2bf..909f991b4c0d 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -30,7 +30,7 @@ available subsections can be seen below.
input
usb/index
firewire
- pci
+ pci/index
spi
i2c
hsi
diff --git a/Documentation/driver-api/pci/index.rst b/Documentation/driver-api/pci/index.rst
new file mode 100644
index 000000000000..c6cf1fef61ce
--- /dev/null
+++ b/Documentation/driver-api/pci/index.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================================
+The Linux PCI driver implementer's API guide
+============================================
+
+.. class:: toc-title
+
+ Table of contents
+
+.. toctree::
+ :maxdepth: 2
+
+ pci
+ p2pdma
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/driver-api/pci/p2pdma.rst b/Documentation/driver-api/pci/p2pdma.rst
new file mode 100644
index 000000000000..4c577fa7bef9
--- /dev/null
+++ b/Documentation/driver-api/pci/p2pdma.rst
@@ -0,0 +1,145 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============================
+PCI Peer-to-Peer DMA Support
+============================
+
+The PCI bus has pretty decent support for performing DMA transfers
+between two devices on the bus. This type of transaction is henceforth
+called Peer-to-Peer (or P2P). However, there are a number of issues that
+make P2P transactions tricky to do in a perfectly safe way.
+
+One of the biggest issues is that PCI doesn't require forwarding
+transactions between hierarchy domains, and in PCIe, each Root Port
+defines a separate hierarchy domain. To make things worse, there is no
+simple way to determine if a given Root Complex supports this or not.
+(See PCIe r4.0, sec 1.3.1). Therefore, as of this writing, the kernel
+only supports doing P2P when the endpoints involved are all behind the
+same PCI bridge. Such devices are all in the same PCI hierarchy
+domain, and the spec guarantees that all transactions within the
+hierarchy will be routable; it does not, however, require routing
+between hierarchies.
+
+The second issue is that to make use of existing interfaces in Linux,
+memory that is used for P2P transactions needs to be backed by struct
+pages. However, PCI BARs are not typically cache coherent, so there are
+a few corner-case gotchas with these pages, and developers need to be
+careful about what they do with them.
+
+
+Driver Writer's Guide
+=====================
+
+In a given P2P implementation there may be three or more different
+types of kernel drivers in play:
+
+* Provider - A driver which provides or publishes P2P resources like
+ memory or doorbell registers to other drivers.
+* Client - A driver which makes use of a resource by setting up a
+ DMA transaction to or from it.
+* Orchestrator - A driver which orchestrates the flow of data between
+ clients and providers.
+
+In many cases there could be overlap between these three types (i.e.,
+it may be typical for a driver to be both a provider and a client).
+
+For example, in the NVMe Target Copy Offload implementation:
+
+* The NVMe PCI driver is a client, provider and orchestrator
+  in that it exposes any CMB (Controller Memory Buffer) as a P2P memory
+  resource (provider), it accepts P2P memory pages as buffers in requests
+  to be used directly (client), and it can also make use of the CMB as
+  submission queue entries (orchestrator).
+* The RDMA driver is a client in this arrangement so that an RNIC
+ can DMA directly to the memory exposed by the NVMe device.
+* The NVMe Target driver (nvmet) can orchestrate the data from the RNIC
+ to the P2P memory (CMB) and then to the NVMe device (and vice versa).
+
+This is currently the only arrangement supported by the kernel but
+one could imagine slight tweaks to this that would allow for the same
+functionality. For example, if a specific RNIC added a BAR with some
+memory behind it, its driver could add support as a P2P provider and
+then the NVMe Target could use the RNIC's memory instead of the CMB
+in cases where the NVMe cards in use do not have CMB support.
+
+
+Provider Drivers
+----------------
+
+A provider simply needs to register a BAR (or a portion of a BAR)
+as a P2P DMA resource using :c:func:`pci_p2pdma_add_resource()`.
+This will register struct pages for all the specified memory.
+
+After that it may optionally publish all of its resources as
+P2P memory using :c:func:`pci_p2pmem_publish()`. This will allow
+any orchestrator drivers to find and use the memory. When marked in
+this way, the resource must be regular memory with no side effects.
+
+For the time being this is fairly rudimentary in that all resources
+are typically going to be P2P memory. Future work will likely expand
+this to include other types of resources like doorbells.
+
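+As a rough sketch (the BAR number and size here are arbitrary), a provider
+might do::
+
+	/* expose 1 MB of P2P memory starting at offset 0 of BAR 4 */
+	error = pci_p2pdma_add_resource(pdev, 4, SZ_1M, 0);
+	if (error)
+		return error;
+
+	/* optionally let orchestrators find and allocate from this memory */
+	pci_p2pmem_publish(pdev, true);
+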
+
+Client Drivers
+--------------
+
+A client driver typically only has to conditionally change its DMA map
+routine to use the mapping function :c:func:`pci_p2pdma_map_sg()` instead
+of the usual :c:func:`dma_map_sg()` function. Memory mapped in this
+way does not need to be unmapped.
+
+The client may also, optionally, make use of
+:c:func:`is_pci_p2pdma_page()` to determine when to use the P2P mapping
+functions and when to use the regular mapping functions. In some
+situations, it may be more appropriate to use a flag to indicate a
+given request is P2P memory and map appropriately. It is important to
+ensure that struct pages that back P2P memory stay out of code that
+does not have support for them as other code may treat the pages as
+regular memory which may not be appropriate.
+
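+A hedged sketch of such a conditional mapping (``foo_map_sg()`` is a
+made-up driver routine)::
+
+	static int foo_map_sg(struct device *dev, struct scatterlist *sg,
+			      int nents, enum dma_data_direction dir)
+	{
+		/* P2P pages map to PCI bus addresses; no unmap is needed */
+		if (is_pci_p2pdma_page(sg_page(sg)))
+			return pci_p2pdma_map_sg(dev, sg, nents, dir);
+
+		return dma_map_sg(dev, sg, nents, dir);
+	}
+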
+
+Orchestrator Drivers
+--------------------
+
+The first task an orchestrator driver must do is compile a list of
+all client devices that will be involved in a given transaction. For
+example, the NVMe Target driver creates a list including the namespace
+block device and the RNIC in use. If the orchestrator has access to
+a specific P2P provider to use, it may check compatibility using
+:c:func:`pci_p2pdma_distance()`; otherwise, it may find a memory provider
+that's compatible with all clients using :c:func:`pci_p2pmem_find()`.
+If more than one provider is supported, the one nearest to all the clients will
+be chosen first. If more than one provider is an equal distance away, the
+one returned will be chosen at random (the choice is not arbitrary but
+truly random). This function returns the PCI device of the chosen provider
+with a reference taken; therefore, when the provider is no longer needed,
+the reference should be dropped with pci_dev_put().
+
+Once a provider is selected, the orchestrator can then use
+:c:func:`pci_alloc_p2pmem()` and :c:func:`pci_free_p2pmem()` to
+allocate P2P memory from the provider. :c:func:`pci_p2pmem_alloc_sgl()`
+and :c:func:`pci_p2pmem_free_sgl()` are convenience functions for
+allocating scatter-gather lists with P2P memory.
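+
+For instance, the allocation flow might look like this (a sketch only;
+building the client list and most error handling are elided)::
+
+	provider = pci_p2pmem_find(clients);
+	if (!provider)
+		return -ENODEV;	/* no usable P2P memory provider */
+
+	buf = pci_alloc_p2pmem(provider, size);
+	if (buf) {
+		/* ... set up and run the P2P transaction using buf ... */
+		pci_free_p2pmem(provider, buf, size);
+	}
+
+	pci_dev_put(provider);	/* drop the reference pci_p2pmem_find() took */
+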
+
+Struct Page Caveats
+-------------------
+
+Driver writers should be very careful about not passing these special
+struct pages to code that isn't prepared for them. At this time, the kernel
+interfaces do not have any checks for ensuring this. This obviously
+precludes passing these pages to userspace.
+
+P2P memory is also technically IO memory but should never have any side
+effects behind it. Thus, the order of loads and stores should not be important
+and ioreadX(), iowriteX() and friends should not be necessary.
+However, as the memory is not cache coherent, if access ever needs to
+be protected by a spinlock then :c:func:`mmiowb()` must be used before
+unlocking the lock. (See ACQUIRES VS I/O ACCESSES in
+Documentation/memory-barriers.txt)
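+
+For instance (a sketch showing only the ordering requirement)::
+
+	spin_lock(&p2p_lock);
+	*(u32 *)p2p_buf = val;	/* plain store to non-coherent P2P memory */
+	mmiowb();		/* order the store before releasing the lock */
+	spin_unlock(&p2p_lock);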
+
+
+P2P DMA Support Library
+=======================
+
+.. kernel-doc:: drivers/pci/p2pdma.c
+ :export:
diff --git a/Documentation/driver-api/pci.rst b/Documentation/driver-api/pci/pci.rst
index ca85e5e78b2c..ca85e5e78b2c 100644
--- a/Documentation/driver-api/pci.rst
+++ b/Documentation/driver-api/pci/pci.rst
diff --git a/Documentation/driver-api/soundwire/stream.rst b/Documentation/driver-api/soundwire/stream.rst
index 29121aa55fb9..26a6064503fd 100644
--- a/Documentation/driver-api/soundwire/stream.rst
+++ b/Documentation/driver-api/soundwire/stream.rst
@@ -101,6 +101,34 @@ interface. ::
+--------------------+ | |
+----------------+
+Example 5: Stereo Stream with L and R channel is rendered by 2 Masters, each
+rendering one channel, and is received by two different Slaves, each
+receiving one channel. Both Masters and both Slaves are using single port. ::
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 1 | | 1 |
+ | | Data Signal | |
+ | L +----------------------------------+ L |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+ +---------------+ Clock Signal +---------------+
+ | Master +----------------------------------+ Slave |
+ | Interface | | Interface |
+ | 2 | | 2 |
+ | | Data Signal | |
+ | R +----------------------------------+ R |
+ | (Data) | Data Direction | (Data) |
+ +---------------+ +-----------------------> +---------------+
+
+Note: In multi-link cases like the above, one would normally acquire a
+global lock and then lock the individual bus instances. In this case,
+however, the calling framework (ASoC DPCM) guarantees that stream
+operations on a card are always serialized, so there is no race condition
+and hence no need for a global lock.
+
SoundWire Stream Management flow
================================
per stream. From ASoC DPCM framework, this stream state may be linked to
.startup() operation.
.. code-block:: c
+
int sdw_alloc_stream(char * stream_name);
@@ -200,6 +229,7 @@ only be invoked once by respective Master(s) and Slave(s). From ASoC DPCM
framework, this stream state is linked to .hw_params() operation.
.. code-block:: c
+
int sdw_stream_add_master(struct sdw_bus * bus,
struct sdw_stream_config * stream_config,
struct sdw_ports_config * ports_config,
@@ -245,6 +275,7 @@ stream. From ASoC DPCM framework, this stream state is linked to
.prepare() operation.
.. code-block:: c
+
int sdw_prepare_stream(struct sdw_stream_runtime * stream);
@@ -274,6 +305,7 @@ stream. From ASoC DPCM framework, this stream state is linked to
.trigger() start operation.
.. code-block:: c
+
int sdw_enable_stream(struct sdw_stream_runtime * stream);
SDW_STREAM_DISABLED
@@ -301,6 +333,7 @@ per stream. From ASoC DPCM framework, this stream state is linked to
.trigger() stop operation.
.. code-block:: c
+
int sdw_disable_stream(struct sdw_stream_runtime * stream);
@@ -325,6 +358,7 @@ per stream. From ASoC DPCM framework, this stream state is linked to
.trigger() stop operation.
.. code-block:: c
+
int sdw_deprepare_stream(struct sdw_stream_runtime * stream);
@@ -349,6 +383,7 @@ all the Master(s) and Slave(s) associated with stream. From ASoC DPCM
framework, this stream state is linked to .hw_free() operation.
.. code-block:: c
+
int sdw_stream_remove_master(struct sdw_bus * bus,
struct sdw_stream_runtime * stream);
int sdw_stream_remove_slave(struct sdw_slave * slave,
@@ -361,6 +396,7 @@ stream assigned as part of ALLOCATED state.
In .shutdown() the data structure maintaining stream state are freed up.
.. code-block:: c
+
void sdw_release_stream(struct sdw_stream_runtime * stream);
Not Supported
diff --git a/Documentation/driver-api/uio-howto.rst b/Documentation/driver-api/uio-howto.rst
index fb2eb73be4a3..25f50eace28b 100644
--- a/Documentation/driver-api/uio-howto.rst
+++ b/Documentation/driver-api/uio-howto.rst
@@ -463,8 +463,8 @@ Getting information about your UIO device
Information about all UIO devices is available in sysfs. The first thing
you should do in your driver is check ``name`` and ``version`` to make
-sure your talking to the right device and that its kernel driver has the
-version you expect.
+sure you're talking to the right device and that its kernel driver has
+the version you expect.
You should also make sure that the memory mapping you need exists and
has the size you expect.
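
A quick userspace sanity check might look like this (a sketch; the
expected name "my_uio_device" is only an example)::

    char name[64] = "";
    FILE *f = fopen("/sys/class/uio/uio0/name", "r");

    if (f) {
        fgets(name, sizeof(name), f);
        fclose(f);
    }
    if (strcmp(name, "my_uio_device\n") != 0)
        return -1; /* not the device we expected */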
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
index 48b424de85bb..cfbc18f0d9c9 100644
--- a/Documentation/filesystems/fscrypt.rst
+++ b/Documentation/filesystems/fscrypt.rst
@@ -191,21 +191,11 @@ Currently, the following pairs of encryption modes are supported:
- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
-- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
It is strongly recommended to use AES-256-XTS for contents encryption.
AES-128-CBC was added only for low-powered embedded devices with
crypto accelerators such as CAAM or CESA that do not support XTS.
-Similarly, Speck128/256 support was only added for older or low-end
-CPUs which cannot do AES fast enough -- especially ARM CPUs which have
-NEON instructions but not the Cryptography Extensions -- and for which
-it would not otherwise be feasible to use encryption at all. It is
-not recommended to use Speck on CPUs that have AES instructions.
-Speck support is only available if it has been enabled in the crypto
-API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
-acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
-
New encryption modes can be added relatively easily, without changes
to individual filesystems. However, authenticated encryption (AE)
modes are not currently supported because of the difficulty of dealing
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 7b7b845c490a..321d74b73937 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -622,3 +622,14 @@ in your dentry operations instead.
alloc_file_clone(file, flags, ops) does not affect any caller's references.
On success you get a new struct file sharing the mount/dentry with the
original, on failure - ERR_PTR().
+--
+[recommended]
+ ->lookup() instances doing an equivalent of
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+ return d_splice_alias(inode, dentry);
+ don't need to bother with the check - d_splice_alias() will do the
+ right thing when given ERR_PTR(...) as inode. Moreover, passing NULL
+ inode to d_splice_alias() will also do the right thing (equivalent of
+	d_add(dentry, NULL); return NULL;), so such special cases
+	also don't need separate treatment.
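+	For illustration (foo_iget() is a hypothetical helper that may
+	return an inode, NULL or ERR_PTR()), such an instance shrinks to
+		static struct dentry *foo_lookup(struct inode *dir,
+				struct dentry *dentry, unsigned int flags)
+		{
+			return d_splice_alias(foo_iget(dir, dentry), dentry);
+		}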
diff --git a/Documentation/input/event-codes.rst b/Documentation/input/event-codes.rst
index a8c0873beb95..cef220c176a4 100644
--- a/Documentation/input/event-codes.rst
+++ b/Documentation/input/event-codes.rst
@@ -190,7 +190,16 @@ A few EV_REL codes have special meanings:
* REL_WHEEL, REL_HWHEEL:
- These codes are used for vertical and horizontal scroll wheels,
- respectively.
+ respectively. The value is the number of "notches" moved on the wheel, the
+ physical size of which varies by device. For high-resolution wheels (which
+ report multiple events for each notch of movement, or do not have notches)
+ this may be an approximation based on the high-resolution scroll events.
+
+* REL_WHEEL_HI_RES:
+
+ - If a vertical scroll wheel supports high-resolution scrolling, this code
+ will be emitted in addition to REL_WHEEL. The value is the (approximate)
+ distance travelled by the user's finger, in microns.
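+
+For example, a consumer might accumulate the high-resolution values and
+only act once a whole detent has been travelled (a sketch; the 50000
+micron detent size and scroll_one_notch() are assumptions that vary by
+device and application)::
+
+    static int residual;
+
+    if (ev.type == EV_REL && ev.code == REL_WHEEL_HI_RES) {
+        residual += ev.value;               /* microns travelled */
+        while (abs(residual) >= 50000) {    /* assumed notch size */
+            scroll_one_notch(residual > 0 ? 1 : -1);
+            residual -= residual > 0 ? 50000 : -50000;
+        }
+    }
+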
EV_ABS
------
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index 13a7c999c04a..d05d93761653 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -201,7 +201,7 @@ Code Seq#(hex) Include File Comments
'X' 01 linux/pktcdvd.h conflict!
'Y' all linux/cyclades.h
'Z' 14-15 drivers/message/fusion/mptctl.h
-'[' 00-07 linux/usb/tmc.h USB Test and Measurement Devices
+'[' 00-3F linux/usb/tmc.h USB Test and Measurement Devices
<mailto:gregkh@linuxfoundation.org>
'a' all linux/atm*.h, linux/sonet.h ATM on linux
<http://lrcwww.epfl.ch/>
diff --git a/Documentation/nvmem/nvmem.txt b/Documentation/nvmem/nvmem.txt
index 8d8d8f58f96f..fc2fe4b18655 100644
--- a/Documentation/nvmem/nvmem.txt
+++ b/Documentation/nvmem/nvmem.txt
@@ -58,6 +58,37 @@ static int qfprom_probe(struct platform_device *pdev)
It is mandatory that the NVMEM provider has a regmap associated with its
struct device. Failure to do so will return an error code from nvmem_register().
+Users of board files can define and register nvmem cells using the
+nvmem_cell_table struct:
+
+static struct nvmem_cell_info foo_nvmem_cells[] = {
+ {
+ .name = "macaddr",
+ .offset = 0x7f00,
+ .bytes = ETH_ALEN,
+ }
+};
+
+static struct nvmem_cell_table foo_nvmem_cell_table = {
+ .nvmem_name = "i2c-eeprom",
+ .cells = foo_nvmem_cells,
+ .ncells = ARRAY_SIZE(foo_nvmem_cells),
+};
+
+nvmem_add_cell_table(&foo_nvmem_cell_table);
+
+Additionally it is possible to create nvmem cell lookup entries and register
+them with the nvmem framework from machine code as shown in the example below:
+
+static struct nvmem_cell_lookup foo_nvmem_lookup = {
+ .nvmem_name = "i2c-eeprom",
+ .cell_name = "macaddr",
+ .dev_id = "foo_mac.0",
+ .con_id = "mac-address",
+};
+
+nvmem_add_cell_lookups(&foo_nvmem_lookup, 1);
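+
+With the lookup above in place, the "foo_mac.0" device can read the cell
+through the regular consumer API (a sketch; error handling omitted):
+
+struct nvmem_cell *cell;
+size_t len;
+u8 *mac;
+
+cell = nvmem_cell_get(dev, "mac-address");
+mac = nvmem_cell_read(cell, &len);
+/* ... use the MAC address ... */
+kfree(mac);
+nvmem_cell_put(cell);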
+
NVMEM Consumers
+++++++++++++++
diff --git a/Documentation/s390/vfio-ap.txt b/Documentation/s390/vfio-ap.txt
new file mode 100644
index 000000000000..65167cfe4485
--- /dev/null
+++ b/Documentation/s390/vfio-ap.txt
@@ -0,0 +1,837 @@
+Introduction:
+============
+The Adjunct Processor (AP) facility is an IBM Z cryptographic facility comprised
+of three AP instructions and from 1 up to 256 PCIe cryptographic adapter cards.
+The AP devices provide cryptographic functions to all CPUs assigned to a
+linux system running in an IBM Z system LPAR.
+
+The AP adapter cards are exposed via the AP bus. The motivation for vfio-ap
+is to make AP cards available to KVM guests using the VFIO mediated device
+framework. This implementation relies considerably on the s390 virtualization
+facilities which do most of the hard work of providing direct access to AP
+devices.
+
+AP Architectural Overview:
+=========================
+To facilitate the comprehension of the design, let's start with some
+definitions:
+
+* AP adapter
+
+ An AP adapter is an IBM Z adapter card that can perform cryptographic
+ functions. There can be from 0 to 256 adapters assigned to an LPAR. Adapters
+ assigned to the LPAR in which a linux host is running will be available to
+ the linux host. Each adapter is identified by a number from 0 to 255; however,
+ the maximum adapter number is determined by machine model and/or adapter type.
+ When installed, an AP adapter is accessed by AP instructions executed by any
+ CPU.
+
+ The AP adapter cards are assigned to a given LPAR via the system's Activation
+ Profile which can be edited via the HMC. When the linux host system is IPL'd
+ in the LPAR, the AP bus detects the AP adapter cards assigned to the LPAR and
+ creates a sysfs device for each assigned adapter. For example, if AP adapters
+ 4 and 10 (0x0a) are assigned to the LPAR, the AP bus will create the following
+ sysfs device entries:
+
+ /sys/devices/ap/card04
+ /sys/devices/ap/card0a
+
+ Symbolic links to these devices will also be created in the AP bus devices
+ sub-directory:
+
+ /sys/bus/ap/devices/[card04]
+  /sys/bus/ap/devices/[card0a]
+
+* AP domain
+
+ An adapter is partitioned into domains. An adapter can hold up to 256 domains
+ depending upon the adapter type and hardware configuration. A domain is
+ identified by a number from 0 to 255; however, the maximum domain number is
+  determined by machine model and/or adapter type. A domain can be thought of
+ as a set of hardware registers and memory used for processing AP commands. A
+ domain can be configured with a secure private key used for clear key
+ encryption. A domain is classified in one of two ways depending upon how it
+ may be accessed:
+
+ * Usage domains are domains that are targeted by an AP instruction to
+ process an AP command.
+
+ * Control domains are domains that are changed by an AP command sent to a
+ usage domain; for example, to set the secure private key for the control
+ domain.
+
+ The AP usage and control domains are assigned to a given LPAR via the system's
+ Activation Profile which can be edited via the HMC. When a linux host system
+ is IPL'd in the LPAR, the AP bus module detects the AP usage and control
+ domains assigned to the LPAR. The domain number of each usage domain and
+ adapter number of each AP adapter are combined to create AP queue devices
+ (see AP Queue section below). The domain number of each control domain will be
+ represented in a bitmask and stored in a sysfs file
+ /sys/bus/ap/ap_control_domain_mask. The bits in the mask, from most to least
+ significant bit, correspond to domains 0-255.
+
+* AP Queue
+
+ An AP queue is the means by which an AP command is sent to a usage domain
+ inside a specific adapter. An AP queue is identified by a tuple
+ comprised of an AP adapter ID (APID) and an AP queue index (APQI). The
+ APQI corresponds to a given usage domain number within the adapter. This tuple
+ forms an AP Queue Number (APQN) uniquely identifying an AP queue. AP
+ instructions include a field containing the APQN to identify the AP queue to
+ which the AP command is to be sent for processing.
+
+ The AP bus will create a sysfs device for each APQN that can be derived from
+ the cross product of the AP adapter and usage domain numbers detected when the
+ AP bus module is loaded. For example, if adapters 4 and 10 (0x0a) and usage
+ domains 6 and 71 (0x47) are assigned to the LPAR, the AP bus will create the
+ following sysfs entries:
+
+ /sys/devices/ap/card04/04.0006
+ /sys/devices/ap/card04/04.0047
+ /sys/devices/ap/card0a/0a.0006
+ /sys/devices/ap/card0a/0a.0047
+
+ The following symbolic links to these devices will be created in the AP bus
+ devices subdirectory:
+
+ /sys/bus/ap/devices/[04.0006]
+ /sys/bus/ap/devices/[04.0047]
+ /sys/bus/ap/devices/[0a.0006]
+ /sys/bus/ap/devices/[0a.0047]
+
+* AP Instructions:
+
+ There are three AP instructions:
+
+ * NQAP: to enqueue an AP command-request message to a queue
+ * DQAP: to dequeue an AP command-reply message from a queue
+ * PQAP: to administer the queues
+
+ AP instructions identify the domain that is targeted to process the AP
+ command; this must be one of the usage domains. An AP command may modify a
+ domain that is not one of the usage domains, but the modified domain
+ must be one of the control domains.
+
+AP and SIE:
+==========
+Let's now take a look at how AP instructions executed on a guest are interpreted
+by the hardware.
+
+A satellite control block called the Crypto Control Block (CRYCB) is attached to
+our main hardware virtualization control block. The CRYCB contains three fields
+to identify the adapters, usage domains and control domains assigned to the KVM
+guest:
+
+* The AP Mask (APM) field is a bit mask that identifies the AP adapters assigned
+ to the KVM guest. Each bit in the mask, from left to right (i.e. from most
+ significant to least significant bit in big endian order), corresponds to
+ an APID from 0-255. If a bit is set, the corresponding adapter is valid for
+ use by the KVM guest.
+
+* The AP Queue Mask (AQM) field is a bit mask identifying the AP usage domains
+ assigned to the KVM guest. Each bit in the mask, from left to right (i.e. from
+ most significant to least significant bit in big endian order), corresponds to
+ an AP queue index (APQI) from 0-255. If a bit is set, the corresponding queue
+ is valid for use by the KVM guest.
+
+* The AP Domain Mask field is a bit mask that identifies the AP control domains
+ assigned to the KVM guest. The ADM bit mask controls which domains can be
+ changed by an AP command-request message sent to a usage domain from the
+ guest. Each bit in the mask, from left to right (i.e. from most significant to
+ least significant bit in big endian order), corresponds to a domain from
+ 0-255. If a bit is set, the corresponding domain can be modified by an AP
+ command-request message sent to a usage domain.
+
+If you recall from the description of an AP Queue, AP instructions include
+an APQN to identify the AP queue to which an AP command-request message is to be
+sent (NQAP and PQAP instructions), or from which a command-reply message is to
+be received (DQAP instruction). The validity of an APQN is defined by the matrix
+calculated from the APM and AQM; it is the cross product of all assigned adapter
+numbers (APM) with all assigned queue indexes (AQM). For example, if adapters 1
+and 2 and usage domains 5 and 6 are assigned to a guest, the APQNs (1,5), (1,6),
+(2,5) and (2,6) will be valid for the guest.
+
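+To make the cross-product derivation concrete, here is a small illustrative
+C sketch (not kernel code; the masks are plain 32-byte arrays holding the
+256-bit APM and AQM, with bit 0 as the most significant bit of byte 0, as
+described above):
+
+   #include <stdio.h>
+
+   static int bit_set(const unsigned char mask[32], unsigned int n)
+   {
+           return (mask[n / 8] >> (7 - (n % 8))) & 1;
+   }
+
+   static void list_valid_apqns(const unsigned char apm[32],
+                                const unsigned char aqm[32])
+   {
+           unsigned int apid, apqi;
+
+           /* the valid APQNs are the cross product of the APM and AQM */
+           for (apid = 0; apid < 256; apid++)
+                   for (apqi = 0; apqi < 256; apqi++)
+                           if (bit_set(apm, apid) && bit_set(aqm, apqi))
+                                   printf("(%u,%u)\n", apid, apqi);
+   }
+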
+The APQNs can provide secure key functionality - i.e., a private key is stored
+on the adapter card for each of its domains - so each APQN must be assigned to
+at most one guest or to the linux host.
+
+ Example 1: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapters 1,2 domain 7
+
+ This is valid because both guests have a unique set of APQNs:
+ Guest1 has APQNs (1,5), (1,6), (2,5), (2,6);
+ Guest2 has APQNs (1,7), (2,7)
+
+ Example 2: Valid configuration:
+ ------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapters 3,4 domains 5,6
+
+ This is also valid because both guests have a unique set of APQNs:
+ Guest1 has APQNs (1,5), (1,6), (2,5), (2,6);
+ Guest2 has APQNs (3,5), (3,6), (4,5), (4,6)
+
+ Example 3: Invalid configuration:
+ --------------------------------
+ Guest1: adapters 1,2 domains 5,6
+ Guest2: adapter 1 domains 6,7
+
+ This is an invalid configuration because both guests have access to
+ APQN (1,6).
+
+The Design:
+===========
+The design introduces three new objects:
+
+1. AP matrix device
+2. VFIO AP device driver (vfio_ap.ko)
+3. VFIO AP mediated matrix pass-through device
+
+The VFIO AP device driver
+-------------------------
+The VFIO AP (vfio_ap) device driver serves the following purposes:
+
+1. Provides the interfaces to secure APQNs for exclusive use of KVM guests.
+
+2. Sets up the VFIO mediated device interfaces to manage a mediated matrix
+ device and creates the sysfs interfaces for assigning adapters, usage
+ domains, and control domains comprising the matrix for a KVM guest.
+
+3. Configures the APM, AQM and ADM in the CRYCB referenced by a KVM guest's
+ SIE state description to grant the guest access to a matrix of AP devices.
+
+Reserve APQNs for exclusive use of KVM guests
+---------------------------------------------
+The following block diagram illustrates the mechanism by which APQNs are
+reserved:
+
+ +------------------+
+ 7 remove | |
+ +--------------------> cex4queue driver |
+ | | |
+ | +------------------+
+ |
+ |
+ | +------------------+ +-----------------+
+ | 5 register driver | | 3 create | |
+ | +----------------> Device core +----------> matrix device |
+ | | | | | |
+ | | +--------^---------+ +-----------------+
+ | | |
+ | | +-------------------+
+ | | +-----------------------------------+ |
+ | | | 4 register AP driver | | 2 register device
+ | | | | |
++--------+---+-v---+ +--------+-------+-+
+| | | |
+| ap_bus +--------------------- > vfio_ap driver |
+| | 8 probe | |
++--------^---------+ +--^--^------------+
+6 edit | | |
+ apmask | +-----------------------------+ | 9 mdev create
+ aqmask | | 1 modprobe |
++--------+-----+---+ +----------------+-+ +------------------+
+| | | |8 create | mediated |
+| admin | | VFIO device core |---------> matrix |
+| + | | | device |
++------+-+---------+ +--------^---------+ +--------^---------+
+ | | | |
+ | | 9 create vfio_ap-passthrough | |
+ | +------------------------------+ |
+ +-------------------------------------------------------------+
+ 10 assign adapter/domain/control domain
+
+The process for reserving an AP queue for use by a KVM guest is:
+
+1. The administrator loads the vfio_ap device driver
+2. The vfio-ap driver during its initialization will register a single 'matrix'
+ device with the device core. This will serve as the parent device for
+ all mediated matrix devices used to configure an AP matrix for a guest.
+3. The /sys/devices/vfio_ap/matrix device is created by the device core
+4. The vfio_ap device driver will register with the AP bus for AP queue devices
+ of type 10 and higher (CEX4 and newer), providing the driver's probe and
+ remove callback interfaces. Queue devices older than CEX4 are not supported:
+ they will go out of service in the relatively near future, there are few
+ older systems around on which to test them, and supporting them would
+ needlessly complicate the design.
+5. The AP bus registers the vfio_ap device driver with the device core
+6. The administrator edits the AP adapter and queue masks to reserve AP queues
+ for use by the vfio_ap device driver.
+7. The AP bus removes the AP queues reserved for the vfio_ap driver from the
+ default zcrypt cex4queue driver.
+8. The AP bus probes the vfio_ap device driver to bind the queues reserved for
+ it.
+9. The administrator creates a passthrough type mediated matrix device to be
+ used by a guest
+10. The administrator assigns the adapters, usage domains and control domains
+ to be exclusively used by a guest.
+
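+In terms of commands, steps 1, 6 and 9 might look like the following sketch
+(the mask values and the UUID are illustrative only; see the worked example
+at the end of this document):
+
+   modprobe vfio_ap
+   echo -5 > /sys/bus/ap/apmask
+   echo -6 > /sys/bus/ap/aqmask
+   uuidgen > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
+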
+Set up the VFIO mediated device interfaces
+------------------------------------------
+The VFIO AP device driver utilizes the common interface of the VFIO mediated
+device core driver to:
+* Register an AP mediated bus driver to add a mediated matrix device to and
+ remove it from a VFIO group.
+* Create and destroy a mediated matrix device
+* Add a mediated matrix device to and remove it from the AP mediated bus driver
+* Add a mediated matrix device to and remove it from an IOMMU group
+
+The following high-level block diagram shows the main components and interfaces
+of the VFIO AP mediated matrix device driver:
+
+ +-------------+
+ | |
+ | +---------+ | mdev_register_driver() +--------------+
+ | | Mdev | +<-----------------------+ |
+ | | bus | | | vfio_mdev.ko |
+ | | driver | +----------------------->+ |<-> VFIO user
+ | +---------+ | probe()/remove() +--------------+ APIs
+ | |
+ | MDEV CORE |
+ | MODULE |
+ | mdev.ko |
+ | +---------+ | mdev_register_device() +--------------+
+ | |Physical | +<-----------------------+ |
+ | | device | | | vfio_ap.ko |<-> matrix
+ | |interface| +----------------------->+ | device
+ | +---------+ | callback +--------------+
+ +-------------+
+
+During initialization of the vfio_ap module, the matrix device is registered
+with an 'mdev_parent_ops' structure that provides the sysfs attribute
+structures, mdev functions and callback interfaces for managing the mediated
+matrix device.
+
+* sysfs attribute structures:
+ * supported_type_groups
+ The VFIO mediated device framework supports creation of user-defined
+ mediated device types. These mediated device types are specified
+ via the 'supported_type_groups' structure when a device is registered
+ with the mediated device framework. The registration process creates the
+ sysfs structures for each mediated device type specified in the
+ 'mdev_supported_types' sub-directory of the device being registered. Along
+ with the device type, the sysfs attributes of the mediated device type are
+ provided.
+
+ The VFIO AP device driver will register one mediated device type for
+ passthrough devices:
+ /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough
+ Only the read-only attributes required by the VFIO mdev framework will
+ be provided:
+ ... name
+ ... device_api
+ ... available_instances
+ Where:
+ * name: specifies the name of the mediated device type
+ * device_api: specifies the VFIO device API supported by the mediated
+ device type
+ * available_instances: the number of mediated matrix passthrough devices
+ that can be created
+ * mdev_attr_groups
+ This attribute group identifies the user-defined sysfs attributes of the
+ mediated device. When a device is registered with the VFIO mediated device
+ framework, the sysfs attribute files identified in the 'mdev_attr_groups'
+ structure will be created in the mediated matrix device's directory. The
+ sysfs attributes for a mediated matrix device are:
+ * assign_adapter:
+ * unassign_adapter:
+ Write-only attributes for assigning/unassigning an AP adapter to/from the
+ mediated matrix device. To assign/unassign an adapter, the APID of the
+ adapter is echoed to the respective attribute file.
+ * assign_domain:
+ * unassign_domain:
+ Write-only attributes for assigning/unassigning an AP usage domain to/from
+ the mediated matrix device. To assign/unassign a domain, the domain
+ number of the usage domain is echoed to the respective attribute
+ file.
+ * matrix:
+ A read-only file for displaying the APQNs derived from the cross product
+ of the adapter and domain numbers assigned to the mediated matrix device.
+ * assign_control_domain:
+ * unassign_control_domain:
+ Write-only attributes for assigning/unassigning an AP control domain
+ to/from the mediated matrix device. To assign/unassign a control domain,
+ the ID of the domain to be assigned/unassigned is echoed to the respective
+ attribute file.
+ * control_domains:
+ A read-only file for displaying the control domain numbers assigned to the
+ mediated matrix device.
+
+* functions:
+ * create:
+ allocates the ap_matrix_mdev structure used by the vfio_ap driver to:
+ * Store the reference to the KVM structure for the guest using the mdev
+ * Store the AP matrix configuration for the adapters, domains, and control
+ domains assigned via the corresponding sysfs attributes files
+ * remove:
+ deallocates the mediated matrix device's ap_matrix_mdev structure. This will
+ be allowed only if a running guest is not using the mdev.
+
+* callback interfaces
+ * open:
+ The vfio_ap driver uses this callback to register a
+ VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the mdev matrix
+ device. The open callback is invoked when QEMU connects the VFIO iommu group
+ for the mdev matrix device to the MDEV bus. Access to the KVM structure used
+ to configure the KVM guest is provided via this callback. The KVM structure
+ is used to configure the guest's access to the AP matrix defined via the
+ mediated matrix device's sysfs attribute files.
+ * release:
+ unregisters the VFIO_GROUP_NOTIFY_SET_KVM notifier callback function for the
+ mdev matrix device and deconfigures the guest's AP matrix.
+
+Configure the APM, AQM and ADM in the CRYCB:
+-------------------------------------------
+Configuring the AP matrix for a KVM guest will be performed when the
+VFIO_GROUP_NOTIFY_SET_KVM notifier callback is invoked. The notifier
+function is called when QEMU connects to KVM. The guest's AP matrix is
+configured via its CRYCB by:
+* Setting the bits in the APM corresponding to the APIDs assigned to the
+ mediated matrix device via its 'assign_adapter' interface.
+* Setting the bits in the AQM corresponding to the domains assigned to the
+ mediated matrix device via its 'assign_domain' interface.
+* Setting the bits in the ADM corresponding to the domain IDs assigned to the
+ mediated matrix device via its 'assign_control_domain' interface.
+
+The CPU model features for AP
+-----------------------------
+The AP stack relies on the presence of the AP instructions as well as two
+facilities: the AP Facilities Test (APFT) facility and the AP Query
+Configuration Information (QCI) facility. These features/facilities are made
+available to a KVM guest via the following CPU model features:
+
+1. ap: Indicates whether the AP instructions are installed on the guest. This
+ feature will be enabled by KVM only if the AP instructions are installed
+ on the host.
+
+2. apft: Indicates the APFT facility is available on the guest. This facility
+ can be made available to the guest only if it is available on the host (i.e.,
+ facility bit 15 is set).
+
+3. apqci: Indicates the AP QCI facility is available on the guest. This facility
+ can be made available to the guest only if it is available on the host (i.e.,
+ facility bit 12 is set).
+
+Note: If the user chooses to specify a CPU model different than the 'host'
+model to QEMU, the CPU model features and facilities need to be turned on
+explicitly; for example:
+
+ /usr/bin/qemu-system-s390x ... -cpu z13,ap=on,apqci=on,apft=on
+
+A guest can be precluded from using AP features/facilities by turning them off
+explicitly; for example:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=off,apqci=off,apft=off
+
+Note: If the APFT facility is turned off (apft=off) for the guest, the guest
+will not see any AP devices. The zcrypt device drivers that register for type 10
+and newer AP devices - i.e., the cex4card and cex4queue device drivers - need
+the APFT facility to ascertain the facilities installed on a given AP device. If
+the APFT facility is not installed on the guest, then the probe of device
+drivers will fail since only type 10 and newer devices can be configured for
+guest use.
+
+Example:
+=======
+Let's now provide an example to illustrate how KVM guests may be given
+access to AP facilities. For this example, we will show how to configure
+three guests such that executing the lszcrypt command on the guests would
+look like this:
+
+Guest1
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5C CCA-Coproc
+05.0004 CEX5C CCA-Coproc
+05.00ab CEX5C CCA-Coproc
+06 CEX5A Accelerator
+06.0004 CEX5A Accelerator
+06.00ab CEX5A Accelerator
+
+Guest2
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+05 CEX5A Accelerator
+05.0047 CEX5A Accelerator
+05.00ff CEX5A Accelerator
+
+Guest3
+------
+CARD.DOMAIN TYPE MODE
+------------------------------
+06 CEX5A Accelerator
+06.0047 CEX5A Accelerator
+06.00ff CEX5A Accelerator
+
+These are the steps:
+
+1. Install the vfio_ap module on the linux host. The dependency chain for the
+ vfio_ap module is:
+ * iommu
+ * s390
+ * zcrypt
+ * vfio
+ * vfio_mdev
+ * vfio_mdev_device
+ * KVM
+
+ To build the vfio_ap module, the kernel build must be configured with the
+ following Kconfig elements selected:
+ * IOMMU_SUPPORT
+ * S390
+ * ZCRYPT
+ * S390_AP_IOMMU
+ * VFIO
+ * VFIO_MDEV
+ * VFIO_MDEV_DEVICE
+ * KVM
+
+ If using make menuconfig, select the following to build the vfio_ap module:
+ -> Device Drivers
+ -> IOMMU Hardware Support
+ select S390 AP IOMMU Support
+ -> VFIO Non-Privileged userspace driver framework
+ -> Mediated device driver framework
+ -> VFIO driver for Mediated devices
+ -> I/O subsystem
+ -> VFIO support for AP devices
+
+2. Secure the AP queues to be used by the three guests so that the host cannot
+ access them. To secure them, there are two sysfs files that specify
+ bitmasks marking a subset of the APQN range as 'usable by the default AP
+ queue device drivers' or 'not usable by the default device drivers and thus
+ available for use by the vfio_ap device driver'. The sysfs files containing
+ the masks are located at:
+
+ /sys/bus/ap/apmask
+ /sys/bus/ap/aqmask
+
+ The 'apmask' is a 256-bit mask that identifies a set of AP adapter IDs
+ (APID). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APID from
+ 0-255. If a bit is set, the APID is marked as usable only by the default AP
+ queue device drivers; otherwise, the APID is usable by the vfio_ap
+ device driver.
+
+ The 'aqmask' is a 256-bit mask that identifies a set of AP queue indexes
+ (APQI). Each bit in the mask, from left to right (i.e., from most significant
+ to least significant bit in big endian order), corresponds to an APQI from
+ 0-255. If a bit is set, the APQI is marked as usable only by the default AP
+ queue device drivers; otherwise, the APQI is usable by the vfio_ap device
+ driver.
+
+ Take, for example, the following mask:
+
+ 0x7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+
+ It indicates:
+
+ 1, 2, 3, 4, 5, and 7-255 belong to the default drivers' pool, and 0 and 6
+ belong to the vfio_ap device driver's pool.
+
+ The APQN of each AP queue device assigned to the linux host is checked by the
+ AP bus against the set of APQNs derived from the cross product of APIDs
+ and APQIs marked as usable only by the default AP queue device drivers. If a
+ match is detected, only the default AP queue device drivers will be probed;
+ otherwise, the vfio_ap device driver will be probed.
+
+ By default, the two masks are set to reserve all APQNs for use by the default
+ AP queue device drivers. There are two ways the default masks can be changed:
+
+ 1. The sysfs mask files can be edited by echoing a string into the
+ respective sysfs mask file in one of two formats:
+
+ * An absolute hex string starting with 0x - like "0x12345678" - sets
+ the mask. If the given string is shorter than the mask, it is padded
+ with 0s on the right; for example, specifying a mask value of 0x41 is
+ the same as specifying:
+
+ 0x4100000000000000000000000000000000000000000000000000000000000000
+
+ Keep in mind that the mask reads from left to right (i.e., most
+ significant to least significant bit in big endian order), so the mask
+ above identifies device numbers 1 and 7 (01000001).
+
+ If the string is longer than the mask, the operation is terminated with
+ an error (EINVAL).
+
+ * Individual bits in the mask can be switched on and off by specifying
+ each bit number to be switched in a comma separated list. Each bit
+ number string must be prepended with a plus ('+') or minus ('-') to indicate
+ the corresponding bit is to be switched on ('+') or off ('-'). Some
+ valid values are:
+
+ "+0" switches bit 0 on
+ "-13" switches bit 13 off
+ "+0x41" switches bit 65 on
+ "-0xff" switches bit 255 off
+
+ The following example:
+ +0,-6,+0x47,-0xf0
+
+ Switches bits 0 and 71 (0x47) on
+ Switches bits 6 and 240 (0xf0) off
+
+ Note that the bits not specified in the list remain as they were before
+ the operation.
+
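+ For example, the list above would be applied to the adapter mask with
+ a command like:
+
+    echo +0,-6,+0x47,-0xf0 > /sys/bus/ap/apmask
+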
+ 2. The masks can also be changed at boot time via parameters on the kernel
+ command line like this:
+
+ ap.apmask=0xffff ap.aqmask=0x40
+
+ This would create the following masks:
+
+ apmask:
+ 0xffff000000000000000000000000000000000000000000000000000000000000
+
+ aqmask:
+ 0x4000000000000000000000000000000000000000000000000000000000000000
+
+ Resulting in these two pools:
+
+ default drivers pool: adapters 0-15, domain 1
+ alternate drivers pool: adapters 16-255, domains 0, 2-255
+
+ Securing the APQNs for our example:
+ ----------------------------------
+ To secure the AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004, 06.0047,
+ 06.00ab, and 06.00ff for use by the vfio_ap device driver, the corresponding
+ APQNs can either be removed from the default masks:
+
+ echo -5,-6 > /sys/bus/ap/apmask
+
+ echo -4,-0x47,-0xab,-0xff > /sys/bus/ap/aqmask
+
+ Or the masks can be set as follows:
+
+ echo 0xf9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff \
+ > apmask
+
+ echo 0xf7fffffffffffffffeffffffffffffffffffffffffeffffffffffffffffffffe \
+ > aqmask
+
+ This will result in AP queues 05.0004, 05.0047, 05.00ab, 05.00ff, 06.0004,
+ 06.0047, 06.00ab, and 06.00ff getting bound to the vfio_ap device driver. The
+ sysfs directory for the vfio_ap device driver will now contain symbolic links
+ to the AP queue devices bound to it:
+
+ /sys/bus/ap
+ ... [drivers]
+ ...... [vfio_ap]
+ ......... [05.0004]
+ ......... [05.0047]
+ ......... [05.00ab]
+ ......... [05.00ff]
+ ......... [06.0004]
+ ......... [06.0047]
+ ......... [06.00ab]
+ ......... [06.00ff]
+
+ Keep in mind that only type 10 and newer adapters (i.e., CEX4 and later)
+ can be bound to the vfio_ap device driver. As explained earlier, older
+ devices are not supported because they will go out of service in the
+ relatively near future, there are few older systems on which to test them,
+ and supporting them would needlessly complicate the design.
+
+ The administrator, therefore, must take care to secure only AP queues that
+ can be bound to the vfio_ap device driver. The device type for a given AP
+ queue device can be read from the parent card's sysfs directory. For example,
+ to see the hardware type of the queue 05.0004:
+
+ cat /sys/bus/ap/devices/card05/hwtype
+
+ The hwtype must be 10 or higher (CEX4 or newer) in order to be bound to the
+ vfio_ap device driver.
+
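+ To survey the hardware type of every card at once, a one-liner such as
+ the following can be used (it prints each file name with its contents):
+
+    grep . /sys/bus/ap/devices/card*/hwtype
+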
+3. Create the mediated devices needed to configure the AP matrixes for the
+ three guests and to provide an interface to the vfio_ap driver for
+ use by the guests:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough] (passthrough mediated matrix device type)
+ --------- create
+ --------- [devices]
+
+ To create the mediated devices for the three guests:
+
+ uuidgen > create
+ uuidgen > create
+ uuidgen > create
+
+ or
+
+ echo $uuid1 > create
+ echo $uuid2 > create
+ echo $uuid3 > create
+
+ This will create three mediated devices in the [devices] subdirectory named
+ after the UUID written to the create attribute file. We call them $uuid1,
+ $uuid2 and $uuid3 and this is the sysfs directory structure after creation:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+ ------------ [$uuid2]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
+ ------------ [$uuid3]
+ --------------- assign_adapter
+ --------------- assign_control_domain
+ --------------- assign_domain
+ --------------- matrix
+ --------------- unassign_adapter
+ --------------- unassign_control_domain
+ --------------- unassign_domain
+
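+ The mediated devices can equivalently be created with full paths instead
+ of changing into the device type directory, e.g. (shell variable used as
+ a placeholder):
+
+    uuid1=$(uuidgen)
+    echo $uuid1 > /sys/devices/vfio_ap/matrix/mdev_supported_types/vfio_ap-passthrough/create
+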
+4. The administrator now needs to configure the matrixes for the mediated
+ devices $uuid1 (for Guest1), $uuid2 (for Guest2) and $uuid3 (for Guest3).
+
+ This is how the matrix is configured for Guest1:
+
+ echo 5 > assign_adapter
+ echo 6 > assign_adapter
+ echo 4 > assign_domain
+ echo 0xab > assign_domain
+
+ Control domains can similarly be assigned using the assign_control_domain
+ sysfs file.
+
+ If a mistake is made configuring an adapter, domain or control domain,
+ you can use the unassign_xxx files to unassign the adapter, domain or
+ control domain.
+
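+ For example, to back out the assignment of adapter 5 made above:
+
+    echo 5 > unassign_adapter
+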
+ To display the matrix configuration for Guest1:
+
+ cat matrix
+
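+ Given the assignments above, the output should list the cross product of
+ adapters 5 and 6 with domains 4 and 0xab, i.e.:
+
+    05.0004
+    05.00ab
+    06.0004
+    06.00ab
+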
+ This is how the matrix is configured for Guest2:
+
+ echo 5 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
+
+ This is how the matrix is configured for Guest3:
+
+ echo 6 > assign_adapter
+ echo 0x47 > assign_domain
+ echo 0xff > assign_domain
+
+ In order to successfully assign an adapter:
+
+ * The adapter number specified must represent a value from 0 up to the
+ maximum adapter number configured for the system. If an adapter number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the adapter ID and the IDs of
+ the previously assigned domains must be bound to the vfio_ap device
+ driver. If no domains have yet been assigned, then there must be at least
+ one APQN with the specified APID bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the adapter ID and the IDs of the
+ previously assigned domains can be assigned to another mediated matrix
+ device. If an APQN is assigned to another mediated matrix device, the
+ operation will terminate with an error (EADDRINUSE).
+
+ In order to successfully assign a domain:
+
+ * The domain number specified must represent a value from 0 up to the
+ maximum domain number configured for the system. If a domain number
+ higher than the maximum is specified, the operation will terminate with
+ an error (ENODEV).
+
+ * All APQNs that can be derived from the domain ID and the IDs of
+ the previously assigned adapters must be bound to the vfio_ap device
+ driver. If no adapters have yet been assigned, then there must be at least
+ one APQN with the specified APQI bound to the vfio_ap driver. If no such
+ APQNs are bound to the driver, the operation will terminate with an
+ error (EADDRNOTAVAIL).
+
+ No APQN that can be derived from the domain ID and the IDs of the
+ previously assigned adapters can be assigned to another mediated matrix
+ device. If an APQN is assigned to another mediated matrix device, the
+ operation will terminate with an error (EADDRINUSE).
+
+ In order to successfully assign a control domain, the domain number
+ specified must represent a value from 0 up to the maximum domain number
+ configured for the system. If a control domain number higher than the maximum
+ is specified, the operation will terminate with an error (ENODEV).
+
+5. Start Guest1:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid1 ...
+
+6. Start Guest2:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid2 ...
+
+7. Start Guest3:
+
+ /usr/bin/qemu-system-s390x ... -cpu host,ap=on,apqci=on,apft=on \
+ -device vfio-ap,sysfsdev=/sys/devices/vfio_ap/matrix/$uuid3 ...
+
+When the guest is shut down, the mediated matrix devices may be removed.
+
+Using our example again, to remove the mediated matrix device $uuid1:
+
+ /sys/devices/vfio_ap/matrix/
+ --- [mdev_supported_types]
+ ------ [vfio_ap-passthrough]
+ --------- [devices]
+ ------------ [$uuid1]
+ --------------- remove
+
+
+ echo 1 > remove
+
+ This will remove all of the mdev matrix device's sysfs structures including
+ the mdev device itself. To recreate and reconfigure the mdev matrix device,
+ all of the steps starting with step 3 will have to be performed again. Note
+ that the remove will fail if a guest using the mdev is still running.
+
+ It is not necessary to remove an mdev matrix device, but one may want to
+ remove it if no guest will use it during the remaining lifetime of the linux
+ host. If the mdev matrix device is removed, one may want to also reconfigure
+ the pool of adapters and queues reserved for use by the default drivers.
+
+Limitations
+===========
+* The KVM/kernel interfaces do not provide a way to prevent an APQN from being
+ restored to the default drivers pool while the queue is still assigned to a
+ mediated device in use by a guest. It is incumbent upon the administrator to
+ ensure there is no mediated device in use by a guest to which the APQN is
+ assigned, lest the host be given access to the private data of the AP queue
+ device, such as a private key configured specifically for the guest.
+
+* Dynamically modifying the AP matrix for a running guest (which would amount to
+ hot(un)plug of AP devices for the guest) is currently not supported.
+
+* Live guest migration is not supported for guests using AP devices.
diff --git a/Documentation/scsi/ufs.txt b/Documentation/scsi/ufs.txt
index 41a6164592aa..520b5b033256 100644
--- a/Documentation/scsi/ufs.txt
+++ b/Documentation/scsi/ufs.txt
@@ -128,6 +128,26 @@ The current UFSHCD implementation supports following functionality,
In this version of UFSHCD Query requests and power management
functionality are not implemented.
+4. BSG Support
+------------------
+
+This transport driver supports exchanging UFS protocol information units
+(UPIUs) with a UFS device. Typically, user space will allocate
+struct ufs_bsg_request and struct ufs_bsg_reply (see ufs_bsg.h) as
+request_upiu and reply_upiu respectively. Filling those UPIUs should
+be done in accordance with JEDEC spec UFS2.1 paragraph 10.7.
+*Caveat emptor*: The driver performs no further input validation and sends the
+UPIU to the device as it is. Open the bsg device at /dev/ufs-bsg and
+send SG_IO with the applicable sg_io_v4:
+
+ io_hdr_v4.guard = 'Q';
+ io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+ io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+ io_hdr_v4.response = (__u64)reply_upiu;
+ io_hdr_v4.max_response_len = reply_len;
+ io_hdr_v4.request_len = request_len;
+ io_hdr_v4.request = (__u64)request_upiu;
+
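+A minimal user space sketch putting these pieces together might look like
+the following (illustrative only: error handling is reduced to a minimum,
+the UPIU contents must be filled in per the JEDEC spec, and the header
+providing struct ufs_bsg_request/ufs_bsg_reply is assumed to be available
+locally as ufs_bsg.h):
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <string.h>
+    #include <sys/ioctl.h>
+    #include <scsi/sg.h>      /* SG_IO */
+    #include <linux/bsg.h>    /* struct sg_io_v4, BSG_PROTOCOL_* */
+    #include "ufs_bsg.h"      /* struct ufs_bsg_request/_reply */
+
+    int main(void)
+    {
+        struct ufs_bsg_request request_upiu;
+        struct ufs_bsg_reply reply_upiu;
+        struct sg_io_v4 io_hdr_v4;
+        int fd;
+
+        memset(&request_upiu, 0, sizeof(request_upiu));
+        memset(&reply_upiu, 0, sizeof(reply_upiu));
+        memset(&io_hdr_v4, 0, sizeof(io_hdr_v4));
+
+        /* fill request_upiu here per JEDEC UFS2.1 paragraph 10.7 */
+
+        io_hdr_v4.guard = 'Q';
+        io_hdr_v4.protocol = BSG_PROTOCOL_SCSI;
+        io_hdr_v4.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
+        io_hdr_v4.response = (__u64)(uintptr_t)&reply_upiu;
+        io_hdr_v4.max_response_len = sizeof(reply_upiu);
+        io_hdr_v4.request_len = sizeof(request_upiu);
+        io_hdr_v4.request = (__u64)(uintptr_t)&request_upiu;
+
+        fd = open("/dev/ufs-bsg", O_RDWR);
+        if (fd < 0)
+            return 1;
+        return ioctl(fd, SG_IO, &io_hdr_v4) ? 1 : 0;
+    }
+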
UFS Specifications can be found at,
UFS - http://www.jedec.org/sites/default/files/docs/JESD220.pdf
UFSHCI - http://www.jedec.org/sites/default/files/docs/JESD223.pdf
diff --git a/Documentation/sound/hd-audio/models.rst b/Documentation/sound/hd-audio/models.rst
index e06238131f77..368a07a165f5 100644
--- a/Documentation/sound/hd-audio/models.rst
+++ b/Documentation/sound/hd-audio/models.rst
@@ -309,6 +309,8 @@ asus-nx50
ASUS Nx50 fixups
asus-nx51
ASUS Nx51 fixups
+asus-g751
+ ASUS G751 fixups
alc891-headset
Headset mode support on ALC891
alc891-headset-multi
diff --git a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
index a0b268466cb1..b37234afdfa1 100644
--- a/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
+++ b/Documentation/sound/kernel-api/writing-an-alsa-driver.rst
@@ -3,8 +3,6 @@ Writing an ALSA Driver
======================
:Author: Takashi Iwai <tiwai@suse.de>
-:Date: Oct 15, 2007
-:Edition: 0.3.7
Preface
=======
@@ -21,11 +19,6 @@ explain the general topic of linux kernel coding and doesn't cover
low-level driver implementation details. It only describes the standard
way to write a PCI sound driver on ALSA.
-If you are already familiar with the older ALSA ver.0.5.x API, you can
-check the drivers such as ``sound/pci/es1938.c`` or
-``sound/pci/maestro3.c`` which have also almost the same code-base in
-the ALSA 0.5.x tree, so you can compare the differences.
-
This document is still a draft version. Any feedback and corrections,
please!!
@@ -35,24 +28,7 @@ File Tree Structure
General
-------
-The ALSA drivers are provided in two ways.
-
-One is the trees provided as a tarball or via cvs from the ALSA's ftp
-site, and another is the 2.6 (or later) Linux kernel tree. To
-synchronize both, the ALSA driver tree is split into two different
-trees: alsa-kernel and alsa-driver. The former contains purely the
-source code for the Linux 2.6 (or later) tree. This tree is designed
-only for compilation on 2.6 or later environment. The latter,
-alsa-driver, contains many subtle files for compiling ALSA drivers
-outside of the Linux kernel tree, wrapper functions for older 2.2 and
-2.4 kernels, to adapt the latest kernel API, and additional drivers
-which are still in development or in tests. The drivers in alsa-driver
-tree will be moved to alsa-kernel (and eventually to the 2.6 kernel
-tree) when they are finished and confirmed to work fine.
-
-The file tree structure of ALSA driver is depicted below. Both
-alsa-kernel and alsa-driver have almost the same file structure, except
-for “core” directory. It's named as “acore” in alsa-driver tree.
+The file tree structure of ALSA driver is depicted below.
::
@@ -61,14 +37,11 @@ for “core” directory. It's named as “acore” in alsa-driver tree.
/oss
/seq
/oss
- /instr
- /ioctl32
/include
/drivers
/mpu401
/opl3
/i2c
- /l3
/synth
/emux
/pci
@@ -80,6 +53,7 @@ for “core” directory. It's named as “acore” in alsa-driver tree.
/sparc
/usb
/pcmcia /(cards)
+ /soc
/oss
@@ -99,13 +73,6 @@ directory. The rawmidi OSS emulation is included in the ALSA rawmidi
code since it's quite small. The sequencer code is stored in
``core/seq/oss`` directory (see `below <#core-seq-oss>`__).
-core/ioctl32
-~~~~~~~~~~~~
-
-This directory contains the 32bit-ioctl wrappers for 64bit architectures
-such like x86-64, ppc64 and sparc64. For 32bit and alpha architectures,
-these are not compiled.
-
core/seq
~~~~~~~~
@@ -119,11 +86,6 @@ core/seq/oss
This contains the OSS sequencer emulation codes.
-core/seq/instr
-~~~~~~~~~~~~~~
-
-This directory contains the modules for the sequencer instrument layer.
-
include directory
-----------------
@@ -161,11 +123,6 @@ Although there is a standard i2c layer on Linux, ALSA has its own i2c
code for some cards, because the soundcard needs only a simple operation
and the standard i2c API is too complicated for such a purpose.
-i2c/l3
-~~~~~~
-
-This is a sub-directory for ARM L3 i2c.
-
synth directory
---------------
@@ -209,11 +166,19 @@ The PCMCIA, especially PCCard drivers will go here. CardBus drivers will
be in the pci directory, because their API is identical to that of
standard PCI cards.
+soc directory
+-------------
+
+This directory contains the code for the ASoC (ALSA System on Chip)
+layer, including the ASoC core, codec and machine drivers.
+
oss directory
-------------
-The OSS/Lite source files are stored here in Linux 2.6 (or later) tree.
-In the ALSA driver tarball, this directory is empty, of course :)
+This directory contains the OSS/Lite code.
+All of it has been deprecated except for dmasound on m68k as of
+this writing.
+
Basic Flow for PCI Drivers
==========================
@@ -352,10 +317,8 @@ to details explained in the following section.
/* (3) */
err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
/* (4) */
strcpy(card->driver, "My Chip");
@@ -368,22 +331,23 @@ to details explained in the following section.
/* (6) */
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
/* (7) */
pci_set_drvdata(pci, card);
dev++;
return 0;
+
+ error:
+ snd_card_free(card);
+ return err;
}
/* destructor -- see the "Destructor" sub-section */
static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
}
@@ -445,14 +409,26 @@ In this part, the PCI resources are allocated.
struct mychip *chip;
....
err = snd_mychip_create(card, pci, &chip);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
The details will be explained in the section `PCI Resource
Management`_.
+When something goes wrong, the probe function needs to deal with the
+error. In this example, we have a single error handling path placed
+at the end of the function.
+
+::
+
+ error:
+ snd_card_free(card);
+ return err;
+
+Since each component can be properly freed, the single
+:c:func:`snd_card_free()` call should suffice in most cases.
+
+
4) Set the driver ID and name strings.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -486,10 +462,8 @@ too.
::
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto error;
Will be explained in the section `Management of Cards and
Components`_, too.
@@ -513,14 +487,13 @@ The destructor, remove callback, simply releases the card instance. Then
the ALSA middle layer will release all the attached components
automatically.
-It would be typically like the following:
+It would typically be just calling :c:func:`snd_card_free()`:
::
static void snd_mychip_remove(struct pci_dev *pci)
{
snd_card_free(pci_get_drvdata(pci));
- pci_set_drvdata(pci, NULL);
}
@@ -546,7 +519,7 @@ in the source file. If the code is split into several files, the files
without module options don't need them.
In addition to these headers, you'll need ``<linux/interrupt.h>`` for
-interrupt handling, and ``<asm/io.h>`` for I/O access. If you use the
+interrupt handling, and ``<linux/io.h>`` for I/O access. If you use the
:c:func:`mdelay()` or :c:func:`udelay()` functions, you'll need
to include ``<linux/delay.h>`` too.
@@ -720,6 +693,13 @@ function, which will call the real destructor.
where :c:func:`snd_mychip_free()` is the real destructor.
+The demerit of this method is obviously the larger amount of code.
+The merit is, however, that you can trigger your own callbacks at
+registration and disconnection of the card by setting them in
+snd_device_ops. For details about registration and disconnection of
+the card, see the subsections below.
+
+
Registration and Release
------------------------
@@ -905,10 +885,8 @@ Resource Allocation
-------------------
The allocation of I/O ports and irqs is done via standard kernel
-functions. Unlike ALSA ver.0.5.x., there are no helpers for that. And
-these resources must be released in the destructor function (see below).
-Also, on ALSA 0.9.x, you don't need to allocate (pseudo-)DMA for PCI
-like in ALSA 0.5.x.
+functions. These resources must be released in the destructor
+function (see below).
Now assume that the PCI device has an I/O port with 8 bytes and an
interrupt. Then :c:type:`struct mychip <mychip>` will have the
@@ -1064,7 +1042,8 @@ and the allocation would be like below:
::
- if ((err = pci_request_regions(pci, "My Chip")) < 0) {
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
kfree(chip);
return err;
}
@@ -1086,6 +1065,21 @@ and the corresponding destructor would be:
....
}
+Of course, a modern way with :c:func:`pci_iomap()` will make things a
+bit easier, too.
+
+::
+
+ err = pci_request_regions(pci, "My Chip");
+ if (err < 0) {
+ kfree(chip);
+ return err;
+ }
+ chip->iobase_virt = pci_iomap(pci, 0, 0);
+
+which is paired with :c:func:`pci_iounmap()` at destructor.
+
+
PCI Entries
-----------
@@ -1154,13 +1148,6 @@ And at last, the module entries:
Note that these module entries are tagged with ``__init`` and ``__exit``
prefixes.
-Oh, one thing was forgotten. If you have no exported symbols, you need
-to declare it in 2.2 or 2.4 kernels (it's not necessary in 2.6 kernels).
-
-::
-
- EXPORT_NO_SYMBOLS;
-
That's all!
PCM Interface
@@ -2113,6 +2100,16 @@ non-contiguous buffers. The mmap calls this callback to get the page
address. Some examples will be explained in the later section `Buffer
and Memory Management`_, too.
+mmap callback
+~~~~~~~~~~~~~
+
+This is another optional callback for controlling mmap behavior.
+When defined, the PCM core calls this callback when a page is
+memory-mapped, instead of using the standard helper.
+If you need special handling (due to some architecture or
+device-specific issues), implement everything here as you like.
+
+
PCM Interrupt Handler
---------------------
@@ -2370,6 +2367,27 @@ to define the inverse rule:
hw_rule_format_by_channels, NULL,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
+One typical usage of the hw constraints is to align the buffer size
+with the period size. As default, ALSA PCM core doesn't enforce the
+buffer size to be aligned with the period size. For example, it'd be
+possible to have a combination like 256 period bytes with 999 buffer
+bytes.
+
+Many device chips, however, require the buffer to be a multiple of
+periods. In such a case, call
+:c:func:`snd_pcm_hw_constraint_integer()` for
+``SNDRV_PCM_HW_PARAM_PERIODS``.
+
+::
+
+ snd_pcm_hw_constraint_integer(substream->runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+This ensures that the number of periods is an integer, hence the buffer
+size is aligned with the period size.
+
+The hw constraint is a very powerful mechanism to define the
+preferred PCM configuration, and there are relevant helpers.
I won't give more details here, rather I would like to say, “Luke, use
the source.”
@@ -3712,7 +3730,14 @@ example, for an intermediate buffer. Since the allocated pages are not
contiguous, you need to set the ``page`` callback to obtain the physical
address at every offset.
-The implementation of ``page`` callback would be like this:
+The easiest way to achieve it would be to use
+:c:func:`snd_pcm_lib_alloc_vmalloc_buffer()` for allocating the buffer
+via :c:func:`vmalloc()`, and set :c:func:`snd_pcm_sgbuf_ops_page()` to
+the ``page`` callback. At release, you need to call
+:c:func:`snd_pcm_lib_free_vmalloc_buffer()`.
+
+If you want to implement the ``page`` callback manually, it would be
+like this:
::
@@ -3848,7 +3873,9 @@ Power Management
If the chip is supposed to work with suspend/resume functions, you need
to add power-management code to the driver. The additional code for
-power-management should be ifdef-ed with ``CONFIG_PM``.
+power-management should be ifdef-ed with ``CONFIG_PM``, or annotated
+with the __maybe_unused attribute; otherwise the compiler will
+complain.
If the driver *fully* supports suspend/resume that is, the device can be
properly resumed to its state when suspend was called, you can set the
@@ -3879,18 +3906,16 @@ the case of PCI drivers, the callbacks look like below:
::
- #ifdef CONFIG_PM
- static int snd_my_suspend(struct pci_dev *pci, pm_message_t state)
+ static int __maybe_unused snd_my_suspend(struct device *dev)
{
.... /* do things for suspend */
return 0;
}
- static int snd_my_resume(struct pci_dev *pci)
+ static int __maybe_unused snd_my_resume(struct device *dev)
{
.... /* do things for suspend */
return 0;
}
- #endif
The scheme of the real suspend job is as follows.
@@ -3909,18 +3934,14 @@ The scheme of the real suspend job is as follows.
6. Stop the hardware if necessary.
-7. Disable the PCI device by calling
- :c:func:`pci_disable_device()`. Then, call
- :c:func:`pci_save_state()` at last.
-
A typical code would be like:
::
- static int mychip_suspend(struct pci_dev *pci, pm_message_t state)
+ static int __maybe_unused mychip_suspend(struct device *dev)
{
/* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct mychip *chip = card->private_data;
/* (2) */
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -3932,9 +3953,6 @@ A typical code would be like:
snd_mychip_save_registers(chip);
/* (6) */
snd_mychip_stop_hardware(chip);
- /* (7) */
- pci_disable_device(pci);
- pci_save_state(pci);
return 0;
}
@@ -3943,44 +3961,35 @@ The scheme of the real resume job is as follows.
1. Retrieve the card and the chip data.
-2. Set up PCI. First, call :c:func:`pci_restore_state()`. Then
- enable the pci device again by calling
- :c:func:`pci_enable_device()`. Call
- :c:func:`pci_set_master()` if necessary, too.
+2. Re-initialize the chip.
-3. Re-initialize the chip.
+3. Restore the saved registers if necessary.
-4. Restore the saved registers if necessary.
+4. Resume the mixer, e.g. calling :c:func:`snd_ac97_resume()`.
-5. Resume the mixer, e.g. calling :c:func:`snd_ac97_resume()`.
+5. Restart the hardware (if any).
-6. Restart the hardware (if any).
-
-7. Call :c:func:`snd_power_change_state()` with
+6. Call :c:func:`snd_power_change_state()` with
``SNDRV_CTL_POWER_D0`` to notify the processes.
A typical code would be like:
::
- static int mychip_resume(struct pci_dev *pci)
+ static int __maybe_unused mychip_resume(struct device *dev)
{
/* (1) */
- struct snd_card *card = pci_get_drvdata(pci);
+ struct snd_card *card = dev_get_drvdata(dev);
struct mychip *chip = card->private_data;
/* (2) */
- pci_restore_state(pci);
- pci_enable_device(pci);
- pci_set_master(pci);
- /* (3) */
snd_mychip_reinit_chip(chip);
- /* (4) */
+ /* (3) */
snd_mychip_restore_registers(chip);
- /* (5) */
+ /* (4) */
snd_ac97_resume(chip->ac97);
- /* (6) */
+ /* (5) */
snd_mychip_restart_chip(chip);
- /* (7) */
+ /* (6) */
snd_power_change_state(card, SNDRV_CTL_POWER_D0);
return 0;
}
@@ -4046,15 +4055,14 @@ And next, set suspend/resume callbacks to the pci_driver.
::
+ static SIMPLE_DEV_PM_OPS(snd_my_pm_ops, mychip_suspend, mychip_resume);
+
static struct pci_driver driver = {
.name = KBUILD_MODNAME,
.id_table = snd_my_ids,
.probe = snd_my_probe,
.remove = snd_my_remove,
- #ifdef CONFIG_PM
- .suspend = snd_my_suspend,
- .resume = snd_my_resume,
- #endif
+ .driver.pm = &snd_my_pm_ops,
};
Module Parameters
@@ -4078,7 +4086,7 @@ variables, instead. ``enable`` option is not always necessary in this
case, but it would be better to have a dummy option for compatibility.
The module parameters must be declared with the standard
-``module_param()()``, ``module_param_array()()`` and
+``module_param()``, ``module_param_array()`` and
:c:func:`MODULE_PARM_DESC()` macros.
The typical coding would be like below:
@@ -4094,15 +4102,14 @@ The typical coding would be like below:
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard.");
-Also, don't forget to define the module description, classes, license
-and devices. Especially, the recent modprobe requires to define the
+Also, don't forget to define the module description and the license.
+Especially, the recent modprobe requires you to define the
module license as GPL, etc., otherwise the system is shown as “tainted”.
::
- MODULE_DESCRIPTION("My Chip");
+ MODULE_DESCRIPTION("Sound driver for My Chip");
MODULE_LICENSE("GPL");
- MODULE_SUPPORTED_DEVICE("{{Vendor,My Chip Name}}");
How To Put Your Driver Into ALSA Tree
@@ -4117,21 +4124,17 @@ a question now: how to put my own driver into the ALSA driver tree? Here
Suppose that you create a new PCI driver for the card “xyz”. The card
module name would be snd-xyz. The new driver is usually put into the
-alsa-driver tree, ``alsa-driver/pci`` directory in the case of PCI
-cards. Then the driver is evaluated, audited and tested by developers
-and users. After a certain time, the driver will go to the alsa-kernel
-tree (to the corresponding directory, such as ``alsa-kernel/pci``) and
-eventually will be integrated into the Linux 2.6 tree (the directory
-would be ``linux/sound/pci``).
+alsa-driver tree, ``sound/pci`` directory in the case of PCI
+cards.
In the following sections, the driver code is supposed to be put into
-alsa-driver tree. The two cases are covered: a driver consisting of a
+Linux kernel tree. The two cases are covered: a driver consisting of a
single source file and one consisting of several source files.
Driver with A Single Source File
--------------------------------
-1. Modify alsa-driver/pci/Makefile
+1. Modify sound/pci/Makefile
Suppose you have a file xyz.c. Add the following two lines
@@ -4160,52 +4163,43 @@ Driver with A Single Source File
For the details of Kconfig script, refer to the kbuild documentation.
-3. Run cvscompile script to re-generate the configure script and build
- the whole stuff again.
-
Drivers with Several Source Files
---------------------------------
Suppose that the driver snd-xyz have several source files. They are
-located in the new subdirectory, pci/xyz.
+located in the new subdirectory, sound/pci/xyz.
-1. Add a new directory (``xyz``) in ``alsa-driver/pci/Makefile`` as
- below
+1. Add a new directory (``sound/pci/xyz``) in ``sound/pci/Makefile``
+ as below
::
- obj-$(CONFIG_SND) += xyz/
+ obj-$(CONFIG_SND) += sound/pci/xyz/
-2. Under the directory ``xyz``, create a Makefile
+2. Under the directory ``sound/pci/xyz``, create a Makefile
::
- ifndef SND_TOPDIR
- SND_TOPDIR=../..
- endif
-
- include $(SND_TOPDIR)/toplevel.config
- include $(SND_TOPDIR)/Makefile.conf
-
snd-xyz-objs := xyz.o abc.o def.o
-
obj-$(CONFIG_SND_XYZ) += snd-xyz.o
- include $(SND_TOPDIR)/Rules.make
-
3. Create the Kconfig entry
This procedure is as same as in the last section.
-4. Run cvscompile script to re-generate the configure script and build
- the whole stuff again.
Useful Functions
================
:c:func:`snd_printk()` and friends
----------------------------------------
+----------------------------------
+
+.. note:: This subsection describes a few helper functions for
+ decorating a bit more on the standard :c:func:`printk()` & co.
+ However, in general, the use of such helpers is no longer recommended.
+ If possible, try to stick with the standard functions like
+ :c:func:`dev_err()` or :c:func:`pr_err()`.
ALSA provides a verbose version of the :c:func:`printk()` function.
If a kernel config ``CONFIG_SND_VERBOSE_PRINTK`` is set, this function
@@ -4221,13 +4215,10 @@ just like :c:func:`snd_printk()`. If the ALSA is compiled without
the debugging flag, it's ignored.
:c:func:`snd_printdd()` is compiled in only when
-``CONFIG_SND_DEBUG_VERBOSE`` is set. Please note that
-``CONFIG_SND_DEBUG_VERBOSE`` is not set as default even if you configure
-the alsa-driver with ``--with-debug=full`` option. You need to give
-explicitly ``--with-debug=detect`` option instead.
+``CONFIG_SND_DEBUG_VERBOSE`` is set.
:c:func:`snd_BUG()`
-------------------------
+-------------------
It shows the ``BUG?`` message and stack trace as well as
:c:func:`snd_BUG_ON()` at the point. It's useful to show that a
@@ -4236,7 +4227,7 @@ fatal error happens there.
When no debug flag is set, this macro is ignored.
:c:func:`snd_BUG_ON()`
-----------------------------
+----------------------
:c:func:`snd_BUG_ON()` macro is similar with
:c:func:`WARN_ON()` macro. For example, snd_BUG_ON(!pointer); or
diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt
index f788264921ff..30d6a64e53f7 100644
--- a/Documentation/switchtec.txt
+++ b/Documentation/switchtec.txt
@@ -23,7 +23,7 @@ The primary means of communicating with the Switchtec management firmware is
through the Memory-mapped Remote Procedure Call (MRPC) interface.
Commands are submitted to the interface with a 4-byte command
identifier and up to 1KB of command specific data. The firmware will
-respond with a 4 bytes return code and up to 1KB of command specific
+respond with a 4-byte return code and up to 1KB of command-specific
data. The interface only processes a single command at a time.
@@ -36,8 +36,8 @@ device: /dev/switchtec#, one for each management endpoint in the system.
The char device has the following semantics:
* A write must consist of at least 4 bytes and no more than 1028 bytes.
- The first four bytes will be interpreted as the command to run and
- the remainder will be used as the input data. A write will send the
+ The first 4 bytes will be interpreted as the Command ID and the
+ remainder will be used as the input data. A write will send the
command to the firmware to begin processing.
* Each write must be followed by exactly one read. Any double write will
@@ -45,9 +45,9 @@ The char device has the following semantics:
produce an error.
* A read will block until the firmware completes the command and return
- the four bytes of status plus up to 1024 bytes of output data. (The
- length will be specified by the size parameter of the read call --
- reading less than 4 bytes will produce an error.
+ the 4-byte Command Return Value plus up to 1024 bytes of output
+ data. (The length will be specified by the size parameter of the read
+ call -- reading less than 4 bytes will produce an error.)
* The poll call will also be supported for userspace applications that
need to do other things while waiting for the command to complete.
@@ -83,10 +83,20 @@ The following IOCTLs are also supported by the device:
Non-Transparent Bridge (NTB) Driver
===================================
-An NTB driver is provided for the switchtec hardware in switchtec_ntb.
-Currently, it only supports switches configured with exactly 2
-partitions. It also requires the following configuration settings:
+An NTB hardware driver is provided for the Switchtec hardware in
+ntb_hw_switchtec. Currently, it only supports switches configured with
+exactly 2 NT partitions and zero or more non-NT partitions. It also requires
+the following configuration settings:
-* Both partitions must be able to access each other's GAS spaces.
+* Both NT partitions must be able to access each other's GAS spaces.
Thus, the bits in the GAS Access Vector under Management Settings
must be set to support this.
+* Kernel configuration MUST include support for NTB (CONFIG_NTB needs
+ to be set)
+
+NT EP BAR 2 will be dynamically configured as a Direct Window, and
+the configuration file does not need to configure it explicitly.
+
+Please refer to Documentation/ntb.txt in the Linux source tree for an overall
+understanding of the Linux NTB stack. ntb_hw_switchtec works as an NTB
+Hardware Driver in this stack.
diff --git a/Documentation/trace/stm.rst b/Documentation/trace/stm.rst
index 2c22ddb7fd3e..99f99963e5e7 100644
--- a/Documentation/trace/stm.rst
+++ b/Documentation/trace/stm.rst
@@ -1,3 +1,5 @@
+.. SPDX-License-Identifier: GPL-2.0
+
===================
System Trace Module
===================
@@ -53,12 +55,30 @@ under "user" directory from the example above and this new rule will
be used for trace sources with the id string of "user/dummy".
Trace sources have to open the stm class device's node and write their
-trace data into its file descriptor. In order to identify themselves
-to the policy, they need to do a STP_POLICY_ID_SET ioctl on this file
-descriptor providing their id string. Otherwise, they will be
-automatically allocated a master/channel pair upon first write to this
-file descriptor according to the "default" rule of the policy, if such
-exists.
+trace data into its file descriptor.
+
+In order to find an appropriate policy node for a given trace source,
+several mechanisms can be used. First, a trace source can explicitly
+identify itself by calling an STP_POLICY_ID_SET ioctl on the character
+device's file descriptor, providing their id string, before they write
+any data there. Secondly, if they choose not to perform the explicit
+identification (for example, because patching existing software to do this
+is not desirable), they can just start writing the data, at which point the
+stm core will try to find a policy node with the name matching the
+task's name (e.g., "syslogd") and if one exists, it will be used.
+Thirdly, if the task name can't be found among the policy nodes, the
+catch-all entry "default" will be used, if it exists. This entry also
+needs to be created and configured by the system administrator or
+whatever tools are taking care of the policy configuration. Finally,
+if all the above steps failed, the write() to an stm file descriptor
+will return an error (EINVAL).
+
+Previously, if no policy nodes were found for a trace source, the stm
+class would silently fall back to allocating the first available
+contiguous range of master/channels from the beginning of the device's
+master/channel range. The new requirement for a policy node to exist
+will help programmers and sysadmins identify gaps in configuration
+and have better control over unidentified sources.
Some STM devices may allow direct mapping of the channel mmio regions
to userspace for zero-copy writing. One mappable page (in terms of
@@ -92,9 +112,9 @@ allocated for the device according to the policy configuration. If
there's a node in the root of the policy directory that matches the
stm_source device's name (for example, "console"), this node will be
used to allocate master and channel numbers. If there's no such policy
-node, the stm core will pick the first contiguous chunk of channels
-within the first available master. Note that the node must exist
-before the stm_source device is connected to its stm device.
+node, the stm core will use the catch-all entry "default", if one
+exists. If neither policy node exists, the write() to stm_source_link
+will return an error.
stm_console
===========
diff --git a/Documentation/trace/sys-t.rst b/Documentation/trace/sys-t.rst
new file mode 100644
index 000000000000..3d8eb92735e9
--- /dev/null
+++ b/Documentation/trace/sys-t.rst
@@ -0,0 +1,62 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+MIPI SyS-T over STP
+===================
+
+The MIPI SyS-T protocol driver can be used with STM class devices to
+generate a standardized trace stream. Aside from being a standard, it
+provides better trace source identification and timestamp correlation.
+
+In order to use the MIPI SyS-T protocol driver with your STM device,
+first, you'll need to enable CONFIG_STM_PROTO_SYS_T.
+
+Now, you can select which protocol driver you want to use when you create
+a policy for your STM device, by specifying it in the policy name:
+
+# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/
+
+In other words, the policy name format is extended like this:
+
+ <device_name>:<protocol_name>.<policy_name>
+
+With Intel TH, for example, it can look like "0-sth:p_sys-t.my-policy".
+
+If the protocol name is omitted, the STM class will choose whichever
+protocol driver was loaded first.
+
+You can also double-check that everything is working as expected by
+reading back the "protocol" attribute:
+
+# cat /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/protocol
+p_sys-t
+
+Now, with the MIPI SyS-T protocol driver, each policy node in the
+configfs gets a few additional attributes, which determine per-source
+parameters specific to the protocol:
+
+# mkdir /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default
+# ls /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default
+channels
+clocksync_interval
+do_len
+masters
+ts_interval
+uuid
+
+The most important one here is the "uuid", which determines the UUID
+that will be used to tag all data coming from this source. It is
+automatically generated when a new node is created, but it is likely
+that you would want to change it.
+
+do_len switches on/off the additional "payload length" field in the
+MIPI SyS-T message header. It is off by default as the STP already
+marks message boundaries.
+
+ts_interval and clocksync_interval determine how much time in milliseconds
+can pass before we need to include a protocol (not transport, aka STP)
+timestamp in a message header or send a CLOCKSYNC packet, respectively.
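+
+For example, assuming the "default" node created above, the attributes
+can be set with plain configfs writes (the values below are made up):
+
+# echo 00112233-4455-6677-8899-aabbccddeeff > /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default/uuid
+# echo 100 > /config/stp-policy/dummy_stm.0:p_sys-t.my-policy/default/ts_interval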
+
+See Documentation/ABI/testing/configfs-stp-policy-p_sys-t for more
+details.
+
+* [1] https://www.mipi.org/specifications/sys-t
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 647f94128a85..cd209f7730af 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -123,6 +123,37 @@ memory layout to fit in user mode), check KVM_CAP_MIPS_VZ and use the
flag KVM_VM_MIPS_VZ.
+On arm64, the physical address size for a VM (IPA Size limit) is limited
+to 40 bits by default. The limit can be configured if the host supports the
+extension KVM_CAP_ARM_VM_IPA_SIZE. When supported, use
+KVM_VM_TYPE_ARM_IPA_SIZE(IPA_Bits) to set the size in the machine type
+identifier, where IPA_Bits is the maximum width of any physical
+address used by the VM. IPA_Bits is encoded in bits [7-0] of the
+machine type identifier.
+
+e.g., to configure a guest to use a 48-bit physical address size:
+
+ vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48));
+
+The requested size (IPA_Bits) must be:
+ 0 - Implies the default size, 40 bits (for backward compatibility)
+
+ or
+
+ N - Implies N bits, where N is a positive integer such that
+ 32 <= N <= Host_IPA_Limit
+
+Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and
+is dependent on the CPU capability and the kernel configuration. The limit can
+be retrieved by passing KVM_CAP_ARM_VM_IPA_SIZE to the KVM_CHECK_EXTENSION
+ioctl() at run-time.
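+
+For illustration, a sketch that asks for the largest IPA size the host
+supports and falls back to the default otherwise (kvm_fd is an open
+/dev/kvm file descriptor):
+
+    ipa = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
+    if (ipa > 0)    /* ipa == Host_IPA_Limit */
+        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa));
+    else            /* extension absent, use the 40-bit default */
+        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);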
+
+Please note that configuring the IPA size does not affect the capability
+exposed by the guest CPUs in ID_AA64MMFR0_EL1[PARange]. It only affects
+the size of the address translated at stage 2 (guest physical to
+host physical address translations).
+
+
4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
@@ -850,7 +881,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ __u8 pending;
__u32 error_code;
} exception;
struct {
@@ -873,15 +904,23 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
-Only two fields are defined in the flags field:
+The following bits are defined in the flags field:
-- KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
+- KVM_VCPUEVENT_VALID_SHADOW may be set to signal that
interrupt.shadow contains a valid state.
-- KVM_VCPUEVENT_VALID_SMM may be set in the flags field to signal that
- smi contains a valid state.
+- KVM_VCPUEVENT_VALID_SMM may be set to signal that smi contains a
+ valid state.
+
+- KVM_VCPUEVENT_VALID_PAYLOAD may be set to signal that the
+ exception_has_payload, exception_payload, and exception.pending
+ fields contain a valid state. This bit will be set whenever
+ KVM_CAP_EXCEPTION_PAYLOAD is enabled.
ARM/ARM64:
@@ -961,6 +1000,11 @@ shall be written into the VCPU.
KVM_VCPUEVENT_VALID_SMM can only be set if KVM_CAP_X86_SMM is available.
+If KVM_CAP_EXCEPTION_PAYLOAD is enabled, KVM_VCPUEVENT_VALID_PAYLOAD
+can be set in the flags field to signal that the
+exception_has_payload, exception_payload, and exception.pending fields
+contain a valid state and shall be written into the VCPU.
+
ARM/ARM64:
Set the pending SError exception state for this VCPU. It is not possible to
@@ -1922,6 +1966,7 @@ registers, find a list below:
PPC | KVM_REG_PPC_TIDR | 64
PPC | KVM_REG_PPC_PSSCR | 64
PPC | KVM_REG_PPC_DEC_EXPIRY | 64
+ PPC | KVM_REG_PPC_PTCR | 64
PPC | KVM_REG_PPC_TM_GPR0 | 64
...
PPC | KVM_REG_PPC_TM_GPR31 | 64
@@ -2269,6 +2314,10 @@ The supported flags are:
The emulated MMU supports 1T segments in addition to the
standard 256M ones.
+ - KVM_PPC_NO_HASH
+ This flag indicates that HPT guests are not supported by KVM,
+ thus all guests must use radix MMU mode.
+
The "slb_size" field indicates how many SLB entries are supported
The "sps" array contains 8 entries indicating the supported base
@@ -3676,6 +3725,34 @@ Returns: 0 on success, -1 on error
This copies the vcpu's kvm_nested_state struct from userspace to the kernel. For
the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
+4.116 KVM_(UN)REGISTER_COALESCED_MMIO
+
+Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio)
+ KVM_CAP_COALESCED_PIO (for coalesced pio)
+Architectures: all
+Type: vm ioctl
+Parameters: struct kvm_coalesced_mmio_zone
+Returns: 0 on success, < 0 on error
+
+Coalesced I/O is a performance optimization that defers hardware
+register write emulation so that userspace exits are avoided. It is
+typically used to reduce the overhead of emulating frequently accessed
+hardware registers.
+
+When a hardware register is configured for coalesced I/O, write accesses
+do not exit to userspace and their values are recorded in a ring buffer
+that is shared between kernel and userspace.
+
+Coalesced I/O can be used if one or more write accesses to a hardware
+register can be deferred until a read or a write to another hardware
+register on the same device occurs. This last access will cause a
+vmexit, and userspace will process the accesses from the ring buffer
+before emulating it. That avoids exiting to userspace on repeated
+writes.
+
+Coalesced pio is based on coalesced mmio. There is little difference
+between coalesced mmio and pio except that coalesced pio records accesses
+to I/O ports.
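+
+As an illustration, registering a zone might look like the following
+sketch (the address and size are made-up values):
+
+    struct kvm_coalesced_mmio_zone zone = {
+        .addr = 0xfed00000,   /* guest physical address */
+        .size = 0x1000,
+        .pio  = 0,            /* set to 1 for coalesced pio */
+    };
+
+    ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);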
+
5. The kvm_run structure
------------------------
@@ -4522,7 +4599,7 @@ hpage module parameter is not set to 1, -EINVAL is returned.
While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.
-7.14 KVM_CAP_MSR_PLATFORM_INFO
+7.15 KVM_CAP_MSR_PLATFORM_INFO
Architectures: x86
Parameters: args[0] whether feature should be enabled or not
@@ -4531,6 +4608,45 @@ With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
a #GP would be raised when the guest tries to access. Currently, this
capability does not enable write permissions of this MSR for the guest.
+7.16 KVM_CAP_PPC_NESTED_HV
+
+Architectures: ppc
+Parameters: none
+Returns: 0 on success, -EINVAL when the implementation doesn't support
+ nested-HV virtualization.
+
+HV-KVM on POWER9 and later systems allows for "nested-HV"
+virtualization, which provides a way for a guest VM to run its own
+guests in the CPU's supervisor mode (privileged non-hypervisor
+state). Enabling this capability on a VM depends on the CPU having
+the necessary functionality and on the facility being enabled with a
+kvm-hv module parameter.
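+
+A sketch of enabling the facility from userspace (vm_fd is a VM file
+descriptor):
+
+    struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };
+
+    if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
+        /* nested-HV is not supported on this host */;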
+
+7.17 KVM_CAP_EXCEPTION_PAYLOAD
+
+Architectures: x86
+Parameters: args[0] whether feature should be enabled or not
+
+With this capability enabled, CR2 will not be modified prior to the
+emulated VM-exit when L1 intercepts a #PF exception that occurs in
+L2. Similarly, for kvm-intel only, DR6 will not be modified prior to
+the emulated VM-exit when L1 intercepts a #DB exception that occurs in
+L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or
+#DB) exception for L2, exception.has_payload will be set and the
+faulting address (or the new DR6 bits*) will be reported in the
+exception_payload field. Similarly, when userspace injects a #PF (or
+#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set
+exception.has_payload and to put the faulting address (or the new DR6
+bits*) in the exception_payload field.
+
+This capability also enables exception.pending in struct
+kvm_vcpu_events, which allows userspace to distinguish between pending
+and injected exceptions.
+
+
+* For the new DR6 bits, note that bit 16 is set iff the #DB exception
+ will clear DR6.RTM.
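+
+For illustration, queueing a pending #PF together with its payload
+might look like the following sketch (error_code and fault_address
+are placeholders):
+
+    struct kvm_vcpu_events events = {};
+
+    events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
+    events.exception.pending = 1;
+    events.exception.nr = 14;                  /* #PF */
+    events.exception.has_error_code = 1;
+    events.exception.error_code = error_code;
+    events.exception_has_payload = 1;
+    events.exception_payload = fault_address;  /* would-be CR2 value */
+
+    ioctl(vcpu_fd, KVM_SET_VCPU_EVENTS, &events);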
+
8. Other capabilities.
----------------------
@@ -4772,3 +4888,10 @@ CPU when the exception is taken. If this virtual SError is taken to EL1 using
AArch64, this value will be reported in the ISS field of ESR_ELx.
See KVM_CAP_VCPU_EVENTS for more details.
+8.20 KVM_CAP_HYPERV_SEND_IPI
+
+Architectures: x86
+
+This capability indicates that KVM supports paravirtualized Hyper-V IPI send
+hypercalls:
+HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
diff --git a/MAINTAINERS b/MAINTAINERS
index 49e8fa2e95d5..fdb6a298c7e7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -932,6 +932,7 @@ M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
M: Arve Hjønnevåg <arve@android.com>
M: Todd Kjos <tkjos@android.com>
M: Martijn Coenen <maco@android.com>
+M: Joel Fernandes <joel@joelfernandes.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
L: devel@driverdev.osuosl.org
S: Supported
@@ -1180,7 +1181,7 @@ N: owl
F: arch/arm/mach-actions/
F: arch/arm/boot/dts/owl-*
F: arch/arm64/boot/dts/actions/
-F: drivers/clocksource/owl-*
+F: drivers/clocksource/timer-owl*
F: drivers/pinctrl/actions/*
F: drivers/soc/actions/
F: include/dt-bindings/power/owl-*
@@ -1603,7 +1604,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/lpc43*
F: drivers/clk/nxp/clk-lpc18xx*
-F: drivers/clocksource/time-lpc32xx.c
+F: drivers/clocksource/timer-lpc32xx.c
F: drivers/i2c/busses/i2c-lpc2k.c
F: drivers/memory/pl172.c
F: drivers/mtd/spi-nor/nxp-spifi.c
@@ -2220,7 +2221,7 @@ F: arch/arm/mach-vexpress/
F: */*/vexpress*
F: */*/*/vexpress*
F: drivers/clk/versatile/clk-vexpress-osc.c
-F: drivers/clocksource/versatile.c
+F: drivers/clocksource/timer-versatile.c
N: mps2
ARM/VFP SUPPORT
@@ -2242,7 +2243,7 @@ M: Tony Prisk <linux@prisktech.co.nz>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-vt8500/
-F: drivers/clocksource/vt8500_timer.c
+F: drivers/clocksource/timer-vt8500.c
F: drivers/i2c/busses/i2c-wmt.c
F: drivers/mmc/host/wmt-sdmmc.c
F: drivers/pwm/pwm-vt8500.c
@@ -2307,7 +2308,7 @@ F: drivers/cpuidle/cpuidle-zynq.c
F: drivers/block/xsysace.c
N: zynq
N: xilinx
-F: drivers/clocksource/cadence_ttc_timer.c
+F: drivers/clocksource/timer-cadence-ttc.c
F: drivers/i2c/busses/i2c-cadence.c
F: drivers/mmc/host/sdhci-of-arasan.c
F: drivers/edac/synopsys_edac.c
@@ -4055,7 +4056,7 @@ M: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/cxlflash/
-F: include/uapi/scsi/cxlflash_ioctls.h
+F: include/uapi/scsi/cxlflash_ioctl.h
F: Documentation/powerpc/cxlflash.txt
CYBERPRO FB DRIVER
@@ -5364,7 +5365,8 @@ S: Maintained
F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <mchehab@kernel.org>
+M: Tony Luck <tony.luck@intel.com>
+R: Qiuxu Zhuo <qiuxu.zhuo@intel.com>
L: linux-edac@vger.kernel.org
S: Maintained
F: drivers/edac/sb_edac.c
@@ -7546,6 +7548,14 @@ F: drivers/platform/x86/intel_punit_ipc.c
F: arch/x86/include/asm/intel_pmc_ipc.h
F: arch/x86/include/asm/intel_punit_ipc.h
+INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
+R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S: Maintained
+F: drivers/mfd/intel_msic.c
+F: drivers/mfd/intel_soc_pmic*
+F: include/linux/mfd/intel_msic.h
+F: include/linux/mfd/intel_soc_pmic*
+
INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
M: Stanislav Yakovlev <stas.yakovlev@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -7569,14 +7579,6 @@ S: Supported
F: drivers/infiniband/hw/i40iw/
F: include/uapi/rdma/i40iw-abi.h
-INTEL SHA MULTIBUFFER DRIVER
-M: Megha Dey <megha.dey@linux.intel.com>
-R: Tim Chen <tim.c.chen@linux.intel.com>
-L: linux-crypto@vger.kernel.org
-S: Supported
-F: arch/x86/crypto/sha*-mb/
-F: crypto/mcryptd.c
-
INTEL TELEMETRY DRIVER
M: Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
L: platform-driver-x86@vger.kernel.org
@@ -8156,6 +8158,7 @@ F: security/keys/encrypted-keys/
KEYS-TRUSTED
M: James Bottomley <jejb@linux.vnet.ibm.com>
+M: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
M: Mimi Zohar <zohar@linux.vnet.ibm.com>
L: linux-integrity@vger.kernel.org
L: keyrings@vger.kernel.org
@@ -9589,6 +9592,7 @@ M: Richard Genoud <richard.genoud@gmail.com>
S: Maintained
F: drivers/tty/serial/atmel_serial.c
F: drivers/tty/serial/atmel_serial.h
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
MICROCHIP / ATMEL DMA DRIVER
M: Ludovic Desroches <ludovic.desroches@microchip.com>
@@ -9620,6 +9624,21 @@ S: Supported
F: drivers/mtd/nand/raw/atmel/*
F: Documentation/devicetree/bindings/mtd/atmel-nand.txt
+MICROCHIP AT91 USART MFD DRIVER
+M: Radu Pirea <radu_nicolae.pirea@upb.ro>
+L: linux-kernel@vger.kernel.org
+S: Supported
+F: drivers/mfd/at91-usart.c
+F: include/dt-bindings/mfd/at91-usart.h
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
+
+MICROCHIP AT91 USART SPI DRIVER
+M: Radu Pirea <radu_nicolae.pirea@upb.ro>
+L: linux-spi@vger.kernel.org
+S: Supported
+F: drivers/spi/spi-at91-usart.c
+F: Documentation/devicetree/bindings/mfd/atmel-usart.txt
+
MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
M: Woojung Huh <Woojung.Huh@microchip.com>
M: Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
@@ -9986,6 +10005,13 @@ S: Supported
F: drivers/gpu/drm/mxsfb/
F: Documentation/devicetree/bindings/display/mxsfb.txt
+MYLEX DAC960 PCI RAID Controller
+M: Hannes Reinecke <hare@kernel.org>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/myrb.*
+F: drivers/scsi/myrs.*
+
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
@@ -11274,7 +11300,7 @@ M: Murali Karicheri <m-karicheri2@ti.com>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-F: drivers/pci/controller/dwc/*keystone*
+F: drivers/pci/controller/dwc/pci-keystone.c
PCI ENDPOINT SUBSYSTEM
M: Kishon Vijay Abraham I <kishon@ti.com>
@@ -12775,6 +12801,18 @@ W: http://www.ibm.com/developerworks/linux/linux390/
S: Supported
F: drivers/s390/crypto/
+S390 VFIO AP DRIVER
+M: Tony Krowiak <akrowiak@linux.ibm.com>
+M: Pierre Morel <pmorel@linux.ibm.com>
+M: Halil Pasic <pasic@linux.ibm.com>
+L: linux-s390@vger.kernel.org
+W: http://www.ibm.com/developerworks/linux/linux390/
+S: Supported
+F: drivers/s390/crypto/vfio_ap_drv.c
+F: drivers/s390/crypto/vfio_ap_private.h
+F: drivers/s390/crypto/vfio_ap_ops.c
+F: Documentation/s390/vfio-ap.txt
+
S390 ZFCP DRIVER
M: Steffen Maier <maier@linux.ibm.com>
M: Benjamin Block <bblock@linux.ibm.com>
@@ -13720,7 +13758,7 @@ F: sound/soc/
F: include/sound/soc*
SOUNDWIRE SUBSYSTEM
-M: Vinod Koul <vinod.koul@intel.com>
+M: Vinod Koul <vkoul@kernel.org>
M: Sanyog Kale <sanyog.r.kale@intel.com>
R: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -14727,6 +14765,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/ti/netcp*
+TI PCM3060 ASoC CODEC DRIVER
+M: Kirill Marinushkin <kmarinushkin@birdec.tech>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/devicetree/bindings/sound/pcm3060.txt
+F: sound/soc/codecs/pcm3060*
+
TI TAS571X FAMILY ASoC CODEC DRIVER
M: Kevin Cernekee <cernekee@chromium.org>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
@@ -15399,6 +15444,12 @@ F: Documentation/driver-api/usb/typec_bus.rst
F: drivers/usb/typec/altmodes/
F: include/linux/usb/typec_altmode.h
+USB TYPEC PORT CONTROLLER DRIVERS
+M: Guenter Roeck <linux@roeck-us.net>
+L: linux-usb@vger.kernel.org
+S: Maintained
+F: drivers/usb/typec/tcpm/
+
USB UHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -15473,13 +15524,19 @@ F: arch/x86/um/
F: fs/hostfs/
F: fs/hppfs/
+USERSPACE COPYIN/COPYOUT (UIOVEC)
+M: Alexander Viro <viro@zeniv.linux.org.uk>
+S: Maintained
+F: lib/iov_iter.c
+F: include/linux/uio.h
+
USERSPACE I/O (UIO)
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/driver-api/uio-howto.rst
F: drivers/uio/
-F: include/linux/uio*.h
+F: include/linux/uio_driver.h
UTIL-LINUX PACKAGE
M: Karel Zak <kzak@redhat.com>
diff --git a/Makefile b/Makefile
index 2fc5732a4f9e..7d4ba5196010 100644
--- a/Makefile
+++ b/Makefile
@@ -1063,7 +1063,7 @@ include/config/kernel.release: $(srctree)/Makefile FORCE
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic asm-generic gcc-plugins $(autoksyms_h)
+scripts: scripts_basic scripts_dtc asm-generic gcc-plugins $(autoksyms_h)
$(Q)$(MAKE) $(build)=$(@)
# Things we need to do before we recursively start building the kernel
@@ -1213,6 +1213,35 @@ kselftest-merge:
+$(Q)$(MAKE) -f $(srctree)/Makefile olddefconfig
# ---------------------------------------------------------------------------
+# Devicetree files
+
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
+dtstree := arch/$(SRCARCH)/boot/dts
+endif
+
+ifneq ($(dtstree),)
+
+%.dtb: prepare3 scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
+
+PHONY += dtbs dtbs_install
+dtbs: prepare3 scripts_dtc
+ $(Q)$(MAKE) $(build)=$(dtstree)
+
+dtbs_install:
+ $(Q)$(MAKE) $(dtbinst)=$(dtstree)
+
+ifdef CONFIG_OF_EARLY_FLATTREE
+all: dtbs
+endif
+
+endif
+
+PHONY += scripts_dtc
+scripts_dtc: scripts_basic
+ $(Q)$(MAKE) $(build)=scripts/dtc
+
+# ---------------------------------------------------------------------------
# Modules
ifdef CONFIG_MODULES
@@ -1421,6 +1450,12 @@ help:
@echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing'
@echo ' .config.'
@echo ''
+ @$(if $(dtstree), \
+ echo 'Devicetree:'; \
+ echo '* dtbs - Build device tree blobs for enabled boards'; \
+ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'; \
+ echo '')
+
@echo 'Userspace tools targets:'
@echo ' use "make tools/help"'
@echo ' or "cd tools; make help"'
diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h
index d6e29a1de4cc..9ff37aa1165f 100644
--- a/arch/alpha/include/asm/unistd.h
+++ b/arch/alpha/include/asm/unistd.h
@@ -6,6 +6,7 @@
#define NR_SYSCALLS 523
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -13,6 +14,7 @@
#define __ARCH_WANT_SYS_GETPGRP
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
+#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
index c64806a2daf5..2e09248f8324 100644
--- a/arch/alpha/kernel/entry.S
+++ b/arch/alpha/kernel/entry.S
@@ -473,7 +473,7 @@ entSys:
bne $3, strace
beq $4, 1f
ldq $27, 0($5)
-1: jsr $26, ($27), alpha_ni_syscall
+1: jsr $26, ($27), sys_ni_syscall
ldgp $gp, 0($26)
blt $0, $syscall_error /* the call failed */
stq $0, 0($sp)
@@ -587,7 +587,7 @@ strace:
/* get the system call pointer.. */
lda $1, NR_SYSCALLS($31)
lda $2, sys_call_table
- lda $27, alpha_ni_syscall
+ lda $27, sys_ni_syscall
cmpult $0, $1, $1
s8addq $0, $2, $2
beq $1, 1f
@@ -791,7 +791,7 @@ ret_from_kernel_thread:
/*
* Special system calls. Most of these are special in that they either
- * have to play switch_stack games or in some way use the pt_regs struct.
+ * have to play switch_stack games.
*/
.macro fork_like name
@@ -812,46 +812,41 @@ fork_like fork
fork_like vfork
fork_like clone
+.macro sigreturn_like name
.align 4
- .globl sys_sigreturn
- .ent sys_sigreturn
-sys_sigreturn:
+ .globl sys_\name
+ .ent sys_\name
+sys_\name:
.prologue 0
lda $9, ret_from_straced
cmpult $26, $9, $9
lda $sp, -SWITCH_STACK_SIZE($sp)
- jsr $26, do_sigreturn
+ jsr $26, do_\name
bne $9, 1f
jsr $26, syscall_trace_leave
1: br $1, undo_switch_stack
br ret_from_sys_call
-.end sys_sigreturn
+.end sys_\name
+.endm
- .align 4
- .globl sys_rt_sigreturn
- .ent sys_rt_sigreturn
-sys_rt_sigreturn:
- .prologue 0
- lda $9, ret_from_straced
- cmpult $26, $9, $9
- lda $sp, -SWITCH_STACK_SIZE($sp)
- jsr $26, do_rt_sigreturn
- bne $9, 1f
- jsr $26, syscall_trace_leave
-1: br $1, undo_switch_stack
- br ret_from_sys_call
-.end sys_rt_sigreturn
+sigreturn_like sigreturn
+sigreturn_like rt_sigreturn
.align 4
- .globl alpha_ni_syscall
- .ent alpha_ni_syscall
-alpha_ni_syscall:
+ .globl alpha_syscall_zero
+ .ent alpha_syscall_zero
+alpha_syscall_zero:
.prologue 0
- /* Special because it also implements overflow handling via
- syscall number 0. And if you recall, zero is a special
- trigger for "not an error". Store large non-zero there. */
+ /* Special because it needs to do something opposite to
+ force_successful_syscall_return(). We use the saved
+ syscall number for that, zero meaning "not an error".
+ That works nicely, but for the real syscall 0 we need to
+ make sure that this logic doesn't get confused.
+ Store a non-zero value there - the -ENOSYS we need in a
+ register for our return value will do just fine.
+ */
lda $0, -ENOSYS
unop
stq $0, 0($sp)
ret
-.end alpha_ni_syscall
+.end alpha_syscall_zero
diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S
index 1374e591511f..5b2e8ecb7ce3 100644
--- a/arch/alpha/kernel/systbls.S
+++ b/arch/alpha/kernel/systbls.S
@@ -11,93 +11,93 @@
.align 3
.globl sys_call_table
sys_call_table:
- .quad alpha_ni_syscall /* 0 */
+ .quad alpha_syscall_zero /* 0 */
.quad sys_exit
.quad alpha_fork
.quad sys_read
.quad sys_write
- .quad alpha_ni_syscall /* 5 */
+ .quad sys_ni_syscall /* 5 */
.quad sys_close
.quad sys_osf_wait4
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_link
.quad sys_unlink /* 10 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_chdir
.quad sys_fchdir
.quad sys_mknod
.quad sys_chmod /* 15 */
.quad sys_chown
.quad sys_osf_brk
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_lseek
.quad sys_getxpid /* 20 */
.quad sys_osf_mount
.quad sys_umount
.quad sys_setuid
.quad sys_getxuid
- .quad alpha_ni_syscall /* 25 */
+ .quad sys_ni_syscall /* 25 */
.quad sys_ptrace
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 30 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 30 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_access
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 35 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 35 */
.quad sys_sync
.quad sys_kill
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setpgid
- .quad alpha_ni_syscall /* 40 */
+ .quad sys_ni_syscall /* 40 */
.quad sys_dup
.quad sys_alpha_pipe
.quad sys_osf_set_program_attributes
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_open /* 45 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getxgid
.quad sys_osf_sigprocmask
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 50 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 50 */
.quad sys_acct
.quad sys_sigpending
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_ioctl
- .quad alpha_ni_syscall /* 55 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall /* 55 */
+ .quad sys_ni_syscall
.quad sys_symlink
.quad sys_readlink
.quad sys_execve
.quad sys_umask /* 60 */
.quad sys_chroot
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getpgrp
.quad sys_getpagesize
- .quad alpha_ni_syscall /* 65 */
+ .quad sys_ni_syscall /* 65 */
.quad alpha_vfork
.quad sys_newstat
.quad sys_newlstat
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 70 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 70 */
.quad sys_osf_mmap
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_munmap
.quad sys_mprotect
.quad sys_madvise /* 75 */
.quad sys_vhangup
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getgroups
/* map BSD's setpgrp to sys_setpgid for binary compatibility: */
.quad sys_setgroups /* 80 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setpgid
.quad sys_osf_setitimer
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 85 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 85 */
.quad sys_osf_getitimer
.quad sys_gethostname
.quad sys_sethostname
@@ -119,19 +119,19 @@ sys_call_table:
.quad sys_bind
.quad sys_setsockopt /* 105 */
.quad sys_listen
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 110 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 110 */
.quad sys_sigsuspend
.quad sys_osf_sigstack
.quad sys_recvmsg
.quad sys_sendmsg
- .quad alpha_ni_syscall /* 115 */
+ .quad sys_ni_syscall /* 115 */
.quad sys_osf_gettimeofday
.quad sys_osf_getrusage
.quad sys_getsockopt
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
#ifdef CONFIG_OSF4_COMPAT
.quad sys_osf_readv /* 120 */
.quad sys_osf_writev
@@ -156,66 +156,66 @@ sys_call_table:
.quad sys_mkdir
.quad sys_rmdir
.quad sys_osf_utimes
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 140 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 140 */
.quad sys_getpeername
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getrlimit
.quad sys_setrlimit /* 145 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_setsid
.quad sys_quotactl
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getsockname /* 150 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 155 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 155 */
.quad sys_osf_sigaction
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_getdirentries
.quad sys_osf_statfs /* 160 */
.quad sys_osf_fstatfs
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_getdomainname /* 165 */
.quad sys_setdomainname
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 170 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 175 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 180 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 185 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 190 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 195 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 170 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 175 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 180 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 185 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 190 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 195 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
/* The OSF swapon has two extra arguments, but we ignore them. */
.quad sys_swapon
.quad sys_msgctl /* 200 */
@@ -231,93 +231,93 @@ sys_call_table:
.quad sys_shmctl /* 210 */
.quad sys_shmdt
.quad sys_shmget
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 215 */
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 215 */
+ .quad sys_ni_syscall
.quad sys_msync
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 220 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 220 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_stat
.quad sys_osf_lstat /* 225 */
.quad sys_osf_fstat
.quad sys_osf_statfs64
.quad sys_osf_fstatfs64
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 230 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 230 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_getpgid
.quad sys_getsid
.quad sys_sigaltstack /* 235 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 240 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 240 */
.quad sys_osf_sysinfo
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_osf_proplist_syscall
- .quad alpha_ni_syscall /* 245 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 250 */
+ .quad sys_ni_syscall /* 245 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 250 */
.quad sys_osf_usleep_thread
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
.quad sys_sysfs
- .quad alpha_ni_syscall /* 255 */
+ .quad sys_ni_syscall /* 255 */
.quad sys_osf_getsysinfo
.quad sys_osf_setsysinfo
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 260 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 265 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 270 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 275 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 280 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 285 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 290 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall /* 295 */
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
- .quad alpha_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 260 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 265 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 270 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 275 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 280 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 285 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 290 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall /* 295 */
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
+ .quad sys_ni_syscall
/* linux-specific system calls start at 300 */
.quad sys_bdflush /* 300 */
.quad sys_sethae
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 644815c0516e..c64c505d966c 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -102,11 +102,5 @@ boot_targets += uImage uImage.bin uImage.gz
$(boot_targets): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-%.dtb %.dtb.S %.dtb.o: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
-
-dtbs: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 517178b1daef..3b3543fd151c 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -17,6 +17,7 @@
#define _UAPI_ASM_ARC_UNISTD_H
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 5c91e0093ee8..05a91d8b89f3 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -303,12 +303,7 @@ else
KBUILD_IMAGE := $(boot)/zImage
endif
-# Build the DT binary blobs if we have OF configured
-ifeq ($(CONFIG_USE_OF),y)
-KBUILD_DTBS := dtbs
-endif
-
-all: $(notdir $(KBUILD_IMAGE)) $(KBUILD_DTBS)
+all: $(notdir $(KBUILD_IMAGE))
archheaders:
@@ -335,17 +330,6 @@ $(BOOT_TARGETS): vmlinux
$(INSTALL_TARGETS):
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-%.dtb: | scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@
-
-PHONY += dtbs dtbs_install
-
-dtbs: prepare scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
-dtbs_install:
- $(Q)$(MAKE) $(dtbinst)=$(boot)/dts
-
PHONY += vdso_install
vdso_install:
ifeq ($(CONFIG_VDSO),y)
@@ -367,8 +351,6 @@ define archhelp
echo ' uImage - U-Boot wrapped zImage'
echo ' bootpImage - Combined zImage and initial RAM disk'
echo ' (supply initrd image via make variable INITRD=<path>)'
- echo '* dtbs - Build device tree blobs for enabled boards'
- echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h
index 07437816e098..b36c0289a308 100644
--- a/arch/arm/boot/compressed/libfdt_env.h
+++ b/arch/arm/boot/compressed/libfdt_env.h
@@ -6,6 +6,8 @@
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT_MAX ((int)(~0U>>1))
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 7234e8330a57..efbdeaaa8dcd 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -146,8 +146,9 @@
fsl,max-link-speed = <2>;
power-domains = <&pgc_pcie_phy>;
resets = <&src IMX7_RESET_PCIEPHY>,
- <&src IMX7_RESET_PCIE_CTRL_APPS_EN>;
- reset-names = "pciephy", "apps";
+ <&src IMX7_RESET_PCIE_CTRL_APPS_EN>,
+ <&src IMX7_RESET_PCIE_CTRL_APPS_TURNOFF>;
+ reset-names = "pciephy", "apps", "turnoff";
status = "disabled";
};
};
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 925d1364727a..ef0c7feea6e2 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -99,6 +99,7 @@ config CRYPTO_GHASH_ARM_CE
depends on KERNEL_MODE_NEON
select CRYPTO_HASH
select CRYPTO_CRYPTD
+ select CRYPTO_GF128MUL
help
Use an implementation of GHASH (used by the GCM AEAD chaining mode)
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
@@ -121,10 +122,4 @@ config CRYPTO_CHACHA20_NEON
select CRYPTO_BLKCIPHER
select CRYPTO_CHACHA20
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_SPECK
-
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 8de542c48ade..bd5bceef0605 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -54,7 +53,6 @@ ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
ifdef REGENERATE_ARM_CRYPTO
quiet_cmd_perl = PERL $@
diff --git a/arch/arm/crypto/chacha20-neon-core.S b/arch/arm/crypto/chacha20-neon-core.S
index 451a849ad518..50e7b9896818 100644
--- a/arch/arm/crypto/chacha20-neon-core.S
+++ b/arch/arm/crypto/chacha20-neon-core.S
@@ -18,6 +18,34 @@
* (at your option) any later version.
*/
+ /*
+ * NEON doesn't have a rotate instruction. The alternatives are, more or less:
+ *
+ * (a) vshl.u32 + vsri.u32 (needs temporary register)
+ * (b) vshl.u32 + vshr.u32 + vorr (needs temporary register)
+ * (c) vrev32.16 (16-bit rotations only)
+ * (d) vtbl.8 + vtbl.8 (multiple of 8 bits rotations only,
+ * needs index vector)
+ *
+ * ChaCha20 has 16, 12, 8, and 7-bit rotations. For the 12 and 7-bit
+ * rotations, the only choices are (a) and (b). We use (a) since it takes
+ * two-thirds the cycles of (b) on both Cortex-A7 and Cortex-A53.
+ *
+ * For the 16-bit rotation, we use vrev32.16 since it's consistently fastest
+ * and doesn't need a temporary register.
+ *
+ * For the 8-bit rotation, we use vtbl.8 + vtbl.8. On Cortex-A7, this sequence
+ * is twice as fast as (a), even when doing (a) on multiple registers
+ * simultaneously to eliminate the stall between vshl and vsri. Also, it
+ * parallelizes better when temporary registers are scarce.
+ *
+ * A disadvantage is that on Cortex-A53, the vtbl sequence is the same speed as
+ * (a), so the need to load the rotation table actually makes the vtbl method
+ * slightly slower overall on that CPU (~1.3% slower ChaCha20). Still, it
+ * seems to be a good compromise to get a more significant speed boost on some
+ * CPUs, e.g. ~4.8% faster ChaCha20 on Cortex-A7.
+ */
+
#include <linux/linkage.h>
.text
@@ -46,7 +74,9 @@ ENTRY(chacha20_block_xor_neon)
vmov q10, q2
vmov q11, q3
+ adr ip, .Lrol8_table
mov r3, #10
+ vld1.8 {d10}, [ip, :64]
.Ldoubleround:
// x0 += x1, x3 = rotl32(x3 ^ x0, 16)
@@ -62,9 +92,9 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #8
- vsri.u32 q3, q4, #24
+ veor q3, q3, q0
+ vtbl.8 d6, {d6}, d10
+ vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
@@ -92,9 +122,9 @@ ENTRY(chacha20_block_xor_neon)
// x0 += x1, x3 = rotl32(x3 ^ x0, 8)
vadd.i32 q0, q0, q1
- veor q4, q3, q0
- vshl.u32 q3, q4, #8
- vsri.u32 q3, q4, #24
+ veor q3, q3, q0
+ vtbl.8 d6, {d6}, d10
+ vtbl.8 d7, {d7}, d10
// x2 += x3, x1 = rotl32(x1 ^ x2, 7)
vadd.i32 q2, q2, q3
@@ -139,13 +169,17 @@ ENTRY(chacha20_block_xor_neon)
bx lr
ENDPROC(chacha20_block_xor_neon)
+ .align 4
+.Lctrinc: .word 0, 1, 2, 3
+.Lrol8_table: .byte 3, 0, 1, 2, 7, 4, 5, 6
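+// byte indices (3 0 1 2) / (7 4 5 6): rotate each 32-bit word left by 8 via vtbl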
+
.align 5
ENTRY(chacha20_4block_xor_neon)
- push {r4-r6, lr}
- mov ip, sp // preserve the stack pointer
- sub r3, sp, #0x20 // allocate a 32 byte buffer
- bic r3, r3, #0x1f // aligned to 32 bytes
- mov sp, r3
+ push {r4-r5}
+ mov r4, sp // preserve the stack pointer
+ sub ip, sp, #0x20 // allocate a 32 byte buffer
+ bic ip, ip, #0x1f // aligned to 32 bytes
+ mov sp, ip
// r0: Input state matrix, s
// r1: 4 data blocks output, o
@@ -155,25 +189,24 @@ ENTRY(chacha20_4block_xor_neon)
// This function encrypts four consecutive ChaCha20 blocks by loading
// the state matrix in NEON registers four times. The algorithm performs
// each operation on the corresponding word of each state matrix, hence
- // requires no word shuffling. For final XORing step we transpose the
- // matrix by interleaving 32- and then 64-bit words, which allows us to
- // do XOR in NEON registers.
+ // requires no word shuffling. The words are re-interleaved before the
+ // final addition of the original state and the XORing step.
//
- // x0..15[0-3] = s0..3[0..3]
- add r3, r0, #0x20
+ // x0..15[0-3] = s0..15[0-3]
+ add ip, r0, #0x20
vld1.32 {q0-q1}, [r0]
- vld1.32 {q2-q3}, [r3]
+ vld1.32 {q2-q3}, [ip]
- adr r3, CTRINC
+ adr r5, .Lctrinc
vdup.32 q15, d7[1]
vdup.32 q14, d7[0]
- vld1.32 {q11}, [r3, :128]
+ vld1.32 {q4}, [r5, :128]
vdup.32 q13, d6[1]
vdup.32 q12, d6[0]
- vadd.i32 q12, q12, q11 // x12 += counter values 0-3
vdup.32 q11, d5[1]
vdup.32 q10, d5[0]
+ vadd.u32 q12, q12, q4 // x12 += counter values 0-3
vdup.32 q9, d4[1]
vdup.32 q8, d4[0]
vdup.32 q7, d3[1]
@@ -185,9 +218,13 @@ ENTRY(chacha20_4block_xor_neon)
vdup.32 q1, d0[1]
vdup.32 q0, d0[0]
+ adr ip, .Lrol8_table
mov r3, #10
+ b 1f
.Ldoubleround4:
+ vld1.32 {q8-q9}, [sp, :256]
+1:
// x0 += x4, x12 = rotl32(x12 ^ x0, 16)
// x1 += x5, x13 = rotl32(x13 ^ x1, 16)
// x2 += x6, x14 = rotl32(x14 ^ x2, 16)
@@ -236,24 +273,25 @@ ENTRY(chacha20_4block_xor_neon)
// x1 += x5, x13 = rotl32(x13 ^ x1, 8)
// x2 += x6, x14 = rotl32(x14 ^ x2, 8)
// x3 += x7, x15 = rotl32(x15 ^ x3, 8)
+ vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q4
vadd.i32 q1, q1, q5
vadd.i32 q2, q2, q6
vadd.i32 q3, q3, q7
- veor q8, q12, q0
- veor q9, q13, q1
- vshl.u32 q12, q8, #8
- vshl.u32 q13, q9, #8
- vsri.u32 q12, q8, #24
- vsri.u32 q13, q9, #24
+ veor q12, q12, q0
+ veor q13, q13, q1
+ veor q14, q14, q2
+ veor q15, q15, q3
- veor q8, q14, q2
- veor q9, q15, q3
- vshl.u32 q14, q8, #8
- vshl.u32 q15, q9, #8
- vsri.u32 q14, q8, #24
- vsri.u32 q15, q9, #24
+ vtbl.8 d24, {d24}, d16
+ vtbl.8 d25, {d25}, d16
+ vtbl.8 d26, {d26}, d16
+ vtbl.8 d27, {d27}, d16
+ vtbl.8 d28, {d28}, d16
+ vtbl.8 d29, {d29}, d16
+ vtbl.8 d30, {d30}, d16
+ vtbl.8 d31, {d31}, d16
vld1.32 {q8-q9}, [sp, :256]
@@ -332,24 +370,25 @@ ENTRY(chacha20_4block_xor_neon)
// x1 += x6, x12 = rotl32(x12 ^ x1, 8)
// x2 += x7, x13 = rotl32(x13 ^ x2, 8)
// x3 += x4, x14 = rotl32(x14 ^ x3, 8)
+ vld1.8 {d16}, [ip, :64]
vadd.i32 q0, q0, q5
vadd.i32 q1, q1, q6
vadd.i32 q2, q2, q7
vadd.i32 q3, q3, q4
- veor q8, q15, q0
- veor q9, q12, q1
- vshl.u32 q15, q8, #8
- vshl.u32 q12, q9, #8
- vsri.u32 q15, q8, #24
- vsri.u32 q12, q9, #24
+ veor q15, q15, q0
+ veor q12, q12, q1
+ veor q13, q13, q2
+ veor q14, q14, q3
- veor q8, q13, q2
- veor q9, q14, q3
- vshl.u32 q13, q8, #8
- vshl.u32 q14, q9, #8
- vsri.u32 q13, q8, #24
- vsri.u32 q14, q9, #24
+ vtbl.8 d30, {d30}, d16
+ vtbl.8 d31, {d31}, d16
+ vtbl.8 d24, {d24}, d16
+ vtbl.8 d25, {d25}, d16
+ vtbl.8 d26, {d26}, d16
+ vtbl.8 d27, {d27}, d16
+ vtbl.8 d28, {d28}, d16
+ vtbl.8 d29, {d29}, d16
vld1.32 {q8-q9}, [sp, :256]
@@ -379,104 +418,76 @@ ENTRY(chacha20_4block_xor_neon)
vsri.u32 q6, q9, #25
subs r3, r3, #1
- beq 0f
-
- vld1.32 {q8-q9}, [sp, :256]
- b .Ldoubleround4
-
- // x0[0-3] += s0[0]
- // x1[0-3] += s0[1]
- // x2[0-3] += s0[2]
- // x3[0-3] += s0[3]
-0: ldmia r0!, {r3-r6}
- vdup.32 q8, r3
- vdup.32 q9, r4
- vadd.i32 q0, q0, q8
- vadd.i32 q1, q1, q9
- vdup.32 q8, r5
- vdup.32 q9, r6
- vadd.i32 q2, q2, q8
- vadd.i32 q3, q3, q9
-
- // x4[0-3] += s1[0]
- // x5[0-3] += s1[1]
- // x6[0-3] += s1[2]
- // x7[0-3] += s1[3]
- ldmia r0!, {r3-r6}
- vdup.32 q8, r3
- vdup.32 q9, r4
- vadd.i32 q4, q4, q8
- vadd.i32 q5, q5, q9
- vdup.32 q8, r5
- vdup.32 q9, r6
- vadd.i32 q6, q6, q8
- vadd.i32 q7, q7, q9
-
- // interleave 32-bit words in state n, n+1
- vzip.32 q0, q1
- vzip.32 q2, q3
- vzip.32 q4, q5
- vzip.32 q6, q7
-
- // interleave 64-bit words in state n, n+2
+ bne .Ldoubleround4
+
+ // x0..7[0-3] are in q0-q7, x10..15[0-3] are in q10-q15.
+ // x8..9[0-3] are on the stack.
+
+ // Re-interleave the words in the first two rows of each block (x0..7).
+ // Also add the counter values 0-3 to x12[0-3].
+ vld1.32 {q8}, [r5, :128] // load counter values 0-3
+ vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1)
+ vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3)
+ vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5)
+ vzip.32 q6, q7 // => (6 7 6 7) (6 7 6 7)
+ vadd.u32 q12, q8 // x12 += counter values 0-3
vswp d1, d4
vswp d3, d6
+ vld1.32 {q8-q9}, [r0]! // load s0..7
vswp d9, d12
vswp d11, d14
- // xor with corresponding input, write to output
+ // Swap q1 and q4 so that we'll free up consecutive registers (q0-q1)
+ // after XORing the first 32 bytes.
+ vswp q1, q4
+
+ // First two rows of each block are (q0 q1) (q2 q6) (q4 q5) (q3 q7)
+
+ // x0..3[0-3] += s0..3[0-3] (add orig state to 1st row of each block)
+ vadd.u32 q0, q0, q8
+ vadd.u32 q2, q2, q8
+ vadd.u32 q4, q4, q8
+ vadd.u32 q3, q3, q8
+
+ // x4..7[0-3] += s4..7[0-3] (add orig state to 2nd row of each block)
+ vadd.u32 q1, q1, q9
+ vadd.u32 q6, q6, q9
+ vadd.u32 q5, q5, q9
+ vadd.u32 q7, q7, q9
+
+ // XOR first 32 bytes using keystream from first two rows of first block
vld1.8 {q8-q9}, [r2]!
veor q8, q8, q0
- veor q9, q9, q4
+ veor q9, q9, q1
vst1.8 {q8-q9}, [r1]!
+ // Re-interleave the words in the last two rows of each block (x8..15).
vld1.32 {q8-q9}, [sp, :256]
-
- // x8[0-3] += s2[0]
- // x9[0-3] += s2[1]
- // x10[0-3] += s2[2]
- // x11[0-3] += s2[3]
- ldmia r0!, {r3-r6}
- vdup.32 q0, r3
- vdup.32 q4, r4
- vadd.i32 q8, q8, q0
- vadd.i32 q9, q9, q4
- vdup.32 q0, r5
- vdup.32 q4, r6
- vadd.i32 q10, q10, q0
- vadd.i32 q11, q11, q4
-
- // x12[0-3] += s3[0]
- // x13[0-3] += s3[1]
- // x14[0-3] += s3[2]
- // x15[0-3] += s3[3]
- ldmia r0!, {r3-r6}
- vdup.32 q0, r3
- vdup.32 q4, r4
- adr r3, CTRINC
- vadd.i32 q12, q12, q0
- vld1.32 {q0}, [r3, :128]
- vadd.i32 q13, q13, q4
- vadd.i32 q12, q12, q0 // x12 += counter values 0-3
-
- vdup.32 q0, r5
- vdup.32 q4, r6
- vadd.i32 q14, q14, q0
- vadd.i32 q15, q15, q4
-
- // interleave 32-bit words in state n, n+1
- vzip.32 q8, q9
- vzip.32 q10, q11
- vzip.32 q12, q13
- vzip.32 q14, q15
-
- // interleave 64-bit words in state n, n+2
- vswp d17, d20
- vswp d19, d22
+ vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13)
+ vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15)
+ vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9)
+ vzip.32 q10, q11 // => (10 11 10 11) (10 11 10 11)
+ vld1.32 {q0-q1}, [r0] // load s8..15
vswp d25, d28
vswp d27, d30
+ vswp d17, d20
+ vswp d19, d22
+
+ // Last two rows of each block are (q8 q12) (q10 q14) (q9 q13) (q11 q15)
+
+ // x8..11[0-3] += s8..11[0-3] (add orig state to 3rd row of each block)
+ vadd.u32 q8, q8, q0
+ vadd.u32 q10, q10, q0
+ vadd.u32 q9, q9, q0
+ vadd.u32 q11, q11, q0
+
+ // x12..15[0-3] += s12..15[0-3] (add orig state to 4th row of each block)
+ vadd.u32 q12, q12, q1
+ vadd.u32 q14, q14, q1
+ vadd.u32 q13, q13, q1
+ vadd.u32 q15, q15, q1
- vmov q4, q1
+ // XOR the rest of the data with the keystream
vld1.8 {q0-q1}, [r2]!
veor q0, q0, q8
@@ -509,13 +520,11 @@ ENTRY(chacha20_4block_xor_neon)
vst1.8 {q0-q1}, [r1]!
vld1.8 {q0-q1}, [r2]
+ mov sp, r4 // restore original stack pointer
veor q0, q0, q11
veor q1, q1, q15
vst1.8 {q0-q1}, [r1]
- mov sp, ip
- pop {r4-r6, pc}
+ pop {r4-r5}
+ bx lr
ENDPROC(chacha20_4block_xor_neon)
-
- .align 4
-CTRINC: .word 0, 1, 2, 3
diff --git a/arch/arm/crypto/crc32-ce-glue.c b/arch/arm/crypto/crc32-ce-glue.c
index 96e62ec105d0..cd9e93b46c2d 100644
--- a/arch/arm/crypto/crc32-ce-glue.c
+++ b/arch/arm/crypto/crc32-ce-glue.c
@@ -236,7 +236,7 @@ static void __exit crc32_pmull_mod_exit(void)
ARRAY_SIZE(crc32_pmull_algs));
}
-static const struct cpu_feature crc32_cpu_feature[] = {
+static const struct cpu_feature __maybe_unused crc32_cpu_feature[] = {
{ cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
};
MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
diff --git a/arch/arm/crypto/ghash-ce-core.S b/arch/arm/crypto/ghash-ce-core.S
index 2f78c10b1881..406009afa9cf 100644
--- a/arch/arm/crypto/ghash-ce-core.S
+++ b/arch/arm/crypto/ghash-ce-core.S
@@ -63,6 +63,33 @@
k48 .req d31
SHASH2_p64 .req d31
+ HH .req q10
+ HH3 .req q11
+ HH4 .req q12
+ HH34 .req q13
+
+ HH_L .req d20
+ HH_H .req d21
+ HH3_L .req d22
+ HH3_H .req d23
+ HH4_L .req d24
+ HH4_H .req d25
+ HH34_L .req d26
+ HH34_H .req d27
+ SHASH2_H .req d29
+
+ XL2 .req q5
+ XM2 .req q6
+ XH2 .req q7
+ T3 .req q8
+
+ XL2_L .req d10
+ XL2_H .req d11
+ XM2_L .req d12
+ XM2_H .req d13
+ T3_L .req d16
+ T3_H .req d17
+
.text
.fpu crypto-neon-fp-armv8
@@ -175,12 +202,77 @@
beq 0f
vld1.64 {T1}, [ip]
teq r0, #0
- b 1f
+ b 3f
+
+0: .ifc \pn, p64
+ tst r0, #3 // skip until #blocks is a
+ bne 2f // round multiple of 4
+
+ vld1.8 {XL2-XM2}, [r2]!
+1: vld1.8 {T3-T2}, [r2]!
+ vrev64.8 XL2, XL2
+ vrev64.8 XM2, XM2
+
+ subs r0, r0, #4
+
+ vext.8 T1, XL2, XL2, #8
+ veor XL2_H, XL2_H, XL_L
+ veor XL, XL, T1
+
+ vrev64.8 T3, T3
+ vrev64.8 T1, T2
+
+ vmull.p64 XH, HH4_H, XL_H // a1 * b1
+ veor XL2_H, XL2_H, XL_H
+ vmull.p64 XL, HH4_L, XL_L // a0 * b0
+ vmull.p64 XM, HH34_H, XL2_H // (a1 + a0)(b1 + b0)
+
+ vmull.p64 XH2, HH3_H, XM2_L // a1 * b1
+ veor XM2_L, XM2_L, XM2_H
+ vmull.p64 XL2, HH3_L, XM2_H // a0 * b0
+ vmull.p64 XM2, HH34_L, XM2_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
+
+ vmull.p64 XH2, HH_H, T3_L // a1 * b1
+ veor T3_L, T3_L, T3_H
+ vmull.p64 XL2, HH_L, T3_H // a0 * b0
+ vmull.p64 XM2, SHASH2_H, T3_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
+
+ vmull.p64 XH2, SHASH_H, T1_L // a1 * b1
+ veor T1_L, T1_L, T1_H
+ vmull.p64 XL2, SHASH_L, T1_H // a0 * b0
+ vmull.p64 XM2, SHASH2_p64, T1_L // (a1 + a0)(b1 + b0)
+
+ veor XH, XH, XH2
+ veor XL, XL, XL2
+ veor XM, XM, XM2
-0: vld1.64 {T1}, [r2]!
+ beq 4f
+
+ vld1.8 {XL2-XM2}, [r2]!
+
+ veor T1, XL, XH
+ veor XM, XM, T1
+
+ __pmull_reduce_p64
+
+ veor T1, T1, XH
+ veor XL, XL, T1
+
+ b 1b
+ .endif
+
+2: vld1.64 {T1}, [r2]!
subs r0, r0, #1
-1: /* multiply XL by SHASH in GF(2^128) */
+3: /* multiply XL by SHASH in GF(2^128) */
#ifndef CONFIG_CPU_BIG_ENDIAN
vrev64.8 T1, T1
#endif
@@ -193,7 +285,7 @@
__pmull_\pn XL, XL_L, SHASH_L, s1l, s2l, s3l, s4l @ a0 * b0
__pmull_\pn XM, T1_L, SHASH2_\pn @ (a1+a0)(b1+b0)
- veor T1, XL, XH
+4: veor T1, XL, XH
veor XM, XM, T1
__pmull_reduce_\pn
@@ -212,8 +304,14 @@
* struct ghash_key const *k, const char *head)
*/
ENTRY(pmull_ghash_update_p64)
- vld1.64 {SHASH}, [r3]
+ vld1.64 {SHASH}, [r3]!
+ vld1.64 {HH}, [r3]!
+ vld1.64 {HH3-HH4}, [r3]
+
veor SHASH2_p64, SHASH_L, SHASH_H
+ veor SHASH2_H, HH_L, HH_H
+ veor HH34_L, HH3_L, HH3_H
+ veor HH34_H, HH4_L, HH4_H
vmov.i8 MASK, #0xe1
vshl.u64 MASK, MASK, #57
diff --git a/arch/arm/crypto/ghash-ce-glue.c b/arch/arm/crypto/ghash-ce-glue.c
index 8930fc4e7c22..b7d30b6cf49c 100644
--- a/arch/arm/crypto/ghash-ce-glue.c
+++ b/arch/arm/crypto/ghash-ce-glue.c
@@ -1,7 +1,7 @@
/*
* Accelerated GHASH implementation with ARMv8 vmull.p64 instructions.
*
- * Copyright (C) 2015 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ * Copyright (C) 2015 - 2018 Linaro Ltd. <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -28,8 +28,10 @@ MODULE_ALIAS_CRYPTO("ghash");
#define GHASH_DIGEST_SIZE 16
struct ghash_key {
- u64 a;
- u64 b;
+ u64 h[2];
+ u64 h2[2];
+ u64 h3[2];
+ u64 h4[2];
};
struct ghash_desc_ctx {
@@ -117,26 +119,40 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
return 0;
}
+static void ghash_reflect(u64 h[], const be128 *k)
+{
+ u64 carry = be64_to_cpu(k->a) >> 63;
+
+ h[0] = (be64_to_cpu(k->b) << 1) | carry;
+ h[1] = (be64_to_cpu(k->a) << 1) | (be64_to_cpu(k->b) >> 63);
+
+ if (carry)
+ h[1] ^= 0xc200000000000000UL;
+}
+
static int ghash_setkey(struct crypto_shash *tfm,
const u8 *inkey, unsigned int keylen)
{
struct ghash_key *key = crypto_shash_ctx(tfm);
- u64 a, b;
+ be128 h, k;
if (keylen != GHASH_BLOCK_SIZE) {
crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
- /* perform multiplication by 'x' in GF(2^128) */
- b = get_unaligned_be64(inkey);
- a = get_unaligned_be64(inkey + 8);
+ memcpy(&k, inkey, GHASH_BLOCK_SIZE);
+ ghash_reflect(key->h, &k);
+
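+ /* precompute H^2, H^3 and H^4 for the 4-way aggregated NEON code path */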
+ h = k;
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h2, &h);
- key->a = (a << 1) | (b >> 63);
- key->b = (b << 1) | (a >> 63);
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h3, &h);
- if (b >> 63)
- key->b ^= 0xc200000000000000UL;
+ gf128mul_lle(&h, &k);
+ ghash_reflect(key->h4, &h);
return 0;
}
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
deleted file mode 100644
index 57caa742016e..000000000000
--- a/arch/arm/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,434 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
- .fpu neon
-
- // arguments
- ROUND_KEYS .req r0 // const {u64,u32} *round_keys
- NROUNDS .req r1 // int nrounds
- DST .req r2 // void *dst
- SRC .req r3 // const void *src
- NBYTES .req r4 // unsigned int nbytes
- TWEAK .req r5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- X0 .req q0
- X0_L .req d0
- X0_H .req d1
- Y0 .req q1
- Y0_H .req d3
- X1 .req q2
- X1_L .req d4
- X1_H .req d5
- Y1 .req q3
- Y1_H .req d7
- X2 .req q4
- X2_L .req d8
- X2_H .req d9
- Y2 .req q5
- Y2_H .req d11
- X3 .req q6
- X3_L .req d12
- X3_H .req d13
- Y3 .req q7
- Y3_H .req d15
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req q8
- ROUND_KEY_L .req d16
- ROUND_KEY_H .req d17
-
- // index vector for vtbl-based 8-bit rotates
- ROTATE_TABLE .req d18
-
- // multiplication table for updating XTS tweaks
- GF128MUL_TABLE .req d19
- GF64MUL_TABLE .req d19
-
- // current XTS tweak value(s)
- TWEAKV .req q10
- TWEAKV_L .req d20
- TWEAKV_H .req d21
-
- TMP0 .req q12
- TMP0_L .req d24
- TMP0_H .req d25
- TMP1 .req q13
- TMP2 .req q14
- TMP3 .req q15
-
- .align 4
-.Lror64_8_table:
- .byte 1, 2, 3, 4, 5, 6, 7, 0
-.Lror32_8_table:
- .byte 1, 2, 3, 0, 5, 6, 7, 4
-.Lrol64_8_table:
- .byte 7, 0, 1, 2, 3, 4, 5, 6
-.Lrol32_8_table:
- .byte 3, 0, 1, 2, 7, 4, 5, 6
-.Lgf128mul_table:
- .byte 0, 0x87
- .fill 14
-.Lgf64mul_table:
- .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
- .fill 12
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- *
- * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
- * the vtbl approach is faster on some processors and the same speed on others.
- */
-.macro _speck_round_128bytes n
-
- // x = ror(x, 8)
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-
- // x += y
- vadd.u\n X0, Y0
- vadd.u\n X1, Y1
- vadd.u\n X2, Y2
- vadd.u\n X3, Y3
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // y = rol(y, 3)
- vshl.u\n TMP0, Y0, #3
- vshl.u\n TMP1, Y1, #3
- vshl.u\n TMP2, Y2, #3
- vshl.u\n TMP3, Y3, #3
- vsri.u\n TMP0, Y0, #(\n - 3)
- vsri.u\n TMP1, Y1, #(\n - 3)
- vsri.u\n TMP2, Y2, #(\n - 3)
- vsri.u\n TMP3, Y3, #(\n - 3)
-
- // y ^= x
- veor Y0, TMP0, X0
- veor Y1, TMP1, X1
- veor Y2, TMP2, X2
- veor Y3, TMP3, X3
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n
-
- // y ^= x
- veor TMP0, Y0, X0
- veor TMP1, Y1, X1
- veor TMP2, Y2, X2
- veor TMP3, Y3, X3
-
- // y = ror(y, 3)
- vshr.u\n Y0, TMP0, #3
- vshr.u\n Y1, TMP1, #3
- vshr.u\n Y2, TMP2, #3
- vshr.u\n Y3, TMP3, #3
- vsli.u\n Y0, TMP0, #(\n - 3)
- vsli.u\n Y1, TMP1, #(\n - 3)
- vsli.u\n Y2, TMP2, #(\n - 3)
- vsli.u\n Y3, TMP3, #(\n - 3)
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // x -= y
- vsub.u\n X0, Y0
- vsub.u\n X1, Y1
- vsub.u\n X2, Y2
- vsub.u\n X3, Y3
-
- // x = rol(x, 8);
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-.endm
-
-.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
-
- // Load the next source block
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current tweak in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next source block with the current tweak
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #63
- vshl.u64 TWEAKV, #1
- veor TWEAKV_H, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
- veor TWEAKV_L, \tmp\()_H
-.endm
-
-.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
-
- // Load the next two source blocks
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current two tweaks in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next two source blocks with the current two tweaks
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #62
- vshl.u64 TWEAKV, #2
- vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
- veor TWEAKV, \tmp
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, decrypting
- push {r4-r7}
- mov r7, sp
-
- /*
- * The first four parameters were passed in registers r0-r3. Load the
- * additional parameters, which were passed on the stack.
- */
- ldr NBYTES, [sp, #16]
- ldr TWEAK, [sp, #20]
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
- sub ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
- sub ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for vtbl-based 8-bit rotates
-.if \decrypting
- ldr r12, =.Lrol\n\()_8_table
-.else
- ldr r12, =.Lror\n\()_8_table
-.endif
- vld1.8 {ROTATE_TABLE}, [r12:64]
-
- // One-time XTS preparation
-
- /*
- * Allocate stack space to store 128 bytes worth of tweaks. For
- * performance, this space is aligned to a 16-byte boundary so that we
- * can use the load/store instructions that declare 16-byte alignment.
- * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
- */
- sub r12, sp, #128
- bic r12, #0xf
- mov sp, r12
-
-.if \n == 64
- // Load first tweak
- vld1.8 {TWEAKV}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr r12, =.Lgf128mul_table
- vld1.8 {GF128MUL_TABLE}, [r12:64]
-.else
- // Load first tweak
- vld1.8 {TWEAKV_L}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr r12, =.Lgf64mul_table
- vld1.8 {GF64MUL_TABLE}, [r12:64]
-
- // Calculate second tweak, packing it together with the first
- vshr.u64 TMP0_L, TWEAKV_L, #63
- vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
- vshl.u64 TWEAKV_H, TWEAKV_L, #1
- veor TWEAKV_H, TMP0_L
-.endif
-
-.Lnext_128bytes_\@:
-
- /*
- * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
- * values, and save the tweaks on the stack for later. Then
- * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- mov r12, sp
-.if \n == 64
- _xts128_precrypt_one X0, r12, TMP0
- _xts128_precrypt_one Y0, r12, TMP0
- _xts128_precrypt_one X1, r12, TMP0
- _xts128_precrypt_one Y1, r12, TMP0
- _xts128_precrypt_one X2, r12, TMP0
- _xts128_precrypt_one Y2, r12, TMP0
- _xts128_precrypt_one X3, r12, TMP0
- _xts128_precrypt_one Y3, r12, TMP0
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- _xts64_precrypt_two X0, r12, TMP0
- _xts64_precrypt_two Y0, r12, TMP0
- _xts64_precrypt_two X1, r12, TMP0
- _xts64_precrypt_two Y1, r12, TMP0
- _xts64_precrypt_two X2, r12, TMP0
- _xts64_precrypt_two Y2, r12, TMP0
- _xts64_precrypt_two X3, r12, TMP0
- _xts64_precrypt_two Y3, r12, TMP0
- vuzp.32 Y0, X0
- vuzp.32 Y1, X1
- vuzp.32 Y2, X2
- vuzp.32 Y3, X3
-.endif
-
- // Do the cipher rounds
-
- mov r12, ROUND_KEYS
- mov r6, NROUNDS
-
-.Lnext_round_\@:
-.if \decrypting
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]
- sub r12, #8
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
- sub r12, #4
-.endif
- _speck_unround_128bytes \n
-.else
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]!
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
-.endif
- _speck_round_128bytes \n
-.endif
- subs r6, r6, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
-.if \n == 64
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- vzip.32 Y0, X0
- vzip.32 Y1, X1
- vzip.32 Y2, X2
- vzip.32 Y3, X3
-.endif
-
- // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
- mov r12, sp
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X0, TMP0
- veor Y0, TMP1
- veor X1, TMP2
- veor Y1, TMP3
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X2, TMP0
- veor Y2, TMP1
- veor X3, TMP2
- veor Y3, TMP3
-
- // Store the ciphertext in the destination buffer
- vst1.8 {X0, Y0}, [DST]!
- vst1.8 {X1, Y1}, [DST]!
- vst1.8 {X2, Y2}, [DST]!
- vst1.8 {X3, Y3}, [DST]!
-
- // Continue if there are more 128-byte chunks remaining, else return
- subs NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak
-.if \n == 64
- vst1.8 {TWEAKV}, [TWEAK]
-.else
- vst1.8 {TWEAKV_L}, [TWEAK]
-.endif
-
- mov sp, r7
- pop {r4-r7}
- bx lr
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
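
Aside on the deleted tweak-update macros: both variants are the usual XTS
multiply-by-x, exactly as the comments in _xts128_precrypt_one and
_xts64_precrypt_two describe. In plain C (illustrative names, mirroring the
comments above rather than any kernel API):

	#include <stdint.h>

	typedef struct { uint64_t lo, hi; } le128;	/* little-endian halves */

	/* 128-bit tweak update: t * x modulo x^128 + x^7 + x^2 + x + 1,
	 * i.e. shift left by one and fold the carry back in as 0x87. */
	static le128 xts128_mul_x(le128 t)
	{
		uint64_t carry = t.hi >> 63;
		le128 r;

		r.hi = (t.hi << 1) | (t.lo >> 63);
		r.lo = (t.lo << 1) ^ (carry ? 0x87 : 0);
		return r;
	}

	/* 64-bit variant used by Speck64-XTS: t * x modulo
	 * x^64 + x^4 + x^3 + x + 1, reduction constant 0x1B. */
	static uint64_t xts64_mul_x(uint64_t t)
	{
		return (t << 1) ^ ((t >> 63) ? 0x1B : 0);
	}

The assembly's _xts64_precrypt_two applies this twice per step (shift by 2,
table lookup on the two carry bits), which is why .Lgf64mul_table carries the
four values { 0, 0x1b, 0x1b << 1, (0x1b << 1) ^ 0x1b }.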
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
deleted file mode 100644
index f012c3ea998f..000000000000
--- a/arch/arm/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,288 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
- * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
- * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
- * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
- * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
- * OCB and PMAC"), represented as 0x1B.
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct skcipher_request *req,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- le128 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble(&tweak, &tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- __le64 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct skcipher_alg speck_algs[] = {
- {
- .base.cra_name = "xts(speck128)",
- .base.cra_driver_name = "xts-speck128-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK128_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }, {
- .base.cra_name = "xts(speck64)",
- .base.cra_driver_name = "xts-speck64-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
- return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 2d43dca29c72..b95f8d0d9f17 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -133,8 +133,7 @@
* space.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL))
+
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
/* Virtualization Translation Control Register (VTCR) bits */
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 3ad482d2f1eb..5ca5d9af0c26 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -273,7 +273,7 @@ static inline void __cpu_init_stage2(void)
kvm_call_hyp(__init_stage2_translation);
}
-static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
+static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
return 0;
}
@@ -354,4 +354,15 @@ static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+{
+ /*
+ * On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
+ * so any non-zero value used as type is illegal.
+ */
+ if (type)
+ return -EINVAL;
+ return 0;
+}
+
#endif /* __ARM_KVM_HOST_H__ */
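
From userspace, the new kvm_arm_setup_stage2() stub means the machine type
argument to KVM_CREATE_VM must stay zero on 32-bit ARM hosts; arm64 later in
this series lets it carry a requested IPA size (via KVM_VM_TYPE_ARM_IPA_SIZE()).
A hedged sketch of the caller's view:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);

		/* On 32-bit ARM only a zero machine type is accepted;
		 * any non-zero value now fails with -EINVAL. */
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);

		return vm < 0;
	}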
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 847f01fa429d..1098ffc3d54b 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -35,16 +35,12 @@
addr; \
})
-/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
- */
-#define KVM_MMU_CACHE_MIN_PAGES 2
-
#ifndef __ASSEMBLY__
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/pgalloc.h>
#include <asm/stage2_pgtable.h>
@@ -52,6 +48,13 @@
/* Ensure compatibility with arm64 */
#define VA_BITS 32
+#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
+#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
+#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
+
+#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
+
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
@@ -355,6 +358,8 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) (addr)
+static inline void kvm_set_ipa_limit(void) {}
+
static inline bool kvm_cpu_has_cnp(void)
{
return false;
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h
index 460d616bb2d6..f6a7ea805232 100644
--- a/arch/arm/include/asm/stage2_pgtable.h
+++ b/arch/arm/include/asm/stage2_pgtable.h
@@ -19,43 +19,53 @@
#ifndef __ARM_S2_PGTABLE_H_
#define __ARM_S2_PGTABLE_H_
-#define stage2_pgd_none(pgd) pgd_none(pgd)
-#define stage2_pgd_clear(pgd) pgd_clear(pgd)
-#define stage2_pgd_present(pgd) pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(pud) pud_free(NULL, pud)
-
-#define stage2_pud_none(pud) pud_none(pud)
-#define stage2_pud_clear(pud) pud_clear(pud)
-#define stage2_pud_present(pud) pud_present(pud)
-#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
-
-#define stage2_pud_huge(pud) pud_huge(pud)
+/*
+ * kvm_mmu_cache_min_pages() is the number of pages required
+ * to install a stage-2 translation. We pre-allocate the entry
+ * level table at VM creation. Since we have a 3 level page-table,
+ * we need only two pages to add a new mapping.
+ */
+#define kvm_mmu_cache_min_pages(kvm) 2
+
+#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
+#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
+#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
+#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
+#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
+#define stage2_pud_free(kvm, pud) pud_free(NULL, pud)
+
+#define stage2_pud_none(kvm, pud) pud_none(pud)
+#define stage2_pud_clear(kvm, pud) pud_clear(pud)
+#define stage2_pud_present(kvm, pud) pud_present(pud)
+#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
+#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
+#define stage2_pmd_free(kvm, pmd) pmd_free(NULL, pmd)
+
+#define stage2_pud_huge(kvm, pud) pud_huge(pud)
/* Open coded p*d_addr_end that can deal with 64bit addresses */
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
-#define stage2_pud_addr_end(addr, end) (end)
+#define stage2_pud_addr_end(kvm, addr, end) (end)
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
return (boundary - 1 < end - 1) ? boundary : end;
}
-#define stage2_pgd_index(addr) pgd_index(addr)
+#define stage2_pgd_index(kvm, addr) pgd_index(addr)
-#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
-#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
-#define stage2_pud_table_empty(pudp) false
+#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
+#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
+#define stage2_pud_table_empty(kvm, pudp) false
#endif /* __ARM_S2_PGTABLE_H_ */
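
The reason every helper above grows a kvm argument: the arm64 side of this
series makes the stage-2 geometry per-VM, so walkers shared between the two
architectures must pass the VM down even though 32-bit ARM ignores it. The
shape of such a walker, as an illustrative sketch rather than a verbatim copy
of the mmu code:

	static void stage2_walk_range(struct kvm *kvm, phys_addr_t addr,
				      phys_addr_t end)
	{
		pgd_t *pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
		phys_addr_t next;

		do {
			next = stage2_pgd_addr_end(kvm, addr, end);
			if (!stage2_pgd_none(kvm, *pgd)) {
				/* descend: stage2_pud_offset(kvm, pgd, addr),
				 * then the PMD/PTE levels, each clamped by the
				 * matching stage2_p*d_addr_end(kvm, ...) */
			}
		} while (pgd++, addr = next, addr != end);
	}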
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index 076090d2dbf5..88ef2ce1f69a 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -16,23 +16,23 @@
#include <uapi/asm/unistd.h>
#include <asm/unistd-nr.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_SYS_OLD_MMAP
#define __ARCH_WANT_SYS_OLD_SELECT
+#define __ARCH_WANT_SYS_UTIME
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
#define __ARCH_WANT_SYS_TIME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_UTIME
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_SOCKETCALL
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index ecaa68dd1af5..13bcd3b867cb 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -87,14 +87,11 @@ void __init arm_dt_init_cpu_maps(void)
if (!cpus)
return;
- for_each_child_of_node(cpus, cpu) {
+ for_each_of_cpu_node(cpu) {
const __be32 *cell;
int prop_bytes;
u32 hwid;
- if (of_node_cmp(cpu->type, "cpu"))
- continue;
-
pr_debug(" * %pOF...\n", cpu);
/*
* A device tree containing CPU nodes with missing "reg"
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 24ac3cab411d..60e375ce1ab2 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -94,12 +94,6 @@ static void __init parse_dt_topology(void)
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return;
- }
-
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
diff --git a/arch/arm/mach-at91/pm_suspend.S b/arch/arm/mach-at91/pm_suspend.S
index a7c6ae13c945..bfe1c4d06901 100644
--- a/arch/arm/mach-at91/pm_suspend.S
+++ b/arch/arm/mach-at91/pm_suspend.S
@@ -149,6 +149,14 @@ exit_suspend:
ENDPROC(at91_pm_suspend_in_sram)
ENTRY(at91_backup_mode)
+ /* Switch the master clock source to slow clock. */
+ ldr pmc, .pmc_base
+ ldr tmp1, [pmc, #AT91_PMC_MCKR]
+ bic tmp1, tmp1, #AT91_PMC_CSS
+ str tmp1, [pmc, #AT91_PMC_MCKR]
+
+ wait_mckrdy
+
/*BUMEN*/
ldr r0, .sfr
mov tmp1, #0x1
diff --git a/arch/arm/mach-mmp/devices.c b/arch/arm/mach-mmp/devices.c
index 671c7a09ab3d..0fca63c80e1a 100644
--- a/arch/arm/mach-mmp/devices.c
+++ b/arch/arm/mach-mmp/devices.c
@@ -277,21 +277,12 @@ struct platform_device pxa168_device_u2o = {
#if IS_ENABLED(CONFIG_USB_EHCI_MV_U2O)
struct resource pxa168_u2oehci_resources[] = {
- /* regbase */
[0] = {
- .start = PXA168_U2O_REGBASE + U2x_CAPREGS_OFFSET,
+ .start = PXA168_U2O_REGBASE,
.end = PXA168_U2O_REGBASE + USB_REG_RANGE,
.flags = IORESOURCE_MEM,
- .name = "capregs",
},
- /* phybase */
[1] = {
- .start = PXA168_U2O_PHYBASE,
- .end = PXA168_U2O_PHYBASE + USB_PHY_RANGE,
- .flags = IORESOURCE_MEM,
- .name = "phyregs",
- },
- [2] = {
.start = IRQ_PXA168_USB1,
.end = IRQ_PXA168_USB1,
.flags = IORESOURCE_IRQ,
diff --git a/arch/arm/mach-shmobile/pm-rcar-gen2.c b/arch/arm/mach-shmobile/pm-rcar-gen2.c
index 345af3ebcc3a..7efe95bd584f 100644
--- a/arch/arm/mach-shmobile/pm-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/pm-rcar-gen2.c
@@ -50,7 +50,7 @@ void __init rcar_gen2_pm_init(void)
void __iomem *p;
u32 bar;
static int once;
- struct device_node *np, *cpus;
+ struct device_node *np;
bool has_a7 = false;
bool has_a15 = false;
struct resource res;
@@ -59,11 +59,7 @@ void __init rcar_gen2_pm_init(void)
if (once++)
return;
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return;
-
- for_each_child_of_node(cpus, np) {
+ for_each_of_cpu_node(np) {
if (of_device_is_compatible(np, "arm,cortex-a15"))
has_a15 = true;
else if (of_device_is_compatible(np, "arm,cortex-a7"))
diff --git a/arch/arm/mach-shmobile/pm-rmobile.c b/arch/arm/mach-shmobile/pm-rmobile.c
index e348bcfe389d..94fdeef11b81 100644
--- a/arch/arm/mach-shmobile/pm-rmobile.c
+++ b/arch/arm/mach-shmobile/pm-rmobile.c
@@ -202,7 +202,7 @@ static void __init get_special_pds(void)
const struct of_device_id *id;
/* PM domains containing CPUs */
- for_each_node_by_type(np, "cpu")
+ for_each_of_cpu_node(np)
add_special_pd(np, PD_CPU);
/* PM domain containing console */
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index 828e8aea037e..e48b0939693f 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -22,22 +22,16 @@
void __init shmobile_init_delay(void)
{
- struct device_node *np, *cpus;
+ struct device_node *np;
u32 max_freq = 0;
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return;
-
- for_each_child_of_node(cpus, np) {
+ for_each_of_cpu_node(np) {
u32 freq;
if (!of_property_read_u32(np, "clock-frequency", &freq))
max_freq = max(max_freq, freq);
}
- of_node_put(cpus);
-
if (!max_freq)
return;
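
The conversions in devtree.c, pm-rcar-gen2.c, pm-rmobile.c and here all follow
the same recipe: for_each_of_cpu_node() (from <linux/of.h>) looks up /cpus and
filters to CPU nodes itself, so callers drop both the of_find_node_by_path()
boilerplate and the matching of_node_put(). As with the other OF iterators, an
early break leaves a reference on the current node that the caller must drop.
Minimal caller shape (illustrative):

	#include <linux/kernel.h>
	#include <linux/of.h>

	static u32 max_cpu_clock_hz(void)
	{
		struct device_node *np;
		u32 max_freq = 0, freq;

		for_each_of_cpu_node(np) {
			if (!of_property_read_u32(np, "clock-frequency", &freq))
				max_freq = max(max_freq, freq);
		}
		return max_freq;
	}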
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c03cd0d765d3..964f682a2b7b 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -11,6 +11,8 @@ config ARM64
select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED
+ select ARCH_HAS_DMA_COHERENT_TO_PFN
+ select ARCH_HAS_DMA_MMAP_PGPROT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_FAST_MULTIPLIER
@@ -24,6 +26,8 @@ config ARM64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_STRICT_MODULE_RWX
+ select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYSCALL_WRAPPER
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 393d2b524284..5a89a957641b 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -128,6 +128,7 @@ config ARCH_MVEBU
select MVEBU_ICU
select MVEBU_ODMI
select MVEBU_PIC
+ select MVEBU_SEI
select OF_GPIO
select PINCTRL
select PINCTRL_ARMADA_37XX
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 106039d25e2f..b4e994cd3a42 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -113,9 +113,8 @@ core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
# Default target when executing plain make
boot := arch/arm64/boot
KBUILD_IMAGE := $(boot)/Image.gz
-KBUILD_DTBS := dtbs
-all: Image.gz $(KBUILD_DTBS)
+all: Image.gz
Image: vmlinux
@@ -127,17 +126,6 @@ Image.%: Image
zinstall install:
$(Q)$(MAKE) $(build)=$(boot) $@
-%.dtb: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
-
-PHONY += dtbs dtbs_install
-
-dtbs: prepare scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
-dtbs_install:
- $(Q)$(MAKE) $(dtbinst)=$(boot)/dts
-
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
@@ -145,7 +133,6 @@ vdso_install:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
- $(Q)$(MAKE) $(clean)=$(boot)/dts
# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
@@ -160,8 +147,6 @@ vdso_prepare: prepare0
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
- echo '* dtbs - Build device tree blobs for enabled boards'
- echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' Install using (your) ~/bin/installkernel or'
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index 8a2641c742ae..fb3d2ee77c56 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -473,16 +473,51 @@
status = "disabled";
};
+ sdr: sdr@f8011100 {
+ compatible = "altr,sdr-ctl", "syscon";
+ reg = <0xf8011100 0xc0>;
+ };
+
eccmgr {
- compatible = "altr,socfpga-s10-ecc-manager";
+ compatible = "altr,socfpga-a10-ecc-manager";
+ altr,sysmgr-syscon = <&sysmgr>;
+ #address-cells = <1>;
+ #size-cells = <1>;
interrupts = <0 15 4>, <0 95 4>;
interrupt-controller;
#interrupt-cells = <2>;
+ ranges;
sdramedac {
compatible = "altr,sdram-edac-s10";
+ altr,sdr-syscon = <&sdr>;
interrupts = <16 4>, <48 4>;
};
+
+ usb0-ecc@ff8c4000 {
+ compatible = "altr,socfpga-usb-ecc";
+ reg = <0xff8c4000 0x100>;
+ altr,ecc-parent = <&usb0>;
+ interrupts = <2 4>,
+ <34 4>;
+ };
+
+ emac0-rx-ecc@ff8c0000 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0000 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <4 4>,
+ <36 4>;
+ };
+
+ emac0-tx-ecc@ff8c0400 {
+ compatible = "altr,socfpga-eth-mac-ecc";
+ reg = <0xff8c0400 0x100>;
+ altr,ecc-parent = <&gmac0>;
+ interrupts = <5 4>,
+ <37 4>;
+ };
+
};
qspi: spi@ff8d2000 {
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
index 8cb78dd99672..90c0faf8579f 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
+++ b/arch/arm64/boot/dts/freescale/fsl-ls208xa.dtsi
@@ -148,6 +148,7 @@
#address-cells = <2>;
#size-cells = <2>;
ranges;
+ dma-ranges = <0x0 0x0 0x0 0x0 0x10000 0x00000000>;
clockgen: clocking@1300000 {
compatible = "fsl,ls2080a-clockgen";
@@ -321,6 +322,8 @@
reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */
<0x00000000 0x08340000 0 0x40000>; /* MC control reg */
msi-parent = <&its>;
+ iommu-map = <0 &smmu 0 0>; /* This is fixed-up by u-boot */
+ dma-coherent;
#address-cells = <3>;
#size-cells = <1>;
@@ -424,6 +427,9 @@
compatible = "arm,mmu-500";
reg = <0 0x5000000 0 0x800000>;
#global-interrupts = <12>;
+ #iommu-cells = <1>;
+ stream-match-mask = <0x7C00>;
+ dma-coherent;
interrupts = <0 13 4>, /* global secure fault */
<0 14 4>, /* combined secure interrupt */
<0 15 4>, /* global non-secure fault */
@@ -466,7 +472,6 @@
<0 204 4>, <0 205 4>,
<0 206 4>, <0 207 4>,
<0 208 4>, <0 209 4>;
- mmu-masters = <&fsl_mc 0x300 0>;
};
dspi: dspi@2100000 {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index db8d364f8476..3d165b4cdd2a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -698,6 +698,7 @@ CONFIG_MEMTEST=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -706,7 +707,6 @@ CONFIG_CRYPTO_SHA3_ARM64=m
CONFIG_CRYPTO_SM3_ARM64_CE=m
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
-CONFIG_CRYPTO_CRC32_ARM64_CE=m
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_CHACHA20_NEON=m
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index e3fdb0fd6f70..a5606823ed4d 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -66,11 +66,6 @@ config CRYPTO_CRCT10DIF_ARM64_CE
depends on KERNEL_MODE_NEON && CRC_T10DIF
select CRYPTO_HASH
-config CRYPTO_CRC32_ARM64_CE
- tristate "CRC32 and CRC32C digest algorithms using ARMv8 extensions"
- depends on CRC32
- select CRYPTO_HASH
-
config CRYPTO_AES_ARM64
tristate "AES core cipher using scalar instructions"
select CRYPTO_AES
@@ -119,10 +114,4 @@ config CRYPTO_AES_ARM64_BS
select CRYPTO_AES_ARM64
select CRYPTO_SIMD
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_SPECK
-
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index bcafd016618e..f476fede09ba 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -32,9 +32,6 @@ ghash-ce-y := ghash-ce-glue.o ghash-ce-core.o
obj-$(CONFIG_CRYPTO_CRCT10DIF_ARM64_CE) += crct10dif-ce.o
crct10dif-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o
-obj-$(CONFIG_CRYPTO_CRC32_ARM64_CE) += crc32-ce.o
-crc32-ce-y:= crc32-ce-core.o crc32-ce-glue.o
-
obj-$(CONFIG_CRYPTO_AES_ARM64_CE) += aes-ce-cipher.o
aes-ce-cipher-y := aes-ce-core.o aes-ce-glue.o
@@ -56,9 +53,6 @@ sha512-arm64-y := sha512-glue.o sha512-core.o
obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha20-neon.o
chacha20-neon-y := chacha20-neon-core.o chacha20-neon-glue.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
-
obj-$(CONFIG_CRYPTO_AES_ARM64) += aes-arm64.o
aes-arm64-y := aes-cipher-core.o aes-cipher-glue.o
diff --git a/arch/arm64/crypto/aes-ce.S b/arch/arm64/crypto/aes-ce.S
index 623e74ed1c67..143070510809 100644
--- a/arch/arm64/crypto/aes-ce.S
+++ b/arch/arm64/crypto/aes-ce.S
@@ -17,6 +17,11 @@
.arch armv8-a+crypto
+ xtsmask .req v16
+
+ .macro xts_reload_mask, tmp
+ .endm
+
/* preload all round keys */
.macro load_round_keys, rounds, rk
cmp \rounds, #12
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index adcb83eb683c..1e676625ef33 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -15,6 +15,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/skcipher.h>
+#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <crypto/xts.h>
@@ -31,6 +32,8 @@
#define aes_ecb_decrypt ce_aes_ecb_decrypt
#define aes_cbc_encrypt ce_aes_cbc_encrypt
#define aes_cbc_decrypt ce_aes_cbc_decrypt
+#define aes_cbc_cts_encrypt ce_aes_cbc_cts_encrypt
+#define aes_cbc_cts_decrypt ce_aes_cbc_cts_decrypt
#define aes_ctr_encrypt ce_aes_ctr_encrypt
#define aes_xts_encrypt ce_aes_xts_encrypt
#define aes_xts_decrypt ce_aes_xts_decrypt
@@ -45,6 +48,8 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
#define aes_ecb_decrypt neon_aes_ecb_decrypt
#define aes_cbc_encrypt neon_aes_cbc_encrypt
#define aes_cbc_decrypt neon_aes_cbc_decrypt
+#define aes_cbc_cts_encrypt neon_aes_cbc_cts_encrypt
+#define aes_cbc_cts_decrypt neon_aes_cbc_cts_decrypt
#define aes_ctr_encrypt neon_aes_ctr_encrypt
#define aes_xts_encrypt neon_aes_xts_encrypt
#define aes_xts_decrypt neon_aes_xts_decrypt
@@ -63,30 +68,41 @@ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
/* defined in aes-modes.S */
-asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
-asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_ecb_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks);
-asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_decrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 iv[]);
-asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
+asmlinkage void aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+asmlinkage void aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ int rounds, int bytes, u8 const iv[]);
+
+asmlinkage void aes_ctr_encrypt(u8 out[], u8 const in[], u32 const rk[],
int rounds, int blocks, u8 ctr[]);
-asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u8 const rk1[],
- int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_encrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int blocks, u32 const rk2[], u8 iv[],
int first);
-asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u8 const rk1[],
- int rounds, int blocks, u8 const rk2[], u8 iv[],
+asmlinkage void aes_xts_decrypt(u8 out[], u8 const in[], u32 const rk1[],
+ int rounds, int blocks, u32 const rk2[], u8 iv[],
int first);
asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
int blocks, u8 dg[], int enc_before,
int enc_after);
+struct cts_cbc_req_ctx {
+ struct scatterlist sg_src[2];
+ struct scatterlist sg_dst[2];
+ struct skcipher_request subreq;
+};
+
struct crypto_aes_xts_ctx {
struct crypto_aes_ctx key1;
struct crypto_aes_ctx __aligned(8) key2;
@@ -142,7 +158,7 @@ static int ecb_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks);
+ ctx->key_enc, rounds, blocks);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -162,7 +178,7 @@ static int ecb_decrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks);
+ ctx->key_dec, rounds, blocks);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -182,7 +198,7 @@ static int cbc_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+ ctx->key_enc, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -202,13 +218,149 @@ static int cbc_decrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_dec, rounds, blocks, walk.iv);
+ ctx->key_dec, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
+static int cts_cbc_init_tfm(struct crypto_skcipher *tfm)
+{
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct cts_cbc_req_ctx));
+ return 0;
+}
+
+static int cts_cbc_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
+ int err, rounds = 6 + ctx->key_length / 4;
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct skcipher_walk walk;
+
+ skcipher_request_set_tfm(&rctx->subreq, tfm);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ unsigned int blocks;
+
+ skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
+ aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, rounds, blocks, walk.iv);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
+ rctx->subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
+ rctx->subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&rctx->subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+ aes_cbc_cts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_enc, rounds, walk.nbytes, walk.iv);
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
+static int cts_cbc_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct crypto_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct cts_cbc_req_ctx *rctx = skcipher_request_ctx(req);
+ int err, rounds = 6 + ctx->key_length / 4;
+ int cbc_blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
+ struct scatterlist *src = req->src, *dst = req->dst;
+ struct skcipher_walk walk;
+
+ skcipher_request_set_tfm(&rctx->subreq, tfm);
+
+ if (req->cryptlen <= AES_BLOCK_SIZE) {
+ if (req->cryptlen < AES_BLOCK_SIZE)
+ return -EINVAL;
+ cbc_blocks = 1;
+ }
+
+ if (cbc_blocks > 0) {
+ unsigned int blocks;
+
+ skcipher_request_set_crypt(&rctx->subreq, req->src, req->dst,
+ cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+
+ while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
+ kernel_neon_begin();
+ aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, rounds, blocks, walk.iv);
+ kernel_neon_end();
+ err = skcipher_walk_done(&walk,
+ walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (err)
+ return err;
+
+ if (req->cryptlen == AES_BLOCK_SIZE)
+ return 0;
+
+ dst = src = scatterwalk_ffwd(rctx->sg_src, req->src,
+ rctx->subreq.cryptlen);
+ if (req->dst != req->src)
+ dst = scatterwalk_ffwd(rctx->sg_dst, req->dst,
+ rctx->subreq.cryptlen);
+ }
+
+ /* handle ciphertext stealing */
+ skcipher_request_set_crypt(&rctx->subreq, src, dst,
+ req->cryptlen - cbc_blocks * AES_BLOCK_SIZE,
+ req->iv);
+
+ err = skcipher_walk_virt(&walk, &rctx->subreq, false);
+ if (err)
+ return err;
+
+ kernel_neon_begin();
+ aes_cbc_cts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ ctx->key_dec, rounds, walk.nbytes, walk.iv);
+ kernel_neon_end();
+
+ return skcipher_walk_done(&walk, 0);
+}
+
static int ctr_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
@@ -222,7 +374,7 @@ static int ctr_encrypt(struct skcipher_request *req)
while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
kernel_neon_begin();
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+ ctx->key_enc, rounds, blocks, walk.iv);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -238,7 +390,7 @@ static int ctr_encrypt(struct skcipher_request *req)
blocks = -1;
kernel_neon_begin();
- aes_ctr_encrypt(tail, NULL, (u8 *)ctx->key_enc, rounds,
+ aes_ctr_encrypt(tail, NULL, ctx->key_enc, rounds,
blocks, walk.iv);
kernel_neon_end();
crypto_xor_cpy(tdst, tsrc, tail, nbytes);
@@ -272,8 +424,8 @@ static int xts_encrypt(struct skcipher_request *req)
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
kernel_neon_begin();
aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key1.key_enc, rounds, blocks,
- (u8 *)ctx->key2.key_enc, walk.iv, first);
+ ctx->key1.key_enc, rounds, blocks,
+ ctx->key2.key_enc, walk.iv, first);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -294,8 +446,8 @@ static int xts_decrypt(struct skcipher_request *req)
for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
kernel_neon_begin();
aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
- (u8 *)ctx->key1.key_dec, rounds, blocks,
- (u8 *)ctx->key2.key_enc, walk.iv, first);
+ ctx->key1.key_dec, rounds, blocks,
+ ctx->key2.key_enc, walk.iv, first);
kernel_neon_end();
err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
@@ -336,6 +488,24 @@ static struct skcipher_alg aes_algs[] = { {
.decrypt = cbc_decrypt,
}, {
.base = {
+ .cra_name = "__cts(cbc(aes))",
+ .cra_driver_name = "__cts-cbc-aes-" MODE,
+ .cra_priority = PRIO,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_module = THIS_MODULE,
+ },
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .walksize = 2 * AES_BLOCK_SIZE,
+ .setkey = skcipher_aes_setkey,
+ .encrypt = cts_cbc_encrypt,
+ .decrypt = cts_cbc_decrypt,
+ .init = cts_cbc_init_tfm,
+}, {
+ .base = {
.cra_name = "__ctr(aes)",
.cra_driver_name = "__ctr-aes-" MODE,
.cra_priority = PRIO,
@@ -412,7 +582,6 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
{
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
be128 *consts = (be128 *)ctx->consts;
- u8 *rk = (u8 *)ctx->key.key_enc;
int rounds = 6 + key_len / 4;
int err;
@@ -422,7 +591,8 @@ static int cmac_setkey(struct crypto_shash *tfm, const u8 *in_key,
/* encrypt the zero vector */
kernel_neon_begin();
- aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, rk, rounds, 1);
+ aes_ecb_encrypt(ctx->consts, (u8[AES_BLOCK_SIZE]){}, ctx->key.key_enc,
+ rounds, 1);
kernel_neon_end();
cmac_gf128_mul_by_x(consts, consts);
@@ -441,7 +611,6 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
};
struct mac_tfm_ctx *ctx = crypto_shash_ctx(tfm);
- u8 *rk = (u8 *)ctx->key.key_enc;
int rounds = 6 + key_len / 4;
u8 key[AES_BLOCK_SIZE];
int err;
@@ -451,8 +620,8 @@ static int xcbc_setkey(struct crypto_shash *tfm, const u8 *in_key,
return err;
kernel_neon_begin();
- aes_ecb_encrypt(key, ks[0], rk, rounds, 1);
- aes_ecb_encrypt(ctx->consts, ks[1], rk, rounds, 2);
+ aes_ecb_encrypt(key, ks[0], ctx->key.key_enc, rounds, 1);
+ aes_ecb_encrypt(ctx->consts, ks[1], ctx->key.key_enc, rounds, 2);
kernel_neon_end();
return cbcmac_setkey(tfm, key, sizeof(key));
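
Back to the cts_cbc_encrypt()/cts_cbc_decrypt() handlers added earlier in this
file: the "handle ciphertext stealing" subrequest covers the final
16 < len <= 32 bytes in the CS3 layout the kernel's cts template uses (last two
blocks swapped unconditionally). A reference for that step in plain C, with a
block-cipher callback standing in for the NEON code (illustrative only; the
kernel does this inside aes_cbc_cts_encrypt):

	#include <stdint.h>
	#include <string.h>

	typedef void (*blk_enc_t)(uint8_t out[16], const uint8_t in[16],
				  const void *key);

	/* iv is the last full ciphertext block produced so far (or the IV). */
	static void cbc_cs3_final(blk_enc_t enc, const void *key, uint8_t *dst,
				  const uint8_t *src, size_t len,
				  const uint8_t iv[16])
	{
		size_t d = len - 16;	/* length of the short final block */
		uint8_t e[16], y[16];
		size_t i;

		/* E = Enc(P_{n-1} ^ C_{n-2}): plain CBC on the last full block */
		for (i = 0; i < 16; i++)
			y[i] = src[i] ^ iv[i];
		enc(e, y, key);

		/* Zero-pad the short block and chain it through E */
		memset(y, 0, 16);
		memcpy(y, src + 16, d);
		for (i = 0; i < 16; i++)
			y[i] ^= e[i];

		/* CS3 swaps the last two blocks: full block first */
		enc(dst, y, key);	/* C_{n-1} = Enc(P'_n ^ E)   */
		memcpy(dst + 16, e, d);	/* C_n = leading d bytes of E */
	}

With d == 16 this degenerates to ordinary CBC with the last two ciphertext
blocks exchanged, which is why the glue code can route aligned tails through
the same path.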
diff --git a/arch/arm64/crypto/aes-modes.S b/arch/arm64/crypto/aes-modes.S
index 483a7130cf0e..67700045a0e0 100644
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -14,12 +14,12 @@
.align 4
aes_encrypt_block4x:
- encrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7
+ encrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_encrypt_block4x)
aes_decrypt_block4x:
- decrypt_block4x v0, v1, v2, v3, w22, x21, x8, w7
+ decrypt_block4x v0, v1, v2, v3, w3, x2, x8, w7
ret
ENDPROC(aes_decrypt_block4x)
@@ -31,71 +31,57 @@ ENDPROC(aes_decrypt_block4x)
*/
AES_ENTRY(aes_ecb_encrypt)
- frame_push 5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-.Lecbencrestart:
- enc_prepare w22, x21, x5
+ enc_prepare w3, x2, x5
.LecbencloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lecbenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
bl aes_encrypt_block4x
- st1 {v0.16b-v3.16b}, [x19], #64
- cond_yield_neon .Lecbencrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LecbencloopNx
.Lecbenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lecbencout
.Lecbencloop:
- ld1 {v0.16b}, [x20], #16 /* get next pt block */
- encrypt_block v0, w22, x21, x5, w6
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
+ encrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lecbencloop
.Lecbencout:
- frame_pop
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_encrypt)
AES_ENTRY(aes_ecb_decrypt)
- frame_push 5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
-
-.Lecbdecrestart:
- dec_prepare w22, x21, x5
+ dec_prepare w3, x2, x5
.LecbdecloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lecbdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
bl aes_decrypt_block4x
- st1 {v0.16b-v3.16b}, [x19], #64
- cond_yield_neon .Lecbdecrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LecbdecloopNx
.Lecbdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lecbdecout
.Lecbdecloop:
- ld1 {v0.16b}, [x20], #16 /* get next ct block */
- decrypt_block v0, w22, x21, x5, w6
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ ld1 {v0.16b}, [x1], #16 /* get next ct block */
+ decrypt_block v0, w3, x2, x5, w6
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lecbdecloop
.Lecbdecout:
- frame_pop
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_ecb_decrypt)
@@ -108,162 +94,211 @@ AES_ENDPROC(aes_ecb_decrypt)
*/
AES_ENTRY(aes_cbc_encrypt)
- frame_push 6
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
-.Lcbcencrestart:
- ld1 {v4.16b}, [x24] /* get iv */
- enc_prepare w22, x21, x6
+ ld1 {v4.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x6
.Lcbcencloop4x:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lcbcenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
eor v0.16b, v0.16b, v4.16b /* ..and xor with iv */
- encrypt_block v0, w22, x21, x6, w7
+ encrypt_block v0, w3, x2, x6, w7
eor v1.16b, v1.16b, v0.16b
- encrypt_block v1, w22, x21, x6, w7
+ encrypt_block v1, w3, x2, x6, w7
eor v2.16b, v2.16b, v1.16b
- encrypt_block v2, w22, x21, x6, w7
+ encrypt_block v2, w3, x2, x6, w7
eor v3.16b, v3.16b, v2.16b
- encrypt_block v3, w22, x21, x6, w7
- st1 {v0.16b-v3.16b}, [x19], #64
+ encrypt_block v3, w3, x2, x6, w7
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v3.16b
- st1 {v4.16b}, [x24] /* return iv */
- cond_yield_neon .Lcbcencrestart
b .Lcbcencloop4x
.Lcbcenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lcbcencout
.Lcbcencloop:
- ld1 {v0.16b}, [x20], #16 /* get next pt block */
+ ld1 {v0.16b}, [x1], #16 /* get next pt block */
eor v4.16b, v4.16b, v0.16b /* ..and xor with iv */
- encrypt_block v4, w22, x21, x6, w7
- st1 {v4.16b}, [x19], #16
- subs w23, w23, #1
+ encrypt_block v4, w3, x2, x6, w7
+ st1 {v4.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lcbcencloop
.Lcbcencout:
- st1 {v4.16b}, [x24] /* return iv */
- frame_pop
+ st1 {v4.16b}, [x5] /* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)
AES_ENTRY(aes_cbc_decrypt)
- frame_push 6
-
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
-.Lcbcdecrestart:
- ld1 {v7.16b}, [x24] /* get iv */
- dec_prepare w22, x21, x6
+ ld1 {v7.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x6
.LcbcdecloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lcbcdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
mov v4.16b, v0.16b
mov v5.16b, v1.16b
mov v6.16b, v2.16b
bl aes_decrypt_block4x
- sub x20, x20, #16
+ sub x1, x1, #16
eor v0.16b, v0.16b, v7.16b
eor v1.16b, v1.16b, v4.16b
- ld1 {v7.16b}, [x20], #16 /* reload 1 ct block */
+ ld1 {v7.16b}, [x1], #16 /* reload 1 ct block */
eor v2.16b, v2.16b, v5.16b
eor v3.16b, v3.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
- st1 {v7.16b}, [x24] /* return iv */
- cond_yield_neon .Lcbcdecrestart
+ st1 {v0.16b-v3.16b}, [x0], #64
b .LcbcdecloopNx
.Lcbcdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lcbcdecout
.Lcbcdecloop:
- ld1 {v1.16b}, [x20], #16 /* get next ct block */
+ ld1 {v1.16b}, [x1], #16 /* get next ct block */
mov v0.16b, v1.16b /* ...and copy to v0 */
- decrypt_block v0, w22, x21, x6, w7
+ decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b /* xor with iv => pt */
mov v7.16b, v1.16b /* ct is next iv */
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
bne .Lcbcdecloop
.Lcbcdecout:
- st1 {v7.16b}, [x24] /* return iv */
- frame_pop
+ st1 {v7.16b}, [x5] /* return iv */
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_cbc_decrypt)
/*
+ * aes_cbc_cts_encrypt(u8 out[], u8 const in[], u32 const rk[],
+ * int rounds, int bytes, u8 const iv[])
+ * aes_cbc_cts_decrypt(u8 out[], u8 const in[], u32 const rk[],
+ * int rounds, int bytes, u8 const iv[])
+ */
+
+AES_ENTRY(aes_cbc_cts_encrypt)
+ adr_l x8, .Lcts_permute_table
+ sub x4, x4, #16
+ add x9, x8, #32
+ add x8, x8, x4
+ sub x9, x9, x4
+ ld1 {v3.16b}, [x8]
+ ld1 {v4.16b}, [x9]
+
+ ld1 {v0.16b}, [x1], x4 /* overlapping loads */
+ ld1 {v1.16b}, [x1]
+
+ ld1 {v5.16b}, [x5] /* get iv */
+ enc_prepare w3, x2, x6
+
+ eor v0.16b, v0.16b, v5.16b /* xor with iv */
+ tbl v1.16b, {v1.16b}, v4.16b
+ encrypt_block v0, w3, x2, x6, w7
+
+ eor v1.16b, v1.16b, v0.16b
+ tbl v0.16b, {v0.16b}, v3.16b
+ encrypt_block v1, w3, x2, x6, w7
+
+ add x4, x0, x4
+ st1 {v0.16b}, [x4] /* overlapping stores */
+ st1 {v1.16b}, [x0]
+ ret
+AES_ENDPROC(aes_cbc_cts_encrypt)
+
+AES_ENTRY(aes_cbc_cts_decrypt)
+ adr_l x8, .Lcts_permute_table
+ sub x4, x4, #16
+ add x9, x8, #32
+ add x8, x8, x4
+ sub x9, x9, x4
+ ld1 {v3.16b}, [x8]
+ ld1 {v4.16b}, [x9]
+
+ ld1 {v0.16b}, [x1], x4 /* overlapping loads */
+ ld1 {v1.16b}, [x1]
+
+ ld1 {v5.16b}, [x5] /* get iv */
+ dec_prepare w3, x2, x6
+
+ tbl v2.16b, {v1.16b}, v4.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v2.16b, v2.16b, v0.16b
+
+ tbx v0.16b, {v1.16b}, v4.16b
+ tbl v2.16b, {v2.16b}, v3.16b
+ decrypt_block v0, w3, x2, x6, w7
+ eor v0.16b, v0.16b, v5.16b /* xor with iv */
+
+ add x4, x0, x4
+ st1 {v2.16b}, [x4] /* overlapping stores */
+ st1 {v0.16b}, [x0]
+ ret
+AES_ENDPROC(aes_cbc_cts_decrypt)
+
+ .section ".rodata", "a"
+ .align 6
+.Lcts_permute_table:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+ .byte 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .previous
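
How .Lcts_permute_table works: the overlapping loads leave the last full block
in v0 and a 16-byte window ending at the short block in v1, and indexing the
table at d and at 32 - d yields tbl masks that shift and zero-pad in one
instruction, because out-of-range (0xff) indices make tbl produce zero. A
scalar model (illustrative):

	#include <stdint.h>

	/* Scalar model of the NEON tbl instruction: out-of-range indices
	 * (the 0xff table entries) select a zero byte. */
	static void tbl16(uint8_t out[16], const uint8_t in[16],
			  const uint8_t idx[16])
	{
		for (int i = 0; i < 16; i++)
			out[i] = idx[i] < 16 ? in[idx[i]] : 0;
	}

	/*
	 * With d = bytes - 16 (size of the short final block):
	 *   tbl16(pad,  win, &table[32 - d]) left-aligns the d trailing
	 *	bytes of the window and zero-pads the rest;
	 *   tbl16(tail, blk, &table[d]) right-aligns the d leading bytes
	 *	of the processed block, so the overlapping store at
	 *	out + d lands them at offset 16.
	 * No branches are needed for any 1 <= d <= 16.
	 */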
+
+
+ /*
* aes_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[], int rounds,
* int blocks, u8 ctr[])
*/
AES_ENTRY(aes_ctr_encrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x5
-
-.Lctrrestart:
- enc_prepare w22, x21, x6
- ld1 {v4.16b}, [x24]
+ enc_prepare w3, x2, x6
+ ld1 {v4.16b}, [x5]
umov x6, v4.d[1] /* keep swabbed ctr in reg */
rev x6, x6
+ cmn w6, w4 /* 32 bit overflow? */
+ bcs .Lctrloop
.LctrloopNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lctr1x
- cmn w6, #4 /* 32 bit overflow? */
- bcs .Lctr1x
- ldr q8, =0x30000000200000001 /* addends 1,2,3[,0] */
- dup v7.4s, w6
+ add w7, w6, #1
mov v0.16b, v4.16b
- add v7.4s, v7.4s, v8.4s
+ add w8, w6, #2
mov v1.16b, v4.16b
- rev32 v8.16b, v7.16b
+ add w9, w6, #3
mov v2.16b, v4.16b
+ rev w7, w7
mov v3.16b, v4.16b
- mov v1.s[3], v8.s[0]
- mov v2.s[3], v8.s[1]
- mov v3.s[3], v8.s[2]
- ld1 {v5.16b-v7.16b}, [x20], #48 /* get 3 input blocks */
+ rev w8, w8
+ mov v1.s[3], w7
+ rev w9, w9
+ mov v2.s[3], w8
+ mov v3.s[3], w9
+ ld1 {v5.16b-v7.16b}, [x1], #48 /* get 3 input blocks */
bl aes_encrypt_block4x
eor v0.16b, v5.16b, v0.16b
- ld1 {v5.16b}, [x20], #16 /* get 1 input block */
+ ld1 {v5.16b}, [x1], #16 /* get 1 input block */
eor v1.16b, v6.16b, v1.16b
eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
add x6, x6, #4
rev x7, x6
ins v4.d[1], x7
- cbz w23, .Lctrout
- st1 {v4.16b}, [x24] /* return next CTR value */
- cond_yield_neon .Lctrrestart
+ cbz w4, .Lctrout
b .LctrloopNx
.Lctr1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lctrout
.Lctrloop:
mov v0.16b, v4.16b
- encrypt_block v0, w22, x21, x8, w7
+ encrypt_block v0, w3, x2, x8, w7
adds x6, x6, #1 /* increment BE ctr */
rev x7, x6
@@ -271,22 +306,22 @@ AES_ENTRY(aes_ctr_encrypt)
bcs .Lctrcarry /* overflow? */
.Lctrcarrydone:
- subs w23, w23, #1
+ subs w4, w4, #1
bmi .Lctrtailblock /* blocks <0 means tail block */
- ld1 {v3.16b}, [x20], #16
+ ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
- st1 {v3.16b}, [x19], #16
+ st1 {v3.16b}, [x0], #16
bne .Lctrloop
.Lctrout:
- st1 {v4.16b}, [x24] /* return next CTR value */
-.Lctrret:
- frame_pop
+ st1 {v4.16b}, [x5] /* return next CTR value */
+ ldp x29, x30, [sp], #16
ret
.Lctrtailblock:
- st1 {v0.16b}, [x19]
- b .Lctrret
+ st1 {v0.16b}, [x0]
+ ldp x29, x30, [sp], #16
+ ret
.Lctrcarry:
umov x7, v4.d[0] /* load upper word of ctr */
@@ -296,7 +331,6 @@ AES_ENTRY(aes_ctr_encrypt)
ins v4.d[0], x7
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
- .ltorg
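
Behaviourally, the CTR path produces one keystream block per input block and
increments the IV as a 128-bit big-endian integer (the code above keeps the
byte-swapped low 64 bits live in x6 and checks up front whether the low 32
bits will wrap). A minimal C sketch of that contract, again using the
hypothetical aes_encrypt_one() helper:

	typedef unsigned char u8;

	void aes_encrypt_one(u8 out[16], const u8 in[16]);	/* hypothetical */

	static void ctr_encrypt_ref(u8 *out, const u8 *in, int blocks,
				    u8 ctr[16])
	{
		u8 ks[16];
		int i;

		while (blocks--) {
			aes_encrypt_one(ks, ctr);
			for (i = 0; i < 16; i++)
				*out++ = *in++ ^ ks[i];
			/* increment the counter as a 128-bit big-endian value */
			for (i = 15; i >= 0 && ++ctr[i] == 0; i--)
				;
		}
	}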
/*
@@ -306,150 +340,132 @@ AES_ENDPROC(aes_ctr_encrypt)
* int blocks, u8 const rk2[], u8 iv[], int first)
*/
- .macro next_tweak, out, in, const, tmp
+ .macro next_tweak, out, in, tmp
sshr \tmp\().2d, \in\().2d, #63
- and \tmp\().16b, \tmp\().16b, \const\().16b
+ and \tmp\().16b, \tmp\().16b, xtsmask.16b
add \out\().2d, \in\().2d, \in\().2d
ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
eor \out\().16b, \out\().16b, \tmp\().16b
.endm
-.Lxts_mul_x:
-CPU_LE( .quad 1, 0x87 )
-CPU_BE( .quad 0x87, 1 )
+ .macro xts_load_mask, tmp
+ movi xtsmask.2s, #0x1
+ movi \tmp\().2s, #0x87
+ uzp1 xtsmask.4s, xtsmask.4s, \tmp\().4s
+ .endm
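
Both macros implement the standard XTS tweak update, multiplication by x in
GF(2^128) modulo x^128 + x^7 + x^2 + x + 1; xts_load_mask merely materialises
the { 1, 0x87 } reduction constant without a literal pool load. A scalar C
sketch of the same doubling step, assuming the tweak is held as two
little-endian 64-bit words:

	typedef unsigned long long u64;

	/* t[0] holds the low 64 bits of the tweak, t[1] the high 64 bits. */
	static void xts_mul_x(u64 t[2])
	{
		u64 carry = t[1] >> 63;

		t[1] = (t[1] << 1) | (t[0] >> 63);
		t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
	}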
AES_ENTRY(aes_xts_encrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x6
-
- ld1 {v4.16b}, [x24]
+ ld1 {v4.16b}, [x6]
+ xts_load_mask v8
cbz w7, .Lxtsencnotfirst
enc_prepare w3, x5, x8
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
enc_switch_key w3, x2, x8
- ldr q7, .Lxts_mul_x
b .LxtsencNx
-.Lxtsencrestart:
- ld1 {v4.16b}, [x24]
.Lxtsencnotfirst:
- enc_prepare w22, x21, x8
+ enc_prepare w3, x2, x8
.LxtsencloopNx:
- ldr q7, .Lxts_mul_x
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
.LxtsencNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lxtsenc1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 pt blocks */
- next_tweak v5, v4, v7, v8
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 pt blocks */
+ next_tweak v5, v4, v8
eor v0.16b, v0.16b, v4.16b
- next_tweak v6, v5, v7, v8
+ next_tweak v6, v5, v8
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- next_tweak v7, v6, v7, v8
+ next_tweak v7, v6, v8
eor v3.16b, v3.16b, v7.16b
bl aes_encrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v7.16b
- cbz w23, .Lxtsencout
- st1 {v4.16b}, [x24]
- cond_yield_neon .Lxtsencrestart
+ cbz w4, .Lxtsencout
+ xts_reload_mask v8
b .LxtsencloopNx
.Lxtsenc1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lxtsencout
.Lxtsencloop:
- ld1 {v1.16b}, [x20], #16
+ ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- encrypt_block v0, w22, x21, x8, w7
+ encrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
beq .Lxtsencout
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
b .Lxtsencloop
.Lxtsencout:
- st1 {v4.16b}, [x24]
- frame_pop
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_encrypt)
AES_ENTRY(aes_xts_decrypt)
- frame_push 6
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
- mov x19, x0
- mov x20, x1
- mov x21, x2
- mov x22, x3
- mov x23, x4
- mov x24, x6
-
- ld1 {v4.16b}, [x24]
+ ld1 {v4.16b}, [x6]
+ xts_load_mask v8
cbz w7, .Lxtsdecnotfirst
enc_prepare w3, x5, x8
encrypt_block v4, w3, x5, x8, w7 /* first tweak */
dec_prepare w3, x2, x8
- ldr q7, .Lxts_mul_x
b .LxtsdecNx
-.Lxtsdecrestart:
- ld1 {v4.16b}, [x24]
.Lxtsdecnotfirst:
- dec_prepare w22, x21, x8
+ dec_prepare w3, x2, x8
.LxtsdecloopNx:
- ldr q7, .Lxts_mul_x
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
.LxtsdecNx:
- subs w23, w23, #4
+ subs w4, w4, #4
bmi .Lxtsdec1x
- ld1 {v0.16b-v3.16b}, [x20], #64 /* get 4 ct blocks */
- next_tweak v5, v4, v7, v8
+ ld1 {v0.16b-v3.16b}, [x1], #64 /* get 4 ct blocks */
+ next_tweak v5, v4, v8
eor v0.16b, v0.16b, v4.16b
- next_tweak v6, v5, v7, v8
+ next_tweak v6, v5, v8
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- next_tweak v7, v6, v7, v8
+ next_tweak v7, v6, v8
eor v3.16b, v3.16b, v7.16b
bl aes_decrypt_block4x
eor v3.16b, v3.16b, v7.16b
eor v0.16b, v0.16b, v4.16b
eor v1.16b, v1.16b, v5.16b
eor v2.16b, v2.16b, v6.16b
- st1 {v0.16b-v3.16b}, [x19], #64
+ st1 {v0.16b-v3.16b}, [x0], #64
mov v4.16b, v7.16b
- cbz w23, .Lxtsdecout
- st1 {v4.16b}, [x24]
- cond_yield_neon .Lxtsdecrestart
+ cbz w4, .Lxtsdecout
+ xts_reload_mask v8
b .LxtsdecloopNx
.Lxtsdec1x:
- adds w23, w23, #4
+ adds w4, w4, #4
beq .Lxtsdecout
.Lxtsdecloop:
- ld1 {v1.16b}, [x20], #16
+ ld1 {v1.16b}, [x1], #16
eor v0.16b, v1.16b, v4.16b
- decrypt_block v0, w22, x21, x8, w7
+ decrypt_block v0, w3, x2, x8, w7
eor v0.16b, v0.16b, v4.16b
- st1 {v0.16b}, [x19], #16
- subs w23, w23, #1
+ st1 {v0.16b}, [x0], #16
+ subs w4, w4, #1
beq .Lxtsdecout
- next_tweak v4, v4, v7, v8
+ next_tweak v4, v4, v8
b .Lxtsdecloop
.Lxtsdecout:
- st1 {v4.16b}, [x24]
- frame_pop
+ st1 {v4.16b}, [x6]
+ ldp x29, x30, [sp], #16
ret
AES_ENDPROC(aes_xts_decrypt)
diff --git a/arch/arm64/crypto/aes-neon.S b/arch/arm64/crypto/aes-neon.S
index 1c7b45b7268e..29100f692e8a 100644
--- a/arch/arm64/crypto/aes-neon.S
+++ b/arch/arm64/crypto/aes-neon.S
@@ -14,6 +14,12 @@
#define AES_ENTRY(func) ENTRY(neon_ ## func)
#define AES_ENDPROC(func) ENDPROC(neon_ ## func)
+ xtsmask .req v7
+
+ .macro xts_reload_mask, tmp
+ xts_load_mask \tmp
+ .endm
+
/* multiply by polynomial 'x' in GF(2^8) */
.macro mul_by_x, out, in, temp, const
sshr \temp, \in, #7
diff --git a/arch/arm64/crypto/crc32-ce-core.S b/arch/arm64/crypto/crc32-ce-core.S
deleted file mode 100644
index 8061bf0f9c66..000000000000
--- a/arch/arm64/crypto/crc32-ce-core.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Accelerated CRC32(C) using arm64 CRC, NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see http://www.gnu.org/licenses
- *
- * Please visit http://www.xyratex.com/contact if you need additional
- * information or have any questions.
- *
- * GPL HEADER END
- */
-
-/*
- * Copyright 2012 Xyratex Technology Limited
- *
- * Using hardware provided PCLMULQDQ instruction to accelerate the CRC32
- * calculation.
- * CRC32 polynomial:0x04c11db7(BE)/0xEDB88320(LE)
- * PCLMULQDQ is a new instruction in Intel SSE4.2, the reference can be found
- * at:
- * http://www.intel.com/products/processor/manuals/
- * Intel(R) 64 and IA-32 Architectures Software Developer's Manual
- * Volume 2B: Instruction Set Reference, N-Z
- *
- * Authors: Gregory Prestas <Gregory_Prestas@us.xyratex.com>
- * Alexander Boyko <Alexander_Boyko@xyratex.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-
- .section ".rodata", "a"
- .align 6
- .cpu generic+crypto+crc
-
-.Lcrc32_constants:
- /*
- * [(x4*128+32 mod P(x) << 32)]' << 1 = 0x154442bd4
- * #define CONSTANT_R1 0x154442bd4LL
- *
- * [(x4*128-32 mod P(x) << 32)]' << 1 = 0x1c6e41596
- * #define CONSTANT_R2 0x1c6e41596LL
- */
- .octa 0x00000001c6e415960000000154442bd4
-
- /*
- * [(x128+32 mod P(x) << 32)]' << 1 = 0x1751997d0
- * #define CONSTANT_R3 0x1751997d0LL
- *
- * [(x128-32 mod P(x) << 32)]' << 1 = 0x0ccaa009e
- * #define CONSTANT_R4 0x0ccaa009eLL
- */
- .octa 0x00000000ccaa009e00000001751997d0
-
- /*
- * [(x64 mod P(x) << 32)]' << 1 = 0x163cd6124
- * #define CONSTANT_R5 0x163cd6124LL
- */
- .quad 0x0000000163cd6124
- .quad 0x00000000FFFFFFFF
-
- /*
- * #define CRCPOLY_TRUE_LE_FULL 0x1DB710641LL
- *
- * Barrett Reduction constant (u64`) = u` = (x**64 / P(x))`
- * = 0x1F7011641LL
- * #define CONSTANT_RU 0x1F7011641LL
- */
- .octa 0x00000001F701164100000001DB710641
-
-.Lcrc32c_constants:
- .octa 0x000000009e4addf800000000740eef02
- .octa 0x000000014cd00bd600000000f20c0dfe
- .quad 0x00000000dd45aab8
- .quad 0x00000000FFFFFFFF
- .octa 0x00000000dea713f10000000105ec76f0
-
- vCONSTANT .req v0
- dCONSTANT .req d0
- qCONSTANT .req q0
-
- BUF .req x19
- LEN .req x20
- CRC .req x21
- CONST .req x22
-
- vzr .req v9
-
- /**
- * Calculate crc32
- * BUF - buffer
- * LEN - sizeof buffer (multiple of 16 bytes), LEN should be > 63
- * CRC - initial crc32
- * return %eax crc32
- * uint crc32_pmull_le(unsigned char const *buffer,
- * size_t len, uint crc32)
- */
- .text
-ENTRY(crc32_pmull_le)
- adr_l x3, .Lcrc32_constants
- b 0f
-
-ENTRY(crc32c_pmull_le)
- adr_l x3, .Lcrc32c_constants
-
-0: frame_push 4, 64
-
- mov BUF, x0
- mov LEN, x1
- mov CRC, x2
- mov CONST, x3
-
- bic LEN, LEN, #15
- ld1 {v1.16b-v4.16b}, [BUF], #0x40
- movi vzr.16b, #0
- fmov dCONSTANT, CRC
- eor v1.16b, v1.16b, vCONSTANT.16b
- sub LEN, LEN, #0x40
- cmp LEN, #0x40
- b.lt less_64
-
- ldr qCONSTANT, [CONST]
-
-loop_64: /* 64 bytes Full cache line folding */
- sub LEN, LEN, #0x40
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull2 v6.1q, v2.2d, vCONSTANT.2d
- pmull2 v7.1q, v3.2d, vCONSTANT.2d
- pmull2 v8.1q, v4.2d, vCONSTANT.2d
-
- pmull v1.1q, v1.1d, vCONSTANT.1d
- pmull v2.1q, v2.1d, vCONSTANT.1d
- pmull v3.1q, v3.1d, vCONSTANT.1d
- pmull v4.1q, v4.1d, vCONSTANT.1d
-
- eor v1.16b, v1.16b, v5.16b
- ld1 {v5.16b}, [BUF], #0x10
- eor v2.16b, v2.16b, v6.16b
- ld1 {v6.16b}, [BUF], #0x10
- eor v3.16b, v3.16b, v7.16b
- ld1 {v7.16b}, [BUF], #0x10
- eor v4.16b, v4.16b, v8.16b
- ld1 {v8.16b}, [BUF], #0x10
-
- eor v1.16b, v1.16b, v5.16b
- eor v2.16b, v2.16b, v6.16b
- eor v3.16b, v3.16b, v7.16b
- eor v4.16b, v4.16b, v8.16b
-
- cmp LEN, #0x40
- b.lt less_64
-
- if_will_cond_yield_neon
- stp q1, q2, [sp, #.Lframe_local_offset]
- stp q3, q4, [sp, #.Lframe_local_offset + 32]
- do_cond_yield_neon
- ldp q1, q2, [sp, #.Lframe_local_offset]
- ldp q3, q4, [sp, #.Lframe_local_offset + 32]
- ldr qCONSTANT, [CONST]
- movi vzr.16b, #0
- endif_yield_neon
- b loop_64
-
-less_64: /* Folding cache line into 128bit */
- ldr qCONSTANT, [CONST, #16]
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v2.16b
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v3.16b
-
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v4.16b
-
- cbz LEN, fold_64
-
-loop_16: /* Folding rest buffer into 128bit */
- subs LEN, LEN, #0x10
-
- ld1 {v2.16b}, [BUF], #0x10
- pmull2 v5.1q, v1.2d, vCONSTANT.2d
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v5.16b
- eor v1.16b, v1.16b, v2.16b
-
- b.ne loop_16
-
-fold_64:
- /* perform the last 64 bit fold, also adds 32 zeroes
- * to the input stream */
- ext v2.16b, v1.16b, v1.16b, #8
- pmull2 v2.1q, v2.2d, vCONSTANT.2d
- ext v1.16b, v1.16b, vzr.16b, #8
- eor v1.16b, v1.16b, v2.16b
-
- /* final 32-bit fold */
- ldr dCONSTANT, [CONST, #32]
- ldr d3, [CONST, #40]
-
- ext v2.16b, v1.16b, vzr.16b, #4
- and v1.16b, v1.16b, v3.16b
- pmull v1.1q, v1.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v2.16b
-
- /* Finish up with the bit-reversed barrett reduction 64 ==> 32 bits */
- ldr qCONSTANT, [CONST, #48]
-
- and v2.16b, v1.16b, v3.16b
- ext v2.16b, vzr.16b, v2.16b, #8
- pmull2 v2.1q, v2.2d, vCONSTANT.2d
- and v2.16b, v2.16b, v3.16b
- pmull v2.1q, v2.1d, vCONSTANT.1d
- eor v1.16b, v1.16b, v2.16b
- mov w0, v1.s[1]
-
- frame_pop
- ret
-ENDPROC(crc32_pmull_le)
-ENDPROC(crc32c_pmull_le)
-
- .macro __crc32, c
-0: subs x2, x2, #16
- b.mi 8f
- ldp x3, x4, [x1], #16
-CPU_BE( rev x3, x3 )
-CPU_BE( rev x4, x4 )
- crc32\c\()x w0, w0, x3
- crc32\c\()x w0, w0, x4
- b.ne 0b
- ret
-
-8: tbz x2, #3, 4f
- ldr x3, [x1], #8
-CPU_BE( rev x3, x3 )
- crc32\c\()x w0, w0, x3
-4: tbz x2, #2, 2f
- ldr w3, [x1], #4
-CPU_BE( rev w3, w3 )
- crc32\c\()w w0, w0, w3
-2: tbz x2, #1, 1f
- ldrh w3, [x1], #2
-CPU_BE( rev16 w3, w3 )
- crc32\c\()h w0, w0, w3
-1: tbz x2, #0, 0f
- ldrb w3, [x1]
- crc32\c\()b w0, w0, w3
-0: ret
- .endm
-
- .align 5
-ENTRY(crc32_armv8_le)
- __crc32
-ENDPROC(crc32_armv8_le)
-
- .align 5
-ENTRY(crc32c_armv8_le)
- __crc32 c
-ENDPROC(crc32c_armv8_le)
diff --git a/arch/arm64/crypto/crc32-ce-glue.c b/arch/arm64/crypto/crc32-ce-glue.c
deleted file mode 100644
index 34b4e3d46aab..000000000000
--- a/arch/arm64/crypto/crc32-ce-glue.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Accelerated CRC32(C) using arm64 NEON and Crypto Extensions instructions
- *
- * Copyright (C) 2016 - 2017 Linaro Ltd <ard.biesheuvel@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/cpufeature.h>
-#include <linux/crc32.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/string.h>
-
-#include <crypto/internal/hash.h>
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <asm/unaligned.h>
-
-#define PMULL_MIN_LEN 64L /* minimum size of buffer
- * for crc32_pmull_le_16 */
-#define SCALE_F 16L /* size of NEON register */
-
-asmlinkage u32 crc32_pmull_le(const u8 buf[], u64 len, u32 init_crc);
-asmlinkage u32 crc32_armv8_le(u32 init_crc, const u8 buf[], size_t len);
-
-asmlinkage u32 crc32c_pmull_le(const u8 buf[], u64 len, u32 init_crc);
-asmlinkage u32 crc32c_armv8_le(u32 init_crc, const u8 buf[], size_t len);
-
-static u32 (*fallback_crc32)(u32 init_crc, const u8 buf[], size_t len);
-static u32 (*fallback_crc32c)(u32 init_crc, const u8 buf[], size_t len);
-
-static int crc32_pmull_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = 0;
- return 0;
-}
-
-static int crc32c_pmull_cra_init(struct crypto_tfm *tfm)
-{
- u32 *key = crypto_tfm_ctx(tfm);
-
- *key = ~0;
- return 0;
-}
-
-static int crc32_pmull_setkey(struct crypto_shash *hash, const u8 *key,
- unsigned int keylen)
-{
- u32 *mctx = crypto_shash_ctx(hash);
-
- if (keylen != sizeof(u32)) {
- crypto_shash_set_flags(hash, CRYPTO_TFM_RES_BAD_KEY_LEN);
- return -EINVAL;
- }
- *mctx = le32_to_cpup((__le32 *)key);
- return 0;
-}
-
-static int crc32_pmull_init(struct shash_desc *desc)
-{
- u32 *mctx = crypto_shash_ctx(desc->tfm);
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = *mctx;
- return 0;
-}
-
-static int crc32_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32c_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- *crc = crc32c_armv8_le(*crc, data, length);
- return 0;
-}
-
-static int crc32_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if ((u64)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
-
- *crc = fallback_crc32(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN && may_use_simd()) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
-
- if (length > 0)
- *crc = fallback_crc32(*crc, data, length);
-
- return 0;
-}
-
-static int crc32c_pmull_update(struct shash_desc *desc, const u8 *data,
- unsigned int length)
-{
- u32 *crc = shash_desc_ctx(desc);
- unsigned int l;
-
- if ((u64)data % SCALE_F) {
- l = min_t(u32, length, SCALE_F - ((u64)data % SCALE_F));
-
- *crc = fallback_crc32c(*crc, data, l);
-
- data += l;
- length -= l;
- }
-
- if (length >= PMULL_MIN_LEN && may_use_simd()) {
- l = round_down(length, SCALE_F);
-
- kernel_neon_begin();
- *crc = crc32c_pmull_le(data, l, *crc);
- kernel_neon_end();
-
- data += l;
- length -= l;
- }
-
- if (length > 0) {
- *crc = fallback_crc32c(*crc, data, length);
- }
-
- return 0;
-}
-
-static int crc32_pmull_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(*crc, out);
- return 0;
-}
-
-static int crc32c_pmull_final(struct shash_desc *desc, u8 *out)
-{
- u32 *crc = shash_desc_ctx(desc);
-
- put_unaligned_le32(~*crc, out);
- return 0;
-}
-
-static struct shash_alg crc32_pmull_algs[] = { {
- .setkey = crc32_pmull_setkey,
- .init = crc32_pmull_init,
- .update = crc32_update,
- .final = crc32_pmull_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32_pmull_cra_init,
- .base.cra_name = "crc32",
- .base.cra_driver_name = "crc32-arm64-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-}, {
- .setkey = crc32_pmull_setkey,
- .init = crc32_pmull_init,
- .update = crc32c_update,
- .final = crc32c_pmull_final,
- .descsize = sizeof(u32),
- .digestsize = sizeof(u32),
-
- .base.cra_ctxsize = sizeof(u32),
- .base.cra_init = crc32c_pmull_cra_init,
- .base.cra_name = "crc32c",
- .base.cra_driver_name = "crc32c-arm64-ce",
- .base.cra_priority = 200,
- .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
- .base.cra_blocksize = 1,
- .base.cra_module = THIS_MODULE,
-} };
-
-static int __init crc32_pmull_mod_init(void)
-{
- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_PMULL)) {
- crc32_pmull_algs[0].update = crc32_pmull_update;
- crc32_pmull_algs[1].update = crc32c_pmull_update;
-
- if (elf_hwcap & HWCAP_CRC32) {
- fallback_crc32 = crc32_armv8_le;
- fallback_crc32c = crc32c_armv8_le;
- } else {
- fallback_crc32 = crc32_le;
- fallback_crc32c = __crc32c_le;
- }
- } else if (!(elf_hwcap & HWCAP_CRC32)) {
- return -ENODEV;
- }
- return crypto_register_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static void __exit crc32_pmull_mod_exit(void)
-{
- crypto_unregister_shashes(crc32_pmull_algs,
- ARRAY_SIZE(crc32_pmull_algs));
-}
-
-static const struct cpu_feature crc32_cpu_feature[] = {
- { cpu_feature(CRC32) }, { cpu_feature(PMULL) }, { }
-};
-MODULE_DEVICE_TABLE(cpu, crc32_cpu_feature);
-
-module_init(crc32_pmull_mod_init);
-module_exit(crc32_pmull_mod_exit);
-
-MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
-MODULE_LICENSE("GPL v2");
diff --git a/arch/arm64/crypto/crct10dif-ce-core.S b/arch/arm64/crypto/crct10dif-ce-core.S
index 663ea71cdb38..9e82e8e8ed05 100644
--- a/arch/arm64/crypto/crct10dif-ce-core.S
+++ b/arch/arm64/crypto/crct10dif-ce-core.S
@@ -80,7 +80,186 @@
vzr .req v13
-ENTRY(crc_t10dif_pmull)
+ ad .req v14
+ bd .req v10
+
+ k00_16 .req v15
+ k32_48 .req v16
+
+ t3 .req v17
+ t4 .req v18
+ t5 .req v19
+ t6 .req v20
+ t7 .req v21
+ t8 .req v22
+ t9 .req v23
+
+ perm1 .req v24
+ perm2 .req v25
+ perm3 .req v26
+ perm4 .req v27
+
+ bd1 .req v28
+ bd2 .req v29
+ bd3 .req v30
+ bd4 .req v31
+
+ .macro __pmull_init_p64
+ .endm
+
+ .macro __pmull_pre_p64, bd
+ .endm
+
+ .macro __pmull_init_p8
+ // k00_16 := 0x0000000000000000_000000000000ffff
+ // k32_48 := 0x00000000ffffffff_0000ffffffffffff
+ movi k32_48.2d, #0xffffffff
+ mov k32_48.h[2], k32_48.h[0]
+ ushr k00_16.2d, k32_48.2d, #32
+
+ // prepare the permutation vectors
+ mov_q x5, 0x080f0e0d0c0b0a09
+ movi perm4.8b, #8
+ dup perm1.2d, x5
+ eor perm1.16b, perm1.16b, perm4.16b
+ ushr perm2.2d, perm1.2d, #8
+ ushr perm3.2d, perm1.2d, #16
+ ushr perm4.2d, perm1.2d, #24
+ sli perm2.2d, perm1.2d, #56
+ sli perm3.2d, perm1.2d, #48
+ sli perm4.2d, perm1.2d, #40
+ .endm
+
+ .macro __pmull_pre_p8, bd
+ tbl bd1.16b, {\bd\().16b}, perm1.16b
+ tbl bd2.16b, {\bd\().16b}, perm2.16b
+ tbl bd3.16b, {\bd\().16b}, perm3.16b
+ tbl bd4.16b, {\bd\().16b}, perm4.16b
+ .endm
+
+__pmull_p8_core:
+.L__pmull_p8_core:
+ ext t4.8b, ad.8b, ad.8b, #1 // A1
+ ext t5.8b, ad.8b, ad.8b, #2 // A2
+ ext t6.8b, ad.8b, ad.8b, #3 // A3
+
+ pmull t4.8h, t4.8b, bd.8b // F = A1*B
+ pmull t8.8h, ad.8b, bd1.8b // E = A*B1
+ pmull t5.8h, t5.8b, bd.8b // H = A2*B
+ pmull t7.8h, ad.8b, bd2.8b // G = A*B2
+ pmull t6.8h, t6.8b, bd.8b // J = A3*B
+ pmull t9.8h, ad.8b, bd3.8b // I = A*B3
+ pmull t3.8h, ad.8b, bd4.8b // K = A*B4
+ b 0f
+
+.L__pmull_p8_core2:
+ tbl t4.16b, {ad.16b}, perm1.16b // A1
+ tbl t5.16b, {ad.16b}, perm2.16b // A2
+ tbl t6.16b, {ad.16b}, perm3.16b // A3
+
+ pmull2 t4.8h, t4.16b, bd.16b // F = A1*B
+ pmull2 t8.8h, ad.16b, bd1.16b // E = A*B1
+ pmull2 t5.8h, t5.16b, bd.16b // H = A2*B
+ pmull2 t7.8h, ad.16b, bd2.16b // G = A*B2
+ pmull2 t6.8h, t6.16b, bd.16b // J = A3*B
+ pmull2 t9.8h, ad.16b, bd3.16b // I = A*B3
+ pmull2 t3.8h, ad.16b, bd4.16b // K = A*B4
+
+0: eor t4.16b, t4.16b, t8.16b // L = E + F
+ eor t5.16b, t5.16b, t7.16b // M = G + H
+ eor t6.16b, t6.16b, t9.16b // N = I + J
+
+ uzp1 t8.2d, t4.2d, t5.2d
+ uzp2 t4.2d, t4.2d, t5.2d
+ uzp1 t7.2d, t6.2d, t3.2d
+ uzp2 t6.2d, t6.2d, t3.2d
+
+ // t4 = (L) (P0 + P1) << 8
+ // t5 = (M) (P2 + P3) << 16
+ eor t8.16b, t8.16b, t4.16b
+ and t4.16b, t4.16b, k32_48.16b
+
+ // t6 = (N) (P4 + P5) << 24
+ // t7 = (K) (P6 + P7) << 32
+ eor t7.16b, t7.16b, t6.16b
+ and t6.16b, t6.16b, k00_16.16b
+
+ eor t8.16b, t8.16b, t4.16b
+ eor t7.16b, t7.16b, t6.16b
+
+ zip2 t5.2d, t8.2d, t4.2d
+ zip1 t4.2d, t8.2d, t4.2d
+ zip2 t3.2d, t7.2d, t6.2d
+ zip1 t6.2d, t7.2d, t6.2d
+
+ ext t4.16b, t4.16b, t4.16b, #15
+ ext t5.16b, t5.16b, t5.16b, #14
+ ext t6.16b, t6.16b, t6.16b, #13
+ ext t3.16b, t3.16b, t3.16b, #12
+
+ eor t4.16b, t4.16b, t5.16b
+ eor t6.16b, t6.16b, t3.16b
+ ret
+ENDPROC(__pmull_p8_core)
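
__pmull_p8_core reconstructs a 64x64-bit carryless product out of byte-wide
pmull partial products for CPUs that lack the p64 form of the instruction.
The scalar operation it emulates is a plain carryless (polynomial) multiply,
sketched here for reference:

	typedef unsigned long long u64;

	/* Reference 64x64 -> 128-bit carryless multiply; res[0] is the low half. */
	static void clmul64_ref(u64 a, u64 b, u64 res[2])
	{
		int i;

		res[0] = res[1] = 0;
		for (i = 0; i < 64; i++) {
			if ((b >> i) & 1) {
				res[0] ^= a << i;
				if (i)
					res[1] ^= a >> (64 - i);
			}
		}
	}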
+
+ .macro __pmull_p8, rq, ad, bd, i
+ .ifnc \bd, v10
+ .err
+ .endif
+ mov ad.16b, \ad\().16b
+ .ifb \i
+ pmull \rq\().8h, \ad\().8b, bd.8b // D = A*B
+ .else
+ pmull2 \rq\().8h, \ad\().16b, bd.16b // D = A*B
+ .endif
+
+ bl .L__pmull_p8_core\i
+
+ eor \rq\().16b, \rq\().16b, t4.16b
+ eor \rq\().16b, \rq\().16b, t6.16b
+ .endm
+
+ .macro fold64, p, reg1, reg2
+ ldp q11, q12, [arg2], #0x20
+
+ __pmull_\p v8, \reg1, v10, 2
+ __pmull_\p \reg1, \reg1, v10
+
+CPU_LE( rev64 v11.16b, v11.16b )
+CPU_LE( rev64 v12.16b, v12.16b )
+
+ __pmull_\p v9, \reg2, v10, 2
+ __pmull_\p \reg2, \reg2, v10
+
+CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
+CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
+
+ eor \reg1\().16b, \reg1\().16b, v8.16b
+ eor \reg2\().16b, \reg2\().16b, v9.16b
+ eor \reg1\().16b, \reg1\().16b, v11.16b
+ eor \reg2\().16b, \reg2\().16b, v12.16b
+ .endm
+
+ .macro fold16, p, reg, rk
+ __pmull_\p v8, \reg, v10
+ __pmull_\p \reg, \reg, v10, 2
+ .ifnb \rk
+ ldr_l q10, \rk, x8
+ __pmull_pre_\p v10
+ .endif
+ eor v7.16b, v7.16b, v8.16b
+ eor v7.16b, v7.16b, \reg\().16b
+ .endm
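
Both fold macros apply the usual PCLMUL-style folding identity: the 128-bit
running remainder is carryless-multiplied by precomputed powers of x modulo
the CRC polynomial and xored into the incoming data, so the remainder never
grows past 128 bits. In terms of the clmul64_ref() sketch above:

	/* One fold step: acc = clmul(acc_lo, k_lo) ^ clmul(acc_hi, k_hi) ^ data */
	static void fold_step(u64 acc[2], const u64 data[2], u64 k_lo, u64 k_hi)
	{
		u64 p0[2], p1[2];

		clmul64_ref(acc[0], k_lo, p0);
		clmul64_ref(acc[1], k_hi, p1);
		acc[0] = p0[0] ^ p1[0] ^ data[0];
		acc[1] = p0[1] ^ p1[1] ^ data[1];
	}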
+
+ .macro __pmull_p64, rd, rn, rm, n
+ .ifb \n
+ pmull \rd\().1q, \rn\().1d, \rm\().1d
+ .else
+ pmull2 \rd\().1q, \rn\().2d, \rm\().2d
+ .endif
+ .endm
+
+ .macro crc_t10dif_pmull, p
frame_push 3, 128
mov arg1_low32, w0
@@ -89,6 +268,8 @@ ENTRY(crc_t10dif_pmull)
movi vzr.16b, #0 // init zero register
+ __pmull_init_\p
+
// adjust the 16-bit initial_crc value, scale it to 32 bits
lsl arg1_low32, arg1_low32, #16
@@ -96,7 +277,7 @@ ENTRY(crc_t10dif_pmull)
cmp arg3, #256
// for sizes less than 128, we can't fold 64B at a time...
- b.lt _less_than_128
+ b.lt .L_less_than_128_\@
// load the initial crc value
// crc value does not need to be byte-reflected, but it needs
@@ -137,6 +318,7 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
ldr_l q10, rk3, x8 // xmm10 has rk3 and rk4
// type of pmull instruction
// will determine which constant to use
+ __pmull_pre_\p v10
//
// we subtract 256 instead of 128 to save one instruction from the loop
@@ -147,41 +329,19 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
// buffer. The _fold_64_B_loop will fold 64B at a time
// until we have 64+y Bytes of buffer
-
// fold 64B at a time. This section of the code folds 4 vector
// registers in parallel
-_fold_64_B_loop:
+.L_fold_64_B_loop_\@:
- .macro fold64, reg1, reg2
- ldp q11, q12, [arg2], #0x20
-
- pmull2 v8.1q, \reg1\().2d, v10.2d
- pmull \reg1\().1q, \reg1\().1d, v10.1d
-
-CPU_LE( rev64 v11.16b, v11.16b )
-CPU_LE( rev64 v12.16b, v12.16b )
-
- pmull2 v9.1q, \reg2\().2d, v10.2d
- pmull \reg2\().1q, \reg2\().1d, v10.1d
-
-CPU_LE( ext v11.16b, v11.16b, v11.16b, #8 )
-CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
-
- eor \reg1\().16b, \reg1\().16b, v8.16b
- eor \reg2\().16b, \reg2\().16b, v9.16b
- eor \reg1\().16b, \reg1\().16b, v11.16b
- eor \reg2\().16b, \reg2\().16b, v12.16b
- .endm
-
- fold64 v0, v1
- fold64 v2, v3
- fold64 v4, v5
- fold64 v6, v7
+ fold64 \p, v0, v1
+ fold64 \p, v2, v3
+ fold64 \p, v4, v5
+ fold64 \p, v6, v7
subs arg3, arg3, #128
// check if there is another 64B in the buffer to be able to fold
- b.lt _fold_64_B_end
+ b.lt .L_fold_64_B_end_\@
if_will_cond_yield_neon
stp q0, q1, [sp, #.Lframe_local_offset]
@@ -195,11 +355,13 @@ CPU_LE( ext v12.16b, v12.16b, v12.16b, #8 )
ldp q6, q7, [sp, #.Lframe_local_offset + 96]
ldr_l q10, rk3, x8
movi vzr.16b, #0 // init zero register
+ __pmull_init_\p
+ __pmull_pre_\p v10
endif_yield_neon
- b _fold_64_B_loop
+ b .L_fold_64_B_loop_\@
-_fold_64_B_end:
+.L_fold_64_B_end_\@:
// at this point, the buffer pointer is pointing at the last y Bytes
// of the buffer the 64B of folded data is in 4 of the vector
// registers: v0, v1, v2, v3
@@ -208,38 +370,29 @@ _fold_64_B_end:
// constants
ldr_l q10, rk9, x8
+ __pmull_pre_\p v10
- .macro fold16, reg, rk
- pmull v8.1q, \reg\().1d, v10.1d
- pmull2 \reg\().1q, \reg\().2d, v10.2d
- .ifnb \rk
- ldr_l q10, \rk, x8
- .endif
- eor v7.16b, v7.16b, v8.16b
- eor v7.16b, v7.16b, \reg\().16b
- .endm
-
- fold16 v0, rk11
- fold16 v1, rk13
- fold16 v2, rk15
- fold16 v3, rk17
- fold16 v4, rk19
- fold16 v5, rk1
- fold16 v6
+ fold16 \p, v0, rk11
+ fold16 \p, v1, rk13
+ fold16 \p, v2, rk15
+ fold16 \p, v3, rk17
+ fold16 \p, v4, rk19
+ fold16 \p, v5, rk1
+ fold16 \p, v6
// instead of 64, we add 48 to the loop counter to save 1 instruction
// from the loop. instead of a cmp instruction, we use the negative
// flag with the jl instruction
adds arg3, arg3, #(128-16)
- b.lt _final_reduction_for_128
+ b.lt .L_final_reduction_for_128_\@
// now we have 16+y bytes left to reduce. 16 Bytes is in register v7
// and the rest is in memory. We can fold 16 bytes at a time if y>=16
// continue folding 16B at a time
-_16B_reduction_loop:
- pmull v8.1q, v7.1d, v10.1d
- pmull2 v7.1q, v7.2d, v10.2d
+.L_16B_reduction_loop_\@:
+ __pmull_\p v8, v7, v10
+ __pmull_\p v7, v7, v10, 2
eor v7.16b, v7.16b, v8.16b
ldr q0, [arg2], #16
@@ -251,22 +404,22 @@ CPU_LE( ext v0.16b, v0.16b, v0.16b, #8 )
// instead of a cmp instruction, we utilize the flags with the jge
// instruction, the equivalent of: cmp arg3, 16-16
// check if there is any more 16B in the buffer to be able to fold
- b.ge _16B_reduction_loop
+ b.ge .L_16B_reduction_loop_\@
// now we have 16+z bytes left to reduce, where 0 <= z < 16.
// first, we reduce the data in the xmm7 register
-_final_reduction_for_128:
+.L_final_reduction_for_128_\@:
// check if any more data to fold. If not, compute the CRC of
// the final 128 bits
adds arg3, arg3, #16
- b.eq _128_done
+ b.eq .L_128_done_\@
// here we are getting data that is less than 16 bytes.
// since we know that there was data before the pointer, we can
// offset the input pointer before the actual point, to receive
// exactly 16 bytes. after that the registers need to be adjusted.
-_get_last_two_regs:
+.L_get_last_two_regs_\@:
add arg2, arg2, arg3
ldr q1, [arg2, #-16]
CPU_LE( rev64 v1.16b, v1.16b )
@@ -291,47 +444,48 @@ CPU_LE( ext v1.16b, v1.16b, v1.16b, #8 )
bsl v0.16b, v2.16b, v1.16b
// fold 16 Bytes
- pmull v8.1q, v7.1d, v10.1d
- pmull2 v7.1q, v7.2d, v10.2d
+ __pmull_\p v8, v7, v10
+ __pmull_\p v7, v7, v10, 2
eor v7.16b, v7.16b, v8.16b
eor v7.16b, v7.16b, v0.16b
-_128_done:
+.L_128_done_\@:
// compute crc of a 128-bit value
ldr_l q10, rk5, x8 // rk5 and rk6 in xmm10
+ __pmull_pre_\p v10
// 64b fold
ext v0.16b, vzr.16b, v7.16b, #8
mov v7.d[0], v7.d[1]
- pmull v7.1q, v7.1d, v10.1d
+ __pmull_\p v7, v7, v10
eor v7.16b, v7.16b, v0.16b
// 32b fold
ext v0.16b, v7.16b, vzr.16b, #4
mov v7.s[3], vzr.s[0]
- pmull2 v0.1q, v0.2d, v10.2d
+ __pmull_\p v0, v0, v10, 2
eor v7.16b, v7.16b, v0.16b
// barrett reduction
-_barrett:
ldr_l q10, rk7, x8
+ __pmull_pre_\p v10
mov v0.d[0], v7.d[1]
- pmull v0.1q, v0.1d, v10.1d
+ __pmull_\p v0, v0, v10
ext v0.16b, vzr.16b, v0.16b, #12
- pmull2 v0.1q, v0.2d, v10.2d
+ __pmull_\p v0, v0, v10, 2
ext v0.16b, vzr.16b, v0.16b, #12
eor v7.16b, v7.16b, v0.16b
mov w0, v7.s[1]
-_cleanup:
+.L_cleanup_\@:
// scale the result back to 16 bits
lsr x0, x0, #16
frame_pop
ret
-_less_than_128:
- cbz arg3, _cleanup
+.L_less_than_128_\@:
+ cbz arg3, .L_cleanup_\@
movi v0.16b, #0
mov v0.s[3], arg1_low32 // get the initial crc value
@@ -342,20 +496,21 @@ CPU_LE( ext v7.16b, v7.16b, v7.16b, #8 )
eor v7.16b, v7.16b, v0.16b // xor the initial crc value
cmp arg3, #16
- b.eq _128_done // exactly 16 left
- b.lt _less_than_16_left
+ b.eq .L_128_done_\@ // exactly 16 left
+ b.lt .L_less_than_16_left_\@
ldr_l q10, rk1, x8 // rk1 and rk2 in xmm10
+ __pmull_pre_\p v10
// update the counter. subtract 32 instead of 16 to save one
// instruction from the loop
subs arg3, arg3, #32
- b.ge _16B_reduction_loop
+ b.ge .L_16B_reduction_loop_\@
add arg3, arg3, #16
- b _get_last_two_regs
+ b .L_get_last_two_regs_\@
-_less_than_16_left:
+.L_less_than_16_left_\@:
// shl r9, 4
adr_l x0, tbl_shf_table + 16
sub x0, x0, arg3
@@ -363,8 +518,17 @@ _less_than_16_left:
movi v9.16b, #0x80
eor v0.16b, v0.16b, v9.16b
tbl v7.16b, {v7.16b}, v0.16b
- b _128_done
-ENDPROC(crc_t10dif_pmull)
+ b .L_128_done_\@
+ .endm
+
+ENTRY(crc_t10dif_pmull_p8)
+ crc_t10dif_pmull p8
+ENDPROC(crc_t10dif_pmull_p8)
+
+ .align 5
+ENTRY(crc_t10dif_pmull_p64)
+ crc_t10dif_pmull p64
+ENDPROC(crc_t10dif_pmull_p64)
// precomputed constants
// these constants are precomputed from the poly:
diff --git a/arch/arm64/crypto/crct10dif-ce-glue.c b/arch/arm64/crypto/crct10dif-ce-glue.c
index 96f0cae4a022..b461d62023f2 100644
--- a/arch/arm64/crypto/crct10dif-ce-glue.c
+++ b/arch/arm64/crypto/crct10dif-ce-glue.c
@@ -22,7 +22,10 @@
#define CRC_T10DIF_PMULL_CHUNK_SIZE 16U
-asmlinkage u16 crc_t10dif_pmull(u16 init_crc, const u8 buf[], u64 len);
+asmlinkage u16 crc_t10dif_pmull_p64(u16 init_crc, const u8 buf[], u64 len);
+asmlinkage u16 crc_t10dif_pmull_p8(u16 init_crc, const u8 buf[], u64 len);
+
+static u16 (*crc_t10dif_pmull)(u16 init_crc, const u8 buf[], u64 len);
static int crct10dif_init(struct shash_desc *desc)
{
@@ -85,6 +88,11 @@ static struct shash_alg crc_t10dif_alg = {
static int __init crc_t10dif_mod_init(void)
{
+ if (elf_hwcap & HWCAP_PMULL)
+ crc_t10dif_pmull = crc_t10dif_pmull_p64;
+ else
+ crc_t10dif_pmull = crc_t10dif_pmull_p8;
+
return crypto_register_shash(&crc_t10dif_alg);
}
@@ -93,8 +101,10 @@ static void __exit crc_t10dif_mod_exit(void)
crypto_unregister_shash(&crc_t10dif_alg);
}
-module_cpu_feature_match(PMULL, crc_t10dif_mod_init);
+module_cpu_feature_match(ASIMD, crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_exit);
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-arm64-ce");
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b14463438b09..000000000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
-
- // arguments
- ROUND_KEYS .req x0 // const {u64,u32} *round_keys
- NROUNDS .req w1 // int nrounds
- NROUNDS_X .req x1
- DST .req x2 // void *dst
- SRC .req x3 // const void *src
- NBYTES .req w4 // unsigned int nbytes
- TWEAK .req x5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- // (underscores avoid a naming collision with ARM64 registers x0-x3)
- X_0 .req v0
- Y_0 .req v1
- X_1 .req v2
- Y_1 .req v3
- X_2 .req v4
- Y_2 .req v5
- X_3 .req v6
- Y_3 .req v7
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req v8
-
- // index vector for tbl-based 8-bit rotates
- ROTATE_TABLE .req v9
- ROTATE_TABLE_Q .req q9
-
- // temporary registers
- TMP0 .req v10
- TMP1 .req v11
- TMP2 .req v12
- TMP3 .req v13
-
- // multiplication table for updating XTS tweaks
- GFMUL_TABLE .req v14
- GFMUL_TABLE_Q .req q14
-
- // next XTS tweak value(s)
- TWEAKV_NEXT .req v15
-
- // XTS tweaks for the blocks currently being encrypted/decrypted
- TWEAKV0 .req v16
- TWEAKV1 .req v17
- TWEAKV2 .req v18
- TWEAKV3 .req v19
- TWEAKV4 .req v20
- TWEAKV5 .req v21
- TWEAKV6 .req v22
- TWEAKV7 .req v23
-
- .align 4
-.Lror64_8_table:
- .octa 0x080f0e0d0c0b0a090007060504030201
-.Lror32_8_table:
- .octa 0x0c0f0e0d080b0a090407060500030201
-.Lrol64_8_table:
- .octa 0x0e0d0c0b0a09080f0605040302010007
-.Lrol32_8_table:
- .octa 0x0e0d0c0f0a09080b0605040702010003
-.Lgf128mul_table:
- .octa 0x00000000000000870000000000000001
-.Lgf64mul_table:
- .octa 0x0000000000000000000000002d361b00
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
- */
-.macro _speck_round_128bytes n, lanes
-
- // x = ror(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-
- // x += y
- add X_0.\lanes, X_0.\lanes, Y_0.\lanes
- add X_1.\lanes, X_1.\lanes, Y_1.\lanes
- add X_2.\lanes, X_2.\lanes, Y_2.\lanes
- add X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // y = rol(y, 3)
- shl TMP0.\lanes, Y_0.\lanes, #3
- shl TMP1.\lanes, Y_1.\lanes, #3
- shl TMP2.\lanes, Y_2.\lanes, #3
- shl TMP3.\lanes, Y_3.\lanes, #3
- sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
- sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
- sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
- sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
-
- // y ^= x
- eor Y_0.16b, TMP0.16b, X_0.16b
- eor Y_1.16b, TMP1.16b, X_1.16b
- eor Y_2.16b, TMP2.16b, X_2.16b
- eor Y_3.16b, TMP3.16b, X_3.16b
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n, lanes
-
- // y ^= x
- eor TMP0.16b, Y_0.16b, X_0.16b
- eor TMP1.16b, Y_1.16b, X_1.16b
- eor TMP2.16b, Y_2.16b, X_2.16b
- eor TMP3.16b, Y_3.16b, X_3.16b
-
- // y = ror(y, 3)
- ushr Y_0.\lanes, TMP0.\lanes, #3
- ushr Y_1.\lanes, TMP1.\lanes, #3
- ushr Y_2.\lanes, TMP2.\lanes, #3
- ushr Y_3.\lanes, TMP3.\lanes, #3
- sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
- sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
- sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
- sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // x -= y
- sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
- sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
- sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
- sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x = rol(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-.endm
-
-.macro _next_xts_tweak next, cur, tmp, n
-.if \n == 64
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- sshr \tmp\().2d, \cur\().2d, #63
- and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
- shl \next\().2d, \cur\().2d, #1
- ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
- eor \next\().16b, \next\().16b, \tmp\().16b
-.else
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- ushr \tmp\().2d, \cur\().2d, #62
- shl \next\().2d, \cur\().2d, #2
- tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
- eor \next\().16b, \next\().16b, \tmp\().16b
-.endif
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, lanes, decrypting
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
- mov NROUNDS, NROUNDS /* zero the high 32 bits */
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
- sub ROUND_KEYS, ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
- sub ROUND_KEYS, ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for tbl-based 8-bit rotates
-.if \decrypting
- ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
-.else
- ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
-.endif
-
- // One-time XTS preparation
-.if \n == 64
- // Load first tweak
- ld1 {TWEAKV0.16b}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf128mul_table
-.else
- // Load first tweak
- ld1 {TWEAKV0.8b}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf64mul_table
-
- // Calculate second tweak, packing it together with the first
- ushr TMP0.2d, TWEAKV0.2d, #63
- shl TMP1.2d, TWEAKV0.2d, #1
- tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
- eor TMP0.8b, TMP0.8b, TMP1.8b
- mov TWEAKV0.d[1], TMP0.d[0]
-.endif
-
-.Lnext_128bytes_\@:
-
- // Calculate XTS tweaks for next 128 bytes
- _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
- _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
- _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
- _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
- _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
- _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
- _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
- _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
-
- // Load the next source blocks into {X,Y}[0-3]
- ld1 {X_0.16b-Y_1.16b}, [SRC], #64
- ld1 {X_2.16b-Y_3.16b}, [SRC], #64
-
- // XOR the source blocks with their XTS tweaks
- eor TMP0.16b, X_0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor TMP1.16b, X_1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor TMP2.16b, X_2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor TMP3.16b, X_3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
-
- /*
- * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
- uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
-
- // Do the cipher rounds
- mov x6, ROUND_KEYS
- mov w7, NROUNDS
-.Lnext_round_\@:
-.if \decrypting
- ld1r {ROUND_KEY.\lanes}, [x6]
- sub x6, x6, #( \n / 8 )
- _speck_unround_128bytes \n, \lanes
-.else
- ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
- _speck_round_128bytes \n, \lanes
-.endif
- subs w7, w7, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
- zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
- zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
- zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
- zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
- zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
- zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
- zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
- zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
-
- // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
- eor X_0.16b, TMP0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor X_1.16b, TMP1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor X_2.16b, TMP2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor X_3.16b, TMP3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
- mov TWEAKV0.16b, TWEAKV_NEXT.16b
-
- // Store the ciphertext in the destination buffer
- st1 {X_0.16b-Y_1.16b}, [DST], #64
- st1 {X_2.16b-Y_3.16b}, [DST], #64
-
- // Continue if there are more 128-byte chunks remaining
- subs NBYTES, NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak and return
-.if \n == 64
- st1 {TWEAKV_NEXT.16b}, [TWEAK]
-.else
- st1 {TWEAKV_NEXT.8b}, [TWEAK]
-.endif
- ret
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index 6e233aeb4ff4..000000000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- * (64-bit version; based on the 32-bit version)
- *
- * Copyright (c) 2018 Google, Inc
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/internal/skcipher.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct skcipher_request *req,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- le128 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble(&tweak, &tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct skcipher_request *req)
-{
- return __speck128_xts_crypt(req, crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct skcipher_request *req, speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- const struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct skcipher_walk walk;
- __le64 tweak;
- int err;
-
- err = skcipher_walk_virt(&walk, req, true);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = skcipher_walk_done(&walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct skcipher_request *req)
-{
- return __speck64_xts_crypt(req, crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
- int err;
-
- err = xts_verify_key(tfm, key, keylen);
- if (err)
- return err;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct skcipher_alg speck_algs[] = {
- {
- .base.cra_name = "xts(speck128)",
- .base.cra_driver_name = "xts-speck128-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK128_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }, {
- .base.cra_name = "xts(speck64)",
- .base.cra_driver_name = "xts-speck64-neon",
- .base.cra_priority = 300,
- .base.cra_blocksize = SPECK64_BLOCK_SIZE,
- .base.cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .base.cra_alignmask = 7,
- .base.cra_module = THIS_MODULE,
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .walksize = SPECK_NEON_CHUNK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_ASIMD))
- return -ENODEV;
- return crypto_register_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_skciphers(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index cee28a05ee98..93ce86d5dae1 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -25,6 +25,8 @@
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
#define COMPAT_UTS_MACHINE "armv8b\0\0"
@@ -32,10 +34,6 @@
#define COMPAT_UTS_MACHINE "armv8l\0\0"
#endif
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u16 __compat_uid16_t;
@@ -43,27 +41,13 @@ typedef u16 __compat_gid16_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s32 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s16 compat_short_t;
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u16 compat_ushort_t;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
#ifdef __AARCH64EB__
@@ -86,11 +70,11 @@ struct compat_stat {
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
- compat_time_t st_atime;
+ old_time32_t st_atime;
compat_ulong_t st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
compat_ulong_t st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_ulong_t __unused4[2];
};
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 6db48d90ad63..7e2ec64aa414 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -537,6 +537,27 @@ static inline void arm64_set_ssbd_mitigation(bool state) {}
#endif
extern int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
+
+static inline u32 id_aa64mmfr0_parange_to_phys_shift(int parange)
+{
+ switch (parange) {
+ case 0: return 32;
+ case 1: return 36;
+ case 2: return 40;
+ case 3: return 42;
+ case 4: return 44;
+ case 5: return 48;
+ case 6: return 52;
+ /*
+ * A future PE could use a value unknown to the kernel.
+ * However, by the "D10.1.4 Principles of the ID scheme
+ * for fields in ID registers", ARM DDI 0487C.a, any new
+ * value is guaranteed to be higher than what we know already.
+ * As a safe limit, we return the limit supported by the kernel.
+ */
+ default: return CONFIG_ARM64_PA_BITS;
+ }
+}
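
A plausible call site, sketched under the assumption that the usual arm64
cpufeature helpers are used to extract the PARange field (illustrative only,
not part of this patch):

	u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	int parange = cpuid_feature_extract_unsigned_field(mmfr0,
					ID_AA64MMFR0_PARANGE_SHIFT);
	u32 phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);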
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 5a5fa47a6b18..3dd3d664c5c5 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -23,7 +23,6 @@ struct dev_archdata {
#ifdef CONFIG_XEN
const struct dma_map_ops *dev_dma_ops;
#endif
- bool dma_coherent;
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index b7847eb8a7bb..c41f3fb1446c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -44,10 +44,13 @@ void arch_teardown_dma_ops(struct device *dev);
#define arch_teardown_dma_ops arch_teardown_dma_ops
#endif
-/* do not use this function in a driver */
+/*
+ * Do not use this function in a driver; it is only provided for
+ * arch/arm/mm/xen.c, which is used by arm64 as well.
+ */
static inline bool is_device_dma_coherent(struct device *dev)
{
- return dev->archdata.dma_coherent;
+ return dev->dma_coherent;
}
#endif /* __KERNEL__ */
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index b476bc46f0ab..6f602af5263c 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -107,6 +107,7 @@
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
+#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
#define VTCR_EL2_PS_MASK TCR_EL2_PS_MASK
#define VTCR_EL2_TG0_MASK TCR_TG0_MASK
#define VTCR_EL2_TG0_4K TCR_TG0_4K
@@ -120,63 +121,150 @@
#define VTCR_EL2_IRGN0_WBWA TCR_IRGN0_WBWA
#define VTCR_EL2_SL0_SHIFT 6
#define VTCR_EL2_SL0_MASK (3 << VTCR_EL2_SL0_SHIFT)
-#define VTCR_EL2_SL0_LVL1 (1 << VTCR_EL2_SL0_SHIFT)
#define VTCR_EL2_T0SZ_MASK 0x3f
-#define VTCR_EL2_T0SZ_40B 24
#define VTCR_EL2_VS_SHIFT 19
#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT)
#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT)
+#define VTCR_EL2_T0SZ(x) TCR_T0SZ(x)
+
/*
* We configure the Stage-2 page tables to always restrict the IPA space to be
* 40 bits wide (T0SZ = 24). Systems with a PARange smaller than 40 bits are
* not known to exist and will break with this configuration.
*
- * VTCR_EL2.PS is extracted from ID_AA64MMFR0_EL1.PARange at boot time
- * (see hyp-init.S).
+ * The VTCR_EL2 is configured per VM and is initialised in kvm_arm_setup_stage2().
*
* Note that when using 4K pages, we concatenate two first level page tables
* together. With 16K pages, we concatenate 16 first level page tables.
*
- * The magic numbers used for VTTBR_X in this patch can be found in Tables
- * D4-23 and D4-25 in ARM DDI 0487A.b.
*/
-#define VTCR_EL2_T0SZ_IPA VTCR_EL2_T0SZ_40B
#define VTCR_EL2_COMMON_BITS (VTCR_EL2_SH0_INNER | VTCR_EL2_ORGN0_WBWA | \
VTCR_EL2_IRGN0_WBWA | VTCR_EL2_RES1)
-#ifdef CONFIG_ARM64_64K_PAGES
/*
- * Stage2 translation configuration:
- * 64kB pages (TG0 = 1)
- * 2 level page tables (SL = 1)
+ * VTCR_EL2:SL0 indicates the entry level for Stage2 translation.
+ * Interestingly, it depends on the page size.
+ * See D.10.2.121, VTCR_EL2, in ARM DDI 0487C.a
+ *
+ * -----------------------------------------
+ * | Entry level | 4K | 16K/64K |
+ * ------------------------------------------
+ * | Level: 0 | 2 | - |
+ * ------------------------------------------
+ * | Level: 1 | 1 | 2 |
+ * ------------------------------------------
+ * | Level: 2 | 0 | 1 |
+ * ------------------------------------------
+ * | Level: 3 | - | 0 |
+ * ------------------------------------------
+ *
+ * The table roughly translates to:
+ *
+ * SL0(PAGE_SIZE, Entry_Level) = TGRAN_SL0_BASE - Entry_Level
+ *
+ * Where TGRAN_SL0_BASE is a magic number depending on the page size:
+ * TGRAN_SL0_BASE(4K) = 2
+ * TGRAN_SL0_BASE(16K) = 3
+ * TGRAN_SL0_BASE(64K) = 3
+ * provided we take care of ruling out the unsupported cases, with
+ * Entry_Level = 4 - Number_of_levels.
+ *
*/
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 38
+#ifdef CONFIG_ARM64_64K_PAGES
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_64K
+#define VTCR_EL2_TGRAN_SL0_BASE 3UL
+
#elif defined(CONFIG_ARM64_16K_PAGES)
-/*
- * Stage2 translation configuration:
- * 16kB pages (TG0 = 2)
- * 2 level page tables (SL = 1)
- */
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_16K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 42
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_16K
+#define VTCR_EL2_TGRAN_SL0_BASE 3UL
+
#else /* 4K */
-/*
- * Stage2 translation configuration:
- * 4kB pages (TG0 = 0)
- * 3 level page tables (SL = 1)
- */
-#define VTCR_EL2_TGRAN_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SL0_LVL1)
-#define VTTBR_X_TGRAN_MAGIC 37
+
+#define VTCR_EL2_TGRAN VTCR_EL2_TG0_4K
+#define VTCR_EL2_TGRAN_SL0_BASE 2UL
+
#endif
-#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
-#define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
+#define VTCR_EL2_LVLS_TO_SL0(levels) \
+ ((VTCR_EL2_TGRAN_SL0_BASE - (4 - (levels))) << VTCR_EL2_SL0_SHIFT)
+#define VTCR_EL2_SL0_TO_LVLS(sl0) \
+ ((sl0) + 4 - VTCR_EL2_TGRAN_SL0_BASE)
+#define VTCR_EL2_LVLS(vtcr) \
+ VTCR_EL2_SL0_TO_LVLS(((vtcr) & VTCR_EL2_SL0_MASK) >> VTCR_EL2_SL0_SHIFT)
+
+#define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN)
+#define VTCR_EL2_IPA(vtcr) (64 - ((vtcr) & VTCR_EL2_T0SZ_MASK))
+
+/*
+ * ARM VMSAv8-64 defines an algorithm for finding the translation table
+ * descriptors in section D4.2.8 in ARM DDI 0487C.a.
+ *
+ * The algorithm defines the expectations on the translation table
+ * addresses for each level, based on PAGE_SIZE, entry level
+ * and the translation table size (T0SZ). The variable "x" in the
+ * algorithm determines the alignment of a table base address at a given
+ * level and thus determines the alignment of VTTBR:BADDR for the
+ * stage2 entry-level page table.
+ * Since the number of bits resolved at the entry level can vary
+ * depending on the T0SZ, the value of "x" is defined based on a
+ * magic constant for a given PAGE_SIZE and entry level. The
+ * intermediate levels must always be aligned to the PAGE_SIZE (i.e.,
+ * x = PAGE_SHIFT).
+ *
+ * The value of "x" for the entry level is calculated as:
+ * x = Magic_N - T0SZ
+ *
+ * where Magic_N is an integer depending on the page size and the entry
+ * level of the page table as below:
+ *
+ * --------------------------------------------
+ * | Entry level | 4K | 16K | 64K |
+ * --------------------------------------------
+ * | Level: 0 (4 levels) | 28 | - | - |
+ * --------------------------------------------
+ * | Level: 1 (3 levels) | 37 | 31 | 25 |
+ * --------------------------------------------
+ * | Level: 2 (2 levels) | 46 | 42 | 38 |
+ * --------------------------------------------
+ * | Level: 3 (1 level) | - | 53 | 51 |
+ * --------------------------------------------
+ *
+ * We have a magic formula for the Magic_N below:
+ *
+ * Magic_N(PAGE_SIZE, Level) = 64 - ((PAGE_SHIFT - 3) * Number_of_levels)
+ *
+ * where Number_of_levels = (4 - Level). We are only interested in the
+ * value for Entry_Level for the stage2 page table.
+ *
+ * So, given that T0SZ = (64 - IPA_SHIFT), we can compute 'x' as follows:
+ *
+ * x = (64 - ((PAGE_SHIFT - 3) * Number_of_levels)) - (64 - IPA_SHIFT)
+ * = IPA_SHIFT - ((PAGE_SHIFT - 3) * Number_of_levels)
+ *
+ * Here is one way to explain the Magic Formula:
+ *
+ * x = log2(Size_of_Entry_Level_Table)
+ *
+ * Since we can resolve (PAGE_SHIFT - 3) bits at each level, and another
+ * PAGE_SHIFT bits in the PTE, we have :
+ *
+ * Bits_Entry_level = IPA_SHIFT - ((PAGE_SHIFT - 3) * (n - 1) + PAGE_SHIFT)
+ * = IPA_SHIFT - (PAGE_SHIFT - 3) * n - 3
+ * where n = number of levels, and since each pointer is 8 bytes, we have:
+ *
+ * x = Bits_Entry_Level + 3
+ * = IPA_SHIFT - (PAGE_SHIFT - 3) * n
+ *
+ * The only constraint here is that we have to find the number of page table
+ * levels for a given IPA size (which we do, see stage2_pgtable_levels())
+ */
+#define ARM64_VTTBR_X(ipa, levels) ((ipa) - ((levels) * (PAGE_SHIFT - 3)))
#define VTTBR_CNP_BIT (UL(1))
-#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
#define VTTBR_VMID_SHIFT (UL(48))
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
@@ -224,6 +312,13 @@
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK (~UL(0xf))
+/*
+ * We have
+ * PAR [PA_Shift - 1 : 12] = PA [PA_Shift - 1 : 12]
+ * HPFAR [PA_Shift - 9 : 4] = FIPA [PA_Shift - 1 : 12]
+ */
+#define PAR_TO_HPFAR(par) \
+ (((par) & GENMASK_ULL(PHYS_MASK_SHIFT - 1, 12)) >> 8)
#define kvm_arm_exception_type \
{0, "IRQ" }, \
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 102b5a5c47b6..aea01a09eb94 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -30,6 +30,7 @@
#define ARM_EXCEPTION_IRQ 0
#define ARM_EXCEPTION_EL1_SERROR 1
#define ARM_EXCEPTION_TRAP 2
+#define ARM_EXCEPTION_IL 3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
@@ -72,8 +73,6 @@ extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
-extern u32 __init_stage2_translation(void);
-
/* Home-grown __this_cpu_{ptr,read} variants that always work at HYP */
#define __hyp_this_cpu_ptr(sym) \
({ \
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2842bf149029..52fbc823ff8c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -53,7 +53,7 @@ DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
-int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
+int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext);
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
struct kvm_arch {
@@ -61,11 +61,13 @@ struct kvm_arch {
u64 vmid_gen;
u32 vmid;
- /* 1-level 2nd stage table, protected by kvm->mmu_lock */
+ /* stage2 entry level table */
pgd_t *pgd;
/* VTTBR value associated with above pgd and vmid */
u64 vttbr;
+ /* VTCR_EL2 value for this VM */
+ u64 vtcr;
/* The last vcpu id that ran on each physical CPU */
int __percpu *last_vcpu_ran;
@@ -451,13 +453,7 @@ int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr);
-static inline void __cpu_init_stage2(void)
-{
- u32 parange = kvm_call_hyp(__init_stage2_translation);
-
- WARN_ONCE(parange < 40,
- "PARange is %d bits, unsupported configuration!", parange);
-}
+static inline void __cpu_init_stage2(void) {}
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
@@ -520,8 +516,12 @@ static inline int kvm_arm_have_ssbd(void)
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
+void kvm_set_ipa_limit(void);
+
#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);
+int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 384c34397619..23aca66767f9 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -155,5 +155,15 @@ void deactivate_traps_vhe_put(void);
u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
void __noreturn __hyp_do_panic(unsigned long, ...);
+/*
+ * Must be called from hyp code running at EL2 with an updated VTTBR
+ * and interrupts disabled.
+ */
+static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
+{
+ write_sysreg(kvm->arch.vtcr, vtcr_el2);
+ write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
#endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 64337afbf124..658657367f2f 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -141,8 +141,16 @@ static inline unsigned long __kern_hyp_va(unsigned long v)
* We currently only support a 40bit IPA.
*/
#define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE (1UL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1UL)
+
+#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr)
+#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm))
+#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL))
+
+static inline bool kvm_page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
#include <asm/stage2_pgtable.h>
@@ -238,12 +246,6 @@ static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
}
-static inline bool kvm_page_empty(void *ptr)
-{
- struct page *ptr_page = virt_to_page(ptr);
- return page_count(ptr_page) == 1;
-}
-
#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
#ifdef __PAGETABLE_PMD_FOLDED
@@ -517,6 +519,30 @@ static inline int hyp_map_aux_data(void)
#define kvm_phys_to_vttbr(addr) phys_to_ttbr(addr)
+/*
+ * Get the magic number 'x' for VTTBR:BADDR of this KVM instance.
+ * With v8.2 LVA extensions, 'x' should be a minimum of 6 with
+ * 52bit IPS.
+ */
+static inline int arm64_vttbr_x(u32 ipa_shift, u32 levels)
+{
+ int x = ARM64_VTTBR_X(ipa_shift, levels);
+
+ return (IS_ENABLED(CONFIG_ARM64_PA_BITS_52) && x < 6) ? 6 : x;
+}
+
+static inline u64 vttbr_baddr_mask(u32 ipa_shift, u32 levels)
+{
+ unsigned int x = arm64_vttbr_x(ipa_shift, levels);
+
+ return GENMASK_ULL(PHYS_MASK_SHIFT - 1, x);
+}
+
+static inline u64 kvm_vttbr_baddr_mask(struct kvm *kvm)
+{
+ return vttbr_baddr_mask(kvm_phys_shift(kvm), kvm_stage2_levels(kvm));
+}
+
static inline bool kvm_cpu_has_cnp(void)
{
return system_supports_cnp();
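
A hedged sketch of the BADDR mask computation, mirroring arm64_vttbr_x() and vttbr_baddr_mask() for a 40-bit IPA with 4K pages; the 48-bit PA_SHIFT and the unconditional clamp are assumptions for illustration (the kernel only clamps under CONFIG_ARM64_PA_BITS_52):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12      /* assume 4K pages */
#define PA_SHIFT        48      /* stands in for PHYS_MASK_SHIFT */
#define GENMASK_ULL(h, l)       ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        unsigned int ipa = 40, levels = 3;
        unsigned int x = ipa - levels * (PAGE_SHIFT - 3);  /* ARM64_VTTBR_X */

        if (x < 6)      /* the v8.2 LVA clamp described above */
                x = 6;

        printf("x = %u, BADDR mask = %#llx\n", x,
               (unsigned long long)GENMASK_ULL(PA_SHIFT - 1, x));
        return 0;       /* prints: x = 13, BADDR mask = 0xffffffffe000 */
}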
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 6bc43889d11e..fce22c4b2f73 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -25,6 +25,9 @@
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
+/* Additional SPSR bits not exposed in the UABI */
+#define PSR_IL_BIT (1 << 20)
+
/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS 12
#define COMPAT_PTRACE_SETREGS 13
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopmd.h b/arch/arm64/include/asm/stage2_pgtable-nopmd.h
deleted file mode 100644
index 2656a0fd05a6..000000000000
--- a/arch/arm64/include/asm/stage2_pgtable-nopmd.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_S2_PGTABLE_NOPMD_H_
-#define __ARM64_S2_PGTABLE_NOPMD_H_
-
-#include <asm/stage2_pgtable-nopud.h>
-
-#define __S2_PGTABLE_PMD_FOLDED
-
-#define S2_PMD_SHIFT S2_PUD_SHIFT
-#define S2_PTRS_PER_PMD 1
-#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT)
-#define S2_PMD_MASK (~(S2_PMD_SIZE-1))
-
-#define stage2_pud_none(pud) (0)
-#define stage2_pud_present(pud) (1)
-#define stage2_pud_clear(pud) do { } while (0)
-#define stage2_pud_populate(pud, pmd) do { } while (0)
-#define stage2_pmd_offset(pud, address) ((pmd_t *)(pud))
-
-#define stage2_pmd_free(pmd) do { } while (0)
-
-#define stage2_pmd_addr_end(addr, end) (end)
-
-#define stage2_pud_huge(pud) (0)
-#define stage2_pmd_table_empty(pmdp) (0)
-
-#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable-nopud.h b/arch/arm64/include/asm/stage2_pgtable-nopud.h
deleted file mode 100644
index 5ee87b54ebf3..000000000000
--- a/arch/arm64/include/asm/stage2_pgtable-nopud.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_S2_PGTABLE_NOPUD_H_
-#define __ARM64_S2_PGTABLE_NOPUD_H_
-
-#define __S2_PGTABLE_PUD_FOLDED
-
-#define S2_PUD_SHIFT S2_PGDIR_SHIFT
-#define S2_PTRS_PER_PUD 1
-#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
-#define S2_PUD_MASK (~(S2_PUD_SIZE-1))
-
-#define stage2_pgd_none(pgd) (0)
-#define stage2_pgd_present(pgd) (1)
-#define stage2_pgd_clear(pgd) do { } while (0)
-#define stage2_pgd_populate(pgd, pud) do { } while (0)
-
-#define stage2_pud_offset(pgd, address) ((pud_t *)(pgd))
-
-#define stage2_pud_free(x) do { } while (0)
-
-#define stage2_pud_addr_end(addr, end) (end)
-#define stage2_pud_table_empty(pmdp) (0)
-
-#endif
diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h
index 8b68099348e5..d352f6df8d2c 100644
--- a/arch/arm64/include/asm/stage2_pgtable.h
+++ b/arch/arm64/include/asm/stage2_pgtable.h
@@ -19,9 +19,17 @@
#ifndef __ARM64_S2_PGTABLE_H_
#define __ARM64_S2_PGTABLE_H_
+#include <linux/hugetlb.h>
#include <asm/pgtable.h>
/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map
+ * and depends on the number of levels in the page table. Compute the
+ * PGDIR_SHIFT for a given number of levels.
+ */
+#define pt_levels_pgdir_shift(lvls) ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - (lvls))
+
+/*
* The hardware supports concatenation of up to 16 tables at stage2 entry level
* and we use the feature whenever possible.
*
@@ -29,112 +37,208 @@
* On arm64, the smallest PAGE_SIZE supported is 4k, which means
* (PAGE_SHIFT - 3) > 4 holds for all page sizes.
* This implies the total number of page table levels at stage2 expected
- * by the hardware is actually the number of levels required for (KVM_PHYS_SHIFT - 4)
+ * by the hardware is actually the number of levels required for (IPA_SHIFT - 4)
* in normal translations (e.g., stage1), since we cannot have another level in
- * the range (KVM_PHYS_SHIFT, KVM_PHYS_SHIFT - 4).
+ * the range (IPA_SHIFT, IPA_SHIFT - 4).
*/
-#define STAGE2_PGTABLE_LEVELS ARM64_HW_PGTABLE_LEVELS(KVM_PHYS_SHIFT - 4)
+#define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4)
+#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr)
-/*
- * With all the supported VA_BITs and 40bit guest IPA, the following condition
- * is always true:
- *
- * STAGE2_PGTABLE_LEVELS <= CONFIG_PGTABLE_LEVELS
- *
- * We base our stage-2 page table walker helpers on this assumption and
- * fall back to using the host version of the helper wherever possible.
- * i.e, if a particular level is not folded (e.g, PUD) at stage2, we fall back
- * to using the host version, since it is guaranteed it is not folded at host.
- *
- * If the condition breaks in the future, we can rearrange the host level
- * definitions and reuse them for stage2. Till then...
- */
-#if STAGE2_PGTABLE_LEVELS > CONFIG_PGTABLE_LEVELS
-#error "Unsupported combination of guest IPA and host VA_BITS."
-#endif
-
-/* S2_PGDIR_SHIFT is the size mapped by top-level stage2 entry */
-#define S2_PGDIR_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(4 - STAGE2_PGTABLE_LEVELS)
-#define S2_PGDIR_SIZE (_AC(1, UL) << S2_PGDIR_SHIFT)
-#define S2_PGDIR_MASK (~(S2_PGDIR_SIZE - 1))
+/* stage2_pgdir_shift() is the size mapped by top-level stage2 entry for the VM */
+#define stage2_pgdir_shift(kvm) pt_levels_pgdir_shift(kvm_stage2_levels(kvm))
+#define stage2_pgdir_size(kvm) (1ULL << stage2_pgdir_shift(kvm))
+#define stage2_pgdir_mask(kvm) ~(stage2_pgdir_size(kvm) - 1)
/*
* The number of PTRS across all concatenated stage2 tables given by the
* number of bits resolved at the initial level.
+ * If we force more levels than necessary, we may have (stage2_pgdir_shift > IPA),
+ * in which case stage2_pgd_ptrs will have one entry.
*/
-#define PTRS_PER_S2_PGD (1 << (KVM_PHYS_SHIFT - S2_PGDIR_SHIFT))
+#define pgd_ptrs_shift(ipa, pgdir_shift) \
+ ((ipa) > (pgdir_shift) ? ((ipa) - (pgdir_shift)) : 0)
+#define __s2_pgd_ptrs(ipa, lvls) \
+ (1 << (pgd_ptrs_shift((ipa), pt_levels_pgdir_shift(lvls))))
+#define __s2_pgd_size(ipa, lvls) (__s2_pgd_ptrs((ipa), (lvls)) * sizeof(pgd_t))
+
+#define stage2_pgd_ptrs(kvm) __s2_pgd_ptrs(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
+#define stage2_pgd_size(kvm) __s2_pgd_size(kvm_phys_shift(kvm), kvm_stage2_levels(kvm))
/*
- * KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation
- * levels in addition to the PGD.
+ * kvm_mmu_cache_min_pages() is the number of pages required to install
+ * a stage-2 translation. We pre-allocate the entry-level page table at
+ * VM creation.
*/
-#define KVM_MMU_CACHE_MIN_PAGES (STAGE2_PGTABLE_LEVELS - 1)
+#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1)
-
-#if STAGE2_PGTABLE_LEVELS > 3
+/* Stage2 PUD definitions when the level is present */
+static inline bool kvm_stage2_has_pud(struct kvm *kvm)
+{
+ return (CONFIG_PGTABLE_LEVELS > 3) && (kvm_stage2_levels(kvm) > 3);
+}
#define S2_PUD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(1)
-#define S2_PUD_SIZE (_AC(1, UL) << S2_PUD_SHIFT)
+#define S2_PUD_SIZE (1UL << S2_PUD_SHIFT)
#define S2_PUD_MASK (~(S2_PUD_SIZE - 1))
-#define stage2_pgd_none(pgd) pgd_none(pgd)
-#define stage2_pgd_clear(pgd) pgd_clear(pgd)
-#define stage2_pgd_present(pgd) pgd_present(pgd)
-#define stage2_pgd_populate(pgd, pud) pgd_populate(NULL, pgd, pud)
-#define stage2_pud_offset(pgd, address) pud_offset(pgd, address)
-#define stage2_pud_free(pud) pud_free(NULL, pud)
+static inline bool stage2_pgd_none(struct kvm *kvm, pgd_t pgd)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return pgd_none(pgd);
+ else
+ return 0;
+}
-#define stage2_pud_table_empty(pudp) kvm_page_empty(pudp)
+static inline void stage2_pgd_clear(struct kvm *kvm, pgd_t *pgdp)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pgd_clear(pgdp);
+}
-static inline phys_addr_t stage2_pud_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline bool stage2_pgd_present(struct kvm *kvm, pgd_t pgd)
{
- phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+ if (kvm_stage2_has_pud(kvm))
+ return pgd_present(pgd);
+ else
+ return 1;
+}
- return (boundary - 1 < end - 1) ? boundary : end;
+static inline void stage2_pgd_populate(struct kvm *kvm, pgd_t *pgd, pud_t *pud)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pgd_populate(NULL, pgd, pud);
+}
+
+static inline pud_t *stage2_pud_offset(struct kvm *kvm,
+ pgd_t *pgd, unsigned long address)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return pud_offset(pgd, address);
+ else
+ return (pud_t *)pgd;
}
-#endif /* STAGE2_PGTABLE_LEVELS > 3 */
+static inline void stage2_pud_free(struct kvm *kvm, pud_t *pud)
+{
+ if (kvm_stage2_has_pud(kvm))
+ pud_free(NULL, pud);
+}
+static inline bool stage2_pud_table_empty(struct kvm *kvm, pud_t *pudp)
+{
+ if (kvm_stage2_has_pud(kvm))
+ return kvm_page_empty(pudp);
+ else
+ return false;
+}
-#if STAGE2_PGTABLE_LEVELS > 2
+static inline phys_addr_t
+stage2_pud_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+ if (kvm_stage2_has_pud(kvm)) {
+ phys_addr_t boundary = (addr + S2_PUD_SIZE) & S2_PUD_MASK;
+
+ return (boundary - 1 < end - 1) ? boundary : end;
+ } else {
+ return end;
+ }
+}
+
+/* Stage2 PMD definitions when the level is present */
+static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
+{
+ return (CONFIG_PGTABLE_LEVELS > 2) && (kvm_stage2_levels(kvm) > 2);
+}
#define S2_PMD_SHIFT ARM64_HW_PGTABLE_LEVEL_SHIFT(2)
-#define S2_PMD_SIZE (_AC(1, UL) << S2_PMD_SHIFT)
+#define S2_PMD_SIZE (1UL << S2_PMD_SHIFT)
#define S2_PMD_MASK (~(S2_PMD_SIZE - 1))
-#define stage2_pud_none(pud) pud_none(pud)
-#define stage2_pud_clear(pud) pud_clear(pud)
-#define stage2_pud_present(pud) pud_present(pud)
-#define stage2_pud_populate(pud, pmd) pud_populate(NULL, pud, pmd)
-#define stage2_pmd_offset(pud, address) pmd_offset(pud, address)
-#define stage2_pmd_free(pmd) pmd_free(NULL, pmd)
+static inline bool stage2_pud_none(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_none(pud);
+ else
+ return 0;
+}
+
+static inline void stage2_pud_clear(struct kvm *kvm, pud_t *pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ pud_clear(pud);
+}
-#define stage2_pud_huge(pud) pud_huge(pud)
-#define stage2_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
+static inline bool stage2_pud_present(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_present(pud);
+ else
+ return 1;
+}
-static inline phys_addr_t stage2_pmd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline void stage2_pud_populate(struct kvm *kvm, pud_t *pud, pmd_t *pmd)
{
- phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
+ if (kvm_stage2_has_pmd(kvm))
+ pud_populate(NULL, pud, pmd);
+}
- return (boundary - 1 < end - 1) ? boundary : end;
+static inline pmd_t *stage2_pmd_offset(struct kvm *kvm,
+ pud_t *pud, unsigned long address)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pmd_offset(pud, address);
+ else
+ return (pmd_t *)pud;
}
-#endif /* STAGE2_PGTABLE_LEVELS > 2 */
+static inline void stage2_pmd_free(struct kvm *kvm, pmd_t *pmd)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ pmd_free(NULL, pmd);
+}
+
+static inline bool stage2_pud_huge(struct kvm *kvm, pud_t pud)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return pud_huge(pud);
+ else
+ return 0;
+}
+
+static inline bool stage2_pmd_table_empty(struct kvm *kvm, pmd_t *pmdp)
+{
+ if (kvm_stage2_has_pmd(kvm))
+ return kvm_page_empty(pmdp);
+ else
+ return 0;
+}
-#define stage2_pte_table_empty(ptep) kvm_page_empty(ptep)
+static inline phys_addr_t
+stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+ if (kvm_stage2_has_pmd(kvm)) {
+ phys_addr_t boundary = (addr + S2_PMD_SIZE) & S2_PMD_MASK;
-#if STAGE2_PGTABLE_LEVELS == 2
-#include <asm/stage2_pgtable-nopmd.h>
-#elif STAGE2_PGTABLE_LEVELS == 3
-#include <asm/stage2_pgtable-nopud.h>
-#endif
+ return (boundary - 1 < end - 1) ? boundary : end;
+ } else {
+ return end;
+ }
+}
+static inline bool stage2_pte_table_empty(struct kvm *kvm, pte_t *ptep)
+{
+ return kvm_page_empty(ptep);
+}
-#define stage2_pgd_index(addr) (((addr) >> S2_PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
+static inline unsigned long stage2_pgd_index(struct kvm *kvm, phys_addr_t addr)
+{
+ return (((addr) >> stage2_pgdir_shift(kvm)) & (stage2_pgd_ptrs(kvm) - 1));
+}
-static inline phys_addr_t stage2_pgd_addr_end(phys_addr_t addr, phys_addr_t end)
+static inline phys_addr_t
+stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
{
- phys_addr_t boundary = (addr + S2_PGDIR_SIZE) & S2_PGDIR_MASK;
+ phys_addr_t boundary = (addr + stage2_pgdir_size(kvm)) & stage2_pgdir_mask(kvm);
return (boundary - 1 < end - 1) ? boundary : end;
}
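
The interplay of stage2_pgtable_levels(), pt_levels_pgdir_shift() and stage2_pgd_ptrs() can be reproduced in a few lines of standalone C; this sketch assumes 4K pages and simply replays the macros above for a range of IPA sizes:

#include <stdio.h>

#define PAGE_SHIFT      12      /* assume 4K pages */
#define BITS_PER_LEVEL  (PAGE_SHIFT - 3)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        for (unsigned int ipa = 32; ipa <= 48; ipa += 4) {
                /* stage2_pgtable_levels(): entry-level concatenation buys 4 bits */
                unsigned int lvls = DIV_ROUND_UP(ipa - 4 - PAGE_SHIFT,
                                                 BITS_PER_LEVEL);
                /* pt_levels_pgdir_shift() */
                unsigned int shift = BITS_PER_LEVEL * lvls + 3;
                /* stage2_pgd_ptrs(): clamps to one entry if shift >= ipa */
                unsigned int ptrs = 1U << (ipa > shift ? ipa - shift : 0);

                printf("IPA %2u: %u levels, pgdir_shift %2u, pgd %4u entries (%5u bytes)\n",
                       ipa, lvls, shift, ptrs, ptrs * 8U);
        }
        return 0;
}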
diff --git a/arch/arm64/include/asm/stat.h b/arch/arm64/include/asm/stat.h
index eab738019707..397c6ccd04e7 100644
--- a/arch/arm64/include/asm/stat.h
+++ b/arch/arm64/include/asm/stat.h
@@ -20,7 +20,7 @@
#ifdef CONFIG_COMPAT
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <asm/compat.h>
/*
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index e0d0f5b856e7..b13ca091f833 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -18,11 +18,11 @@
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_SIGPENDING
#define __ARCH_WANT_SYS_SIGPROCMASK
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/arm64/include/uapi/asm/unistd.h b/arch/arm64/include/uapi/asm/unistd.h
index 5072cbd15c82..dae1584cf017 100644
--- a/arch/arm64/include/uapi/asm/unistd.h
+++ b/arch/arm64/include/uapi/asm/unistd.h
@@ -16,5 +16,6 @@
*/
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_NEW_STAT
#include <asm-generic/unistd.h>
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 0e2ea1c78542..bb85e2f4603f 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -165,16 +165,15 @@ static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
/* Interface called from ACPI code to setup PCI host controller */
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
- int node = acpi_get_node(root->device->handle);
struct acpi_pci_generic_root_info *ri;
struct pci_bus *bus, *child;
struct acpi_pci_root_ops *root_ops;
- ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
+ ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return NULL;
- root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
+ root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
if (!root_ops) {
kfree(ri);
return NULL;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 25fcd22a4bb2..96b8f2f51ab2 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -602,7 +602,7 @@ static void __init of_parse_and_init_cpus(void)
{
struct device_node *dn;
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
u64 hwid = of_get_cpu_mpidr(dn);
if (hwid == INVALID_HWID)
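
For reference, a hedged kernel-side sketch of how the new iterator is typically used; for_each_of_cpu_node() walks the /cpus child nodes without relying on device_type and handles the node refcounting as it iterates (the helper below is hypothetical):

#include <linux/of.h>

static unsigned int count_cpu_nodes(void)
{
        struct device_node *dn;
        unsigned int n = 0;

        for_each_of_cpu_node(dn)        /* releases the previous node each step */
                n++;

        return n;
}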
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index a6c9fbaeaefc..dd436a50fce7 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -391,15 +391,15 @@ int __attribute_const__ kvm_target_cpu(void)
return KVM_ARM_TARGET_CORTEX_A53;
case ARM_CPU_PART_CORTEX_A57:
return KVM_ARM_TARGET_CORTEX_A57;
- };
+ }
break;
case ARM_CPU_IMP_APM:
switch (part_number) {
case APM_CPU_PART_POTENZA:
return KVM_ARM_TARGET_XGENE_POTENZA;
- };
+ }
break;
- };
+ }
/* Return a default generic target */
return KVM_ARM_TARGET_GENERIC_V8;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index e5e741bfffe1..35a81bebd02b 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -284,6 +284,13 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
*/
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
return 0;
+ case ARM_EXCEPTION_IL:
+ /*
+ * We attempted an illegal exception return. Guest state must
+ * have been corrupted somehow. Give up.
+ */
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ return -EINVAL;
default:
kvm_pr_unimpl("Unsupported exception type: %d",
exception_index);
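
From userspace, the new ARM_EXCEPTION_IL path surfaces as a failed KVM_RUN with exit_reason set to KVM_EXIT_FAIL_ENTRY; a hedged VMM-side sketch (vcpu_run_once() and its error handling are hypothetical):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* run: the mmap()ed kvm_run region of the vcpu fd */
static int vcpu_run_once(int vcpu_fd, struct kvm_run *run)
{
        int ret = ioctl(vcpu_fd, KVM_RUN, 0);

        /* The IL path above returns -EINVAL and sets KVM_EXIT_FAIL_ENTRY */
        if (run->exit_reason == KVM_EXIT_FAIL_ENTRY) {
                fprintf(stderr, "entry failed, hw reason %#llx\n",
                        (unsigned long long)
                        run->fail_entry.hardware_entry_failure_reason);
                return -1;
        }

        return ret;
}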
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 2fabc2dc1966..82d1904328ad 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_KVM_ARM_HOST) += switch.o
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
-obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 24b4fbafe3e4..b1f14f736962 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -162,6 +162,20 @@ el1_error:
mov x0, #ARM_EXCEPTION_EL1_SERROR
b __guest_exit
+el2_sync:
+ /* Check for illegal exception return, otherwise panic */
+ mrs x0, spsr_el2
+
+ /* if this was something else, then panic! */
+ tst x0, #PSR_IL_BIT
+ b.eq __hyp_panic
+
+ /* Let's attempt a recovery from the illegal exception return */
+ get_vcpu_ptr x1, x0
+ mov x0, #ARM_EXCEPTION_IL
+ b __guest_exit
+
+
el2_error:
ldp x0, x1, [sp], #16
@@ -240,7 +254,7 @@ ENTRY(__kvm_hyp_vector)
invalid_vect el2t_fiq_invalid // FIQ EL2t
invalid_vect el2t_error_invalid // Error EL2t
- invalid_vect el2h_sync_invalid // Synchronous EL2h
+ valid_vect el2_sync // Synchronous EL2h
invalid_vect el2h_irq_invalid // IRQ EL2h
invalid_vect el2h_fiq_invalid // FIQ EL2h
valid_vect el2_error // Error EL2h
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
deleted file mode 100644
index 603e1ee83e89..000000000000
--- a/arch/arm64/kvm/hyp/s2-setup.c
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2016 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/types.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_hyp.h>
-
-u32 __hyp_text __init_stage2_translation(void)
-{
- u64 val = VTCR_EL2_FLAGS;
- u64 parange;
- u64 tmp;
-
- /*
- * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
- * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
- * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
- */
- parange = read_sysreg(id_aa64mmfr0_el1) & 7;
- if (parange > ID_AA64MMFR0_PARANGE_MAX)
- parange = ID_AA64MMFR0_PARANGE_MAX;
- val |= parange << 16;
-
- /* Compute the actual PARange... */
- switch (parange) {
- case 0:
- parange = 32;
- break;
- case 1:
- parange = 36;
- break;
- case 2:
- parange = 40;
- break;
- case 3:
- parange = 42;
- break;
- case 4:
- parange = 44;
- break;
- case 5:
- default:
- parange = 48;
- break;
- }
-
- /*
- * ... and clamp it to 40 bits, unless we have some braindead
- * HW that implements less than that. In all cases, we'll
- * return that value for the rest of the kernel to decide what
- * to do.
- */
- val |= 64 - (parange > 40 ? 40 : parange);
-
- /*
- * Check the availability of Hardware Access Flag / Dirty Bit
- * Management in ID_AA64MMFR1_EL1 and enable the feature in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_HADBS_SHIFT) & 0xf;
- if (tmp)
- val |= VTCR_EL2_HA;
-
- /*
- * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
- * bit in VTCR_EL2.
- */
- tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf;
- val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ?
- VTCR_EL2_VS_16BIT :
- VTCR_EL2_VS_8BIT;
-
- write_sysreg(val, vtcr_el2);
-
- return parange;
-}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ca46153d7915..7cc175c88a37 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -198,7 +198,7 @@ void deactivate_traps_vhe_put(void)
static void __hyp_text __activate_vm(struct kvm *kvm)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
}
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
@@ -263,7 +263,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
return false; /* Translation failed, back to guest */
/* Convert PAR to HPFAR format */
- *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+ *hpfar = PAR_TO_HPFAR(tmp);
return true;
}
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 76d016b446b2..68d6f7c3b237 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -152,8 +152,25 @@ static void __hyp_text __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
static void __hyp_text
__sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
+ u64 pstate = ctxt->gp_regs.regs.pstate;
+ u64 mode = pstate & PSR_AA32_MODE_MASK;
+
+ /*
+ * Safety check to ensure we're setting the CPU up to enter the guest
+ * in a less privileged mode.
+ *
+ * If we are attempting a return to EL2 or higher in AArch64 state,
+ * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
+ * we'll take an illegal exception state exception immediately after
+ * the ERET to the guest. Attempts to return to AArch32 Hyp will
+ * result in an illegal exception return because EL2's execution state
+ * is determined by SCR_EL3.RW.
+ */
+ if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
+ pstate = PSR_MODE_EL2h | PSR_IL_BIT;
+
write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
- write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+ write_sysreg_el2(pstate, spsr);
if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
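
The mode check is easy to replay in isolation; a standalone sketch using the architectural PSR encodings (constant values copied from the kernel's headers, the helper name is made up):

#include <stdio.h>
#include <stdint.h>

#define PSR_MODE_MASK   0x1f            /* PSR_AA32_MODE_MASK */
#define PSR_MODE_EL2t   0x8
#define PSR_MODE_EL2h   0x9
#define PSR_MODE32_BIT  0x10
#define PSR_IL_BIT      (1 << 20)

static uint64_t sanitise_spsr(uint64_t pstate)
{
        uint64_t mode = pstate & PSR_MODE_MASK;

        /* AArch64 EL2 or above: force an illegal exception return */
        if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
                return PSR_MODE_EL2h | PSR_IL_BIT;

        return pstate;
}

int main(void)
{
        printf("EL1h 0x3c5 -> %#llx\n",
               (unsigned long long)sanitise_spsr(0x3c5)); /* unchanged */
        printf("EL2h 0x3c9 -> %#llx\n",
               (unsigned long long)sanitise_spsr(0x3c9)); /* 0x100009 */
        return 0;
}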
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 131c7772703c..4dbd9c69a96d 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -30,7 +30,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
* bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
* let's flip TGE before executing the TLB operation.
*/
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);
@@ -39,7 +39,7 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
{
- write_sysreg(kvm->arch.vttbr, vttbr_el2);
+ __load_guest_stage2(kvm);
isb();
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index e37c78bbe1ca..b72a3dd56204 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -26,6 +26,7 @@
#include <kvm/arm_arch_timer.h>
+#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
@@ -33,6 +34,9 @@
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
+/* Maximum phys_shift supported for any VM on this host */
+static u32 kvm_ipa_limit;
+
/*
* ARMv8 Reset Values
*/
@@ -55,12 +59,12 @@ static bool cpu_has_32bit_el1(void)
}
/**
- * kvm_arch_dev_ioctl_check_extension
+ * kvm_arch_vm_ioctl_check_extension
*
* We currently assume that the number of HW registers is uniform
* across all CPUs (see cpuinfo_sanity_check).
*/
-int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
+int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
@@ -82,9 +86,11 @@ int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
break;
case KVM_CAP_SET_GUEST_DEBUG:
case KVM_CAP_VCPU_ATTRIBUTES:
- case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
+ case KVM_CAP_ARM_VM_IPA_SIZE:
+ r = kvm_ipa_limit;
+ break;
default:
r = 0;
}
@@ -133,3 +139,99 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu);
}
+
+void kvm_set_ipa_limit(void)
+{
+ unsigned int ipa_max, pa_max, va_max, parange;
+
+ parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
+ pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
+
+ /* Clamp the IPA limit to the PA size supported by the kernel */
+ ipa_max = (pa_max > PHYS_MASK_SHIFT) ? PHYS_MASK_SHIFT : pa_max;
+ /*
+ * Since our stage2 table is dependent on the stage1 page table code,
+ * we must always honor the following condition:
+ *
+ * Number of levels in Stage1 >= Number of levels in Stage2.
+ *
+ * So clamp the IPA limit further down to limit the number of levels.
+ * Since we can concatenate up to 16 tables at the entry level, we can
+ * go up to 4 bits above the maximum VA addressable with the current
+ * number of levels.
+ */
+ va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
+ va_max += 4;
+
+ if (va_max < ipa_max)
+ ipa_max = va_max;
+
+ /*
+ * If the final limit is lower than the real physical address
+ * limit of the CPUs, report the reason.
+ */
+ if (ipa_max < pa_max)
+ pr_info("kvm: Limiting the IPA size due to kernel %s Address limit\n",
+ (va_max < pa_max) ? "Virtual" : "Physical");
+
+ WARN(ipa_max < KVM_PHYS_SHIFT,
+ "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
+ kvm_ipa_limit = ipa_max;
+ kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
+}
+
+/*
+ * Configure VTCR_EL2 for this VM. The VTCR value is common
+ * across all the physical CPUs on the system. We use system-wide
+ * sanitised values to fill in different fields, except for Hardware
+ * Management of Access Flags. The HA flag is set unconditionally on
+ * all CPUs, as it is safe to run with or without the feature and
+ * the bit is RES0 on CPUs that don't support it.
+ */
+int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
+{
+ u64 vtcr = VTCR_EL2_FLAGS;
+ u32 parange, phys_shift;
+ u8 lvls;
+
+ if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+ return -EINVAL;
+
+ phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type);
+ if (phys_shift) {
+ if (phys_shift > kvm_ipa_limit ||
+ phys_shift < 32)
+ return -EINVAL;
+ } else {
+ phys_shift = KVM_PHYS_SHIFT;
+ }
+
+ parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
+ if (parange > ID_AA64MMFR0_PARANGE_MAX)
+ parange = ID_AA64MMFR0_PARANGE_MAX;
+ vtcr |= parange << VTCR_EL2_PS_SHIFT;
+
+ vtcr |= VTCR_EL2_T0SZ(phys_shift);
+ /*
+ * Use a minimum of 2 page table levels to prevent splitting
+ * host PMD huge pages at stage2.
+ */
+ lvls = stage2_pgtable_levels(phys_shift);
+ if (lvls < 2)
+ lvls = 2;
+ vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);
+
+ /*
+ * Enable Hardware Access Flag management unconditionally on all
+ * CPUs. The feature is RES0 on CPUs without support and must be
+ * ignored by those CPUs.
+ */
+ vtcr |= VTCR_EL2_HA;
+
+ /* Set the vmid bits */
+ vtcr |= (kvm_get_vmid_bits() == 16) ?
+ VTCR_EL2_VS_16BIT :
+ VTCR_EL2_VS_8BIT;
+ kvm->arch.vtcr = vtcr;
+ return 0;
+}
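
On the userspace side, the new limit is advertised through KVM_CAP_ARM_VM_IPA_SIZE and consumed through the machine-type argument of KVM_CREATE_VM; a hedged sketch, assuming headers new enough to define KVM_VM_TYPE_ARM_IPA_SIZE():

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
        int kvm = open("/dev/kvm", O_RDWR);
        long type = 0;          /* legacy type 0: the default 40-bit IPA */
        int ipa, vm;

        if (kvm < 0)
                return 1;

        ipa = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_VM_IPA_SIZE);
        if (ipa > 0)            /* 0 means the capability is absent */
                type = KVM_VM_TYPE_ARM_IPA_SIZE(ipa);

        vm = ioctl(kvm, KVM_CREATE_VM, type);
        printf("vm fd %d, IPA limit %d bits\n", vm, ipa > 0 ? ipa : 40);
        return vm < 0;
}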
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 072c51fb07d7..d190612b8f33 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
@@ -32,16 +33,6 @@
#include <asm/cacheflush.h>
-static int swiotlb __ro_after_init;
-
-static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
- bool coherent)
-{
- if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
- return pgprot_writecombine(prot);
- return prot;
-}
-
static struct gen_pool *atomic_pool __ro_after_init;
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
@@ -91,18 +82,16 @@ static int __free_from_pool(void *start, size_t size)
return 1;
}
-static void *__dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flags, unsigned long attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
- bool coherent = is_device_dma_coherent(dev);
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
+ pgprot_t prot = pgprot_writecombine(PAGE_KERNEL);
size = PAGE_ALIGN(size);
- if (!coherent && !gfpflags_allow_blocking(flags)) {
+ if (!gfpflags_allow_blocking(flags)) {
struct page *page = NULL;
void *addr = __alloc_from_pool(size, &page, flags);
@@ -112,14 +101,10 @@ static void *__dma_alloc(struct device *dev, size_t size,
return addr;
}
- ptr = swiotlb_alloc(dev, size, dma_handle, flags, attrs);
+ ptr = dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
if (!ptr)
goto no_mem;
- /* no need for non-cacheable mapping if coherent */
- if (coherent)
- return ptr;
-
/* remove any dirty cache lines on the kernel alias */
__dma_flush_area(ptr, size);
@@ -133,130 +118,57 @@ static void *__dma_alloc(struct device *dev, size_t size,
return coherent_ptr;
no_map:
- swiotlb_free(dev, size, ptr, *dma_handle, attrs);
+ dma_direct_free_pages(dev, size, ptr, *dma_handle, attrs);
no_mem:
return NULL;
}
-static void __dma_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
{
- void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+ if (!__free_from_pool(vaddr, PAGE_ALIGN(size))) {
+ void *kaddr = phys_to_virt(dma_to_phys(dev, dma_handle));
- size = PAGE_ALIGN(size);
-
- if (!is_device_dma_coherent(dev)) {
- if (__free_from_pool(vaddr, size))
- return;
vunmap(vaddr);
+ dma_direct_free_pages(dev, size, kaddr, dma_handle, attrs);
}
- swiotlb_free(dev, size, swiotlb_addr, dma_handle, attrs);
}
-static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
+long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
+ dma_addr_t dma_addr)
{
- dma_addr_t dev_addr;
-
- dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- if (!is_device_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
-
- return dev_addr;
-}
-
-
-static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- if (!is_device_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+ return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
-static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- unsigned long attrs)
+pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
+ unsigned long attrs)
{
- struct scatterlist *sg;
- int i, ret;
-
- ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- if (!is_device_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- for_each_sg(sgl, sg, ret, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
-
- return ret;
-}
-
-static void __swiotlb_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int i;
-
- if (!is_device_dma_coherent(dev) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
+ if (!dev_is_dma_coherent(dev) || (attrs & DMA_ATTR_WRITE_COMBINE))
+ return pgprot_writecombine(prot);
+ return prot;
}
-static void __swiotlb_sync_single_for_cpu(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- if (!is_device_dma_coherent(dev))
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+ __dma_map_area(phys_to_virt(paddr), size, dir);
}
-static void __swiotlb_sync_single_for_device(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
- if (!is_device_dma_coherent(dev))
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ __dma_unmap_area(phys_to_virt(paddr), size, dir);
}
-static void __swiotlb_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+ struct page *page, size_t size)
{
- struct scatterlist *sg;
- int i;
-
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
-}
+ int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-static void __swiotlb_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int i;
+ if (!ret)
+ sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
- swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
+ return ret;
}
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
@@ -277,74 +189,6 @@ static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
return ret;
}
-static int __swiotlb_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- unsigned long attrs)
-{
- int ret;
- unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
-
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
-
- if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
-
- return __swiotlb_mmap_pfn(vma, pfn, size);
-}
-
-static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
- struct page *page, size_t size)
-{
- int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-
- if (!ret)
- sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-
- return ret;
-}
-
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- unsigned long attrs)
-{
- struct page *page = phys_to_page(dma_to_phys(dev, handle));
-
- return __swiotlb_get_sgtable_page(sgt, page, size);
-}
-
-static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
-{
- if (swiotlb)
- return swiotlb_dma_supported(hwdev, mask);
- return 1;
-}
-
-static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
-{
- if (swiotlb)
- return swiotlb_dma_mapping_error(hwdev, addr);
- return 0;
-}
-
-static const struct dma_map_ops arm64_swiotlb_dma_ops = {
- .alloc = __dma_alloc,
- .free = __dma_free,
- .mmap = __swiotlb_mmap,
- .get_sgtable = __swiotlb_get_sgtable,
- .map_page = __swiotlb_map_page,
- .unmap_page = __swiotlb_unmap_page,
- .map_sg = __swiotlb_map_sg_attrs,
- .unmap_sg = __swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
- .sync_single_for_device = __swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = __swiotlb_sync_sg_for_device,
- .dma_supported = __swiotlb_dma_supported,
- .mapping_error = __swiotlb_dma_mapping_error,
-};
-
static int __init atomic_pool_init(void)
{
pgprot_t prot = __pgprot(PROT_NORMAL_NC);
@@ -500,10 +344,6 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
- if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
- swiotlb = 1;
-
WARN_TAINT(ARCH_DMA_MINALIGN < cache_line_size(),
TAINT_CPU_OUT_OF_SPEC,
"ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)",
@@ -528,7 +368,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
unsigned long attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size_t iosize = size;
void *addr;
@@ -569,7 +409,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
addr = NULL;
}
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
struct page *page;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
@@ -596,7 +436,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
size >> PAGE_SHIFT);
}
} else {
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+ pgprot_t prot = arch_dma_mmap_pgprot(dev, PAGE_KERNEL, attrs);
struct page **pages;
pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
@@ -658,8 +498,7 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
struct vm_struct *area;
int ret;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
+ vma->vm_page_prot = arch_dma_mmap_pgprot(dev, vma->vm_page_prot, attrs);
if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -709,11 +548,11 @@ static void __iommu_sync_single_for_cpu(struct device *dev,
{
phys_addr_t phys;
- if (is_device_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev))
return;
- phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_unmap_area(phys_to_virt(phys), size, dir);
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
+ arch_sync_dma_for_cpu(dev, phys, size, dir);
}
static void __iommu_sync_single_for_device(struct device *dev,
@@ -722,11 +561,11 @@ static void __iommu_sync_single_for_device(struct device *dev,
{
phys_addr_t phys;
- if (is_device_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev))
return;
- phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_map_area(phys_to_virt(phys), size, dir);
+ phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dev_addr);
+ arch_sync_dma_for_device(dev, phys, size, dir);
}
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
@@ -734,13 +573,13 @@ static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = dev_is_dma_coherent(dev);
int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
- if (!iommu_dma_mapping_error(dev, dev_addr) &&
- (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
- __iommu_sync_single_for_device(dev, dev_addr, size, dir);
+ if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ !iommu_dma_mapping_error(dev, dev_addr))
+ __dma_map_area(page_address(page) + offset, size, dir);
return dev_addr;
}
@@ -762,11 +601,11 @@ static void __iommu_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sg;
int i;
- if (is_device_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev))
return;
for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(sg_virt(sg), sg->length, dir);
+ arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}
static void __iommu_sync_sg_for_device(struct device *dev,
@@ -776,18 +615,18 @@ static void __iommu_sync_sg_for_device(struct device *dev,
struct scatterlist *sg;
int i;
- if (is_device_dma_coherent(dev))
+ if (dev_is_dma_coherent(dev))
return;
for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(sg_virt(sg), sg->length, dir);
+ arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
unsigned long attrs)
{
- bool coherent = is_device_dma_coherent(dev);
+ bool coherent = dev_is_dma_coherent(dev);
if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
@@ -879,9 +718,9 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent)
{
if (!dev->dma_ops)
- dev->dma_ops = &arm64_swiotlb_dma_ops;
+ dev->dma_ops = &swiotlb_dma_ops;
- dev->archdata.dma_coherent = coherent;
+ dev->dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
#ifdef CONFIG_XEN
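
With the bespoke swiotlb ops gone, ordinary drivers reach the arch_sync_dma_* hooks above through the generic streaming API; a hedged kernel-side sketch (send_buffer() is hypothetical):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* On a non-coherent device, dma_map_single() ends up in
 * arch_sync_dma_for_device() above, and the unmap path in
 * arch_sync_dma_for_cpu(). */
static int send_buffer(struct device *dev, void *buf, size_t len)
{
        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;

        /* ... program the device with 'handle' and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}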
diff --git a/arch/c6x/Makefile b/arch/c6x/Makefile
index 3fe8a948e94c..b7aa854f7008 100644
--- a/arch/c6x/Makefile
+++ b/arch/c6x/Makefile
@@ -40,9 +40,7 @@ boot := arch/$(ARCH)/boot
DTB:=$(subst dtbImage.,,$(filter dtbImage.%, $(MAKECMDGOALS)))
export DTB
-ifneq ($(DTB),)
core-y += $(boot)/dts/
-endif
# With make 3.82 we cannot mix normal and wildcard targets
diff --git a/arch/c6x/boot/dts/Makefile b/arch/c6x/boot/dts/Makefile
index b212d278ebc4..f438285c3640 100644
--- a/arch/c6x/boot/dts/Makefile
+++ b/arch/c6x/boot/dts/Makefile
@@ -5,15 +5,12 @@
DTC_FLAGS ?= -p 1024
+dtb-$(CONFIG_SOC_TMS320C6455) += dsk6455.dtb
+dtb-$(CONFIG_SOC_TMS320C6457) += evmc6457.dtb
+dtb-$(CONFIG_SOC_TMS320C6472) += evmc6472.dtb
+dtb-$(CONFIG_SOC_TMS320C6474) += evmc6474.dtb
+dtb-$(CONFIG_SOC_TMS320C6678) += evmc6678.dtb
+
ifneq ($(DTB),)
-obj-y += linked_dtb.o
+obj-y += $(DTB).dtb.o
endif
-
-quiet_cmd_cp = CP $< $@$2
- cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
-
-# Generate builtin.dtb from $(DTB).dtb
-$(obj)/builtin.dtb: $(obj)/$(DTB).dtb
- $(call if_changed,cp)
-
-$(obj)/linked_dtb.o: $(obj)/builtin.dtb
diff --git a/arch/c6x/boot/dts/linked_dtb.S b/arch/c6x/boot/dts/linked_dtb.S
deleted file mode 100644
index cf347f1d16ce..000000000000
--- a/arch/c6x/boot/dts/linked_dtb.S
+++ /dev/null
@@ -1,2 +0,0 @@
-.section __fdt_blob,"a"
-.incbin "arch/c6x/boot/dts/builtin.dtb"
diff --git a/arch/c6x/include/asm/sections.h b/arch/c6x/include/asm/sections.h
index d6c591ab5b7e..dc2f15eb3bde 100644
--- a/arch/c6x/include/asm/sections.h
+++ b/arch/c6x/include/asm/sections.h
@@ -8,6 +8,5 @@ extern char _vectors_start[];
extern char _vectors_end[];
extern char _data_lma[];
-extern char _fdt_start[], _fdt_end[];
#endif /* _ASM_C6X_SECTIONS_H */
diff --git a/arch/c6x/include/uapi/asm/unistd.h b/arch/c6x/include/uapi/asm/unistd.h
index 0d2daf7f9809..6b2fe792de9d 100644
--- a/arch/c6x/include/uapi/asm/unistd.h
+++ b/arch/c6x/include/uapi/asm/unistd.h
@@ -16,6 +16,7 @@
*/
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_CLONE
/* Use the standard ABI for syscalls. */
diff --git a/arch/c6x/kernel/setup.c b/arch/c6x/kernel/setup.c
index 786e36e2f61d..05d96a9541b5 100644
--- a/arch/c6x/kernel/setup.c
+++ b/arch/c6x/kernel/setup.c
@@ -96,7 +96,7 @@ static void __init get_cpuinfo(void)
unsigned long core_khz;
u64 tmp;
struct cpuinfo_c6x *p;
- struct device_node *node, *np;
+ struct device_node *node;
p = &per_cpu(cpu_data, smp_processor_id());
@@ -190,13 +190,8 @@ static void __init get_cpuinfo(void)
p->core_id = get_coreid();
- node = of_find_node_by_name(NULL, "cpus");
- if (node) {
- for_each_child_of_node(node, np)
- if (!strcmp("cpu", np->name))
- ++c6x_num_cores;
- of_node_put(node);
- }
+ for_each_of_cpu_node(node)
+ ++c6x_num_cores;
node = of_find_node_by_name(NULL, "soc");
if (node) {
@@ -270,7 +265,7 @@ int __init c6x_add_memory(phys_addr_t start, unsigned long size)
notrace void __init machine_init(unsigned long dt_ptr)
{
void *dtb = __va(dt_ptr);
- void *fdt = _fdt_start;
+ void *fdt = __dtb_start;
/* interrupts must be masked */
set_creg(IER, 2);
@@ -363,7 +358,7 @@ void __init setup_arch(char **cmdline_p)
memory_end >> PAGE_SHIFT);
memblock_reserve(memory_start, bootmap_size);
- unflatten_device_tree();
+ unflatten_and_copy_device_tree();
c6x_cache_init();
diff --git a/arch/c6x/kernel/vmlinux.lds.S b/arch/c6x/kernel/vmlinux.lds.S
index 1fba5b421eee..584bab2bace6 100644
--- a/arch/c6x/kernel/vmlinux.lds.S
+++ b/arch/c6x/kernel/vmlinux.lds.S
@@ -90,16 +90,6 @@ SECTIONS
*(.switch)
}
- . = ALIGN (8) ;
- __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET)
- {
- _fdt_start = . ; /* place for fdt blob */
- *(__fdt_blob) ; /* Any link-placed DTB */
- BYTE(0); /* section always has contents */
- . = _fdt_start + 0x4000; /* Pad up to 16kbyte */
- _fdt_end = . ;
- }
-
_etext = .;
/*
diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
index 58634e6bae92..4003ddc616e1 100644
--- a/arch/h8300/Makefile
+++ b/arch/h8300/Makefile
@@ -31,21 +31,12 @@ CROSS_COMPILE := h8300-unknown-linux-
endif
core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
-ifneq '$(CONFIG_H8300_BUILTIN_DTB)' '""'
-core-y += arch/h8300/boot/dts/
-endif
+core-y += arch/$(ARCH)/boot/dts/
libs-y += arch/$(ARCH)/lib/
boot := arch/h8300/boot
-%.dtb %.dtb.S %.dtb.o: | scripts
- $(Q)$(MAKE) $(build)=arch/h8300/boot/dts arch/h8300/boot/dts/$@
-
-PHONY += dtbs
-dtbs: scripts
- $(Q)$(MAKE) $(build)=arch/h8300/boot/dts
-
archmrproper:
archclean:
diff --git a/arch/h8300/include/uapi/asm/unistd.h b/arch/h8300/include/uapi/asm/unistd.h
index 7dd20ef7625a..628195823816 100644
--- a/arch/h8300/include/uapi/asm/unistd.h
+++ b/arch/h8300/include/uapi/asm/unistd.h
@@ -1,5 +1,6 @@
#define __ARCH_NOMMU
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#include <asm-generic/unistd.h>
diff --git a/arch/hexagon/include/uapi/asm/unistd.h b/arch/hexagon/include/uapi/asm/unistd.h
index ea181e79162e..c91ca7d02461 100644
--- a/arch/hexagon/include/uapi/asm/unistd.h
+++ b/arch/hexagon/include/uapi/asm/unistd.h
@@ -29,6 +29,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index ffb705dc9c13..49e34db2529c 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -28,6 +28,9 @@
#define __IGNORE_vfork /* clone() */
#define __IGNORE_umount2 /* umount() */
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SYS_UTIME
+
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
#include <linux/types.h>
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index 1d5483f6e457..85904b73e261 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -621,7 +621,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -657,7 +656,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 52a0af127951..9b3818bbb68b 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -578,7 +578,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -614,7 +613,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index b3103e51268a..769677809945 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -599,7 +599,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -635,7 +634,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index fb7d651a4cab..7dd264ddf2ea 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6b37f5537c39..515f7439c755 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -580,7 +580,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -616,7 +615,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index c717bf879449..8e1038ceb407 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -602,7 +602,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -638,7 +637,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 226c994ce794..62c8aaa15cc7 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -720,7 +719,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index b383327fd77a..733973f91297 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 9783d3deb9e9..fee30cc9ac16 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -570,7 +570,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -606,7 +605,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index a35d10ee10cb..eebf9c9088e7 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -593,7 +593,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -629,7 +628,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 573bf922d448..dabc54318c09 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -571,7 +571,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -607,7 +606,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index efb27a7fcc55..0d9a5c2a311a 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -572,7 +572,6 @@ CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_MANAGER=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_AEGIS128=m
@@ -608,7 +607,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_LZO=m
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 30d0d3fbd4ef..e680031bda7b 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -7,6 +7,7 @@
#define NR_syscalls 380
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
#define __ARCH_WANT_STAT64
@@ -21,7 +22,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_MMAP
diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c
index 1b083c500b9a..ebb3b6d169ea 100644
--- a/arch/m68k/mac/misc.c
+++ b/arch/m68k/mac/misc.c
@@ -37,35 +37,6 @@
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA
-static time64_t cuda_read_time(void)
-{
- struct adb_request req;
- time64_t time;
-
- if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
- return 0;
- while (!req.complete)
- cuda_poll();
-
- time = (u32)((req.reply[3] << 24) | (req.reply[4] << 16) |
- (req.reply[5] << 8) | req.reply[6]);
-
- return time - RTC_OFFSET;
-}
-
-static void cuda_write_time(time64_t time)
-{
- struct adb_request req;
- u32 data = lower_32_bits(time + RTC_OFFSET);
-
- if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
- (data >> 24) & 0xFF, (data >> 16) & 0xFF,
- (data >> 8) & 0xFF, data & 0xFF) < 0)
- return;
- while (!req.complete)
- cuda_poll();
-}
-
static __u8 cuda_read_pram(int offset)
{
struct adb_request req;
@@ -91,33 +62,6 @@ static void cuda_write_pram(int offset, __u8 data)
#endif /* CONFIG_ADB_CUDA */
#ifdef CONFIG_ADB_PMU
-static time64_t pmu_read_time(void)
-{
- struct adb_request req;
- time64_t time;
-
- if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
- return 0;
- pmu_wait_complete(&req);
-
- time = (u32)((req.reply[0] << 24) | (req.reply[1] << 16) |
- (req.reply[2] << 8) | req.reply[3]);
-
- return time - RTC_OFFSET;
-}
-
-static void pmu_write_time(time64_t time)
-{
- struct adb_request req;
- u32 data = lower_32_bits(time + RTC_OFFSET);
-
- if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
- (data >> 24) & 0xFF, (data >> 16) & 0xFF,
- (data >> 8) & 0xFF, data & 0xFF) < 0)
- return;
- pmu_wait_complete(&req);
-}
-
static __u8 pmu_read_pram(int offset)
{
struct adb_request req;
@@ -295,13 +239,17 @@ static time64_t via_read_time(void)
* is basically any machine with Mac II-style ADB.
*/
-static void via_write_time(time64_t time)
+static void via_set_rtc_time(struct rtc_time *tm)
{
union {
__u8 cdata[4];
__u32 idata;
} data;
__u8 temp;
+ time64_t time;
+
+ time = mktime64(tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
/* Clear the write protect bit */
@@ -641,12 +589,12 @@ int mac_hwclk(int op, struct rtc_time *t)
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- now = cuda_read_time();
+ now = cuda_get_time();
break;
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- now = pmu_read_time();
+ now = pmu_get_time();
break;
#endif
default:
@@ -665,24 +613,21 @@ int mac_hwclk(int op, struct rtc_time *t)
__func__, t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
t->tm_hour, t->tm_min, t->tm_sec);
- now = mktime64(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
- t->tm_hour, t->tm_min, t->tm_sec);
-
switch (macintosh_config->adb_type) {
case MAC_ADB_IOP:
case MAC_ADB_II:
case MAC_ADB_PB1:
- via_write_time(now);
+ via_set_rtc_time(t);
break;
#ifdef CONFIG_ADB_CUDA
case MAC_ADB_EGRET:
case MAC_ADB_CUDA:
- cuda_write_time(now);
+ cuda_set_rtc_time(t);
break;
#endif
#ifdef CONFIG_ADB_PMU
case MAC_ADB_PB2:
- pmu_write_time(now);
+ pmu_set_rtc_time(t);
break;
#endif
default:
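
The cuda/pmu helpers removed above duplicated logic now supplied by the shared via-cuda and via-pmu drivers (cuda_get_time(), pmu_get_time(), and the matching set_rtc_time callbacks); the only arithmetic they carried was the shift between the Mac RTC epoch (seconds since 1904-01-01) and the Unix epoch. A standalone sketch of that conversion, assuming RTC_OFFSET keeps its usual value of 2082844800 seconds:

#include <stdint.h>
#include <stdio.h>

/* Assumed value: seconds from 1904-01-01 to 1970-01-01. */
#define RTC_OFFSET 2082844800LL

static int64_t mac_rtc_to_time64(uint32_t rtc)
{
	return (int64_t)rtc - RTC_OFFSET;	/* Mac epoch -> Unix epoch */
}

static uint32_t time64_to_mac_rtc(int64_t t)
{
	return (uint32_t)(t + RTC_OFFSET);	/* Unix epoch -> Mac epoch */
}

int main(void)
{
	int64_t now = 1540000000;	/* late 2018 */
	uint32_t rtc = time64_to_mac_rtc(now);

	printf("rtc=%u round-trip=%lld\n", rtc,
	       (long long)mac_rtc_to_time64(rtc));
	return 0;
}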
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 4f3ab5707265..0823d291fbeb 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -65,9 +65,7 @@ boot := arch/microblaze/boot
# Are we making a simpleImage.<boardname> target? If so, crack out the boardname
DTB:=$(subst simpleImage.,,$(filter simpleImage.%, $(MAKECMDGOALS)))
-ifneq ($(DTB),)
- core-y += $(boot)/dts/
-endif
+core-y += $(boot)/dts/
# defines filename extension depending memory management type
ifeq ($(CONFIG_MMU),)
diff --git a/arch/microblaze/boot/dts/Makefile b/arch/microblaze/boot/dts/Makefile
index 1f77913d404d..c7324e74f9ef 100644
--- a/arch/microblaze/boot/dts/Makefile
+++ b/arch/microblaze/boot/dts/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
#
+dtb-y := system.dtb
+
+ifneq ($(DTB),)
obj-y += linked_dtb.o
# Ensure system.dtb exists
@@ -11,6 +14,7 @@ ifneq ($(DTB),system)
$(obj)/system.dtb: $(obj)/$(DTB).dtb
$(call if_changed,cp)
endif
+endif
quiet_cmd_cp = CP $< $@$2
cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false)
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index a62d09420a47..f42c40f5001b 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -15,6 +15,7 @@
/* #define __ARCH_WANT_OLD_READDIR */
/* #define __ARCH_WANT_OLD_STAT */
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -26,7 +27,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
/* #define __ARCH_WANT_SYS_OLD_GETRLIMIT */
#define __ARCH_WANT_SYS_OLDUMOUNT
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
index 96b3f26d16be..ef2f49471a2a 100644
--- a/arch/microblaze/kernel/cpu/cpuinfo.c
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -89,9 +89,9 @@ static struct device_node *cpu;
void __init setup_cpuinfo(void)
{
- cpu = (struct device_node *) of_find_node_by_type(NULL, "cpu");
+ cpu = of_get_cpu_node(0, NULL);
if (!cpu)
- pr_err("You don't have cpu!!!\n");
+ pr_err("You don't have cpu or are missing cpu reg property!!!\n");
pr_info("%s: initialising\n", __func__);
@@ -117,6 +117,8 @@ void __init setup_cpuinfo(void)
if (cpuinfo.mmu_privins)
pr_warn("%s: Stream instructions enabled"
" - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
+
+ of_node_put(cpu);
}
void __init setup_cpuinfo_clk(void)
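
setup_cpuinfo() now resolves the CPU through of_get_cpu_node(), which matches on device_type and the reg property rather than a bare name search, and the added of_node_put() drops the reference that lookup takes. The get/put pattern, sketched with a hypothetical helper:

#include <linux/errno.h>
#include <linux/of.h>

/* Illustrative only: look up logical CPU 0, use the node, then drop
 * the reference of_get_cpu_node() acquired.
 */
static int probe_cpu0(void)
{
	struct device_node *cpu = of_get_cpu_node(0, NULL);

	if (!cpu)
		return -ENOENT;

	/* ... read timebase-frequency and friends from the node ... */

	of_node_put(cpu);
	return 0;
}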
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 77c022e56e6e..53e75ddbba1c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -21,6 +21,7 @@ config MIPS
select GENERIC_CLOCKEVENTS
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_LIB_ASHLDI3
@@ -28,7 +29,6 @@ config MIPS
select GENERIC_LIB_CMPDI2
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
- select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
@@ -78,6 +78,7 @@ config MIPS
select RTC_LIB if !MACH_LOONGSON64
select SYSCTL_EXCEPTION_TRACE
select VIRT_TO_BUS
+ select NO_BOOTMEM
menu "Machine selection"
@@ -132,6 +133,7 @@ config MIPS_GENERIC
select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USE_OF
+ select UHI_BOOT
help
Select this to build a kernel which aims to support multiple boards,
generally using a flattened device tree passed from the bootloader
@@ -1149,6 +1151,7 @@ config NO_IOPORT_MAP
config GENERIC_CSUM
bool
+ default y if !CPU_HAS_LOAD_STORE_LR
config GENERIC_ISA_DMA
bool
@@ -1367,6 +1370,7 @@ config CPU_LOONGSON3
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select MIPS_PGD_C0_CONTEXT
@@ -1443,6 +1447,7 @@ config CPU_MIPS32_R1
bool "MIPS32 Release 1"
depends on SYS_HAS_CPU_MIPS32_R1
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
help
@@ -1460,6 +1465,7 @@ config CPU_MIPS32_R2
bool "MIPS32 Release 2"
depends on SYS_HAS_CPU_MIPS32_R2
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
@@ -1478,7 +1484,6 @@ config CPU_MIPS32_R6
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
- select GENERIC_CSUM
select HAVE_KVM
select MIPS_O32_FP64_SUPPORT
help
@@ -1491,6 +1496,7 @@ config CPU_MIPS64_R1
bool "MIPS64 Release 1"
depends on SYS_HAS_CPU_MIPS64_R1
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1510,6 +1516,7 @@ config CPU_MIPS64_R2
bool "MIPS64 Release 2"
depends on SYS_HAS_CPU_MIPS64_R2
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1531,7 +1538,6 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
- select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
select HAVE_KVM
help
@@ -1544,6 +1550,7 @@ config CPU_R3000
bool "R3000"
depends on SYS_HAS_CPU_R3000
select CPU_HAS_WB
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
help
@@ -1558,12 +1565,14 @@ config CPU_TX39XX
bool "R39XX"
depends on SYS_HAS_CPU_TX39XX
select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_HAS_LOAD_STORE_LR
config CPU_VR41XX
bool "R41xx"
depends on SYS_HAS_CPU_VR41XX
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_HAS_LOAD_STORE_LR
help
	  This option selects support for the NEC VR4100 series of processors.
Only choose this option if you have one of these processors as a
@@ -1575,6 +1584,7 @@ config CPU_R4300
depends on SYS_HAS_CPU_R4300
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R4300-series processors.
@@ -1584,6 +1594,7 @@ config CPU_R4X00
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
help
	  MIPS Technologies R4000-series processors other than the R4300, including
	  the R4000, R4400, R4600, and R4700.
@@ -1592,6 +1603,7 @@ config CPU_TX49XX
bool "R49XX"
depends on SYS_HAS_CPU_TX49XX
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
@@ -1602,6 +1614,7 @@ config CPU_R5000
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R5000-series processors other than the Nevada.
@@ -1611,6 +1624,7 @@ config CPU_R5432
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
config CPU_R5500
bool "R5500"
@@ -1618,6 +1632,7 @@ config CPU_R5500
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
help
	  NEC VR5500 and VR5500A series processors implement the 64-bit MIPS IV
instruction set.
@@ -1628,6 +1643,7 @@ config CPU_NEVADA
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_HAS_LOAD_STORE_LR
help
QED / PMC-Sierra RM52xx-series ("Nevada") processors.
@@ -1635,6 +1651,7 @@ config CPU_R8000
bool "R8000"
depends on SYS_HAS_CPU_R8000
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_64BIT_KERNEL
help
MIPS Technologies R8000 processors. Note these processors are
@@ -1644,6 +1661,7 @@ config CPU_R10000
bool "R10000"
depends on SYS_HAS_CPU_R10000
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1655,6 +1673,7 @@ config CPU_RM7000
bool "RM7000"
depends on SYS_HAS_CPU_RM7000
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1663,6 +1682,7 @@ config CPU_RM7000
config CPU_SB1
bool "SB1"
depends on SYS_HAS_CPU_SB1
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1673,6 +1693,7 @@ config CPU_CAVIUM_OCTEON
bool "Cavium Octeon processor"
depends on SYS_HAS_CPU_CAVIUM_OCTEON
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_64BIT_KERNEL
select WEAK_ORDERING
select CPU_SUPPORTS_HIGHMEM
@@ -1702,6 +1723,7 @@ config CPU_BMIPS
select WEAK_ORDERING
select CPU_SUPPORTS_HIGHMEM
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_CPUFREQ
select MIPS_EXTERNAL_TIMER
help
@@ -1710,6 +1732,7 @@ config CPU_BMIPS
config CPU_XLR
bool "Netlogic XLR SoC"
depends on SYS_HAS_CPU_XLR
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
@@ -1728,6 +1751,7 @@ config CPU_XLP
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_MIPSR2
select CPU_SUPPORTS_HUGEPAGES
select MIPS_ASID_BITS_VARIABLE
@@ -1833,12 +1857,14 @@ config CPU_LOONGSON2
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select ARCH_HAS_PHYS_TO_DMA
+ select CPU_HAS_LOAD_STORE_LR
config CPU_LOONGSON1
bool
select CPU_MIPS32
select CPU_MIPSR1
select CPU_HAS_PREFETCH
+ select CPU_HAS_LOAD_STORE_LR
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_CPUFREQ
@@ -2452,6 +2478,13 @@ config XKS01
config CPU_HAS_RIXI
bool
+config CPU_HAS_LOAD_STORE_LR
+ bool
+ help
+ CPU has support for unaligned load and store instructions:
+ LWL, LWR, SWL, SWR (Load/store word left/right).
+	  LDL, LDR, SDL, SDR (Load/store doubleword left/right, for 64-bit systems).
+
#
# Vectored interrupt mode is an R2 feature
#
@@ -2899,6 +2932,9 @@ config USE_OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
+config UHI_BOOT
+ bool
+
config BUILTIN_DTB
bool
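
The new CPU_HAS_LOAD_STORE_LR symbol gates the LWL/LWR/SWL/SWR pairs (plus LDL/LDR/SDL/SDR on 64-bit cores); MIPS R6 dropped those encodings, which is why the GENERIC_CSUM hunk above defaults to y when the symbol is absent. A userspace sketch of what the instruction pair accomplishes — an unaligned 32-bit load — with memcpy standing in for the hardware path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Portable stand-in for an LWL/LWR pair: a 32-bit load from an
 * address with no alignment guarantee.
 */
static uint32_t load_unaligned_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t buf[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };

	/* Prints 0x55443322 on a little-endian host. */
	printf("0x%08x\n", load_unaligned_u32(buf + 1));
	return 0;
}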
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index d74b3742fa5d..15a84cfd0719 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -13,6 +13,7 @@
#
archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/mips/tools elf-entry
$(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
KBUILD_DEFCONFIG := 32r2el_defconfig
@@ -230,6 +231,8 @@ toolchain-xpa := $(call cc-option-yn,$(xpa-cflags-y) -mxpa)
cflags-$(toolchain-xpa) += -DTOOLCHAIN_SUPPORTS_XPA
toolchain-crc := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mcrc)
cflags-$(toolchain-crc) += -DTOOLCHAIN_SUPPORTS_CRC
+toolchain-dsp := $(call cc-option-yn,$(mips-cflags) -Wa$(comma)-mdsp)
+cflags-$(toolchain-dsp) += -DTOOLCHAIN_SUPPORTS_DSP
#
# Firmware support
@@ -257,13 +260,7 @@ ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
-# Sign-extend the entry point to 64 bits if retrieved as a 32-bit number.
-entry-y = $(shell $(OBJDUMP) -f vmlinux 2>/dev/null \
- | sed -n '/^start address / { \
- s/^.* //; \
- s/0x\([0-7].......\)$$/0x00000000\1/; \
- s/0x\(........\)$$/0xffffffff\1/; p }')
-
+entry-y = $(shell $(objtree)/arch/mips/tools/elf-entry vmlinux)
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
@@ -407,18 +404,7 @@ endif
CLEAN_FILES += vmlinux.32 vmlinux.64
# device-trees
-core-$(CONFIG_BUILTIN_DTB) += arch/mips/boot/dts/
-
-%.dtb %.dtb.S %.dtb.o: | scripts
- $(Q)$(MAKE) $(build)=arch/mips/boot/dts arch/mips/boot/dts/$@
-
-PHONY += dtbs
-dtbs: scripts
- $(Q)$(MAKE) $(build)=arch/mips/boot/dts
-
-PHONY += dtbs_install
-dtbs_install:
- $(Q)$(MAKE) $(dtbinst)=arch/mips/boot/dts
+core-y += arch/mips/boot/dts/
archprepare:
ifdef CONFIG_MIPS32_N32
@@ -461,8 +447,6 @@ define archhelp
echo ' uImage.lzma - U-Boot image (lzma)'
echo ' uImage.lzo - U-Boot image (lzo)'
echo ' uzImage.bin - U-Boot image (self-extracting)'
- echo ' dtbs - Device-tree blobs for enabled boards'
- echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)'
echo
echo ' These will be default as appropriate for a configured platform.'
echo
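
The deleted sed pipeline sign-extended a 32-bit vmlinux start address into canonical 64-bit form; the new arch/mips/tools/elf-entry helper reads the entry point from the ELF header instead. In C, the extension the script performed reduces to a single cast chain (helper name illustrative):

#include <stdint.h>
#include <stdio.h>

/* 0x80100000 -> 0xffffffff80100000 and 0x00400000 -> 0x0000000000400000,
 * matching the removed 0x[0-7]....... / 0x[8-f]....... sed rules.
 */
static uint64_t entry64(uint32_t entry)
{
	return (uint64_t)(int64_t)(int32_t)entry;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)entry64(0x80100000u));
	printf("0x%016llx\n", (unsigned long long)entry64(0x00400000u));
	return 0;
}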
diff --git a/arch/mips/bcm47xx/workarounds.c b/arch/mips/bcm47xx/workarounds.c
index 1a8a07e7a563..46eddbec8d9f 100644
--- a/arch/mips/bcm47xx/workarounds.c
+++ b/arch/mips/bcm47xx/workarounds.c
@@ -5,9 +5,8 @@
#include <bcm47xx_board.h>
#include <bcm47xx.h>
-static void __init bcm47xx_workarounds_netgear_wnr3500l(void)
+static void __init bcm47xx_workarounds_enable_usb_power(int usb_power)
{
- const int usb_power = 12;
int err;
err = gpio_request_one(usb_power, GPIOF_OUT_INIT_HIGH, "usb_power");
@@ -23,7 +22,10 @@ void __init bcm47xx_workarounds(void)
switch (board) {
case BCM47XX_BOARD_NETGEAR_WNR3500L:
- bcm47xx_workarounds_netgear_wnr3500l();
+ bcm47xx_workarounds_enable_usb_power(12);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WNDR3400_V3:
+ bcm47xx_workarounds_enable_usb_power(21);
break;
default:
/* No workaround(s) needed */
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 231fc5ce375e..6329c5f780d6 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -153,8 +153,6 @@ void __init plat_time_init(void)
mips_hpt_frequency = freq;
}
-extern const char __appended_dtb;
-
void __init plat_mem_setup(void)
{
void *dtb;
@@ -164,15 +162,10 @@ void __init plat_mem_setup(void)
ioport_resource.start = 0;
ioport_resource.end = ~0;
-#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
- if (!fdt_check_header(&__appended_dtb))
- dtb = (void *)&__appended_dtb;
- else
-#endif
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
- else if (fw_passed_dtb) /* UHI interface */
+ else if (fw_passed_dtb) /* UHI interface or appended dtb */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 26c6b561d6f7..6fb16fd24035 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -154,6 +154,21 @@
clock-names = "baud", "module";
};
+ dmac: dma-controller@13020000 {
+ compatible = "ingenic,jz4740-dma";
+ reg = <0x13020000 0xbc
+ 0x13020300 0x14>;
+ #dma-cells = <2>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <29>;
+
+ clocks = <&cgu JZ4740_CLK_DMA>;
+
+ /* Disable dmac until we have something that uses it */
+ status = "disabled";
+ };
+
uhc: uhc@13030000 {
compatible = "ingenic,jz4740-ohci", "generic-ohci";
reg = <0x13030000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
index 7c2804f3f5f1..49ede6c14ff3 100644
--- a/arch/mips/boot/dts/ingenic/jz4770.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -196,6 +196,36 @@
status = "disabled";
};
+ dmac0: dma-controller@13420000 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420000 0xC0
+ 0x13420300 0x20>;
+
+ #dma-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <24>;
+
+ /* Disable dmac0 until we have something that uses it */
+ status = "disabled";
+ };
+
+ dmac1: dma-controller@13420100 {
+ compatible = "ingenic,jz4770-dma";
+ reg = <0x13420100 0xC0
+ 0x13420400 0x20>;
+
+ #dma-cells = <1>;
+
+ clocks = <&cgu JZ4770_CLK_DMA>;
+ interrupt-parent = <&intc>;
+ interrupts = <23>;
+
+ /* Disable dmac1 until we have something that uses it */
+ status = "disabled";
+ };
+
uhc: uhc@13430000 {
compatible = "generic-ohci";
reg = <0x13430000 0x1000>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index ce93d57f1b4d..b03cdec56de9 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -266,7 +266,8 @@
dma: dma@13420000 {
compatible = "ingenic,jz4780-dma";
- reg = <0x13420000 0x10000>;
+ reg = <0x13420000 0x400
+ 0x13421000 0x40>;
#dma-cells = <2>;
interrupt-parent = <&intc>;
diff --git a/arch/mips/boot/dts/lantiq/danube.dtsi b/arch/mips/boot/dts/lantiq/danube.dtsi
index 2dd950181f8a..510be63c8bdf 100644
--- a/arch/mips/boot/dts/lantiq/danube.dtsi
+++ b/arch/mips/boot/dts/lantiq/danube.dtsi
@@ -10,12 +10,12 @@
};
};
- biu@1F800000 {
+ biu@1f800000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,biu", "simple-bus";
- reg = <0x1F800000 0x800000>;
- ranges = <0x0 0x1F800000 0x7FFFFF>;
+ reg = <0x1f800000 0x800000>;
+ ranges = <0x0 0x1f800000 0x7fffff>;
icu0: icu@80200 {
#interrupt-cells = <1>;
@@ -24,18 +24,18 @@
reg = <0x80200 0x120>;
};
- watchdog@803F0 {
+ watchdog@803f0 {
compatible = "lantiq,wdt";
- reg = <0x803F0 0x10>;
+ reg = <0x803f0 0x10>;
};
};
- sram@1F000000 {
+ sram@1f000000 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,sram";
- reg = <0x1F000000 0x800000>;
- ranges = <0x0 0x1F000000 0x7FFFFF>;
+ reg = <0x1f000000 0x800000>;
+ ranges = <0x0 0x1f000000 0x7fffff>;
eiu0: eiu@101000 {
#interrupt-cells = <1>;
@@ -66,41 +66,41 @@
#address-cells = <1>;
#size-cells = <1>;
compatible = "lantiq,fpi", "simple-bus";
- ranges = <0x0 0x10000000 0xEEFFFFF>;
- reg = <0x10000000 0xEF00000>;
+ ranges = <0x0 0x10000000 0xeefffff>;
+ reg = <0x10000000 0xef00000>;
- gptu@E100A00 {
+ gptu@e100a00 {
compatible = "lantiq,gptu-xway";
- reg = <0xE100A00 0x100>;
+ reg = <0xe100a00 0x100>;
};
- serial@E100C00 {
+ serial@e100c00 {
compatible = "lantiq,asc";
- reg = <0xE100C00 0x400>;
+ reg = <0xe100c00 0x400>;
interrupt-parent = <&icu0>;
interrupts = <112 113 114>;
};
- dma0: dma@E104100 {
+ dma0: dma@e104100 {
compatible = "lantiq,dma-xway";
- reg = <0xE104100 0x800>;
+ reg = <0xe104100 0x800>;
};
- ebu0: ebu@E105300 {
+ ebu0: ebu@e105300 {
compatible = "lantiq,ebu-xway";
- reg = <0xE105300 0x100>;
+ reg = <0xe105300 0x100>;
};
- pci0: pci@E105400 {
+ pci0: pci@e105400 {
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
compatible = "lantiq,pci-xway";
bus-range = <0x0 0x0>;
ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
- 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+ 0x1000000 0 0x00000000 0xae00000 0 0x200000>; /* io space */
reg = <0x7000000 0x8000 /* config space */
- 0xE105400 0x400>; /* pci bridge */
+ 0xe105400 0x400>; /* pci bridge */
};
};
};
diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts
index c37a33962f28..1ce20b7d05cb 100644
--- a/arch/mips/boot/dts/lantiq/easy50712.dts
+++ b/arch/mips/boot/dts/lantiq/easy50712.dts
@@ -52,14 +52,14 @@
};
};
- gpio: pinmux@E100B10 {
+ gpio: pinmux@e100b10 {
compatible = "lantiq,danube-pinctrl";
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
#gpio-cells = <2>;
gpio-controller;
- reg = <0xE100B10 0xA0>;
+ reg = <0xe100b10 0xa0>;
state_default: pinmux {
stp {
@@ -82,26 +82,26 @@
};
};
- etop@E180000 {
+ etop@e180000 {
compatible = "lantiq,etop-xway";
- reg = <0xE180000 0x40000>;
+ reg = <0xe180000 0x40000>;
interrupt-parent = <&icu0>;
interrupts = <73 78>;
phy-mode = "rmii";
mac-address = [ 00 11 22 33 44 55 ];
};
- stp0: stp@E100BB0 {
+ stp0: stp@e100bb0 {
#gpio-cells = <2>;
compatible = "lantiq,gpio-stp-xway";
gpio-controller;
- reg = <0xE100BB0 0x40>;
+ reg = <0xe100bb0 0x40>;
lantiq,shadow = <0xfff>;
lantiq,groups = <0x3>;
};
- pci@E105400 {
+ pci@e105400 {
lantiq,bus-clock = <33333333>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
diff --git a/arch/mips/boot/dts/mscc/Makefile b/arch/mips/boot/dts/mscc/Makefile
index 9a9bb7ea0503..ec6f5b2bf093 100644
--- a/arch/mips/boot/dts/mscc/Makefile
+++ b/arch/mips/boot/dts/mscc/Makefile
@@ -1,3 +1,3 @@
-dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb
+dtb-$(CONFIG_MSCC_OCELOT) += ocelot_pcb123.dtb ocelot_pcb120.dtb
obj-$(CONFIG_BUILTIN_DTB) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index 8ce317c5b9ed..90c60d42f571 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -78,6 +78,19 @@
status = "disabled";
};
+ i2c: i2c@100400 {
+ compatible = "mscc,ocelot-i2c", "snps,designware-i2c";
+ pinctrl-0 = <&i2c_pins>;
+ pinctrl-names = "default";
+ reg = <0x100400 0x100>, <0x198 0x8>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupts = <8>;
+ clocks = <&ahb_clk>;
+
+ status = "disabled";
+ };
+
uart2: serial@100800 {
pinctrl-0 = <&uart2_pins>;
pinctrl-names = "default";
@@ -182,6 +195,11 @@
interrupts = <13>;
#interrupt-cells = <2>;
+ i2c_pins: i2c-pins {
+ pins = "GPIO_16", "GPIO_17";
+ function = "twi";
+ };
+
uart_pins: uart-pins {
pins = "GPIO_6", "GPIO_7";
function = "uart";
@@ -196,6 +214,7 @@
pins = "GPIO_14", "GPIO_15";
function = "miim1";
};
+
};
mdio0: mdio@107009c {
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb120.dts b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
new file mode 100644
index 000000000000..33991fd209f5
--- /dev/null
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb120.dts
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2017 Microsemi Corporation */
+
+/dts-v1/;
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/phy/phy-ocelot-serdes.h>
+#include "ocelot.dtsi"
+
+/ {
+ compatible = "mscc,ocelot-pcb120", "mscc,ocelot";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x0e000000>;
+ };
+};
+
+&gpio {
+ phy_int_pins: phy_int_pins {
+ pins = "GPIO_4";
+ function = "gpio";
+ };
+};
+
+&mdio0 {
+ status = "okay";
+};
+
+&mdio1 {
+ status = "okay";
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1>, <&phy_int_pins>;
+
+ phy7: ethernet-phy@0 {
+ reg = <0>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ };
+ phy6: ethernet-phy@1 {
+ reg = <1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ };
+ phy5: ethernet-phy@2 {
+ reg = <2>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ };
+ phy4: ethernet-phy@3 {
+ reg = <3>;
+ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&gpio>;
+ };
+};
+
+&port0 {
+ phy-handle = <&phy0>;
+};
+
+&port1 {
+ phy-handle = <&phy1>;
+};
+
+&port2 {
+ phy-handle = <&phy2>;
+};
+
+&port3 {
+ phy-handle = <&phy3>;
+};
+
+&port4 {
+ phy-handle = <&phy7>;
+ phy-mode = "sgmii";
+ phys = <&serdes 4 SERDES1G(2)>;
+};
+
+&port5 {
+ phy-handle = <&phy4>;
+ phy-mode = "sgmii";
+ phys = <&serdes 5 SERDES1G(5)>;
+};
+
+&port6 {
+ phy-handle = <&phy6>;
+ phy-mode = "sgmii";
+ phys = <&serdes 6 SERDES1G(3)>;
+};
+
+&port9 {
+ phy-handle = <&phy5>;
+ phy-mode = "sgmii";
+ phys = <&serdes 9 SERDES1G(4)>;
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
index 2266027759f9..ef852f382da8 100644
--- a/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
+++ b/arch/mips/boot/dts/mscc/ocelot_pcb123.dts
@@ -36,6 +36,12 @@
};
};
+&i2c {
+ clock-frequency = <100000>;
+ i2c-sda-hold-time-ns = <300>;
+ status = "okay";
+};
+
&mdio0 {
status = "okay";
};
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 8272d8c648ca..cc1d8525e651 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1180,8 +1180,8 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d,
type = IRQ_TYPE_LEVEL_LOW;
break;
default:
- pr_err("Error: (%s) Invalid irq trigger specification: %x\n",
- node->name,
+ pr_err("Error: (%pOFn) Invalid irq trigger specification: %x\n",
+ node,
trigger);
type = IRQ_TYPE_LEVEL_LOW;
break;
@@ -2271,8 +2271,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
parent_irq = irq_of_parse_and_map(ciu_node, 0);
if (!parent_irq) {
- pr_err("ERROR: Couldn't acquire parent_irq for %s\n",
- ciu_node->name);
+ pr_err("ERROR: Couldn't acquire parent_irq for %pOFn\n",
+ ciu_node);
return -EINVAL;
}
@@ -2283,7 +2283,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
addr = of_get_address(ciu_node, 0, NULL, NULL);
if (!addr) {
- pr_err("ERROR: Couldn't acquire reg(0) %s\n", ciu_node->name);
+ pr_err("ERROR: Couldn't acquire reg(0) %pOFn\n", ciu_node);
return -EINVAL;
}
host_data->raw_reg = (u64)phys_to_virt(
@@ -2291,7 +2291,7 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
addr = of_get_address(ciu_node, 1, NULL, NULL);
if (!addr) {
- pr_err("ERROR: Couldn't acquire reg(1) %s\n", ciu_node->name);
+ pr_err("ERROR: Couldn't acquire reg(1) %pOFn\n", ciu_node);
return -EINVAL;
}
host_data->en_reg = (u64)phys_to_virt(
@@ -2299,8 +2299,8 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
if (r) {
- pr_err("ERROR: Couldn't read cavium,max-bits from %s\n",
- ciu_node->name);
+ pr_err("ERROR: Couldn't read cavium,max-bits from %pOFn\n",
+ ciu_node);
return r;
}
host_data->max_bits = val;
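
Every hunk in octeon-irq.c makes the same substitution: pass the device_node itself with the %pOFn printk specifier instead of dereferencing node->name, so the messages keep working once the cached name pointer goes away. A sketch of the idiom; the function is hypothetical, the specifiers are real:

#include <linux/of.h>
#include <linux/printk.h>
#include <linux/types.h>

static void report_bad_trigger(struct device_node *np, u32 trigger)
{
	/* %pOFn prints the node name, %pOF the full path. */
	pr_err("(%pOFn) invalid irq trigger %x (node %pOF)\n",
	       np, trigger, np);
}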
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index c2426232db06..dfb95cffef3e 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -1161,15 +1161,12 @@ void __init device_tree_init(void)
bool do_prune;
bool fill_mac;
-#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
- if (!fdt_check_header(&__appended_dtb)) {
- fdt = &__appended_dtb;
+ if (fw_passed_dtb) {
+ fdt = (void *)fw_passed_dtb;
do_prune = false;
fill_mac = true;
pr_info("Using appended Device Tree.\n");
- } else
-#endif
- if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
+ } else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 75e7c8625659..39f2a2ec1286 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -15,6 +15,7 @@
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
+#include <linux/kexec.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
@@ -424,6 +425,9 @@ const struct plat_smp_ops octeon_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
};
static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
@@ -501,6 +505,9 @@ static const struct plat_smp_ops octeon_78xx_smp_ops = {
.cpu_disable = octeon_cpu_disable,
.cpu_die = octeon_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
};
void __init octeon_setup_smp(void)
diff --git a/arch/mips/configs/generic/board-ocelot.config b/arch/mips/configs/generic/board-ocelot.config
index aa815761d85e..f607888d2483 100644
--- a/arch/mips/configs/generic/board-ocelot.config
+++ b/arch/mips/configs/generic/board-ocelot.config
@@ -18,17 +18,25 @@ CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_GPIO_SYSFS=y
+CONFIG_NETDEVICES=y
+CONFIG_MSCC_OCELOT_SWITCH=y
+CONFIG_MSCC_OCELOT_SWITCH_OCELOT=y
+CONFIG_MDIO_MSCC_MIIM=y
+CONFIG_MICROSEMI_PHY=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_SPI=y
CONFIG_SPI_BITBANG=y
CONFIG_SPI_DESIGNWARE=y
+CONFIG_SPI_DW_MMIO=y
CONFIG_SPI_SPIDEV=y
+CONFIG_GPIO_SYSFS=y
+
CONFIG_POWER_RESET=y
CONFIG_POWER_RESET_OCELOT_RESET=y
diff --git a/arch/mips/generic/Kconfig b/arch/mips/generic/Kconfig
index 08e33c6b2539..fd6019802657 100644
--- a/arch/mips/generic/Kconfig
+++ b/arch/mips/generic/Kconfig
@@ -65,11 +65,11 @@ config FIT_IMAGE_FDT_XILFPGA
Enable this to include the FDT for the MIPSfpga platform
from Imagination Technologies in the FIT kernel image.
-config FIT_IMAGE_FDT_OCELOT_PCB123
- bool "Include FDT for Microsemi Ocelot PCB123"
+config FIT_IMAGE_FDT_OCELOT
+ bool "Include FDT for Microsemi Ocelot development platforms"
select MSCC_OCELOT
help
- Enable this to include the FDT for the Ocelot PCB123 platform
+ Enable this to include the FDT for the Ocelot development platforms
from Microsemi in the FIT kernel image.
This requires u-boot on the platform.
diff --git a/arch/mips/generic/Makefile b/arch/mips/generic/Makefile
index d03a36f869a4..181aa1335419 100644
--- a/arch/mips/generic/Makefile
+++ b/arch/mips/generic/Makefile
@@ -15,5 +15,4 @@ obj-y += proc.o
obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o
obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
obj-$(CONFIG_LEGACY_BOARD_OCELOT) += board-ocelot.o
-obj-$(CONFIG_KEXEC) += kexec.o
obj-$(CONFIG_VIRT_BOARD_RANCHU) += board-ranchu.o
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index 879cb80396c8..eaa19d189324 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -16,5 +16,5 @@ all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
its-$(CONFIG_FIT_IMAGE_FDT_NI169445) += board-ni169445.its.S
-its-$(CONFIG_FIT_IMAGE_FDT_OCELOT_PCB123) += board-ocelot_pcb123.its.S
+its-$(CONFIG_FIT_IMAGE_FDT_OCELOT) += board-ocelot.its.S
its-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += board-xilfpga.its.S
diff --git a/arch/mips/generic/board-ocelot_pcb123.its.S b/arch/mips/generic/board-ocelot.its.S
index 5a7d5e1c878a..3da23988149a 100644
--- a/arch/mips/generic/board-ocelot_pcb123.its.S
+++ b/arch/mips/generic/board-ocelot.its.S
@@ -11,6 +11,17 @@
algo = "sha1";
};
};
+
+ fdt@ocelot_pcb120 {
+ description = "MSCC Ocelot PCB120 Device Tree";
+ data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
+ type = "flat_dt";
+ arch = "mips";
+ compression = "none";
+ hash@0 {
+ algo = "sha1";
+ };
+ };
};
configurations {
@@ -19,5 +30,11 @@
kernel = "kernel@0";
fdt = "fdt@ocelot_pcb123";
};
+
+ conf@ocelot_pcb120 {
+ description = "Ocelot Linux kernel";
+ kernel = "kernel@0";
+ fdt = "fdt@ocelot_pcb120";
+ };
};
};
diff --git a/arch/mips/generic/kexec.c b/arch/mips/generic/kexec.c
deleted file mode 100644
index 1ca409f58929..000000000000
--- a/arch/mips/generic/kexec.c
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (C) 2016 Imagination Technologies
- * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/kexec.h>
-#include <linux/libfdt.h>
-#include <linux/uaccess.h>
-
-static int generic_kexec_prepare(struct kimage *image)
-{
- int i;
-
- for (i = 0; i < image->nr_segments; i++) {
- struct fdt_header fdt;
-
- if (image->segment[i].memsz <= sizeof(fdt))
- continue;
-
- if (copy_from_user(&fdt, image->segment[i].buf, sizeof(fdt)))
- continue;
-
- if (fdt_check_header(&fdt))
- continue;
-
- kexec_args[0] = -2;
- kexec_args[1] = (unsigned long)
- phys_to_virt((unsigned long)image->segment[i].mem);
- break;
- }
- return 0;
-}
-
-static int __init register_generic_kexec(void)
-{
- _machine_kexec_prepare = generic_kexec_prepare;
- return 0;
-}
-arch_initcall(register_generic_kexec);
diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
index 1e38f0e1ea3e..d80be38c4144 100644
--- a/arch/mips/include/asm/asm-eva.h
+++ b/arch/mips/include/asm/asm-eva.h
@@ -15,6 +15,7 @@
/* Kernel variants */
#define kernel_cache(op, base) "cache " op ", " base "\n"
+#define kernel_pref(hint, base) "pref " hint ", " base "\n"
#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
@@ -51,6 +52,7 @@
" .set pop\n"
#define user_cache(op, base) __BUILD_EVA_INSN("cachee", op, base)
+#define user_pref(hint, base) __BUILD_EVA_INSN("prefe", hint, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN("lle", reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN("sce", reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN("lwe", reg, addr)
@@ -72,6 +74,7 @@
#else
#define user_cache(op, base) kernel_cache(op, base)
+#define user_pref(hint, base) kernel_pref(hint, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
@@ -99,6 +102,7 @@
#else /* __ASSEMBLY__ */
#define kernel_cache(op, base) cache op, base
+#define kernel_pref(hint, base) pref hint, base
#define kernel_ll(reg, addr) ll reg, addr
#define kernel_sc(reg, addr) sc reg, addr
#define kernel_lw(reg, addr) lw reg, addr
@@ -135,6 +139,7 @@
.set pop;
#define user_cache(op, base) __BUILD_EVA_INSN(cachee, op, base)
+#define user_pref(hint, base) __BUILD_EVA_INSN(prefe, hint, base)
#define user_ll(reg, addr) __BUILD_EVA_INSN(lle, reg, addr)
#define user_sc(reg, addr) __BUILD_EVA_INSN(sce, reg, addr)
#define user_lw(reg, addr) __BUILD_EVA_INSN(lwe, reg, addr)
@@ -155,6 +160,7 @@
#else
#define user_cache(op, base) kernel_cache(op, base)
+#define user_pref(hint, base) kernel_pref(hint, base)
#define user_ll(reg, addr) kernel_ll(reg, addr)
#define user_sc(reg, addr) kernel_sc(reg, addr)
#define user_lw(reg, addr) kernel_lw(reg, addr)
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 81fae23ce7cd..c23527ba65d0 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -20,32 +20,6 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
-#ifndef CAT
-#ifdef __STDC__
-#define __CAT(str1, str2) str1##str2
-#else
-#define __CAT(str1, str2) str1/**/str2
-#endif
-#define CAT(str1, str2) __CAT(str1, str2)
-#endif
-
-/*
- * PIC specific declarations
- * Not used for the kernel but here seems to be the right place.
- */
-#ifdef __PIC__
-#define CPRESTORE(register) \
- .cprestore register
-#define CPADD(register) \
- .cpadd register
-#define CPLOAD(register) \
- .cpload register
-#else
-#define CPRESTORE(register)
-#define CPADD(register)
-#define CPLOAD(register)
-#endif
-
/*
* LEAF - declare leaf routine
*/
@@ -130,96 +104,6 @@ symbol = value
.popsection;
/*
- * Build text tables
- */
-#define TTABLE(string) \
- .pushsection .text; \
- .word 1f; \
- .popsection \
- .pushsection .data; \
-1: .asciiz string; \
- .popsection
-
-/*
- * MIPS IV pref instruction.
- * Use with .set noreorder only!
- *
- * MIPS IV implementations are free to treat this as a nop. The R5000
- * is one of them. So we should have an option not to use this instruction.
- */
-#ifdef CONFIG_CPU_HAS_PREFETCH
-
-#define PREF(hint,addr) \
- .set push; \
- .set arch=r5000; \
- pref hint, addr; \
- .set pop
-
-#define PREFE(hint, addr) \
- .set push; \
- .set mips0; \
- .set eva; \
- prefe hint, addr; \
- .set pop
-
-#define PREFX(hint,addr) \
- .set push; \
- .set arch=r5000; \
- prefx hint, addr; \
- .set pop
-
-#else /* !CONFIG_CPU_HAS_PREFETCH */
-
-#define PREF(hint, addr)
-#define PREFE(hint, addr)
-#define PREFX(hint, addr)
-
-#endif /* !CONFIG_CPU_HAS_PREFETCH */
-
-/*
- * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
- */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
- (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
- movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
- movz rd, rs, rt
-#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
-
-/*
* Stack alignment
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index 78675f19440f..c99166eadbde 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -9,43 +9,25 @@
#include <asm/page.h>
#include <asm/ptrace.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "mips\0\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_suseconds_t;
-
-typedef s32 compat_pid_t;
typedef s32 __compat_uid_t;
typedef s32 __compat_gid_t;
typedef __compat_uid_t __compat_uid32_t;
typedef __compat_gid_t __compat_gid32_t;
typedef u32 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u32 compat_nlink_t;
typedef s32 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef s32 compat_caddr_t;
typedef struct {
s32 val[2];
} compat_fsid_t;
-typedef s32 compat_timer_t;
-typedef s32 compat_key_t;
-
-typedef s16 compat_short_t;
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u16 compat_ushort_t;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -59,11 +41,11 @@ struct compat_stat {
s32 st_pad2[2];
compat_off_t st_size;
s32 st_pad3;
- compat_time_t st_atime;
+ old_time32_t st_atime;
s32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
s32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
s32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 54c730aed327..266257d56fb6 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -20,6 +20,7 @@
#include <linux/irqflags.h>
#include <asm/addrspace.h>
+#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -34,11 +35,6 @@
#include <mangle-port.h>
/*
- * Slowdown I/O port space accesses for antique hardware.
- */
-#undef CONF_SLOWDOWN_IO
-
-/*
* Raw operations are never swapped in software. OTOH values that raw
* operations are working on may or may not have been swapped by the bus
* hardware. An example use would be for flash memory that's used for
@@ -50,6 +46,11 @@
# define __raw_ioswabq(a, x) (x)
# define ____raw_ioswabq(a, x) (x)
+# define __relaxed_ioswabb ioswabb
+# define __relaxed_ioswabw ioswabw
+# define __relaxed_ioswabl ioswabl
+# define __relaxed_ioswabq ioswabq
+
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */
#define IO_SPACE_LIMIT 0xffff
@@ -80,31 +81,29 @@ static inline void set_io_port_base(unsigned long base)
}
/*
- * Thanks to James van Artsdalen for a better timing-fix than
- * the two short jumps: using outb's to a nonexistent port seems
- * to guarantee better timings even on fast machines.
- *
- * On the other hand, I'd like to be sure of a non-existent port:
- * I feel a bit unsafe about using 0x80 (should be safe, though)
- *
- * Linus
- *
+ * Provide the necessary definitions for generic iomap. We make use of
+ * mips_io_port_base for iomap(), but we don't reserve any low addresses for
+ * use with I/O ports.
*/
-#define __SLOW_DOWN_IO \
- __asm__ __volatile__( \
- "sb\t$0,0x80(%0)" \
- : : "r" (mips_io_port_base));
+#define HAVE_ARCH_PIO_SIZE
+#define PIO_OFFSET mips_io_port_base
+#define PIO_MASK IO_SPACE_LIMIT
+#define PIO_RESERVED 0x0UL
-#ifdef CONF_SLOWDOWN_IO
-#ifdef REALLY_SLOW_IO
-#define SLOW_DOWN_IO { __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; __SLOW_DOWN_IO; }
-#else
-#define SLOW_DOWN_IO __SLOW_DOWN_IO
-#endif
-#else
-#define SLOW_DOWN_IO
-#endif
+/*
+ * Enforce in-order execution of data I/O. In the MIPS architecture
+ * these are equivalent to corresponding platform-specific memory
+ * barriers defined in <asm/barrier.h>. API pinched from PowerPC,
+ * with sync additionally defined.
+ */
+#define iobarrier_rw() mb()
+#define iobarrier_r() rmb()
+#define iobarrier_w() wmb()
+#define iobarrier_sync() iob()
+
+/* Some callers use this older API instead. */
+#define mmiowb() iobarrier_w()
/*
* virt_to_phys - map virtual addresses to physical
@@ -172,11 +171,6 @@ static inline void *isa_bus_to_virt(unsigned long address)
extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
-#ifndef CONFIG_PCI
-struct pci_dev;
-static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
-#endif
-
static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
unsigned long flags)
{
@@ -316,13 +310,13 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
-#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_CPU_LOONGSON3)
#define war_io_reorder_wmb() wmb()
#else
#define war_io_reorder_wmb() barrier()
#endif
-#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
+#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, barrier, relax, irq) \
\
static inline void pfx##write##bwlq(type val, \
volatile void __iomem *mem) \
@@ -330,7 +324,10 @@ static inline void pfx##write##bwlq(type val, \
volatile type *__mem; \
type __val; \
\
- war_io_reorder_wmb(); \
+ if (barrier) \
+ iobarrier_rw(); \
+ else \
+ war_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
@@ -367,6 +364,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
+ if (barrier) \
+ iobarrier_rw(); \
+ \
if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
@@ -390,18 +390,22 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
} \
\
/* prevent prefetching of coherent DMA data prematurely */ \
- rmb(); \
+ if (!relax) \
+ rmb(); \
return pfx##ioswab##bwlq(__mem, __val); \
}
-#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow) \
+#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, barrier, relax, p) \
\
static inline void pfx##out##bwlq##p(type val, unsigned long port) \
{ \
volatile type *__addr; \
type __val; \
\
- war_io_reorder_wmb(); \
+ if (barrier) \
+ iobarrier_rw(); \
+ else \
+ war_io_reorder_wmb(); \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
@@ -411,7 +415,6 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
*__addr = __val; \
- slow; \
} \
\
static inline type pfx##in##bwlq##p(unsigned long port) \
@@ -423,23 +426,27 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
\
BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \
\
+ if (barrier) \
+ iobarrier_rw(); \
+ \
__val = *__addr; \
- slow; \
\
/* prevent prefetching of coherent DMA data prematurely */ \
- rmb(); \
+ if (!relax) \
+ rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
-#define __BUILD_MEMORY_PFX(bus, bwlq, type) \
+#define __BUILD_MEMORY_PFX(bus, bwlq, type, relax) \
\
-__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)
+__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1, relax, 1)
#define BUILDIO_MEM(bwlq, type) \
\
-__BUILD_MEMORY_PFX(__raw_, bwlq, type) \
-__BUILD_MEMORY_PFX(, bwlq, type) \
-__BUILD_MEMORY_PFX(__mem_, bwlq, type) \
+__BUILD_MEMORY_PFX(__raw_, bwlq, type, 0) \
+__BUILD_MEMORY_PFX(__relaxed_, bwlq, type, 1) \
+__BUILD_MEMORY_PFX(__mem_, bwlq, type, 0) \
+__BUILD_MEMORY_PFX(, bwlq, type, 0)
BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
@@ -447,8 +454,8 @@ BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
#define __BUILD_IOPORT_PFX(bus, bwlq, type) \
- __BUILD_IOPORT_SINGLE(bus, bwlq, type, ,) \
- __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0,) \
+ __BUILD_IOPORT_SINGLE(bus, bwlq, type, 1, 0, _p)
#define BUILDIO_IOPORT(bwlq, type) \
__BUILD_IOPORT_PFX(, bwlq, type) \
@@ -463,19 +470,19 @@ BUILDIO_IOPORT(q, u64)
#define __BUILDIO(bwlq, type) \
\
-__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)
+__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 1, 0, 0)
__BUILDIO(q, u64)
-#define readb_relaxed readb
-#define readw_relaxed readw
-#define readl_relaxed readl
-#define readq_relaxed readq
+#define readb_relaxed __relaxed_readb
+#define readw_relaxed __relaxed_readw
+#define readl_relaxed __relaxed_readl
+#define readq_relaxed __relaxed_readq
-#define writeb_relaxed writeb
-#define writew_relaxed writew
-#define writel_relaxed writel
-#define writeq_relaxed writeq
+#define writeb_relaxed __relaxed_writeb
+#define writew_relaxed __relaxed_writew
+#define writel_relaxed __relaxed_writel
+#define writeq_relaxed __relaxed_writeq
#define readb_be(addr) \
__raw_readb((__force unsigned *)(addr))
@@ -561,14 +568,6 @@ BUILDSTRING(l, u32)
BUILDSTRING(q, u64)
#endif
-
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define mmiowb() wmb()
-#else
-/* Depends on MIPS II instruction set */
-#define mmiowb() asm volatile ("sync" ::: "memory")
-#endif
-
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
memset((void __force *) addr, val, count);
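The new `relax` parameter is what distinguishes the `__relaxed_` read accessors wired up above: they skip the trailing rmb() that plain readX() keeps in order to sequence the MMIO read against later reads of coherent DMA data (the write side gains no such distinction in this patch). A minimal driver-style sketch of the resulting contract; the register offsets are made up and only the <linux/io.h> accessors are assumed:

#include <linux/io.h>
#include <linux/types.h>

static u32 drain_fifo(void __iomem *base, u32 *buf, unsigned int n)
{
	unsigned int i;

	/* Relaxed reads are fine for back-to-back FIFO draining;
	 * no DMA'd memory is inspected between them. */
	for (i = 0; i < n; i++)
		buf[i] = readl_relaxed(base + 0x10);

	/* Use the full readl() when the value gates a subsequent
	 * read of a coherent DMA buffer: its trailing rmb() keeps
	 * that data from being prefetched prematurely. */
	return readl(base + 0x04);
}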
diff --git a/arch/mips/include/asm/kexec.h b/arch/mips/include/asm/kexec.h
index 493a3cc7c39a..40795ca89961 100644
--- a/arch/mips/include/asm/kexec.h
+++ b/arch/mips/include/asm/kexec.h
@@ -12,11 +12,11 @@
#include <asm/stacktrace.h>
/* Maximum physical address we can use pages from */
-#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
-#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
-#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
/* Reserve 3*4096 bytes for board-specific info */
#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
@@ -39,11 +39,12 @@ extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
extern void (*_machine_kexec_shutdown)(void);
extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
-extern void default_machine_crash_shutdown(struct pt_regs *regs);
+void default_machine_crash_shutdown(struct pt_regs *regs);
+void kexec_nonboot_cpu_jump(void);
+void kexec_reboot(void);
#ifdef CONFIG_SMP
extern const unsigned char kexec_smp_wait[];
extern unsigned long secondary_kexec_args[4];
-extern void (*relocated_kexec_smp_wait) (void *);
extern atomic_t kexec_ready_to_reboot;
extern void (*_crash_smp_send_stop)(void);
#endif
diff --git a/arch/mips/include/asm/mach-loongson64/irq.h b/arch/mips/include/asm/mach-loongson64/irq.h
index 3644b68c0ccc..be9f727a9328 100644
--- a/arch/mips/include/asm/mach-loongson64/irq.h
+++ b/arch/mips/include/asm/mach-loongson64/irq.h
@@ -10,7 +10,7 @@
#define MIPS_CPU_IRQ_BASE 56
#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 2) /* UART */
-#define LOONGSON_HT1_IRQ (MIPS_CPU_IRQ_BASE + 3) /* HT1 */
+#define LOONGSON_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 3) /* CASCADE */
#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* CPU Timer */
#define LOONGSON_HT1_CFG_BASE loongson_sysconf.ht_control_base
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 312739117bb0..cbac603ced19 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -11,6 +11,8 @@
#ifndef __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
#define __ASM_MACH_LOONGSON64_KERNEL_ENTRY_H
+#include <asm/cpu.h>
+
/*
* Override macros used in arch/mips/kernel/head.S.
*/
@@ -26,12 +28,15 @@
mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
mtc0 t0, CP0_PAGEGRAIN
-#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
+ mfc0 t0, CP0_PRID
+ andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
+ bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-#endif
+1:
_ehb
.set pop
#endif
@@ -52,12 +57,15 @@
mfc0 t0, CP0_PAGEGRAIN
or t0, (0x1 << 29)
mtc0 t0, CP0_PAGEGRAIN
-#ifdef CONFIG_LOONGSON3_ENHANCEMENT
/* Enable STFill Buffer */
+ mfc0 t0, CP0_PRID
+ andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
+ slti t0, (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2)
+ bnez t0, 1f
mfc0 t0, CP0_CONFIG6
or t0, 0x100
mtc0 t0, CP0_CONFIG6
-#endif
+1:
_ehb
.set pop
#endif
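The two assembly hunks above replace the CONFIG_LOONGSON3_ENHANCEMENT compile-time switch with a runtime PRID test. In C terms the check amounts to the following sketch; the <asm/mipsregs.h> and <asm/cpu.h> helpers are assumed, and the real code necessarily runs in early assembly before C is available:

#include <asm/cpu.h>
#include <asm/mipsregs.h>

static inline void loongson3_enable_stfill(void)
{
	unsigned int prid = read_c0_prid() & (PRID_IMP_MASK | PRID_REV_MASK);

	/* Only Loongson-3A R2 and newer implement the STFill buffer
	 * (the slti/bnez pair above branches past the enable when
	 * the PRID is below this revision). */
	if (prid >= (PRID_IMP_LOONGSON_64 | PRID_REV_LOONGSON3A_R2))
		write_c0_config6(read_c0_config6() | 0x100);
}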
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 01df9ad62fb8..341a02c92985 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -2287,13 +2287,14 @@ do { \
_write_32bit_cp1_register(dest, val, )
#endif
-#ifdef HAVE_AS_DSP
+#ifdef TOOLCHAIN_SUPPORTS_DSP
#define rddsp(mask) \
({ \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" rddsp %0, %x1 \n" \
" .set pop \n" \
@@ -2306,6 +2307,7 @@ do { \
do { \
__asm__ __volatile__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" wrdsp %0, %x1 \n" \
" .set pop \n" \
@@ -2318,6 +2320,7 @@ do { \
long mflo0; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac0 \n" \
" .set pop \n" \
@@ -2330,6 +2333,7 @@ do { \
long mflo1; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac1 \n" \
" .set pop \n" \
@@ -2342,6 +2346,7 @@ do { \
long mflo2; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac2 \n" \
" .set pop \n" \
@@ -2354,6 +2359,7 @@ do { \
long mflo3; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mflo %0, $ac3 \n" \
" .set pop \n" \
@@ -2366,6 +2372,7 @@ do { \
long mfhi0; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac0 \n" \
" .set pop \n" \
@@ -2378,6 +2385,7 @@ do { \
long mfhi1; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac1 \n" \
" .set pop \n" \
@@ -2390,6 +2398,7 @@ do { \
long mfhi2; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac2 \n" \
" .set pop \n" \
@@ -2402,6 +2411,7 @@ do { \
long mfhi3; \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mfhi %0, $ac3 \n" \
" .set pop \n" \
@@ -2414,6 +2424,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac0 \n" \
" .set pop \n" \
@@ -2425,6 +2436,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac1 \n" \
" .set pop \n" \
@@ -2436,6 +2448,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac2 \n" \
" .set pop \n" \
@@ -2447,6 +2460,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mtlo %0, $ac3 \n" \
" .set pop \n" \
@@ -2458,6 +2472,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac0 \n" \
" .set pop \n" \
@@ -2469,6 +2484,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac1 \n" \
" .set pop \n" \
@@ -2480,6 +2496,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac2 \n" \
" .set pop \n" \
@@ -2491,6 +2508,7 @@ do { \
({ \
__asm__( \
" .set push \n" \
+ " .set " MIPS_ISA_LEVEL " \n" \
" .set dsp \n" \
" mthi %0, $ac3 \n" \
" .set pop \n" \
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 49d6046ca1d0..c373eb605040 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -81,7 +81,7 @@ extern unsigned int vced_count, vcei_count;
#endif
-#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_256M)
+#define VDSO_RANDOMIZE_SIZE (TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)
extern unsigned long mips_stack_top(void);
#define STACK_TOP mips_stack_top()
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 7f12d7e27c94..d19b2d65336b 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -48,58 +48,14 @@ extern void (*r4k_blast_icache)(void);
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
-#ifdef CONFIG_MIPS_MT
-
-#define __iflush_prologue \
- unsigned long redundance; \
- extern int mt_n_iflushes; \
- for (redundance = 0; redundance < mt_n_iflushes; redundance++) {
-
-#define __iflush_epilogue \
- }
-
-#define __dflush_prologue \
- unsigned long redundance; \
- extern int mt_n_dflushes; \
- for (redundance = 0; redundance < mt_n_dflushes; redundance++) {
-
-#define __dflush_epilogue \
- }
-
-#define __inv_dflush_prologue __dflush_prologue
-#define __inv_dflush_epilogue __dflush_epilogue
-#define __sflush_prologue {
-#define __sflush_epilogue }
-#define __inv_sflush_prologue __sflush_prologue
-#define __inv_sflush_epilogue __sflush_epilogue
-
-#else /* CONFIG_MIPS_MT */
-
-#define __iflush_prologue {
-#define __iflush_epilogue }
-#define __dflush_prologue {
-#define __dflush_epilogue }
-#define __inv_dflush_prologue {
-#define __inv_dflush_epilogue }
-#define __sflush_prologue {
-#define __sflush_epilogue }
-#define __inv_sflush_prologue {
-#define __inv_sflush_epilogue }
-
-#endif /* CONFIG_MIPS_MT */
-
static inline void flush_icache_line_indexed(unsigned long addr)
{
- __iflush_prologue
cache_op(Index_Invalidate_I, addr);
- __iflush_epilogue
}
static inline void flush_dcache_line_indexed(unsigned long addr)
{
- __dflush_prologue
cache_op(Index_Writeback_Inv_D, addr);
- __dflush_epilogue
}
static inline void flush_scache_line_indexed(unsigned long addr)
@@ -109,7 +65,6 @@ static inline void flush_scache_line_indexed(unsigned long addr)
static inline void flush_icache_line(unsigned long addr)
{
- __iflush_prologue
switch (boot_cpu_type()) {
case CPU_LOONGSON2:
cache_op(Hit_Invalidate_I_Loongson2, addr);
@@ -119,21 +74,16 @@ static inline void flush_icache_line(unsigned long addr)
cache_op(Hit_Invalidate_I, addr);
break;
}
- __iflush_epilogue
}
static inline void flush_dcache_line(unsigned long addr)
{
- __dflush_prologue
cache_op(Hit_Writeback_Inv_D, addr);
- __dflush_epilogue
}
static inline void invalidate_dcache_line(unsigned long addr)
{
- __dflush_prologue
cache_op(Hit_Invalidate_D, addr);
- __dflush_epilogue
}
static inline void invalidate_scache_line(unsigned long addr)
@@ -586,13 +536,9 @@ static inline void extra##blast_##pfx##cache##lsize(void) \
current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
- __##pfx##flush_prologue \
- \
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
cache##lsize##_unroll32(addr|ws, indexop); \
- \
- __##pfx##flush_epilogue \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
@@ -600,14 +546,10 @@ static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
\
- __##pfx##flush_prologue \
- \
do { \
cache##lsize##_unroll32(start, hitop); \
start += lsize * 32; \
} while (start < end); \
- \
- __##pfx##flush_epilogue \
} \
\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
@@ -620,13 +562,9 @@ static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long
current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
- __##pfx##flush_prologue \
- \
for (ws = 0; ws < ws_end; ws += ws_inc) \
for (addr = start; addr < end; addr += lsize * 32) \
cache##lsize##_unroll32(addr|ws, indexop); \
- \
- __##pfx##flush_epilogue \
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
@@ -656,14 +594,10 @@ static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
\
- __##pfx##flush_prologue \
- \
do { \
cache##lsize##_unroll32_user(start, hitop); \
start += lsize * 32; \
} while (start < end); \
- \
- __##pfx##flush_epilogue \
}
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
@@ -685,16 +619,12 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
unsigned long addr = start & ~(lsize - 1); \
unsigned long aend = (end - 1) & ~(lsize - 1); \
\
- __##pfx##flush_prologue \
- \
while (1) { \
prot##cache_op(hitop, addr); \
if (addr == aend) \
break; \
addr += lsize; \
} \
- \
- __##pfx##flush_epilogue \
}
#ifndef CONFIG_EVA
@@ -712,8 +642,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
unsigned long addr = start & ~(lsize - 1); \
unsigned long aend = (end - 1) & ~(lsize - 1); \
\
- __##pfx##flush_prologue \
- \
if (!uaccess_kernel()) { \
while (1) { \
protected_cachee_op(hitop, addr); \
@@ -730,7 +658,6 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
} \
\
} \
- __##pfx##flush_epilogue \
}
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 53b2cb8e5966..b7123f9c0785 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -33,6 +33,9 @@ struct plat_smp_ops {
int (*cpu_disable)(void);
void (*cpu_die)(unsigned int cpu);
#endif
+#ifdef CONFIG_KEXEC
+ void (*kexec_nonboot_cpu)(void);
+#endif
};
extern void register_smp_ops(const struct plat_smp_ops *ops);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 056a6bf13491..7990c1c70471 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -91,6 +91,22 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void);
#endif
+#ifdef CONFIG_KEXEC
+static inline void kexec_nonboot_cpu(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->kexec_nonboot_cpu();
+}
+
+static inline void *kexec_nonboot_cpu_func(void)
+{
+ extern const struct plat_smp_ops *mp_ops; /* private */
+
+ return mp_ops->kexec_nonboot_cpu;
+}
+#endif
+
/*
* This function will set up the necessary IPIs for Linux to communicate
* with the CPUs in mask.
diff --git a/arch/mips/include/asm/unistd.h b/arch/mips/include/asm/unistd.h
index 3c09450908aa..c68b8ae3efcb 100644
--- a/arch/mips/include/asm/unistd.h
+++ b/arch/mips/include/asm/unistd.h
@@ -24,16 +24,17 @@
#ifndef __ASSEMBLY__
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_PAUSE
#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_UNAME
#define __ARCH_WANT_SYS_OLDUMOUNT
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index f10e1e15e1c6..210c2802cf4d 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -113,22 +113,4 @@ obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
obj-$(CONFIG_CPU_PM) += pm.o
obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
-#
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
-# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
-# here because the compiler may use DSP ASE instructions (such as lwx) in
-# code paths where we cannot check that the CPU we are running on supports it.
-# Proper abstraction using HAVE_AS_DSP and macros is done in
-# arch/mips/include/asm/mipsregs.h.
-#
-ifeq ($(CONFIG_CPU_MIPSR2), y)
-CFLAGS_DSP = -DHAVE_AS_DSP
-
-CFLAGS_signal.o = $(CFLAGS_DSP)
-CFLAGS_signal32.o = $(CFLAGS_DSP)
-CFLAGS_process.o = $(CFLAGS_DSP)
-CFLAGS_branch.o = $(CFLAGS_DSP)
-CFLAGS_ptrace.o = $(CFLAGS_DSP)
-endif
-
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 89b234844534..7a12763d553a 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -54,10 +54,10 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime;/* Cumulative user time */
- struct compat_timeval pr_cstime;/* Cumulative system time */
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -81,9 +81,9 @@ struct elf_prpsinfo32
#define elf_caddr_t u32
#define init_elf_binfmt init_elfn32_binfmt
-#define jiffies_to_timeval jiffies_to_compat_timeval
+#define jiffies_to_timeval jiffies_to_old_timeval32
static __inline__ void
-jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
@@ -101,6 +101,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#define TASK_SIZE TASK_SIZE32
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index a88c59db3d48..e6db06a1d31a 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -59,10 +59,10 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
- struct compat_timeval pr_cutime;/* Cumulative user time */
- struct compat_timeval pr_cstime;/* Cumulative system time */
+ struct old_timeval32 pr_utime; /* User time */
+ struct old_timeval32 pr_stime; /* System time */
+ struct old_timeval32 pr_cutime;/* Cumulative user time */
+ struct old_timeval32 pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
int pr_fpvalid; /* True if math co-processor being used. */
};
@@ -86,9 +86,9 @@ struct elf_prpsinfo32
#define elf_caddr_t u32
#define init_elf_binfmt init_elf32_binfmt
-#define jiffies_to_timeval jiffies_to_compat_timeval
+#define jiffies_to_timeval jiffies_to_old_timeval32
static inline void
-jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
+jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
{
/*
* Convert jiffies to nanoseconds and separate with
@@ -104,6 +104,6 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
#define TASK_SIZE TASK_SIZE32
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
#include "../../../fs/binfmt_elf.c"
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d455363d51c3..2c7288041a99 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -36,6 +36,9 @@ static void crash_shutdown_secondary(void *passed_regs)
if (!cpu_online(cpu))
return;
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
local_irq_disable();
if (!cpumask_test_cpu(cpu, &cpus_in_crash))
crash_save_cpu(regs, cpu);
@@ -43,7 +46,9 @@ static void crash_shutdown_secondary(void *passed_regs)
while (!atomic_read(&kexec_ready_to_reboot))
cpu_relax();
- relocated_kexec_smp_wait(NULL);
+
+ kexec_reboot();
+
/* NOTREACHED */
}
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index d1bb506adc10..351d40fe0859 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -77,7 +77,7 @@ EXPORT(_stext)
*/
FEXPORT(__kernel_entry)
j kernel_entry
-#endif
+#endif /* CONFIG_BOOT_RAW */
__REF
@@ -94,24 +94,26 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
0:
#ifdef CONFIG_USE_OF
-#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
+ defined(CONFIG_MIPS_ELF_APPENDED_DTB)
+
PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
-#else
+#else /* !CONFIG_CPU_BIG_ENDIAN */
li t1, 0xedfe0dd0
-#endif
+#endif /* !CONFIG_CPU_BIG_ENDIAN */
lw t0, (t2)
beq t0, t1, dtb_found
-#endif
+#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
li t1, -2
move t2, a1
beq a0, t1, dtb_found
li t2, 0
dtb_found:
-#endif
+#endif /* CONFIG_USE_OF */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
@@ -156,9 +158,9 @@ dtb_found:
* newly sync'd icache.
*/
jr.hb v0
-#else
+#else /* !CONFIG_RELOCATABLE */
j start_kernel
-#endif
+#endif /* !CONFIG_RELOCATABLE */
END(kernel_entry)
#ifdef CONFIG_SMP
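The magic-number compare above is the standard FDT signature test: a flat device tree begins with 0xd00dfeed stored big-endian, which a little-endian word load observes as 0xedfe0dd0. For illustration, the equivalent check in C (assuming `blob` is at least 4-byte aligned, as __appended_dtb is):

#include <stdint.h>

static int looks_like_fdt(const void *blob)
{
	uint32_t magic = *(const uint32_t *)blob;

#ifdef __MIPSEB__
	return magic == 0xd00dfeed;	/* native big-endian load */
#else
	return magic == 0xedfe0dd0;	/* byte-swapped on little-endian */
#endif
}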
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 8b574bcd39ba..93936dce04d6 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -9,6 +9,7 @@
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/delay.h>
+#include <linux/libfdt.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
@@ -19,15 +20,18 @@ extern const size_t relocate_new_kernel_size;
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
-int (*_machine_kexec_prepare)(struct kimage *) = NULL;
-void (*_machine_kexec_shutdown)(void) = NULL;
-void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+static unsigned long reboot_code_buffer;
+
#ifdef CONFIG_SMP
-void (*relocated_kexec_smp_wait) (void *);
+static void (*relocated_kexec_smp_wait)(void *);
+
atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
void (*_crash_smp_send_stop)(void) = NULL;
#endif
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
static void kexec_image_info(const struct kimage *kimage)
{
unsigned long i;
@@ -48,13 +52,59 @@ static void kexec_image_info(const struct kimage *kimage)
}
}
+#ifdef CONFIG_UHI_BOOT
+
+static int uhi_machine_kexec_prepare(struct kimage *kimage)
+{
+ int i;
+
+ /*
+	 * In case the DTB is not passed to the new kernel, a flat device
+	 * tree will be created by the kexec tool. It holds the modified
+	 * command line for the new kernel.
+ */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ struct fdt_header fdt;
+
+ if (kimage->segment[i].memsz <= sizeof(fdt))
+ continue;
+
+ if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
+ continue;
+
+ if (fdt_check_header(&fdt))
+ continue;
+
+ kexec_args[0] = -2;
+ kexec_args[1] = (unsigned long)
+ phys_to_virt((unsigned long)kimage->segment[i].mem);
+ break;
+ }
+
+ return 0;
+}
+
+int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
+
+#else
+
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+
+#endif /* CONFIG_UHI_BOOT */
+
int
machine_kexec_prepare(struct kimage *kimage)
{
+#ifdef CONFIG_SMP
+ if (!kexec_nonboot_cpu_func())
+ return -EINVAL;
+#endif
+
kexec_image_info(kimage);
if (_machine_kexec_prepare)
return _machine_kexec_prepare(kimage);
+
return 0;
}
@@ -63,11 +113,41 @@ machine_kexec_cleanup(struct kimage *kimage)
{
}
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *param)
+{
+ int cpu = smp_processor_id();
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+
+ /* NOTREACHED */
+}
+#endif
+
void
machine_shutdown(void)
{
if (_machine_kexec_shutdown)
_machine_kexec_shutdown();
+
+#ifdef CONFIG_SMP
+ smp_call_function(kexec_shutdown_secondary, NULL, 0);
+
+ while (num_online_cpus() > 1) {
+ cpu_relax();
+ mdelay(1);
+ }
+#endif
}
void
@@ -79,12 +159,57 @@ machine_crash_shutdown(struct pt_regs *regs)
default_machine_crash_shutdown(regs);
}
-typedef void (*noretfun_t)(void) __noreturn;
+#ifdef CONFIG_SMP
+void kexec_nonboot_cpu_jump(void)
+{
+ local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+void kexec_reboot(void)
+{
+ void (*do_kexec)(void) __noreturn;
+
+ /*
+ * We know we were online, and there will be no incoming IPIs at
+ * this point. Mark online again before rebooting so that the crash
+ * analysis tool will see us correctly.
+ */
+ set_cpu_online(smp_processor_id(), true);
+
+ /* Ensure remote CPUs observe that we're online before rebooting. */
+ smp_mb__after_atomic();
+
+#ifdef CONFIG_SMP
+ if (smp_processor_id() > 0) {
+ /*
+		 * Instead of cpu_relax() or wait, this is needed for kexec
+		 * SMP reboot. Kdump usually doesn't require an SMP-capable
+		 * new kernel, but kexec may.
+ */
+ kexec_nonboot_cpu();
+
+ /* NOTREACHED */
+ }
+#endif
+
+ /*
+ * Make sure we get correct instructions written by the
+ * machine_kexec() CPU.
+ */
+ local_flush_icache_range(reboot_code_buffer,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec();
+}
void
machine_kexec(struct kimage *image)
{
- unsigned long reboot_code_buffer;
unsigned long entry;
unsigned long *ptr;
@@ -118,6 +243,9 @@ machine_kexec(struct kimage *image)
*ptr = (unsigned long) phys_to_virt(*ptr);
}
+ /* Mark offline BEFORE disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
/*
* we do not want to be bothered.
*/
@@ -125,6 +253,7 @@ machine_kexec(struct kimage *image)
printk("Will call new kernel at %08lx\n", image->start);
printk("Bye ...\n");
+ /* Make reboot code buffer available to the boot CPU. */
__flush_cache_all();
#ifdef CONFIG_SMP
/* All secondary cpus now may jump to kexec_wait cycle */
@@ -133,5 +262,5 @@ machine_kexec(struct kimage *image)
smp_wmb();
atomic_set(&kexec_ready_to_reboot, 1);
#endif
- ((noretfun_t) reboot_code_buffer)();
+ kexec_reboot();
}
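The _machine_* pointers left NULL above remain the per-platform override points, as before this rework. A hypothetical board hook, only to show the intended wiring (the board_* names are made up; Octeon's setup code uses the same pattern):

#include <linux/init.h>
#include <asm/kexec.h>

static void board_kexec_shutdown(void)
{
	/* quiesce board-specific DMA engines etc. before kexec */
}

static int __init board_kexec_init(void)
{
	_machine_kexec_shutdown = board_kexec_shutdown;
	return 0;
}
arch_initcall(board_kexec_init);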
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index efaa2527657d..9f85b98d24ac 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -154,40 +154,6 @@ static int __init config7_set(char *str)
}
__setup("config7=", config7_set);
-/* Experimental cache flush control parameters that should go away some day */
-int mt_protiflush;
-int mt_protdflush;
-int mt_n_iflushes = 1;
-int mt_n_dflushes = 1;
-
-static int __init set_protiflush(char *s)
-{
- mt_protiflush = 1;
- return 1;
-}
-__setup("protiflush", set_protiflush);
-
-static int __init set_protdflush(char *s)
-{
- mt_protdflush = 1;
- return 1;
-}
-__setup("protdflush", set_protdflush);
-
-static int __init niflush(char *s)
-{
- get_option(&s, &mt_n_iflushes);
- return 1;
-}
-__setup("niflush=", niflush);
-
-static int __init ndflush(char *s)
-{
- get_option(&s, &mt_n_dflushes);
- return 1;
-}
-__setup("ndflush=", ndflush);
-
static unsigned int itc_base;
static int __init set_itc_base(char *str)
@@ -232,16 +198,6 @@ void mips_mt_set_cpuoptions(void)
printk("Config7: 0x%08x\n", read_c0_config7());
}
- /* Report Cache management debug options */
- if (mt_protiflush)
- printk("I-cache flushes single-threaded\n");
- if (mt_protdflush)
- printk("D-cache flushes single-threaded\n");
- if (mt_n_iflushes != 1)
- printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
- if (mt_n_dflushes != 1)
- printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
-
if (itc_base != 0) {
/*
* Configure ITC mapping. This code is very
@@ -283,21 +239,6 @@ void mips_mt_set_cpuoptions(void)
}
}
-/*
- * Function to protect cache flushes from concurrent execution
- * depends on MP software model chosen.
- */
-
-void mt_cflush_lockdown(void)
-{
- /* FILL IN VSMP and AP/SP VERSIONS HERE */
-}
-
-void mt_cflush_release(void)
-{
- /* FILL IN VSMP and AP/SP VERSIONS HERE */
-}
-
struct class *mt_class;
static int __init mt_init(void)
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
index cbf4cc0b0b6c..3d80a51256de 100644
--- a/arch/mips/kernel/relocate.c
+++ b/arch/mips/kernel/relocate.c
@@ -146,7 +146,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
break;
type = (*r >> 24) & 0xff;
- loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
+ loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
loc_new = RELOCATED(loc_orig);
if (reloc_handlers_rel[type] == NULL) {
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index e64b9e8bb002..01a5ff4c41ff 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -333,7 +333,7 @@ static void __init finalize_initrd(void)
maybe_bswap_initrd();
- reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
+ memblock_reserve(__pa(initrd_start), size);
initrd_below_start_ok = 1;
pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -370,20 +370,10 @@ static void __init bootmem_init(void)
#else /* !CONFIG_SGI_IP27 */
-static unsigned long __init bootmap_bytes(unsigned long pages)
-{
- unsigned long bytes = DIV_ROUND_UP(pages, 8);
-
- return ALIGN(bytes, sizeof(long));
-}
-
static void __init bootmem_init(void)
{
unsigned long reserved_end;
- unsigned long mapstart = ~0UL;
- unsigned long bootmap_size;
phys_addr_t ramstart = PHYS_ADDR_MAX;
- bool bootmap_valid = false;
int i;
/*
@@ -395,6 +385,8 @@ static void __init bootmem_init(void)
init_initrd();
reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
+ memblock_reserve(PHYS_OFFSET, reserved_end << PAGE_SHIFT);
+
/*
* max_low_pfn is not a number of pages. The number of pages
* of the system is given by 'max_low_pfn - min_low_pfn'.
@@ -442,9 +434,6 @@ static void __init bootmem_init(void)
if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
continue;
#endif
- if (start >= mapstart)
- continue;
- mapstart = max(reserved_end, start);
}
if (min_low_pfn >= max_low_pfn)
@@ -456,9 +445,11 @@ static void __init bootmem_init(void)
/*
* Reserve any memory between the start of RAM and PHYS_OFFSET
*/
- if (ramstart > PHYS_OFFSET)
+ if (ramstart > PHYS_OFFSET) {
add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
BOOT_MEM_RESERVED);
+ memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
+ }
if (min_low_pfn > ARCH_PFN_OFFSET) {
pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
@@ -483,52 +474,6 @@ static void __init bootmem_init(void)
max_low_pfn = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_BLK_DEV_INITRD
- /*
- * mapstart should be after initrd_end
- */
- if (initrd_end)
- mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
-#endif
-
- /*
- * check that mapstart doesn't overlap with any of
- * memory regions that have been reserved through eg. DTB
- */
- bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);
-
- bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
- bootmap_size);
- for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
- unsigned long mapstart_addr;
-
- switch (boot_mem_map.map[i].type) {
- case BOOT_MEM_RESERVED:
- mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
- boot_mem_map.map[i].size);
- if (PHYS_PFN(mapstart_addr) < mapstart)
- break;
-
- bootmap_valid = memory_region_available(mapstart_addr,
- bootmap_size);
- if (bootmap_valid)
- mapstart = PHYS_PFN(mapstart_addr);
- break;
- default:
- break;
- }
- }
-
- if (!bootmap_valid)
- panic("No memory area to place a bootmap bitmap");
-
- /*
- * Initialize the boot-time allocator with low memory only.
- */
- if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
- min_low_pfn, max_low_pfn))
- panic("Unexpected memory size required for bootmap");
-
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -577,9 +522,9 @@ static void __init bootmem_init(void)
default:
/* Not usable memory */
if (start > min_low_pfn && end < max_low_pfn)
- reserve_bootmem(boot_mem_map.map[i].addr,
- boot_mem_map.map[i].size,
- BOOTMEM_DEFAULT);
+ memblock_reserve(boot_mem_map.map[i].addr,
+ boot_mem_map.map[i].size);
+
continue;
}
@@ -602,15 +547,9 @@ static void __init bootmem_init(void)
size = end - start;
/* Register lowmem ranges */
- free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
memory_present(0, start, end);
}
- /*
- * Reserve the bootmap memory.
- */
- reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
-
#ifdef CONFIG_RELOCATABLE
/*
* The kernel reserves all memory below its _end symbol as bootmem,
@@ -642,29 +581,6 @@ static void __init bootmem_init(void)
#endif /* CONFIG_SGI_IP27 */
-/*
- * arch_mem_init - initialize memory management subsystem
- *
- * o plat_mem_setup() detects the memory configuration and will record detected
- * memory areas using add_memory_region.
- *
- * At this stage the memory configuration of the system is known to the
- * kernel but generic memory management system is still entirely uninitialized.
- *
- * o bootmem_init()
- * o sparse_init()
- * o paging_init()
- * o dma_contiguous_reserve()
- *
- * At this stage the bootmem allocator is ready to use.
- *
- * NOTE: historically plat_mem_setup did the entire platform initialization.
- * This was rather impractical because it meant plat_mem_setup had to
- * get away without any kind of memory allocator. To keep old code from
- * breaking plat_setup was just renamed to plat_mem_setup and a second platform
- * initialization hook for anything else was introduced.
- */
-
static int usermem __initdata;
static int __init early_parse_mem(char *p)
@@ -841,6 +757,28 @@ static void __init request_crashkernel(struct resource *res)
#define BUILTIN_EXTEND_WITH_PROM \
IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
+/*
+ * arch_mem_init - initialize memory management subsystem
+ *
+ * o plat_mem_setup() detects the memory configuration and will record detected
+ * memory areas using add_memory_region.
+ *
+ * At this stage the memory configuration of the system is known to the
+ * kernel but generic memory management system is still entirely uninitialized.
+ *
+ * o bootmem_init()
+ * o sparse_init()
+ * o paging_init()
+ * o dma_contiguous_reserve()
+ *
+ * At this stage the bootmem allocator is ready to use.
+ *
+ * NOTE: historically plat_mem_setup did the entire platform initialization.
+ * This was rather impractical because it meant plat_mem_setup had to
+ * get away without any kind of memory allocator. To keep old code from
+ * breaking plat_setup was just renamed to plat_mem_setup and a second platform
+ * initialization hook for anything else was introduced.
+ */
static void __init arch_mem_init(char **cmdline_p)
{
struct memblock_region *reg;
@@ -916,21 +854,29 @@ static void __init arch_mem_init(char **cmdline_p)
early_init_fdt_scan_reserved_mem();
bootmem_init();
+
+ /*
+ * Prevent memblock from allocating high memory.
+	 * This cannot be done before max_low_pfn is detected, so up
+	 * to this point it is only possible to reserve physical memory
+	 * with memblock_reserve; memblock_virt_alloc* can be used
+	 * only after this point.
+ */
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
#ifdef CONFIG_PROC_VMCORE
if (setup_elfcorehdr && setup_elfcorehdr_size) {
printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
setup_elfcorehdr, setup_elfcorehdr_size);
- reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
- BOOTMEM_DEFAULT);
+ memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
}
#endif
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
- reserve_bootmem(crashk_res.start,
- crashk_res.end - crashk_res.start + 1,
- BOOTMEM_DEFAULT);
+ memblock_reserve(crashk_res.start,
+ crashk_res.end - crashk_res.start + 1);
#endif
device_tree_init();
sparse_init();
@@ -940,7 +886,7 @@ static void __init arch_mem_init(char **cmdline_p)
/* Tell bootmem about cma reserved memblock section */
for_each_memblock(reserved, reg)
if (reg->size != 0)
- reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+ memblock_reserve(reg->base, reg->size);
reserve_bootmem_region(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
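The conversion pattern in this file is mechanical: reserve_bootmem(addr, size, BOOTMEM_DEFAULT) becomes memblock_reserve(addr, size), and the explicit bootmap bitmap bookkeeping disappears because memblock tracks ranges itself. A minimal sketch of the new calls, with an illustrative address:

#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/sizes.h>

static void __init example_reservations(void)
{
	/* was: reserve_bootmem(0x100000, SZ_1M, BOOTMEM_DEFAULT); */
	memblock_reserve(0x100000, SZ_1M);

	/* after bootmem_init(): cap allocations to lowmem, since
	 * memblock_virt_alloc*() may only be used past this point */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
}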
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 159e83add4bb..76fae9b79f13 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -25,6 +25,7 @@
#include <linux/linkage.h>
#include <linux/bug.h>
#include <linux/kernel.h>
+#include <linux/kexec.h>
#include <asm/time.h>
#include <asm/pgtable.h>
@@ -423,6 +424,9 @@ const struct plat_smp_ops bmips43xx_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
};
const struct plat_smp_ops bmips5000_smp_ops = {
@@ -437,6 +441,9 @@ const struct plat_smp_ops bmips5000_smp_ops = {
.cpu_disable = bmips_cpu_disable,
.cpu_die = bmips_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
};
#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 03f1026ad148..faccfa4b280b 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -398,6 +398,55 @@ static void cps_smp_finish(void)
local_irq_enable();
}
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+
+enum cpu_death {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+};
+
+static void cps_shutdown_this_cpu(enum cpu_death death)
+{
+ unsigned int cpu, core, vpe_id;
+
+ cpu = smp_processor_id();
+ core = cpu_core(&cpu_data[cpu]);
+
+ if (death == CPU_DEATH_HALT) {
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
+ } else {
+ pr_debug("Gating power to core %d\n", core);
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+}
+
+#ifdef CONFIG_KEXEC
+
+static void cps_kexec_nonboot_cpu(void)
+{
+ if (cpu_has_mipsmt || cpu_has_vp)
+ cps_shutdown_this_cpu(CPU_DEATH_HALT);
+ else
+ cps_shutdown_this_cpu(CPU_DEATH_POWER);
+}
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+
#ifdef CONFIG_HOTPLUG_CPU
static int cps_cpu_disable(void)
@@ -421,19 +470,15 @@ static int cps_cpu_disable(void)
}
static unsigned cpu_death_sibling;
-static enum {
- CPU_DEATH_HALT,
- CPU_DEATH_POWER,
-} cpu_death;
+static enum cpu_death cpu_death;
void play_dead(void)
{
- unsigned int cpu, core, vpe_id;
+ unsigned int cpu;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
- core = cpu_core(&cpu_data[cpu]);
cpu_death = CPU_DEATH_POWER;
pr_debug("CPU%d going offline\n", cpu);
@@ -456,25 +501,7 @@ void play_dead(void)
/* This CPU has chosen its way out */
(void)cpu_report_death();
- if (cpu_death == CPU_DEATH_HALT) {
- vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-
- pr_debug("Halting core %d VP%d\n", core, vpe_id);
- if (cpu_has_mipsmt) {
- /* Halt this TC */
- write_c0_tchalt(TCHALT_H);
- instruction_hazard();
- } else if (cpu_has_vp) {
- write_cpc_cl_vp_stop(1 << vpe_id);
-
- /* Ensure that the VP_STOP register is written */
- wmb();
- }
- } else {
- pr_debug("Gating power to core %d\n", core);
- /* Power down the core */
- cps_pm_enter_state(CPS_PM_POWER_GATED);
- }
+ cps_shutdown_this_cpu(cpu_death);
/* This should never be reached */
panic("Failed to offline CPU %u", cpu);
@@ -593,6 +620,9 @@ static const struct plat_smp_ops cps_smp_ops = {
.cpu_disable = cps_cpu_disable,
.cpu_die = cps_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
+#endif
};
bool mips_cps_smp_in_use(void)
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9dab0ed1b227..5feef28deac8 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
+#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
@@ -348,7 +349,7 @@ static void __show_regs(const struct pt_regs *regs)
*/
void show_regs(struct pt_regs *regs)
{
- __show_regs((struct pt_regs *)regs);
+ __show_regs(regs);
dump_stack();
}
@@ -2260,8 +2261,10 @@ void __init trap_init(void)
unsigned long size = 0x200 + VECTORSPACING*64;
phys_addr_t ebase_pa;
+ memblock_set_bottom_up(true);
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
+ memblock_set_bottom_up(false);
/*
* Try to ensure ebase resides in KSeg0 if possible.
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 2d0b912f9e3e..ce446eed62d2 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -130,7 +130,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -151,8 +151,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has no lwl instruction */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -186,7 +186,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
do { \
@@ -212,7 +212,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -255,8 +255,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has not lwl and ldl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -339,7 +339,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
@@ -365,7 +365,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -406,8 +406,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has no swl and sdl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -483,7 +482,7 @@ do { \
: "memory"); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#else /* __BIG_ENDIAN */
@@ -509,7 +508,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -530,8 +529,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has no lwl instruction */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl instruction */
#define _LoadW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -565,7 +564,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _LoadHWU(addr, value, res, type) \
@@ -592,7 +591,7 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -635,8 +634,8 @@ do { \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has not lwl and ldl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without lwl and ldl instructions */
#define _LoadWU(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -718,7 +717,7 @@ do { \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define _StoreHW(addr, value, res, type) \
do { \
@@ -743,7 +742,7 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT));\
} while(0)
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -784,8 +783,8 @@ do { \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
} while(0)
-#else
-/* MIPSR6 has no swl and sdl instructions */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
+/* For CPUs without swl and sdl instructions */
#define _StoreW(addr, value, res, type) \
do { \
__asm__ __volatile__ ( \
@@ -861,7 +860,7 @@ do { \
: "memory"); \
} while(0)
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#endif
#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
index 6537e022ef62..479f50559c83 100644
--- a/arch/mips/lib/Makefile
+++ b/arch/mips/lib/Makefile
@@ -7,7 +7,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \
mips-atomic.o strncpy_user.o \
strnlen_user.o uncached.o
-obj-y += iomap.o iomap_copy.o
+obj-y += iomap_copy.o
obj-$(CONFIG_PCI) += iomap-pci.o
lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y))
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c
index 4850509c5534..210f5a95ecb1 100644
--- a/arch/mips/lib/iomap-pci.c
+++ b/arch/mips/lib/iomap-pci.c
@@ -44,10 +44,3 @@ void __iomem *__pci_ioport_map(struct pci_dev *dev,
}
#endif /* CONFIG_PCI_DRIVERS_LEGACY */
-
-void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
-{
- iounmap(addr);
-}
-
-EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
deleted file mode 100644
index 9b31653f318c..000000000000
--- a/arch/mips/lib/iomap.c
+++ /dev/null
@@ -1,227 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Implement the default iomap interfaces
- *
- * (C) Copyright 2004 Linus Torvalds
- * (C) Copyright 2006 Ralf Baechle <ralf@linux-mips.org>
- * (C) Copyright 2007 MIPS Technologies, Inc.
- * written by Ralf Baechle <ralf@linux-mips.org>
- */
-#include <linux/export.h>
-#include <asm/io.h>
-
-/*
- * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
- * access or a MMIO access, these functions don't care. The info is
- * encoded in the hardware mapping set up by the mapping functions
- * (or the cookie itself, depending on implementation and hw).
- *
- * The generic routines don't assume any hardware mappings, and just
- * encode the PIO/MMIO as part of the cookie. They coldly assume that
- * the MMIO IO mappings are not in the low address range.
- *
- * Architectures for which this is not true can't use this generic
- * implementation and should do their own copy.
- */
-
-#define PIO_MASK 0x0ffffUL
-
-unsigned int ioread8(void __iomem *addr)
-{
- return readb(addr);
-}
-
-EXPORT_SYMBOL(ioread8);
-
-unsigned int ioread16(void __iomem *addr)
-{
- return readw(addr);
-}
-
-EXPORT_SYMBOL(ioread16);
-
-unsigned int ioread16be(void __iomem *addr)
-{
- return be16_to_cpu(__raw_readw(addr));
-}
-
-EXPORT_SYMBOL(ioread16be);
-
-unsigned int ioread32(void __iomem *addr)
-{
- return readl(addr);
-}
-
-EXPORT_SYMBOL(ioread32);
-
-unsigned int ioread32be(void __iomem *addr)
-{
- return be32_to_cpu(__raw_readl(addr));
-}
-
-EXPORT_SYMBOL(ioread32be);
-
-void iowrite8(u8 val, void __iomem *addr)
-{
- writeb(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite8);
-
-void iowrite16(u16 val, void __iomem *addr)
-{
- writew(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite16);
-
-void iowrite16be(u16 val, void __iomem *addr)
-{
- __raw_writew(cpu_to_be16(val), addr);
-}
-
-EXPORT_SYMBOL(iowrite16be);
-
-void iowrite32(u32 val, void __iomem *addr)
-{
- writel(val, addr);
-}
-
-EXPORT_SYMBOL(iowrite32);
-
-void iowrite32be(u32 val, void __iomem *addr)
-{
- __raw_writel(cpu_to_be32(val), addr);
-}
-
-EXPORT_SYMBOL(iowrite32be);
-
-/*
- * These are the "repeat MMIO read/write" functions.
- * Note the "__mem" accesses, since we want to convert
- * to CPU byte order if the host bus happens to not match the
- * endianness of PCI/ISA (see mach-generic/mangle-port.h).
- */
-static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
-{
- while (--count >= 0) {
- u8 data = __mem_readb(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
-{
- while (--count >= 0) {
- u16 data = __mem_readw(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
-{
- while (--count >= 0) {
- u32 data = __mem_readl(addr);
- *dst = data;
- dst++;
- }
-}
-
-static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
-{
- while (--count >= 0) {
- __mem_writeb(*src, addr);
- src++;
- }
-}
-
-static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
-{
- while (--count >= 0) {
- __mem_writew(*src, addr);
- src++;
- }
-}
-
-static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
-{
- while (--count >= 0) {
- __mem_writel(*src, addr);
- src++;
- }
-}
-
-void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insb(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread8_rep);
-
-void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insw(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread16_rep);
-
-void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
-{
- mmio_insl(addr, dst, count);
-}
-
-EXPORT_SYMBOL(ioread32_rep);
-
-void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsb(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite8_rep);
-
-void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsw(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite16_rep);
-
-void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
-{
- mmio_outsl(addr, src, count);
-}
-
-EXPORT_SYMBOL(iowrite32_rep);
-
-/*
- * Create a virtual mapping cookie for an IO port range
- *
- * This uses the same mapping are as the in/out family which has to be setup
- * by the platform initialization code.
- *
- * Just to make matters somewhat more interesting on MIPS systems with
- * multiple host bridge each will have it's own ioport address space.
- */
-static void __iomem *ioport_map_legacy(unsigned long port, unsigned int nr)
-{
- return (void __iomem *) (mips_io_port_base + port);
-}
-
-void __iomem *ioport_map(unsigned long port, unsigned int nr)
-{
- if (port > PIO_MASK)
- return NULL;
-
- return ioport_map_legacy(port, nr);
-}
-
-EXPORT_SYMBOL(ioport_map);
-
-void ioport_unmap(void __iomem *addr)
-{
- /* Nothing to do */
-}
-
-EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 03e3304d6ae5..cdd19d8561e8 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -204,9 +204,10 @@
#define LOADB(reg, addr, handler) EXC(lb, LD_INSN, reg, addr, handler)
#define STOREB(reg, addr, handler) EXC(sb, ST_INSN, reg, addr, handler)
-#define _PREF(hint, addr, type) \
+#ifdef CONFIG_CPU_HAS_PREFETCH
+# define _PREF(hint, addr, type) \
.if \mode == LEGACY_MODE; \
- PREF(hint, addr); \
+ kernel_pref(hint, addr); \
.else; \
.if ((\from == USEROP) && (type == SRC_PREFETCH)) || \
((\to == USEROP) && (type == DST_PREFETCH)); \
@@ -218,12 +219,15 @@
* used later on. Therefore use $v1. \
*/ \
.set at=v1; \
- PREFE(hint, addr); \
+ user_pref(hint, addr); \
.set noat; \
.else; \
- PREF(hint, addr); \
+ kernel_pref(hint, addr); \
.endif; \
.endif
+#else
+# define _PREF(hint, addr, type)
+#endif
#define PREFS(hint, addr) _PREF(hint, addr, SRC_PREFETCH)
#define PREFD(hint, addr) _PREF(hint, addr, DST_PREFETCH)
@@ -297,7 +301,7 @@
and t0, src, ADDRMASK
PREFS( 0, 2*32(src) )
PREFD( 1, 2*32(dst) )
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
bnez t1, .Ldst_unaligned\@
nop
bnez t0, .Lsrc_unaligned_dst_aligned\@
@@ -385,7 +389,7 @@
bne rem, len, 1b
.set noreorder
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
@@ -487,7 +491,7 @@
bne len, rem, 1b
.set noreorder
-#endif /* !CONFIG_CPU_MIPSR6 */
+#endif /* CONFIG_CPU_HAS_LOAD_STORE_LR */
.Lcopy_bytes_checklen\@:
beqz len, .Ldone\@
nop
@@ -516,7 +520,7 @@
jr ra
nop
-#ifdef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
.Lcopy_unaligned_bytes\@:
1:
COPY_BYTE(0)
@@ -530,7 +534,7 @@
ADD src, src, 8
b 1b
ADD dst, dst, 8
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
.if __memcpy == 1
END(memcpy)
.set __memcpy, 0
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 069acec3df9f..418611ef13cf 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -78,7 +78,6 @@
#endif
.endm
- .set noreorder
.align 5
/*
@@ -94,13 +93,16 @@
.endif
sltiu t0, a2, STORSIZE /* very small region? */
+ .set noreorder
bnez t0, .Lsmall_memset\@
andi t0, a0, STORMASK /* aligned? */
+ .set reorder
#ifdef CONFIG_CPU_MICROMIPS
move t8, a1 /* used by 'swp' instruction */
move t9, a1
#endif
+ .set noreorder
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
beqz t0, 1f
PTR_SUBU t0, STORSIZE /* alignment in bytes */
@@ -111,8 +113,9 @@
PTR_SUBU t0, AT /* alignment in bytes */
.set at
#endif
+ .set reorder
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
@@ -122,11 +125,13 @@
PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
-#else /* CONFIG_CPU_MIPSR6 */
+#else /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
#define STORE_BYTE(N) \
EX(sb, a1, N(a0), .Lbyte_fixup\@); \
+ .set noreorder; \
beqz t0, 0f; \
- PTR_ADDU t0, 1;
+ PTR_ADDU t0, 1; \
+ .set reorder;
PTR_ADDU a2, t0 /* correct size */
PTR_ADDU t0, 1
@@ -145,19 +150,17 @@
ori a0, STORMASK
xori a0, STORMASK
PTR_ADDIU a0, STORSIZE
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
+ andi t0, a2, 0x40-STORSIZE
beqz t1, .Lmemset_partial\@ /* no block to fill */
- andi t0, a2, 0x40-STORSIZE
PTR_ADDU t1, a0 /* end address */
- .set reorder
1: PTR_ADDIU a0, 64
R10KCBARRIER(0(ra))
f_fill64 a0, -64, FILL64RG, .Lfwd_fixup\@, \mode
bne t1, a0, 1b
- .set noreorder
.Lmemset_partial\@:
R10KCBARRIER(0(ra))
@@ -173,20 +176,18 @@
PTR_SUBU t1, AT
.set at
#endif
+ PTR_ADDU a0, t0 /* dest ptr */
jr t1
- PTR_ADDU a0, t0 /* dest ptr */
- .set push
- .set noreorder
- .set nomacro
/* ... but first do longs ... */
f_fill64 a0, -64, FILL64RG, .Lpartial_fixup\@, \mode
-2: .set pop
- andi a2, STORMASK /* At most one long to go */
+2: andi a2, STORMASK /* At most one long to go */
+ .set noreorder
beqz a2, 1f
-#ifndef CONFIG_CPU_MIPSR6
+#ifdef CONFIG_CPU_HAS_LOAD_STORE_LR
PTR_ADDU a0, a2 /* What's left */
+ .set reorder
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
@@ -195,6 +196,7 @@
#endif
#else
PTR_SUBU t0, $0, a2
+ .set reorder
move a2, zero /* No remaining longs */
PTR_ADDIU t0, 1
STORE_BYTE(0)
@@ -210,41 +212,42 @@
#endif
0:
#endif
-1: jr ra
- move a2, zero
+1: move a2, zero
+ jr ra
.Lsmall_memset\@:
+ PTR_ADDU t1, a0, a2
beqz a2, 2f
- PTR_ADDU t1, a0, a2
1: PTR_ADDIU a0, 1 /* fill bytewise */
R10KCBARRIER(0(ra))
+ .set noreorder
bne t1, a0, 1b
EX(sb, a1, -1(a0), .Lsmall_fixup\@)
+ .set reorder
-2: jr ra /* done */
- move a2, zero
+2: move a2, zero
+ jr ra /* done */
.if __memset == 1
END(memset)
.set __memset, 0
.hidden __memset
.endif
-#ifdef CONFIG_CPU_MIPSR6
+#ifndef CONFIG_CPU_HAS_LOAD_STORE_LR
.Lbyte_fixup\@:
/*
* unset_bytes = (#bytes - (#unaligned bytes)) - (-#unaligned bytes remaining + 1) + 1
* a2 = a2 - t0 + 1
*/
PTR_SUBU a2, t0
+ PTR_ADDIU a2, 1
jr ra
- PTR_ADDIU a2, 1
-#endif /* CONFIG_CPU_MIPSR6 */
+#endif /* !CONFIG_CPU_HAS_LOAD_STORE_LR */
.Lfirst_fixup\@:
/* unset_bytes already in a2 */
jr ra
- nop
.Lfwd_fixup\@:
/*
@@ -255,8 +258,8 @@
andi a2, 0x3f
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, t1
+ LONG_SUBU a2, t0
jr ra
- LONG_SUBU a2, t0
.Lpartial_fixup\@:
/*
@@ -267,24 +270,21 @@
andi a2, STORMASK
LONG_L t0, THREAD_BUADDR(t0)
LONG_ADDU a2, a0
+ LONG_SUBU a2, t0
jr ra
- LONG_SUBU a2, t0
.Llast_fixup\@:
/* unset_bytes already in a2 */
jr ra
- nop
.Lsmall_fixup\@:
/*
* unset_bytes = end_addr - current_addr + 1
* a2 = t1 - a0 + 1
*/
- .set reorder
PTR_SUBU a2, t1, a0
PTR_ADDIU a2, 1
jr ra
- .set noreorder
.endm
@@ -298,8 +298,8 @@
LEAF(memset)
EXPORT_SYMBOL(memset)
+ move v0, a0 /* result */
beqz a1, 1f
- move v0, a0 /* result */
andi a1, 0xff /* spread fillword */
LONG_SLL t1, a1, 8
diff --git a/arch/mips/loongson64/common/Makefile b/arch/mips/loongson64/common/Makefile
index 57ee03022941..684624f61f5a 100644
--- a/arch/mips/loongson64/common/Makefile
+++ b/arch/mips/loongson64/common/Makefile
@@ -6,7 +6,6 @@
obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
bonito-irq.o mem.o machtype.o platform.o serial.o
obj-$(CONFIG_PCI) += pci.o
-obj-$(CONFIG_CPU_LOONGSON2) += dma.o
#
# Serial port support
diff --git a/arch/mips/loongson64/fuloong-2e/Makefile b/arch/mips/loongson64/fuloong-2e/Makefile
index b7622720c1ad..0a9a472bec0a 100644
--- a/arch/mips/loongson64/fuloong-2e/Makefile
+++ b/arch/mips/loongson64/fuloong-2e/Makefile
@@ -2,4 +2,4 @@
# Makefile for Lemote Fuloong2e mini-PC board.
#
-obj-y += irq.o reset.o
+obj-y += irq.o reset.o dma.o
diff --git a/arch/mips/loongson64/fuloong-2e/dma.c b/arch/mips/loongson64/fuloong-2e/dma.c
new file mode 100644
index 000000000000..e122292bf666
--- /dev/null
+++ b/arch/mips/loongson64/fuloong-2e/dma.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/dma-direct.h>
+
+dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+ return paddr | 0x80000000;
+}
+
+phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr & 0x7fffffff;
+}
diff --git a/arch/mips/loongson64/lemote-2f/Makefile b/arch/mips/loongson64/lemote-2f/Makefile
index 08b8abcbfef5..b5792c334cd5 100644
--- a/arch/mips/loongson64/lemote-2f/Makefile
+++ b/arch/mips/loongson64/lemote-2f/Makefile
@@ -2,7 +2,7 @@
# Makefile for lemote loongson2f family machines
#
-obj-y += clock.o machtype.o irq.o reset.o ec_kb3310b.o
+obj-y += clock.o machtype.o irq.o reset.o dma.o ec_kb3310b.o
#
# Suspend Support
diff --git a/arch/mips/loongson64/common/dma.c b/arch/mips/loongson64/lemote-2f/dma.c
index 48f04126bde2..abf0e39d7e46 100644
--- a/arch/mips/loongson64/common/dma.c
+++ b/arch/mips/loongson64/lemote-2f/dma.c
@@ -8,11 +8,7 @@ dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dma_addr)
{
-#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
if (dma_addr > 0x8fffffff)
return dma_addr;
return dma_addr & 0x0fffffff;
-#else
- return dma_addr & 0x7fffffff;
-#endif
}
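
With the shared dma.c gone, each board keeps its own translation: the new Fuloong-2E helpers set bit 31 on the way to the device and mask it off on the way back, while the Lemote-2F variant above passes DMA addresses beyond 0x8fffffff through unchanged and otherwise strips a 256 MB window. A stand-alone sketch of that arithmetic (function names and test values are ours; the constants are from the patch):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fuloong2e_phys_to_dma(uint64_t paddr)
    {
            return paddr | 0x80000000;      /* device sees RAM at +2 GB */
    }

    static uint64_t fuloong2e_dma_to_phys(uint64_t dma)
    {
            return dma & 0x7fffffff;        /* strip the window bit again */
    }

    static uint64_t lemote2f_dma_to_phys(uint64_t dma)
    {
            if (dma > 0x8fffffff)
                    return dma;             /* outside the window: identity */
            return dma & 0x0fffffff;        /* 256 MB window at 0x80000000 */
    }

    int main(void)
    {
            assert(fuloong2e_dma_to_phys(fuloong2e_phys_to_dma(0x01234567)) ==
                   0x01234567);
            assert(lemote2f_dma_to_phys(0x80001000) == 0x1000);
            assert(lemote2f_dma_to_phys(0x90000000) == 0x90000000);
            puts("ok");
            return 0;
    }
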
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index cbeb20f9fc95..5605061f5f98 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -96,51 +96,8 @@ void mach_irq_dispatch(unsigned int pending)
}
}
-static struct irqaction cascade_irqaction = {
- .handler = no_action,
- .flags = IRQF_NO_SUSPEND,
- .name = "cascade",
-};
-
-static inline void mask_loongson_irq(struct irq_data *d)
-{
- clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_disable_hazard();
-
- /* Workaround: UART IRQ may deliver to any core */
- if (d->irq == LOONGSON_UART_IRQ) {
- int cpu = smp_processor_id();
- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
- u64 intenclr_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
- u64 introuter_lpc_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_LPC);
-
- *(volatile u32 *)intenclr_addr = 1 << 10;
- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
- }
-}
-
-static inline void unmask_loongson_irq(struct irq_data *d)
-{
- /* Workaround: UART IRQ may deliver to any core */
- if (d->irq == LOONGSON_UART_IRQ) {
- int cpu = smp_processor_id();
- int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
- int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
- u64 intenset_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_INTENSET);
- u64 introuter_lpc_addr = smp_group[node_id] |
- (u64)(&LOONGSON_INT_ROUTER_LPC);
-
- *(volatile u32 *)intenset_addr = 1 << 10;
- *(volatile u8 *)introuter_lpc_addr = 0x10 + (1<<core_id);
- }
-
- set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
- irq_enable_hazard();
-}
+static inline void mask_loongson_irq(struct irq_data *d) { }
+static inline void unmask_loongson_irq(struct irq_data *d) { }
/* For MIPS IRQs which shared by all cores */
static struct irq_chip loongson_irq_chip = {
@@ -183,12 +140,11 @@ void __init mach_init_irq(void)
chip->irq_set_affinity = plat_set_irq_affinity;
irq_set_chip_and_handler(LOONGSON_UART_IRQ,
- &loongson_irq_chip, handle_level_irq);
-
- /* setup HT1 irq */
- setup_irq(LOONGSON_HT1_IRQ, &cascade_irqaction);
+ &loongson_irq_chip, handle_percpu_irq);
+ irq_set_chip_and_handler(LOONGSON_BRIDGE_IRQ,
+ &loongson_irq_chip, handle_percpu_irq);
- set_c0_status(STATUSF_IP2 | STATUSF_IP6);
+ set_c0_status(STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP6);
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 9717106de4a5..c1e6ec52c614 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -180,43 +180,39 @@ static void __init szmem(unsigned int node)
static void __init node_mem_init(unsigned int node)
{
- unsigned long bootmap_size;
unsigned long node_addrspace_offset;
- unsigned long start_pfn, end_pfn, freepfn;
+ unsigned long start_pfn, end_pfn;
node_addrspace_offset = nid_to_addroffset(node);
pr_info("Node%d's addrspace_offset is 0x%lx\n",
node, node_addrspace_offset);
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
- freepfn = start_pfn;
- if (node == 0)
- freepfn = PFN_UP(__pa_symbol(&_end)); /* kernel end address */
- pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx, freepfn=0x%lx\n",
- node, start_pfn, end_pfn, freepfn);
+ pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+ node, start_pfn, end_pfn);
__node_data[node] = prealloc__node_data + node;
- NODE_DATA(node)->bdata = &bootmem_node_data[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
- bootmap_size = init_bootmem_node(NODE_DATA(node), freepfn,
- start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
- if (node == 0) /* used by finalize_initrd() */
+
+ if (node == 0) {
+ /* kernel end address */
+ unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
+
+ /* used by finalize_initrd() */
max_low_pfn = end_pfn;
- /* This is reserved for the kernel and bdata->node_bootmem_map */
- reserve_bootmem_node(NODE_DATA(node), start_pfn << PAGE_SHIFT,
- ((freepfn - start_pfn) << PAGE_SHIFT) + bootmap_size,
- BOOTMEM_DEFAULT);
+ /* Reserve the kernel text/data/bss */
+ memblock_reserve(start_pfn << PAGE_SHIFT,
+ ((kernel_end_pfn - start_pfn) << PAGE_SHIFT));
- if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
/* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
- reserve_bootmem_node(NODE_DATA(node),
- (node_addrspace_offset | 0xfe000000),
- 32 << 20, BOOTMEM_DEFAULT);
+ if (node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT))
+ memblock_reserve((node_addrspace_offset | 0xfe000000),
+ 32 << 20);
}
sparse_memory_present_with_active_regions(node);
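
The bootmem-to-memblock conversion above reserves the kernel image by page-frame arithmetic and only reserves the RS780E GPU window when node 0 actually spans the 4 GB boundary. A toy check of those shifts (the PAGE_SHIFT value and the PFN numbers are assumptions of ours, not from the patch):

    #include <stdio.h>

    #define PAGE_SHIFT 14   /* 16 KB pages, common on loongson64; demo assumption */

    int main(void)
    {
            unsigned long start_pfn = 0x0, kernel_end_pfn = 0x400; /* examples */

            printf("memblock_reserve(%#lx, %#lx)\n",
                   start_pfn << PAGE_SHIFT,
                   (kernel_end_pfn - start_pfn) << PAGE_SHIFT);
            /* the GPU window is reserved only if node 0 reaches 4 GB */
            printf("node 0 must reach pfn %#lx\n",
                   (unsigned long)(0xffffffff >> PAGE_SHIFT));
            return 0;
    }
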
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index fea95d003269..b5c1e0aa955e 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -21,6 +21,7 @@
#include <linux/sched/task_stack.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
+#include <linux/kexec.h>
#include <asm/processor.h>
#include <asm/time.h>
#include <asm/clock.h>
@@ -349,7 +350,7 @@ static void loongson3_smp_finish(void)
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
local_irq_enable();
loongson3_ipi_write64(0,
- (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
pr_info("CPU#%d finished, CP0_ST=%x\n",
smp_processor_id(), read_c0_status());
}
@@ -416,13 +417,13 @@ static int loongson3_boot_secondary(int cpu, struct task_struct *idle)
cpu, startargs[0], startargs[1], startargs[2]);
loongson3_ipi_write64(startargs[3],
- (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18));
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x18);
loongson3_ipi_write64(startargs[2],
- (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10));
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x10);
loongson3_ipi_write64(startargs[1],
- (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8));
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x8);
loongson3_ipi_write64(startargs[0],
- (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
+ ipi_mailbox_buf[cpu_logical_map(cpu)] + 0x0);
return 0;
}
@@ -749,4 +750,7 @@ const struct plat_smp_ops loongson3_smp_ops = {
.cpu_disable = loongson3_cpu_disable,
.cpu_die = loongson3_cpu_die,
#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
};
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 400676ce03f4..15cae0f11880 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -32,7 +32,6 @@
#include <linux/kcore.h>
#include <linux/initrd.h>
-#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
@@ -521,17 +520,13 @@ unsigned long pgd_current[NR_CPUS];
#endif
/*
- * gcc 3.3 and older have trouble determining that PTRS_PER_PGD and PGD_ORDER
- * are constants. So we use the variants from asm-offset.h until that gcc
- * will officially be retired.
- *
* Align swapper_pg_dir to 64K, which allows its address to be loaded
* with a single LUI instruction in the TLB handlers. If we used
* __aligned(64K), its size would get rounded up to the alignment
* size, and waste space. So we place it in its own section and align
* it in the linker script.
*/
-pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
#endif
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index f4961bc9a61d..cf33dd8a487e 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -291,7 +291,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
/* we need a hack to get the PIC's SoC chip id */
ret = of_address_to_resource(node, 0, &res);
if (ret < 0) {
- pr_err("PIC %s: reg property not found!\n", node->name);
+ pr_err("PIC %pOFn: reg property not found!\n", node);
return -EINVAL;
}
@@ -304,21 +304,21 @@ static int __init xlp_of_pic_init(struct device_node *node,
break;
}
if (socid == NLM_NR_NODES) {
- pr_err("PIC %s: Node mapping for bus %d not found!\n",
- node->name, bus);
+ pr_err("PIC %pOFn: Node mapping for bus %d not found!\n",
+ node, bus);
return -EINVAL;
}
} else {
socid = (res.start >> 18) & 0x3;
if (!nlm_node_present(socid)) {
- pr_err("PIC %s: node %d does not exist!\n",
- node->name, socid);
+ pr_err("PIC %pOFn: node %d does not exist!\n",
+ node, socid);
return -EINVAL;
}
}
if (!nlm_node_present(socid)) {
- pr_err("PIC %s: node %d does not exist!\n", node->name, socid);
+ pr_err("PIC %pOFn: node %d does not exist!\n", node, socid);
return -EINVAL;
}
@@ -326,7 +326,7 @@ static int __init xlp_of_pic_init(struct device_node *node,
nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
&xlp_pic_irq_domain_ops, NULL);
if (xlp_pic_domain == NULL) {
- pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
+ pr_err("PIC %pOFn: Creating legacy domain failed!\n", node);
return -EINVAL;
}
pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
diff --git a/arch/mips/pci/ops-loongson3.c b/arch/mips/pci/ops-loongson3.c
index 9e118431e226..2f6ad36bdea6 100644
--- a/arch/mips/pci/ops-loongson3.c
+++ b/arch/mips/pci/ops-loongson3.c
@@ -18,22 +18,36 @@ static int loongson3_pci_config_access(unsigned char access_type,
int where, u32 *data)
{
unsigned char busnum = bus->number;
- u_int64_t addr, type;
- void *addrp;
- int device = PCI_SLOT(devfn);
int function = PCI_FUNC(devfn);
+ int device = PCI_SLOT(devfn);
int reg = where & ~3;
+ void *addrp;
+ u64 addr;
+
+ if (where < PCI_CFG_SPACE_SIZE) { /* standard config */
+ addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
+ if (busnum == 0) {
+ if (device > 31)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+ addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE | addr);
+ } else {
+ addrp = (void *)TO_UNCAC(HT1LO_PCICFG_BASE_TP1 | addr);
+ }
+ } else if (where < PCI_CFG_SPACE_EXP_SIZE) { /* extended config */
+ struct pci_dev *rootdev;
+
+ rootdev = pci_get_domain_bus_and_slot(0, 0, 0);
+ if (!rootdev)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- addr = (busnum << 16) | (device << 11) | (function << 8) | reg;
- if (busnum == 0) {
- if (device > 31)
+ addr = pci_resource_start(rootdev, 3);
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE) | (addr & 0xffff));
- type = 0;
+ addr |= busnum << 20 | device << 15 | function << 12 | reg;
+ addrp = (void *)TO_UNCAC(addr);
} else {
- addrp = (void *)(TO_UNCAC(HT1LO_PCICFG_BASE_TP1) | (addr));
- type = 0x10000;
+ return PCIBIOS_DEVICE_NOT_FOUND;
}
if (access_type == PCI_ACCESS_WRITE)
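
The rewritten access helper distinguishes two layouts: offsets below PCI_CFG_SPACE_SIZE (256) use the legacy window with an 8-bit register field, while offsets up to PCI_CFG_SPACE_EXP_SIZE (4096) use an ECAM-style packing rooted at the host bridge's BAR 3. The bit packing itself can be checked in isolation (the bus/device/function/register values below are examples of ours):

    #include <stdint.h>
    #include <stdio.h>

    /* standard config space: 8-bit register offset */
    static uint64_t std_cfg_addr(unsigned bus, unsigned dev, unsigned fn,
                                 unsigned reg)
    {
            return (bus << 16) | (dev << 11) | (fn << 8) | reg;
    }

    /* extended (PCIe) config space: 12-bit register offset */
    static uint64_t ext_cfg_addr(unsigned bus, unsigned dev, unsigned fn,
                                 unsigned reg)
    {
            return ((uint64_t)bus << 20) | (dev << 15) | (fn << 12) | reg;
    }

    int main(void)
    {
            /* bus 1, device 2, function 0 */
            printf("std reg 0xfc  -> 0x%08llx\n",
                   (unsigned long long)std_cfg_addr(1, 2, 0, 0xfc));
            printf("ext reg 0x100 -> 0x%08llx\n",
                   (unsigned long long)ext_cfg_addr(1, 2, 0, 0x100));
            return 0;
    }

In the driver, the first result is OR-ed onto HT1LO_PCICFG_BASE (or _TP1) and the second onto pci_resource_start(rootdev, 3).
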
diff --git a/arch/mips/pci/pci-legacy.c b/arch/mips/pci/pci-legacy.c
index f1e92bf743c2..3c3b1e6abb53 100644
--- a/arch/mips/pci/pci-legacy.c
+++ b/arch/mips/pci/pci-legacy.c
@@ -127,8 +127,12 @@ static void pcibios_scanbus(struct pci_controller *hose)
if (pci_has_flag(PCI_PROBE_ONLY)) {
pci_bus_claim_resources(bus);
} else {
+ struct pci_bus *child;
+
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
pci_bus_add_devices(bus);
}
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index 711cdccdf65b..f376a1df326a 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -246,6 +246,8 @@ static int rt288x_pci_probe(struct platform_device *pdev)
rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
(void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
+ rt2880_pci_controller.of_node = pdev->dev.of_node;
+
register_pci_controller(&rt2880_pci_controller);
return 0;
}
diff --git a/arch/mips/pmcs-msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c
index c87c5f810cd1..d38ac70b5a2e 100644
--- a/arch/mips/pmcs-msp71xx/msp_usb.c
+++ b/arch/mips/pmcs-msp71xx/msp_usb.c
@@ -133,13 +133,13 @@ static int __init msp_usb_setup(void)
* "D" for device-mode. If it works for Ethernet, why not USB...
* -- hammtrev, 2007/03/22
*/
- snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
+ snprintf(&envstr[0], sizeof(envstr), "usbmode");
/* set default host mode */
val = 1;
/* get environment string */
- strp = prom_getenv((char *)&envstr[0]);
+ strp = prom_getenv(&envstr[0]);
if (strp) {
/* compare string */
if (!strcmp(strp, "device"))
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index 92f284d2b802..61a08943eb2f 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -134,7 +134,7 @@ static int __init ralink_systick_init(struct device_node *np)
systick.dev.min_delta_ticks = 0x3;
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
- pr_err("%s: request_irq failed", np->name);
+ pr_err("%pOFn: request_irq failed", np);
return -EINVAL;
}
@@ -146,8 +146,8 @@ static int __init ralink_systick_init(struct device_node *np)
clockevents_register_device(&systick.dev);
- pr_info("%s: running - mult: %d, shift: %d\n",
- np->name, systick.dev.mult, systick.dev.shift);
+ pr_info("%pOFn: running - mult: %d, shift: %d\n",
+ np, systick.dev.mult, systick.dev.shift);
return 0;
}
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index 765d5ba98fa2..fc056f2acfeb 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -62,7 +62,7 @@ static int __init ill_acc_of_setup(void)
pdev = of_find_device_by_node(np);
if (!pdev) {
- pr_err("%s: failed to lookup pdev\n", np->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np);
return -EINVAL;
}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index 93d472c60ce4..0f2264e0cf76 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -49,6 +49,10 @@ static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
+static struct rt2880_pmx_func rt3352_cs1_func[] = {
+ FUNC("spi_cs1", 0, 45, 1),
+ FUNC("wdg_cs1", 1, 45, 1),
+};
static struct rt2880_pmx_group rt3050_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
@@ -75,6 +79,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = {
GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
+ GRP("spi_cs1", rt3352_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
{ 0 }
};
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 2ed8e4990b7a..082541d33161 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -464,7 +464,7 @@ void ip22_be_interrupt(int irq)
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
} else if (debug_be_interrupt)
- show_regs((struct pt_regs *)regs);
+ show_regs(regs);
}
static int ip28_be_handler(struct pt_regs *regs, int is_fixup)
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 59133d0abc83..6f7bef052b7f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -389,7 +389,6 @@ static void __init node_mem_init(cnodeid_t node)
{
unsigned long slot_firstpfn = slot_getbasepfn(node, 0);
unsigned long slot_freepfn = node_getfirstfree(node);
- unsigned long bootmap_size;
unsigned long start_pfn, end_pfn;
get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
@@ -400,7 +399,6 @@ static void __init node_mem_init(cnodeid_t node)
__node_data[node] = __va(slot_freepfn << PAGE_SHIFT);
memset(__node_data[node], 0, PAGE_SIZE);
- NODE_DATA(node)->bdata = &bootmem_node_data[node];
NODE_DATA(node)->node_start_pfn = start_pfn;
NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
@@ -409,12 +407,11 @@ static void __init node_mem_init(cnodeid_t node)
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
- bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
- start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
- reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
- ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT) + bootmap_size,
- BOOTMEM_DEFAULT);
+
+ memblock_reserve(slot_firstpfn << PAGE_SHIFT,
+ ((slot_freepfn - slot_firstpfn) << PAGE_SHIFT));
+
sparse_memory_present_with_active_regions(node);
}
diff --git a/arch/mips/tools/.gitignore b/arch/mips/tools/.gitignore
new file mode 100644
index 000000000000..56d34ccccce4
--- /dev/null
+++ b/arch/mips/tools/.gitignore
@@ -0,0 +1 @@
+elf-entry
diff --git a/arch/mips/tools/Makefile b/arch/mips/tools/Makefile
new file mode 100644
index 000000000000..3baee4bc6775
--- /dev/null
+++ b/arch/mips/tools/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+hostprogs-y := elf-entry
+PHONY += elf-entry
+elf-entry: $(obj)/elf-entry
+ @:
diff --git a/arch/mips/tools/elf-entry.c b/arch/mips/tools/elf-entry.c
new file mode 100644
index 000000000000..adde79ce7fc0
--- /dev/null
+++ b/arch/mips/tools/elf-entry.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <byteswap.h>
+#include <elf.h>
+#include <endian.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef be32toh
+/* If libc provides [bl]e{32,64}toh() then we'll use them */
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# define be32toh(x) bswap_32(x)
+# define le32toh(x) (x)
+# define be64toh(x) bswap_64(x)
+# define le64toh(x) (x)
+#elif BYTE_ORDER == BIG_ENDIAN
+# define be32toh(x) (x)
+# define le32toh(x) bswap_32(x)
+# define be64toh(x) (x)
+# define le64toh(x) bswap_64(x)
+#endif
+
+__attribute__((noreturn))
+static void die(const char *msg)
+{
+ fputs(msg, stderr);
+ exit(EXIT_FAILURE);
+}
+
+int main(int argc, const char *argv[])
+{
+ uint64_t entry;
+ size_t nread;
+ FILE *file;
+ union {
+ Elf32_Ehdr ehdr32;
+ Elf64_Ehdr ehdr64;
+ } hdr;
+
+ if (argc != 2)
+ die("Usage: elf-entry <elf-file>\n");
+
+ file = fopen(argv[1], "r");
+ if (!file) {
+ perror("Unable to open input file");
+ return EXIT_FAILURE;
+ }
+
+ nread = fread(&hdr, 1, sizeof(hdr), file);
+ if (nread != sizeof(hdr)) {
+ perror("Unable to read input file");
+ return EXIT_FAILURE;
+ }
+
+ if (memcmp(hdr.ehdr32.e_ident, ELFMAG, SELFMAG))
+ die("Input is not an ELF\n");
+
+ switch (hdr.ehdr32.e_ident[EI_CLASS]) {
+ case ELFCLASS32:
+ switch (hdr.ehdr32.e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ entry = le32toh(hdr.ehdr32.e_entry);
+ break;
+ case ELFDATA2MSB:
+ entry = be32toh(hdr.ehdr32.e_entry);
+ break;
+ default:
+ die("Invalid ELF encoding\n");
+ }
+
+ /* Sign extend to form a canonical address */
+ entry = (int64_t)(int32_t)entry;
+ break;
+
+ case ELFCLASS64:
+ switch (hdr.ehdr32.e_ident[EI_DATA]) {
+ case ELFDATA2LSB:
+ entry = le64toh(hdr.ehdr64.e_entry);
+ break;
+ case ELFDATA2MSB:
+ entry = be64toh(hdr.ehdr64.e_entry);
+ break;
+ default:
+ die("Invalid ELF encoding\n");
+ }
+ break;
+
+ default:
+ die("Invalid ELF class\n");
+ }
+
+ printf("0x%016" PRIx64 "\n", entry);
+ return EXIT_SUCCESS;
+}
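
For a 32-bit image the tool sign-extends e_entry, since 64-bit MIPS treats a 32-bit address as canonical only when bit 31 is replicated upward: a KSEG0 entry such as 0x80100000 must read back as 0xffffffff80100000. The cast chain can be exercised on its own (the entry value is an example of ours):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t entry = 0x80100000;    /* example 32-bit KSEG0 entry */

            /* same conversion as in elf-entry.c above */
            entry = (int64_t)(int32_t)entry;
            assert(entry == UINT64_C(0xffffffff80100000));
            printf("0x%016" PRIx64 "\n", entry);
            return 0;
    }
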
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index f6d9182ef82a..70a1ab66d252 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -960,12 +960,11 @@ void __init txx9_sramc_init(struct resource *r)
goto exit_put;
err = sysfs_create_bin_file(&dev->dev.kobj, &dev->bindata_attr);
if (err) {
- device_unregister(&dev->dev);
iounmap(dev->base);
- kfree(dev);
+ device_unregister(&dev->dev);
}
return;
exit_put:
+ iounmap(dev->base);
put_device(&dev->dev);
- return;
}
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 3509fac10491..9f525ed70049 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -47,7 +47,7 @@ CHECKFLAGS += -D__NDS32_EB__
endif
boot := arch/nds32/boot
-core-$(BUILTIN_DTB) += $(boot)/dts/
+core-y += $(boot)/dts/
.PHONY: FORCE
diff --git a/arch/nds32/include/uapi/asm/unistd.h b/arch/nds32/include/uapi/asm/unistd.h
index 6e95901cabe3..603e826e0449 100644
--- a/arch/nds32/include/uapi/asm/unistd.h
+++ b/arch/nds32/include/uapi/asm/unistd.h
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYNC_FILE_RANGE2
/* Use the standard ABI for syscalls */
diff --git a/arch/nios2/Makefile b/arch/nios2/Makefile
index 8673a79dca9c..52c03e60b114 100644
--- a/arch/nios2/Makefile
+++ b/arch/nios2/Makefile
@@ -49,21 +49,13 @@ BOOT_TARGETS = vmImage zImage
PHONY += $(BOOT_TARGETS) install
KBUILD_IMAGE := $(nios2-boot)/vmImage
-ifneq ($(CONFIG_NIOS2_DTB_SOURCE),"")
- core-y += $(nios2-boot)/
-endif
+core-y += $(nios2-boot)/dts/
all: vmImage
archclean:
$(Q)$(MAKE) $(clean)=$(nios2-boot)
-%.dtb: | scripts
- $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
-
-dtbs:
- $(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
-
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(nios2-boot) $(nios2-boot)/$@
@@ -76,5 +68,4 @@ define archhelp
echo ' (your) ~/bin/$(INSTALLKERNEL) or'
echo ' (distribution) /sbin/$(INSTALLKERNEL) or'
echo ' install to $$(INSTALL_PATH)'
- echo ' dtbs - Build device tree blobs for enabled boards'
endef
diff --git a/arch/nios2/boot/Makefile b/arch/nios2/boot/Makefile
index 2ba23a679732..37dfc7e584bc 100644
--- a/arch/nios2/boot/Makefile
+++ b/arch/nios2/boot/Makefile
@@ -31,27 +31,5 @@ $(obj)/zImage: $(obj)/compressed/vmlinux FORCE
$(obj)/compressed/vmlinux: $(obj)/vmlinux.gz FORCE
$(Q)$(MAKE) $(build)=$(obj)/compressed $@
-# Rule to build device tree blobs
-DTB_SRC := $(patsubst "%",%,$(CONFIG_NIOS2_DTB_SOURCE))
-
-# Make sure the generated dtb gets removed during clean
-extra-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += system.dtb
-
-$(obj)/system.dtb: $(DTB_SRC) FORCE
- $(call cmd,dtc)
-
-# Ensure system.dtb exists
-$(obj)/linked_dtb.o: $(obj)/system.dtb
-
-obj-$(CONFIG_NIOS2_DTB_SOURCE_BOOL) += linked_dtb.o
-
-targets += $(dtb-y)
-
-# Rule to build device tree blobs with make command
-$(obj)/%.dtb: $(src)/dts/%.dts FORCE
- $(call if_changed_dep,dtc)
-
-$(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
-
install:
sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
diff --git a/arch/nios2/boot/dts/Makefile b/arch/nios2/boot/dts/Makefile
new file mode 100644
index 000000000000..a91a0b09be63
--- /dev/null
+++ b/arch/nios2/boot/dts/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-y := $(patsubst "%.dts",%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE))
+
+dtstree := $(srctree)/$(src)
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/nios2/include/uapi/asm/unistd.h b/arch/nios2/include/uapi/asm/unistd.h
index b6bdae04bc84..d9948d88790b 100644
--- a/arch/nios2/include/uapi/asm/unistd.h
+++ b/arch/nios2/include/uapi/asm/unistd.h
@@ -19,6 +19,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
/* Use the standard ABI for syscalls */
#include <asm-generic/unistd.h>
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 93207718bb22..ccc1d2a15a0a 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -47,7 +47,7 @@ void __init setup_cpuinfo(void)
const char *str;
int len;
- cpu = of_find_node_by_type(NULL, "cpu");
+ cpu = of_get_cpu_node(0, NULL);
if (!cpu)
panic("%s: No CPU found in devicetree!\n", __func__);
@@ -120,6 +120,8 @@ void __init setup_cpuinfo(void)
cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr");
cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr");
cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr");
+
+ of_node_put(cpu);
}
#ifdef CONFIG_PROC_FS
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index ab88b6dd4679..54467d0085a1 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -214,12 +214,12 @@ static int __init nios2_timer_get_base_and_freq(struct device_node *np,
{
*base = of_iomap(np, 0);
if (!*base) {
- pr_crit("Unable to map reg for %s\n", np->name);
+ pr_crit("Unable to map reg for %pOFn\n", np);
return -ENXIO;
}
if (of_property_read_u32(np, "clock-frequency", freq)) {
- pr_crit("Unable to get %s clock frequency\n", np->name);
+ pr_crit("Unable to get %pOFn clock frequency\n", np);
return -EINVAL;
}
diff --git a/arch/openrisc/include/uapi/asm/unistd.h b/arch/openrisc/include/uapi/asm/unistd.h
index 11c5a58ab333..ec37df18d8ed 100644
--- a/arch/openrisc/include/uapi/asm/unistd.h
+++ b/arch/openrisc/include/uapi/asm/unistd.h
@@ -20,6 +20,7 @@
#define sys_mmap2 sys_mmap_pgoff
#define __ARCH_WANT_RENAMEAT
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index 9d28ab14d139..e17fcd83120f 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -158,9 +158,8 @@ static struct device_node *setup_find_cpu_node(int cpu)
{
u32 hwid;
struct device_node *cpun;
- struct device_node *cpus = of_find_node_by_path("/cpus");
- for_each_available_child_of_node(cpus, cpun) {
+ for_each_of_cpu_node(cpun) {
if (of_property_read_u32(cpun, "reg", &hwid))
continue;
if (hwid == cpu)
diff --git a/arch/parisc/include/asm/compat.h b/arch/parisc/include/asm/compat.h
index ab8a54771507..e03e3c849f40 100644
--- a/arch/parisc/include/asm/compat.h
+++ b/arch/parisc/include/asm/compat.h
@@ -8,36 +8,22 @@
#include <linux/sched.h>
#include <linux/thread_info.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "parisc\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev; /* dev_t is 32 bits on parisc */
@@ -48,11 +34,11 @@ struct compat_stat {
u16 st_reserved2; /* old st_gid */
compat_dev_t st_rdev;
compat_off_t st_size;
- compat_time_t st_atime;
+ old_time32_t st_atime;
u32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
u32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
u32 st_ctime_nsec;
s32 st_blksize;
s32 st_blocks;
diff --git a/arch/parisc/include/asm/unistd.h b/arch/parisc/include/asm/unistd.h
index 3d507d04eb4c..bc37a4953eaa 100644
--- a/arch/parisc/include/asm/unistd.h
+++ b/arch/parisc/include/asm/unistd.h
@@ -141,6 +141,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
return K_INLINE_SYSCALL(name, 5, arg1, arg2, arg3, arg4, arg5); \
}
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -151,11 +152,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
#define __ARCH_WANT_COMPAT_SYS_TIME
#define __ARCH_WANT_COMPAT_SYS_SCHED_RR_GET_INTERVAL
#define __ARCH_WANT_SYS_UTIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_WAITPID
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
diff --git a/arch/powerpc/Kbuild b/arch/powerpc/Kbuild
new file mode 100644
index 000000000000..1625a06802ca
--- /dev/null
+++ b/arch/powerpc/Kbuild
@@ -0,0 +1,16 @@
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
+obj-y += kernel/
+obj-y += mm/
+obj-y += lib/
+obj-y += sysdev/
+obj-y += platforms/
+obj-y += math-emu/
+obj-y += crypto/
+obj-y += net/
+
+obj-$(CONFIG_XMON) += xmon/
+obj-$(CONFIG_KVM) += kvm/
+
+obj-$(CONFIG_PERF_EVENTS) += perf/
+obj-$(CONFIG_KEXEC_FILE) += purgatory/
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index a80669209155..e84943d24e5c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -137,7 +137,7 @@ config PPC
select ARCH_HAS_PMEM_API if PPC64
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_MEMBARRIER_CALLBACKS
- select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
+ select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE && PPC64
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX if ((PPC_BOOK3S_64 || PPC32) && !RELOCATABLE && !HIBERNATION)
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -180,6 +180,8 @@ config PPC
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_CBPF_JIT if !PPC64
+ select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
+ select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_CONTEXT_TRACKING if PPC64
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
@@ -188,6 +190,7 @@ config PPC
select HAVE_EBPF_JIT if PPC64
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_ERROR_INJECTION
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_GCC_PLUGINS if GCC_VERSION >= 50200 # plugin support on gcc <= 5.1 is buggy on PPC
@@ -285,12 +288,10 @@ config ARCH_MAY_HAVE_PC_FDC
config PPC_UDBG_16550
bool
- default n
config GENERIC_TBSYNC
bool
default y if PPC32 && SMP
- default n
config AUDIT_ARCH
bool
@@ -309,13 +310,11 @@ config EPAPR_BOOT
bool
help
Used to allow a board to specify it wants an ePAPR compliant wrapper.
- default n
config DEFAULT_UIMAGE
bool
help
Used to allow a board to specify it wants a uImage built by default
- default n
config ARCH_HIBERNATION_POSSIBLE
bool
@@ -329,11 +328,9 @@ config ARCH_SUSPEND_POSSIBLE
config PPC_DCR_NATIVE
bool
- default n
config PPC_DCR_MMIO
bool
- default n
config PPC_DCR
bool
@@ -344,7 +341,6 @@ config PPC_OF_PLATFORM_PCI
bool
depends on PCI
depends on PPC64 # not supported on 32 bits yet
- default n
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
depends on PPC32 || PPC_BOOK3S_64
@@ -447,14 +443,12 @@ config PPC_TRANSACTIONAL_MEM
depends on SMP
select ALTIVEC
select VSX
- default n
---help---
Support user-mode Transactional Memory on POWERPC.
config LD_HEAD_STUB_CATCH
bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT
depends on PPC64
- default n
help
Very large kernels can cause linker branch stubs to be generated by
code in head_64.S, which moves the head text sections out of their
@@ -557,7 +551,6 @@ config RELOCATABLE
config RELOCATABLE_TEST
bool "Test relocatable kernel"
depends on (PPC64 && RELOCATABLE)
- default n
help
This runs the relocatable kernel at the address it was initially
loaded at, which tends to be non-zero and therefore test the
@@ -769,7 +762,6 @@ config PPC_SUBPAGE_PROT
config PPC_COPRO_BASE
bool
- default n
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
@@ -892,7 +884,6 @@ config PPC_INDIRECT_PCI
bool
depends on PCI
default y if 40x || 44x
- default n
config EISA
bool
@@ -989,7 +980,6 @@ source "drivers/pcmcia/Kconfig"
config HAS_RAPIDIO
bool
- default n
config RAPIDIO
tristate "RapidIO support"
@@ -1012,7 +1002,6 @@ endmenu
config NONSTATIC_KERNEL
bool
- default n
menu "Advanced setup"
depends on PPC32
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index fd63cd914a74..f4961fbcb48d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -2,7 +2,6 @@
config PPC_DISABLE_WERROR
bool "Don't build arch/powerpc code with -Werror"
- default n
help
This option tells the compiler NOT to build the code under
arch/powerpc with the -Werror flag (which means warnings
@@ -56,7 +55,6 @@ config PPC_EMULATED_STATS
config CODE_PATCHING_SELFTEST
bool "Run self-tests of the code-patching code"
depends on DEBUG_KERNEL
- default n
config JUMP_LABEL_FEATURE_CHECKS
bool "Enable use of jump label for cpu/mmu_has_feature()"
@@ -70,7 +68,6 @@ config JUMP_LABEL_FEATURE_CHECKS
config JUMP_LABEL_FEATURE_CHECK_DEBUG
bool "Do extra check on feature fixup calls"
depends on DEBUG_KERNEL && JUMP_LABEL_FEATURE_CHECKS
- default n
help
This tries to catch incorrect usage of cpu_has_feature() and
mmu_has_feature() in the code.
@@ -80,16 +77,13 @@ config JUMP_LABEL_FEATURE_CHECK_DEBUG
config FTR_FIXUP_SELFTEST
bool "Run self-tests of the feature-fixup code"
depends on DEBUG_KERNEL
- default n
config MSI_BITMAP_SELFTEST
bool "Run self-tests of the MSI bitmap code"
depends on DEBUG_KERNEL
- default n
config PPC_IRQ_SOFT_MASK_DEBUG
bool "Include extra checks for powerpc irq soft masking"
- default n
config XMON
bool "Include xmon kernel debugger"
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 11a1acba164a..17be664dafa2 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -112,6 +112,13 @@ KBUILD_LDFLAGS += -m elf$(BITS)$(LDEMULATION)
KBUILD_ARFLAGS += --target=elf$(BITS)-$(GNUTARGET)
endif
+cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard=tls
+ifdef CONFIG_PPC64
+cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r13
+else
+cflags-$(CONFIG_STACKPROTECTOR) += -mstack-protector-guard-reg=r2
+endif
+
LDFLAGS_vmlinux-y := -Bstatic
LDFLAGS_vmlinux-$(CONFIG_RELOCATABLE) := -pie
LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-y)
@@ -160,8 +167,17 @@ else
CFLAGS-$(CONFIG_GENERIC_CPU) += -mcpu=powerpc64
endif
+ifdef CONFIG_FUNCTION_TRACER
+CC_FLAGS_FTRACE := -pg
ifdef CONFIG_MPROFILE_KERNEL
- CC_FLAGS_FTRACE := -pg -mprofile-kernel
+CC_FLAGS_FTRACE += -mprofile-kernel
+endif
+# Work around gcc code-gen bugs with -pg / -fno-omit-frame-pointer in gcc <= 4.8
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=44199
+# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=52828
+ifneq ($(cc-name),clang)
+CC_FLAGS_FTRACE += $(call cc-ifversion, -lt, 0409, -mno-sched-epilog)
+endif
endif
CFLAGS-$(CONFIG_TARGET_CPU_BOOL) += $(call cc-option,-mcpu=$(CONFIG_TARGET_CPU))
@@ -229,16 +245,15 @@ ifdef CONFIG_6xx
KBUILD_CFLAGS += -mcpu=powerpc
endif
-# Work around a gcc code-gen bug with -fno-omit-frame-pointer.
-ifdef CONFIG_FUNCTION_TRACER
-KBUILD_CFLAGS += -mno-sched-epilog
-endif
-
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_E500) += -Wa,-me500
-cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+
+# When using '-many -mpower4', gas will first try to find a matching power4
+# mnemonic and, failing that, will accept any valid mnemonic that GAS knows
+# about. GCC passes -many to GAS when assembling; clang does not.
+cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
@@ -258,18 +273,8 @@ head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
head-$(CONFIG_ALTIVEC) += arch/powerpc/kernel/vector.o
head-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += arch/powerpc/kernel/prom_init.o
-core-y += arch/powerpc/kernel/ \
- arch/powerpc/mm/ \
- arch/powerpc/lib/ \
- arch/powerpc/sysdev/ \
- arch/powerpc/platforms/ \
- arch/powerpc/math-emu/ \
- arch/powerpc/crypto/ \
- arch/powerpc/net/
-core-$(CONFIG_XMON) += arch/powerpc/xmon/
-core-$(CONFIG_KVM) += arch/powerpc/kvm/
-core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
-core-$(CONFIG_KEXEC_FILE) += arch/powerpc/purgatory/
+# See arch/powerpc/Kbuild for content of core part of the kernel
+core-y += arch/powerpc/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
@@ -293,9 +298,6 @@ $(BOOT_TARGETS2): vmlinux
bootwrapper_install:
$(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-%.dtb: scripts
- $(Q)$(MAKE) $(build)=$(boot) $(patsubst %,$(boot)/%,$@)
-
# Used to create 'merged defconfigs'
# To use it $(call) it with the first argument as the base defconfig
# and the second argument as a space separated list of .config files to merge,
@@ -400,40 +402,20 @@ archclean:
archprepare: checkbin
-# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
-# to stdout and these checks are run even on install targets.
-TOUT := .tmp_gas_check
+ifdef CONFIG_STACKPROTECTOR
+prepare: stack_protector_prepare
+
+stack_protector_prepare: prepare0
+ifdef CONFIG_PPC64
+ $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "PACA_CANARY") print $$3;}' include/generated/asm-offsets.h))
+else
+ $(eval KBUILD_CFLAGS += -mstack-protector-guard-offset=$(shell awk '{if ($$2 == "TASK_CANARY") print $$3;}' include/generated/asm-offsets.h))
+endif
+endif
-# Check gcc and binutils versions:
-# - gcc-3.4 and binutils-2.14 are a fatal combination
-# - Require gcc 4.0 or above on 64-bit
-# - gcc-4.2.0 has issues compiling modules on 64-bit
+# Check toolchain versions:
+# - gcc-4.6 is the minimum kernel-wide version, so no separate check is required.
checkbin:
- @if test "$(cc-name)" != "clang" \
- && test "$(cc-version)" = "0304" ; then \
- if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
- echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
- echo 'correctly with gcc-3.4 and your version of binutils.'; \
- echo '*** Please upgrade your binutils or downgrade your gcc'; \
- false; \
- fi ; \
- fi
- @if test "$(cc-name)" != "clang" \
- && test "$(cc-version)" -lt "0400" \
- && test "x${CONFIG_PPC64}" = "xy" ; then \
- echo -n "Sorry, GCC v4.0 or above is required to build " ; \
- echo "the 64-bit powerpc kernel." ; \
- false ; \
- fi
- @if test "$(cc-name)" != "clang" \
- && test "$(cc-fullversion)" = "040200" \
- && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
- echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
- echo 'kernel with modules enabled.' ; \
- echo -n '*** Please use a different GCC version or ' ; \
- echo 'disable kernel modules' ; \
- false ; \
- fi
@if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
&& $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
@@ -441,7 +423,3 @@ checkbin:
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi
-
-
-CLEAN_FILES += $(TOUT)
-
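
The new stack_protector_prepare rule above depends on prepare0 because the guard offset is scraped out of include/generated/asm-offsets.h, whose entries have the shape "#define PACA_CANARY <offset> /* ... */"; awk's field 3 is the byte offset. A stand-alone sketch of the same extraction (the sample line and its value are made up for the demo):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* shape of a generated asm-offsets.h line; the value is made up */
            const char *line =
                    "#define PACA_CANARY 2104 /* offsetof(struct paca_struct, canary) */";
            char name[64];
            long offset;

            /* field 2 is the symbol, field 3 the byte offset -- exactly what
             * the awk invocation above selects */
            if (sscanf(line, "#define %63s %ld", name, &offset) == 2 &&
                !strcmp(name, "PACA_CANARY"))
                    printf("-mstack-protector-guard-offset=%ld\n", offset);
            return 0;
    }
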
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index f92d0530ceb1..32034a0cc554 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -44,4 +44,5 @@ fdt_sw.c
fdt_wip.c
libfdt.h
libfdt_internal.h
+autoconf.h
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 0fb96c26136f..39354365f54a 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -32,8 +32,8 @@ else
endif
BOOTCFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
- -fno-strict-aliasing -Os -msoft-float -pipe \
- -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
+ -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
+ -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
-D$(compress-y)
ifdef CONFIG_PPC64_BOOT_WRAPPER
@@ -197,9 +197,14 @@ $(obj)/empty.c:
$(obj)/zImage.coff.lds $(obj)/zImage.ps3.lds : $(obj)/%: $(srctree)/$(src)/%.S
$(Q)cp $< $@
+$(obj)/serial.c: $(obj)/autoconf.h
+
+$(obj)/autoconf.h: $(obj)/%: $(objtree)/include/generated/%
+ $(Q)cp $< $@
+
clean-files := $(zlib-) $(zlibheader-) $(zliblinuxheader-) \
$(zlib-decomp-) $(libfdt) $(libfdtheader) \
- empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
+ autoconf.h empty.c zImage.coff.lds zImage.ps3.lds zImage.lds
quiet_cmd_bootcc = BOOTCC $@
cmd_bootcc = $(BOOTCC) -Wp,-MD,$(depfile) $(BOOTCFLAGS) -c -o $@ $<
@@ -304,9 +309,9 @@ image-$(CONFIG_PPC_ADDER875) += cuImage.adder875-uboot \
dtbImage.adder875-redboot
# Board ports in arch/powerpc/platform/52xx/Kconfig
-image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200 lite5200.dtb
-image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200b lite5200b.dtb
-image-$(CONFIG_PPC_MEDIA5200) += cuImage.media5200 media5200.dtb
+image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200
+image-$(CONFIG_PPC_LITE5200) += cuImage.lite5200b
+image-$(CONFIG_PPC_MEDIA5200) += cuImage.media5200
# Board ports in arch/powerpc/platform/82xx/Kconfig
image-$(CONFIG_MPC8272_ADS) += cuImage.mpc8272ads
@@ -381,11 +386,11 @@ $(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperb
$(call if_changed,wrap,$(subst $(obj)/zImage.,,$@))
# dtbImage% - a dtbImage is a zImage with an embedded device tree blob
-$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
- $(call if_changed,wrap,$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+$(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/dts/%.dtb FORCE
+ $(call if_changed,wrap,$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb FORCE
- $(call if_changed,wrap,$*,,$(obj)/$*.dtb)
+$(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/dts/%.dtb FORCE
+ $(call if_changed,wrap,$*,,$(obj)/dts/$*.dtb)
# This cannot be in the root of $(src) as the zImage rule always adds a $(obj)
# prefix
@@ -395,36 +400,33 @@ $(obj)/vmlinux.strip: vmlinux
$(obj)/uImage: vmlinux $(wrapperbits) FORCE
$(call if_changed,wrap,uboot)
-$(obj)/uImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
-
-$(obj)/uImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,uboot-$*,,$(obj)/$*.dtb)
+$(obj)/uImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,uboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/cuImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+$(obj)/uImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,uboot-$*,,$(obj)/dts/$*.dtb)
-$(obj)/cuImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,cuboot-$*,,$(obj)/$*.dtb)
+$(obj)/cuImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,cuboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/simpleImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+$(obj)/cuImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,cuboot-$*,,$(obj)/dts/$*.dtb)
-$(obj)/simpleImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,simpleboot-$*,,$(obj)/$*.dtb)
+$(obj)/simpleImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,simpleboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz)
-$(obj)/treeImage.initrd.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb,$(obj)/ramdisk.image.gz)
+$(obj)/simpleImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,simpleboot-$*,,$(obj)/dts/$*.dtb)
-$(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) FORCE
- $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb)
+$(obj)/treeImage.initrd.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,treeboot-$*,,$(obj)/dts/$*.dtb,$(obj)/ramdisk.image.gz)
-# Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts FORCE
- $(call if_changed_dep,dtc)
+$(obj)/treeImage.%: vmlinux $(obj)/dts/%.dtb $(wrapperbits) FORCE
+ $(call if_changed,wrap,treeboot-$*,,$(obj)/dts/$*.dtb)
-$(obj)/%.dtb: $(src)/dts/fsl/%.dts FORCE
- $(call if_changed_dep,dtc)
+# Needed for the above targets to work with dts/fsl/ files
+$(obj)/dts/%.dtb: $(obj)/dts/fsl/%.dtb
+ @cp $< $@
# If there isn't a platform selected then just strip the vmlinux.
ifeq (,$(image-y))
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index dcf2f15e6797..32dfe6d083f3 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -47,8 +47,10 @@ p_end: .long _end
p_pstack: .long _platform_stack_top
#endif
- .weak _zimage_start
.globl _zimage_start
+ /* Clang appears to require the .weak directive to be after the symbol
+ * is defined. See https://bugs.llvm.org/show_bug.cgi?id=38921 */
+ .weak _zimage_start
_zimage_start:
.globl _zimage_start_lib
_zimage_start_lib:
diff --git a/arch/powerpc/boot/dts/Makefile b/arch/powerpc/boot/dts/Makefile
new file mode 100644
index 000000000000..fb335d05aae8
--- /dev/null
+++ b/arch/powerpc/boot/dts/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+subdir-y += fsl
+
+dtstree := $(srctree)/$(src)
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/powerpc/boot/dts/fsl/Makefile b/arch/powerpc/boot/dts/fsl/Makefile
new file mode 100644
index 000000000000..3bae982641e9
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+dtstree := $(srctree)/$(src)
+dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index 2a0c8b1bf147..2abc8e83b95e 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -5,6 +5,8 @@
#include <types.h>
#include <string.h>
+#define INT_MAX ((int)(~0U>>1))
+
#include "of.h"
typedef unsigned long uintptr_t;
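
The boot wrapper is built with -nostdinc (see BOOTCFLAGS in the boot Makefile above), so libfdt cannot obtain INT_MAX from <limits.h>; the expression supplies it for any two's-complement int width. A quick check of the identity:

    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            /* all value bits of unsigned, shifted right once == signed max */
            assert((int)(~0U >> 1) == INT_MAX);
            printf("%d\n", (int)(~0U >> 1));
            return 0;
    }
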
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c
index 0272570d02de..dfb199ef5b94 100644
--- a/arch/powerpc/boot/opal.c
+++ b/arch/powerpc/boot/opal.c
@@ -13,8 +13,6 @@
#include <libfdt.h>
#include "../include/asm/opal-api.h"
-#ifdef CONFIG_PPC64_BOOT_WRAPPER
-
/* Global OPAL struct used by opal-call.S */
struct opal {
u64 base;
@@ -101,9 +99,3 @@ int opal_console_init(void *devp, struct serial_console_data *scdp)
return 0;
}
-#else
-int opal_console_init(void *devp, struct serial_console_data *scdp)
-{
- return -1;
-}
-#endif /* __powerpc64__ */
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index 48e3743faedf..f045f8494bf9 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -18,6 +18,7 @@
#include "stdio.h"
#include "io.h"
#include "ops.h"
+#include "autoconf.h"
static int serial_open(void)
{
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig
index 67c39f4acede..f686cc1eac0b 100644
--- a/arch/powerpc/configs/g5_defconfig
+++ b/arch/powerpc/configs/g5_defconfig
@@ -262,3 +262,4 @@ CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig
index 59e47ec85336..f71eddafb02f 100644
--- a/arch/powerpc/configs/maple_defconfig
+++ b/arch/powerpc/configs/maple_defconfig
@@ -112,3 +112,4 @@ CONFIG_PPC_EARLY_DEBUG=y
CONFIG_CRYPTO_ECB=m
CONFIG_CRYPTO_PCBC=m
# CONFIG_CRYPTO_HW is not set
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/powernv_defconfig b/arch/powerpc/configs/powernv_defconfig
index 6ab34e60495f..ef2ef98d3f28 100644
--- a/arch/powerpc/configs/powernv_defconfig
+++ b/arch/powerpc/configs/powernv_defconfig
@@ -44,6 +44,9 @@ CONFIG_PPC_MEMTRACE=y
# CONFIG_PPC_PSERIES is not set
# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_IDLE=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
@@ -350,3 +353,4 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 5033e630afea..f2515674a1e2 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -40,6 +40,9 @@ CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
CONFIG_RTAS_FLASH=m
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
CONFIG_CPU_FREQ_PMAC64=y
CONFIG_HZ_100=y
CONFIG_BINFMT_MISC=m
@@ -365,3 +368,4 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 187e2f7c12c8..cf8d55f67272 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -171,3 +171,4 @@ CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_LZO=m
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index 0dd5cf7b566d..5e09a40cbcbf 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -325,3 +325,4 @@ CONFIG_VIRTUALIZATION=y
CONFIG_KVM_BOOK3S_64=m
CONFIG_KVM_BOOK3S_64_HV=m
CONFIG_VHOST_NET=m
+CONFIG_PRINTK_TIME=y
diff --git a/arch/powerpc/configs/skiroot_defconfig b/arch/powerpc/configs/skiroot_defconfig
index 6bd5e7261335..cfdd08897a06 100644
--- a/arch/powerpc/configs/skiroot_defconfig
+++ b/arch/powerpc/configs/skiroot_defconfig
@@ -3,20 +3,17 @@ CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_NR_CPUS=2048
CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_KERNEL_XZ=y
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
+# CONFIG_CPU_ISOLATION is not set
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=20
-CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_GZIP is not set
# CONFIG_RD_BZIP2 is not set
@@ -24,8 +21,14 @@ CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_HARDENED=y
CONFIG_JUMP_LABEL=y
CONFIG_STRICT_KERNEL_RWX=y
CONFIG_MODULES=y
@@ -35,7 +38,9 @@ CONFIG_MODULE_SIG_FORCE=y
CONFIG_MODULE_SIG_SHA512=y
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_PPC_VAS is not set
# CONFIG_PPC_PSERIES is not set
+# CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_HZ_100=y
@@ -48,8 +53,9 @@ CONFIG_NUMA=y
CONFIG_PPC_64K_PAGES=y
CONFIG_SCHED_SMT=y
CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=tty0 console=hvc0 powersave=off"
+CONFIG_CMDLINE="console=tty0 console=hvc0 ipr.fast_reboot=1 quiet"
# CONFIG_SECCOMP is not set
+# CONFIG_PPC_MEM_KEYS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -60,7 +66,6 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
CONFIG_DNS_RESOLVER=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -73,8 +78,10 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_VIRTIO_BLK=m
CONFIG_BLK_DEV_NVME=m
-CONFIG_EEPROM_AT24=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_EEPROM_AT24=m
# CONFIG_CXL is not set
+# CONFIG_OCXL is not set
CONFIG_BLK_DEV_SD=m
CONFIG_BLK_DEV_SR=m
CONFIG_BLK_DEV_SR_VENDOR=y
@@ -85,7 +92,6 @@ CONFIG_SCSI_FC_ATTRS=y
CONFIG_SCSI_CXGB3_ISCSI=m
CONFIG_SCSI_CXGB4_ISCSI=m
CONFIG_SCSI_BNX2_ISCSI=m
-CONFIG_BE2ISCSI=m
CONFIG_SCSI_AACRAID=m
CONFIG_MEGARAID_NEWGEN=y
CONFIG_MEGARAID_MM=m
@@ -102,7 +108,7 @@ CONFIG_SCSI_VIRTIO=m
CONFIG_SCSI_DH=y
CONFIG_SCSI_DH_ALUA=m
CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI=m
# CONFIG_ATA_SFF is not set
CONFIG_MD=y
CONFIG_BLK_DEV_MD=m
@@ -119,25 +125,72 @@ CONFIG_DM_SNAPSHOT=m
CONFIG_DM_MIRROR=m
CONFIG_DM_ZERO=m
CONFIG_DM_MULTIPATH=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
CONFIG_ACENIC=m
CONFIG_ACENIC_OMIT_TIGON_I=y
-CONFIG_TIGON3=y
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_TIGON3=m
CONFIG_BNX2X=m
-CONFIG_CHELSIO_T1=y
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+CONFIG_CHELSIO_T1=m
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
CONFIG_BE2NET=m
-CONFIG_S2IO=m
-CONFIG_E100=m
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
CONFIG_E1000=m
-CONFIG_E1000E=m
+CONFIG_IGB=m
CONFIG_IXGB=m
CONFIG_IXGBE=m
+CONFIG_I40E=m
+CONFIG_S2IO=m
+# CONFIG_NET_VENDOR_MARVELL is not set
CONFIG_MLX4_EN=m
+# CONFIG_MLX4_CORE_GEN2 is not set
CONFIG_MLX5_CORE=m
-CONFIG_MLX5_CORE_EN=y
+# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_MYRI10GE=m
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
CONFIG_QLGE=m
CONFIG_NETXEN_NIC=m
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
CONFIG_SFC=m
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+CONFIG_PHYLIB=y
# CONFIG_USB_NET_DRIVERS is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
@@ -149,39 +202,51 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_IPMI_HANDLER=y
CONFIG_IPMI_DEVICE_INTERFACE=y
CONFIG_IPMI_POWERNV=y
+CONFIG_IPMI_WATCHDOG=y
CONFIG_HW_RANDOM=y
+CONFIG_TCG_TPM=y
CONFIG_TCG_TIS_I2C_NUVOTON=y
+CONFIG_I2C=y
# CONFIG_I2C_COMPAT is not set
CONFIG_I2C_CHARDEV=y
# CONFIG_I2C_HELPER_AUTO is not set
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
+CONFIG_I2C_ALGOBIT=y
+CONFIG_I2C_OPAL=m
+CONFIG_PPS=y
+CONFIG_SENSORS_IBMPOWERNV=m
+CONFIG_DRM=m
CONFIG_DRM_AST=m
+CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_OF=y
-CONFIG_FB_MATROX=y
-CONFIG_FB_MATROX_MILLENIUM=y
-CONFIG_FB_MATROX_MYSTIQUE=y
-CONFIG_FB_MATROX_G=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-# CONFIG_BACKLIGHT_GENERIC is not set
# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_HID_GENERIC=m
+CONFIG_HID_A4TECH=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_ITE=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_EHCI_HCD=y
+CONFIG_USB=m
+CONFIG_USB_XHCI_HCD=m
+CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_HCD_PPC_OF is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_OPAL=m
CONFIG_RTC_DRV_GENERIC=m
CONFIG_VIRT_DRIVERS=y
-CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT4_FS=m
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -195,10 +260,9 @@ CONFIG_UDF_FS=m
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS=y
CONFIG_NLS_DEFAULT="utf8"
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ASCII=y
@@ -207,26 +271,24 @@ CONFIG_NLS_UTF8=y
CONFIG_CRC16=y
CONFIG_CRC_ITU_T=y
CONFIG_LIBCRC32C=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_HARDLOCKUP_DETECTOR=y
CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
-CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
CONFIG_WQ_WATCHDOG=y
-CONFIG_SCHEDSTATS=y
+# CONFIG_SCHED_DEBUG is not set
# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
-CONFIG_SECURITY=y
-CONFIG_IMA=y
-CONFIG_EVM=y
+CONFIG_ENCRYPTED_KEYS=y
# CONFIG_CRYPTO_ECHAINIV is not set
-CONFIG_CRYPTO_ECB=y
-CONFIG_CRYPTO_CMAC=y
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_ARC4=y
-CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/include/asm/accounting.h b/arch/powerpc/include/asm/accounting.h
index 3abcf98ed2e0..c607c5d835cc 100644
--- a/arch/powerpc/include/asm/accounting.h
+++ b/arch/powerpc/include/asm/accounting.h
@@ -15,8 +15,10 @@ struct cpu_accounting_data {
/* Accumulated cputime values to flush on ticks*/
unsigned long utime;
unsigned long stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
unsigned long utime_scaled;
unsigned long stime_scaled;
+#endif
unsigned long gtime;
unsigned long hardirq_time;
unsigned long softirq_time;
@@ -25,8 +27,10 @@ struct cpu_accounting_data {
/* Internal counters */
unsigned long starttime; /* TB value snapshot */
unsigned long starttime_user; /* TB value on exit to usermode */
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
unsigned long startspurr; /* SPURR value snapshot */
unsigned long utime_sspurr; /* ->user_time when ->startspurr set */
+#endif
};
#endif
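
With utime_scaled/stime_scaled (and startspurr/utime_sspurr) now guarded, any reader of
these fields has to be compiled out on configs without CONFIG_ARCH_HAS_SCALED_CPUTIME.
A minimal sketch of the pattern callers would follow; the accessor name is hypothetical,
not part of the patch:

/* Hypothetical accessor: fall back to the unscaled value when the
 * platform does not maintain SPURR-based scaled accounting. */
static inline unsigned long acct_get_utime_scaled(struct cpu_accounting_data *acct)
{
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
	return acct->utime_scaled;
#else
	return acct->utime;
#endif
}
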
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 1f4691ce4126..ec691d489656 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -63,7 +63,6 @@ void program_check_exception(struct pt_regs *regs);
void alignment_exception(struct pt_regs *regs);
void slb_miss_bad_addr(struct pt_regs *regs);
void StackOverflow(struct pt_regs *regs);
-void nonrecoverable_exception(struct pt_regs *regs);
void kernel_fp_unavailable_exception(struct pt_regs *regs);
void altivec_unavailable_exception(struct pt_regs *regs);
void vsx_unavailable_exception(struct pt_regs *regs);
@@ -78,6 +77,8 @@ void kernel_bad_stack(struct pt_regs *regs);
void system_reset_exception(struct pt_regs *regs);
void machine_check_exception(struct pt_regs *regs);
void emulation_assist_interrupt(struct pt_regs *regs);
+long do_slb_fault(struct pt_regs *regs, unsigned long ea);
+void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err);
/* signals, syscalls and interrupts */
long sys_swapcontext(struct ucontext __user *old_ctx,
@@ -150,4 +151,25 @@ extern s32 patch__memset_nocache, patch__memcpy_nocache;
extern long flush_count_cache;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr, bool preserve_nv);
+#else
+static inline void kvmppc_save_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
+ bool preserve_nv) { }
+#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+
+void kvmhv_save_host_pmu(void);
+void kvmhv_load_host_pmu(void);
+void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
+void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
+
+int __kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu);
+
+long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
+long kvmppc_h_set_xdabr(struct kvm_vcpu *vcpu, unsigned long dabr,
+ unsigned long dabrx);
+
#endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 751cf931bb3f..e61dd3ae5bc0 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -8,7 +8,97 @@
#include <asm/book3s/32/hash.h>
/* And here we include common definitions */
-#include <asm/pte-common.h>
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX 0
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW)
+
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+
+#ifndef __ASSEMBLY__
+
+static inline bool pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * value as PAGE_SHIFT here (ie, naturally aligned).
+ * Platforms that don't use PAGE_SHIFT pre-define the value, so we don't override it here.
+ */
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#ifdef CONFIG_PTE_64BIT
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#else
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
+ _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled, or when
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+
+/*
+ * Permission masks used to generate the __P and __S table.
+ *
+ * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
+ *
+ * Write permissions imply read permissions for now.
+ */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER)
+
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
#define PTE_INDEX_SIZE PTE_SHIFT
#define PMD_INDEX_SIZE 0
@@ -219,7 +309,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+ pte_update(ptep, _PAGE_RW, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -234,10 +324,9 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
int psize)
{
unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- unsigned long clr = ~pte_val(entry) & _PAGE_RO;
+ (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
- pte_update(ptep, clr, set);
+ pte_update(ptep, 0, set);
flush_tlb_page(vma, address);
}
@@ -292,7 +381,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte) { return !!(pte_val(pte) & _PAGE_RW);}
@@ -301,13 +390,28 @@ static inline int pte_dirty(pte_t pte) { return !!(pte_val(pte) & _PAGE_DIRTY);
static inline int pte_young(pte_t pte) { return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte) { return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte) { return true; }
static inline int pte_present(pte_t pte)
{
return pte_val(pte) & _PAGE_PRESENT;
}
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+static inline bool pte_hashpte(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_HASHPTE);
+}
+
+static inline bool pte_ci(pte_t pte)
+{
+ return !!(pte_val(pte) & _PAGE_NO_CACHE);
+}
+
/*
* We only find page table entry in the last level
* Hence no need for other accessors
@@ -315,17 +419,14 @@ static inline int pte_present(pte_t pte)
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
- unsigned long pteval = pte_val(pte);
/*
* A read-only access is controlled by _PAGE_USER bit.
* We have _PAGE_READ set for WRITE and EXECUTE
*/
- unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_USER;
-
- if (write)
- need_pte_bits |= _PAGE_WRITE;
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
+ return false;
- if ((pteval & need_pte_bits) != need_pte_bits)
+ if (write && !pte_write(pte))
return false;
return true;
@@ -354,6 +455,11 @@ static inline pte_t pte_wrprotect(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_RW);
}
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return pte;
+}
+
static inline pte_t pte_mkclean(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_DIRTY);
@@ -364,6 +470,16 @@ static inline pte_t pte_mkold(pte_t pte)
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+ return pte;
+}
+
static inline pte_t pte_mkwrite(pte_t pte)
{
return __pte(pte_val(pte) | _PAGE_RW);
@@ -389,6 +505,16 @@ static inline pte_t pte_mkhuge(pte_t pte)
return pte;
}
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_USER);
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_USER);
+}
+
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
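
Several hunks above follow the same flags-to-pgprot_t conversion: map_kernel_page() now
takes a typed pgprot_t, so callers pass the permission constants directly instead of
pgprot_val() of them. A hedged before/after sketch; example_map_device_page() is a
hypothetical caller, not code from the patch:

/* Hypothetical caller mapping one non-cached, guarded device page. */
static int example_map_device_page(unsigned long va, phys_addr_t pa)
{
	/* old: map_kernel_page(va, pa, pgprot_val(PAGE_KERNEL_NCG)); */
	return map_kernel_page(va, pa, PAGE_KERNEL_NCG);
}
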
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 9a3798660cef..15bc16b1dc9c 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -66,7 +66,7 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
 * if it is not a pte and has the hugepd shift mask
 * set, then it is a hugepd directory pointer
*/
- if (!(hpdval & _PAGE_PTE) &&
+ if (!(hpdval & _PAGE_PTE) && (hpdval & _PAGE_PRESENT) &&
((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index d52a51b2ce7b..247aff9cc6ba 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -18,6 +18,11 @@
#include <asm/book3s/64/hash-4k.h>
#endif
+/* Bits to set in a PMD/PUD/PGD entry valid bit */
+#define HASH_PMD_VAL_BITS (0x8000000000000000UL)
+#define HASH_PUD_VAL_BITS (0x8000000000000000UL)
+#define HASH_PGD_VAL_BITS (0x8000000000000000UL)
+
/*
* Size of EA range mapped by our pagetables.
*/
@@ -196,8 +201,7 @@ static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-extern int hash__map_kernel_page(unsigned long ea, unsigned long pa,
- unsigned long flags);
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/book3s/64/hugetlb.h b/arch/powerpc/include/asm/book3s/64/hugetlb.h
index 50888388a359..5b0177733994 100644
--- a/arch/powerpc/include/asm/book3s/64/hugetlb.h
+++ b/arch/powerpc/include/asm/book3s/64/hugetlb.h
@@ -39,4 +39,7 @@ static inline bool gigantic_page_supported(void)
}
#endif
+/* hugepd entry valid bit */
+#define HUGEPD_VAL_BITS (0x8000000000000000UL)
+
#endif
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index b3520b549cba..12e522807f9f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -30,7 +30,7 @@
* SLB
*/
-#define SLB_NUM_BOLTED 3
+#define SLB_NUM_BOLTED 2
#define SLB_CACHE_ENTRIES 8
#define SLB_MIN_SIZE 32
@@ -203,6 +203,18 @@ static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
BUG();
}
+static inline unsigned int ap_to_shift(unsigned long ap)
+{
+ int psize;
+
+ for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
+ if (mmu_psize_defs[psize].ap == ap)
+ return mmu_psize_defs[psize].shift;
+ }
+
+ return -1;
+}
+
static inline unsigned long get_sllp_encoding(int psize)
{
unsigned long sllp;
@@ -487,6 +499,8 @@ int htab_remove_mapping(unsigned long vstart, unsigned long vend,
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
+extern void hash__setup_new_exec(void);
+
#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
@@ -495,11 +509,18 @@ static inline void hpte_init_pseries(void) { }
extern void hpte_init_native(void);
+struct slb_entry {
+ u64 esid;
+ u64 vsid;
+};
+
extern void slb_initialize(void);
-extern void slb_flush_and_rebolt(void);
+void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
+void slb_save_contents(struct slb_entry *slb_ptr);
+void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
@@ -512,13 +533,9 @@ extern void slb_set_size(u16 size);
* from mmu context id and effective segment id of the address.
*
* For user processes max context id is limited to MAX_USER_CONTEXT.
-
- * For kernel space, we use context ids 1-4 to map addresses as below:
- * NOTE: each context only support 64TB now.
- * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ]
- * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ]
- * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ]
- * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ]
+ * See get_user_context() for more details.
+ *
+ * For kernel space, see get_kernel_context().
*
* The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash:
@@ -559,6 +576,21 @@ extern void slb_set_size(u16 size);
#define ESID_BITS_1T_MASK ((1 << ESID_BITS_1T) - 1)
/*
+ * Certain configs support MAX_PHYSMEM greater than 512TB, so we need
+ * more than one context for the linear mapping of the kernel.
+ * For vmalloc and memmap, we use just one context with 512TB. With a 64 byte
+ * struct page size, we need only 32TB of memmap for 2PB (51 bits, MAX_PHYSMEM_BITS).
+ */
+#if (MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
+#define MAX_KERNEL_CTX_CNT (1UL << (MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
+#else
+#define MAX_KERNEL_CTX_CNT 1
+#endif
+
+#define MAX_VMALLOC_CTX_CNT 1
+#define MAX_MEMMAP_CTX_CNT 1
+
+/*
* 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
* available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
@@ -568,12 +600,13 @@ extern void slb_set_size(u16 size);
* We also need to avoid the last segment of the last context, because that
* would give a protovsid of 0x1fffffffff. That will result in a VSID 0
* because of the modulo operation in vsid scramble.
+ *
+ * We add one extra context to MIN_USER_CONTEXT so that we can map the
+ * kernel regions easily; the +1 covers the unused 0xe region.
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 2)
-#define MIN_USER_CONTEXT (5)
-
-/* Would be nice to use KERNEL_REGION_ID here */
-#define KERNEL_REGION_CONTEXT_OFFSET (0xc - 1)
+#define MIN_USER_CONTEXT (MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
+ MAX_MEMMAP_CTX_CNT + 2)
/*
* For platforms that support on 65bit VA we limit the context bits
@@ -734,6 +767,39 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
}
/*
+ * For kernel space, we use context ids as below.
+ * The range is 512TB per context.
+ *
+ * 0x00001 - [ 0xc000000000000000 - 0xc001ffffffffffff ]
+ * 0x00002 - [ 0xc002000000000000 - 0xc003ffffffffffff ]
+ * 0x00003 - [ 0xc004000000000000 - 0xc005ffffffffffff ]
+ * 0x00004 - [ 0xc006000000000000 - 0xc007ffffffffffff ]
+ *
+ * 0x00005 - [ 0xd000000000000000 - 0xd001ffffffffffff ]
+ * 0x00006 - Not used - can map the 0xe000000000000000 range.
+ * 0x00007 - [ 0xf000000000000000 - 0xf001ffffffffffff ]
+ *
+ * So, outside the linear mapping, the context follows from the region
+ * (top nibble): ctx = (region_id - 0xc) + MAX_KERNEL_CTX_CNT.
+ */
+static inline unsigned long get_kernel_context(unsigned long ea)
+{
+ unsigned long region_id = REGION_ID(ea);
+ unsigned long ctx;
+ /*
+ * For the linear mapping we support multiple contexts
+ */
+ if (region_id == KERNEL_REGION_ID) {
+ /*
+ * We have already verified that ea is within the address limit.
+ */
+ ctx = 1 + ((ea & ~REGION_MASK) >> MAX_EA_BITS_PER_CONTEXT);
+ } else
+ ctx = (region_id - 0xc) + MAX_KERNEL_CTX_CNT;
+ return ctx;
+}
+
+/*
* This is only valid for addresses >= PAGE_OFFSET
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
@@ -743,20 +809,7 @@ static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
if (!is_kernel_addr(ea))
return 0;
- /*
- * For kernel space, we use context ids 1-4 to map the address space as
- * below:
- *
- * 0x00001 - [ 0xc000000000000000 - 0xc0003fffffffffff ]
- * 0x00002 - [ 0xd000000000000000 - 0xd0003fffffffffff ]
- * 0x00003 - [ 0xe000000000000000 - 0xe0003fffffffffff ]
- * 0x00004 - [ 0xf000000000000000 - 0xf0003fffffffffff ]
- *
- * So we can compute the context from the region (top nibble) by
- * subtracting 11, or 0xc - 1.
- */
- context = (ea >> 60) - KERNEL_REGION_CONTEXT_OFFSET;
-
+ context = get_kernel_context(ea);
return get_vsid(context, ea, ssize);
}
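
The new get_kernel_context() table can be checked by hand. A worked example, assuming
MAX_EA_BITS_PER_CONTEXT == 49 (512TB per context) and MAX_PHYSMEM_BITS == 51, which
gives MAX_KERNEL_CTX_CNT == 4:

/*
 * Linear map (region 0xc):
 *   ea  = 0xc002000000000000
 *   ea & ~REGION_MASK = 0x0002000000000000
 *   (... >> 49)       = 1
 *   ctx = 1 + 1       = 2            (matches the 0x00002 row)
 *
 * Other kernel regions:
 *   ea  = 0xd000000000000000 (vmalloc)
 *   ctx = (0xd - 0xc) + MAX_KERNEL_CTX_CNT = 1 + 4 = 5
 *   ea  = 0xf000000000000000 (IO)
 *   ctx = (0xf - 0xc) + 4 = 7
 */
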
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index 9c8c669a6b6a..6328857f259f 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -208,7 +208,7 @@ extern void radix_init_pseries(void);
static inline void radix_init_pseries(void) { };
#endif
-static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
+static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@@ -223,7 +223,7 @@ static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
static inline unsigned long get_user_vsid(mm_context_t *ctx,
unsigned long ea, int ssize)
{
- unsigned long context = get_ea_context(ctx, ea);
+ unsigned long context = get_user_context(ctx, ea);
return get_vsid(context, ea, ssize);
}
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index d7ee249d6890..e3d4dd4ae2fa 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -10,6 +10,9 @@
*
* Defined in such a way that we can optimize away code block at build time
* if CONFIG_HUGETLB_PAGE=n.
+ *
+ * Returns true for pmd migration entries, THP, devmap and hugetlb,
+ * but the result is compile-time dependent on CONFIG_HUGETLB_PAGE.
*/
static inline int pmd_huge(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2a2486526d1f..cb5dd4078d42 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -14,10 +14,6 @@
*/
#define _PAGE_BIT_SWAP_TYPE 0
-#define _PAGE_NA 0
-#define _PAGE_RO 0
-#define _PAGE_USER 0
-
#define _PAGE_EXEC 0x00001 /* execute permission */
#define _PAGE_WRITE 0x00002 /* write access allowed */
#define _PAGE_READ 0x00004 /* read access allowed */
@@ -123,10 +119,6 @@
#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | \
_PAGE_RW | _PAGE_EXEC)
/*
- * No page size encoding in the linux PTE
- */
-#define _PAGE_PSIZE 0
-/*
* _PAGE_CHG_MASK masks of bits that are to be preserved across
* pgprot changes
*/
@@ -137,19 +129,12 @@
#define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \
H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4)
/*
- * Mask of bits returned by pte_pgprot()
- */
-#define PAGE_PROT_BITS (_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT | \
- H_PAGE_4K_PFN | _PAGE_PRIVILEGED | _PAGE_ACCESSED | \
- _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_EXEC | \
- _PAGE_SOFT_DIRTY | H_PTE_PKEY)
-/*
* We define 2 sets of base prot bits, one for basic pages (ie,
* cacheable kernel and user pages) and one for non cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled, or when
 * the processor might need it for DMA coherency.
*/
-#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE (_PAGE_BASE_NC)
/* Permission masks used to generate the __P and __S table,
@@ -159,8 +144,6 @@
* Write permissions imply read permissions for now (we could make write-only
* pages on BookE but we don't bother for now). Execute permission control is
* possible on platforms that define _PAGE_EXEC
- *
- * Note due to the way vm flags are laid out, the bits are XWR
*/
#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_PRIVILEGED)
#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_RW)
@@ -170,24 +153,6 @@
#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
/* Permission masks used for kernel mappings */
#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
@@ -519,7 +484,11 @@ static inline int pte_special(pte_t pte)
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_SPECIAL));
}
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_exec(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_EXEC));
+}
+
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
@@ -529,12 +498,12 @@ static inline bool pte_soft_dirty(pte_t pte)
static inline pte_t pte_mksoft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SOFT_DIRTY));
}
static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
@@ -555,7 +524,7 @@ static inline pte_t pte_mk_savedwrite(pte_t pte)
*/
VM_BUG_ON((pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_RWX | _PAGE_PRIVILEGED)) !=
cpu_to_be64(_PAGE_PRESENT | _PAGE_PRIVILEGED));
- return __pte(pte_val(pte) & ~_PAGE_PRIVILEGED);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}
#define pte_clear_savedwrite pte_clear_savedwrite
@@ -565,14 +534,14 @@ static inline pte_t pte_clear_savedwrite(pte_t pte)
* Used by KSM subsystem to make a protnone pte readonly.
*/
VM_BUG_ON(!pte_protnone(pte));
- return __pte(pte_val(pte) | _PAGE_PRIVILEGED);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
}
#else
#define pte_clear_savedwrite pte_clear_savedwrite
static inline pte_t pte_clear_savedwrite(pte_t pte)
{
VM_WARN_ON(1);
- return __pte(pte_val(pte) & ~_PAGE_WRITE);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -587,6 +556,11 @@ static inline int pte_present(pte_t pte)
return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID));
}
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return !!(pte_raw(pte) & cpu_to_be64(_PAGE_PRESENT));
+}
+
#ifdef CONFIG_PPC_MEM_KEYS
extern bool arch_pte_access_permitted(u64 pte, bool write, bool execute);
#else
@@ -596,25 +570,22 @@ static inline bool arch_pte_access_permitted(u64 pte, bool write, bool execute)
}
#endif /* CONFIG_PPC_MEM_KEYS */
+static inline bool pte_user(pte_t pte)
+{
+ return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
- unsigned long pteval = pte_val(pte);
- /* Also check for pte_user */
- unsigned long clear_pte_bits = _PAGE_PRIVILEGED;
/*
* _PAGE_READ is needed for any access and will be
* cleared for PROT_NONE
*/
- unsigned long need_pte_bits = _PAGE_PRESENT | _PAGE_READ;
-
- if (write)
- need_pte_bits |= _PAGE_WRITE;
-
- if ((pteval & need_pte_bits) != need_pte_bits)
+ if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
return false;
- if ((pteval & clear_pte_bits) == clear_pte_bits)
+ if (write && !pte_write(pte))
return false;
return arch_pte_access_permitted(pte_val(pte), write, 0);
@@ -643,17 +614,32 @@ static inline pte_t pte_wrprotect(pte_t pte)
{
if (unlikely(pte_savedwrite(pte)))
return pte_clear_savedwrite(pte);
- return __pte(pte_val(pte) & ~_PAGE_WRITE);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_WRITE));
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_EXEC));
}
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_DIRTY));
}
static inline pte_t pte_mkold(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_EXEC));
+}
+
+static inline pte_t pte_mkpte(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PTE));
}
static inline pte_t pte_mkwrite(pte_t pte)
@@ -661,22 +647,22 @@ static inline pte_t pte_mkwrite(pte_t pte)
/*
* write implies read, hence set both
*/
- return __pte(pte_val(pte) | _PAGE_RW);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_RW));
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
}
static inline pte_t pte_mkyoung(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_ACCESSED));
}
static inline pte_t pte_mkspecial(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL));
}
static inline pte_t pte_mkhuge(pte_t pte)
@@ -686,7 +672,17 @@ static inline pte_t pte_mkhuge(pte_t pte)
static inline pte_t pte_mkdevmap(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL|_PAGE_DEVMAP);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SPECIAL | _PAGE_DEVMAP));
+}
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_PRIVILEGED));
+}
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_PRIVILEGED));
}
/*
@@ -705,12 +701,8 @@ static inline int pte_devmap(pte_t pte)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
 /* FIXME!! check whether this needs to be a conditional */
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
-}
-
-static inline bool pte_user(pte_t pte)
-{
- return !(pte_raw(pte) & cpu_to_be64(_PAGE_PRIVILEGED));
+ return __pte_raw((pte_raw(pte) & cpu_to_be64(_PAGE_CHG_MASK)) |
+ cpu_to_be64(pgprot_val(newprot)));
}
/* Encode and de-code a swap entry */
@@ -741,6 +733,8 @@ static inline bool pte_user(pte_t pte)
*/
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE)
+#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
+#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY (1UL << (SWP_TYPE_BITS + _PAGE_BIT_SWAP_TYPE))
@@ -751,7 +745,7 @@ static inline bool pte_user(pte_t pte)
#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SWP_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_SWP_SOFT_DIRTY));
}
static inline bool pte_swp_soft_dirty(pte_t pte)
@@ -761,7 +755,7 @@ static inline bool pte_swp_soft_dirty(pte_t pte)
static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
- return __pte(pte_val(pte) & ~_PAGE_SWP_SOFT_DIRTY);
+ return __pte_raw(pte_raw(pte) & cpu_to_be64(~_PAGE_SWP_SOFT_DIRTY));
}
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
@@ -850,10 +844,10 @@ static inline pgprot_t pgprot_writecombine(pgprot_t prot)
*/
static inline bool pte_ci(pte_t pte)
{
- unsigned long pte_v = pte_val(pte);
+ __be64 pte_v = pte_raw(pte);
- if (((pte_v & _PAGE_CACHE_CTL) == _PAGE_TOLERANT) ||
- ((pte_v & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT))
+ if (((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_TOLERANT)) ||
+ ((pte_v & cpu_to_be64(_PAGE_CACHE_CTL)) == cpu_to_be64(_PAGE_NON_IDEMPOTENT)))
return true;
return false;
}
@@ -875,8 +869,16 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+ /*
+ * A pmd is considered present if _PAGE_PRESENT is set.
+ * We also need to treat as present a pmd that has been marked
+ * invalid during a THP split; hence we also check _PAGE_INVALID
+ * when _PAGE_PRESENT is clear.
+ */
+ if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID))
+ return true;
- return !pmd_none(pmd);
+ return false;
}
static inline int pmd_bad(pmd_t pmd)
@@ -903,7 +905,7 @@ static inline int pud_none(pud_t pud)
static inline int pud_present(pud_t pud)
{
- return !pud_none(pud);
+ return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT));
}
extern struct page *pud_page(pud_t pud);
@@ -950,7 +952,7 @@ static inline int pgd_none(pgd_t pgd)
static inline int pgd_present(pgd_t pgd)
{
- return !pgd_none(pgd);
+ return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT));
}
static inline pte_t pgd_pte(pgd_t pgd)
@@ -1020,17 +1022,16 @@ extern struct page *pgd_page(pgd_t pgd);
#define pgd_ERROR(e) \
pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-static inline int map_kernel_page(unsigned long ea, unsigned long pa,
- unsigned long flags)
+static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
if (radix_enabled()) {
#if defined(CONFIG_PPC_RADIX_MMU) && defined(DEBUG_VM)
unsigned long page_size = 1 << mmu_psize_defs[mmu_io_psize].shift;
WARN((page_size != PAGE_SIZE), "I/O page size != PAGE_SIZE");
#endif
- return radix__map_kernel_page(ea, pa, __pgprot(flags), PAGE_SIZE);
+ return radix__map_kernel_page(ea, pa, prot, PAGE_SIZE);
}
- return hash__map_kernel_page(ea, pa, flags);
+ return hash__map_kernel_page(ea, pa, prot);
}
static inline int __meminit vmemmap_create_mapping(unsigned long start,
@@ -1082,6 +1083,12 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
#define pmd_soft_dirty(pmd) pte_soft_dirty(pmd_pte(pmd))
#define pmd_mksoft_dirty(pmd) pte_pmd(pte_mksoft_dirty(pmd_pte(pmd)))
#define pmd_clear_soft_dirty(pmd) pte_pmd(pte_clear_soft_dirty(pmd_pte(pmd)))
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define pmd_swp_mksoft_dirty(pmd) pte_pmd(pte_swp_mksoft_dirty(pmd_pte(pmd)))
+#define pmd_swp_soft_dirty(pmd) pte_swp_soft_dirty(pmd_pte(pmd))
+#define pmd_swp_clear_soft_dirty(pmd) pte_pmd(pte_swp_clear_soft_dirty(pmd_pte(pmd)))
+#endif
#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
#ifdef CONFIG_NUMA_BALANCING
@@ -1127,6 +1134,10 @@ pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp,
return hash__pmd_hugepage_update(mm, addr, pmdp, clr, set);
}
+/*
+ * Returns true for pmd migration entries, THP, devmap and hugetlb,
+ * but the result is compile-time dependent on the THP config.
+ */
static inline int pmd_large(pmd_t pmd)
{
return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE));
@@ -1161,8 +1172,22 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED);
}
+/*
+ * Only returns true for a THP. False for pmd migration entry.
+ * We also need to return true when we come across a pmd that is
+ * in the middle of a THP split. While splitting a THP, we mark the pmd
+ * invalid (pmdp_invalidate()) before we set it to the pte page
+ * address; a pmd_trans_huge() check against the pmd entry during that
+ * window should return true.
+ * We should not call this on a hugetlb entry; check for a HugeTLB
+ * entry using vma->vm_flags instead.
+ * The page table walk rules are explained in Documentation/vm/transhuge.rst.
+ */
static inline int pmd_trans_huge(pmd_t pmd)
{
+ if (!pmd_present(pmd))
+ return false;
+
if (radix_enabled())
return radix__pmd_trans_huge(pmd);
return hash__pmd_trans_huge(pmd);
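
Many of the accessor changes in this file swap __pte(pte_val(...)) for
__pte_raw(pte_raw(...)). The two are equivalent, but on little-endian kernels the PTE is
stored big-endian, so the pte_val() form byte-swaps the entry twice while the raw form
folds the swap into the constant. A sketch of the difference; the _slow/_fast names are
illustrative only, and the real pte_mkdirty() also sets _PAGE_SOFT_DIRTY:

static inline pte_t pte_mkdirty_slow(pte_t pte)
{
	/* LE: swap PTE to CPU order, OR, swap back on store */
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkdirty_fast(pte_t pte)
{
	/* cpu_to_be64(_PAGE_DIRTY) is a build-time constant, so the OR
	 * happens directly on the in-memory (__be64) representation */
	return __pte_raw(pte_raw(pte) | cpu_to_be64(_PAGE_DIRTY));
}
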
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
index 1154a6dc6d26..671316f9e95d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
@@ -53,6 +53,7 @@ extern void radix__flush_tlb_lpid_page(unsigned int lpid,
unsigned long addr,
unsigned long page_size);
extern void radix__flush_pwc_lpid(unsigned int lpid);
+extern void radix__flush_tlb_lpid(unsigned int lpid);
extern void radix__local_flush_tlb_lpid(unsigned int lpid);
extern void radix__local_flush_tlb_lpid_guest(unsigned int lpid);
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 85c8af2bb272..74d0db511099 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -8,6 +8,8 @@
#include <linux/types.h>
#include <linux/sched.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#ifdef __BIG_ENDIAN__
#define COMPAT_UTS_MACHINE "ppc\0\0"
@@ -15,34 +17,18 @@
#define COMPAT_UTS_MACHINE "ppcle\0\0"
#endif
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u32 __compat_uid_t;
typedef u32 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u32 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u32 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
@@ -55,11 +41,11 @@ struct compat_stat {
compat_off_t st_size;
compat_off_t st_blksize;
compat_off_t st_blocks;
- compat_time_t st_atime;
+ old_time32_t st_atime;
u32 st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
u32 st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
u32 st_ctime_nsec;
u32 __unused4[2];
};
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index d71a90924f3b..deb99fd6e060 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -23,11 +23,13 @@
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
+extern bool has_big_cores;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core 1
#define threads_per_subcore 1
#define threads_shift 0
+#define has_big_cores 0
#define threads_core_mask (*get_cpu_mask(0))
#endif
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 133672744b2e..ae73dc8da2d4 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -61,7 +61,6 @@ static inline void arch_vtime_task_switch(struct task_struct *prev)
struct cpu_accounting_data *acct0 = get_accounting(prev);
acct->starttime = acct0->starttime;
- acct->startspurr = acct0->startspurr;
}
#endif
diff --git a/arch/powerpc/include/asm/drmem.h b/arch/powerpc/include/asm/drmem.h
index ce242b9ea8c6..7c1d8e74b25d 100644
--- a/arch/powerpc/include/asm/drmem.h
+++ b/arch/powerpc/include/asm/drmem.h
@@ -99,4 +99,9 @@ void __init walk_drmem_lmbs_early(unsigned long node,
void (*func)(struct drmem_lmb *, const __be32 **));
#endif
+static inline void invalidate_lmb_associativity_index(struct drmem_lmb *lmb)
+{
+ lmb->aa_index = 0xffffffff;
+}
+
#endif /* _ASM_POWERPC_LMB_H */
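
The helper stores an all-ones sentinel rather than zero because 0 can be a valid index
into the associativity lookup array. A hypothetical predicate matching the sentinel:

static inline bool drmem_lmb_aa_valid(const struct drmem_lmb *lmb)
{
	/* 0xffffffff is the sentinel written by
	 * invalidate_lmb_associativity_index() */
	return lmb->aa_index != 0xffffffff;
}
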
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 219637ea69a1..8b596d096ebe 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -43,7 +43,6 @@ struct pci_dn;
#define EEH_VALID_PE_ZERO 0x10 /* PE#0 is valid */
#define EEH_ENABLE_IO_FOR_LOG 0x20 /* Enable IO for log */
#define EEH_EARLY_DUMP_LOG 0x40 /* Dump log immediately */
-#define EEH_POSTPONED_PROBE 0x80 /* Powernv may postpone device probe */
/*
* Delay for PE reset, all in ms
@@ -99,13 +98,13 @@ struct eeh_pe {
atomic_t pass_dev_cnt; /* Count of passed through devs */
struct eeh_pe *parent; /* Parent PE */
void *data; /* PE auxillary data */
- struct list_head child_list; /* Link PE to the child list */
- struct list_head edevs; /* Link list of EEH devices */
- struct list_head child; /* Child PEs */
+ struct list_head child_list; /* List of PEs below this PE */
+ struct list_head child; /* Memb. child_list/eeh_phb_pe */
+ struct list_head edevs; /* List of eeh_dev in this PE */
};
#define eeh_pe_for_each_dev(pe, edev, tmp) \
- list_for_each_entry_safe(edev, tmp, &pe->edevs, list)
+ list_for_each_entry_safe(edev, tmp, &pe->edevs, entry)
#define eeh_for_each_pe(root, pe) \
for (pe = root; pe; pe = eeh_pe_next(pe, root))
@@ -142,13 +141,12 @@ struct eeh_dev {
int aer_cap; /* Saved AER capability */
int af_cap; /* Saved AF capability */
struct eeh_pe *pe; /* Associated PE */
- struct list_head list; /* Form link list in the PE */
- struct list_head rmv_list; /* Record the removed edevs */
+ struct list_head entry; /* Membership in eeh_pe.edevs */
+ struct list_head rmv_entry; /* Membership in rmv_list */
struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
bool in_error; /* Error flag for edev */
struct pci_dev *physfn; /* Associated SRIOV PF */
- struct pci_bus *bus; /* PCI bus for partial hotplug */
};
static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
@@ -207,9 +205,8 @@ struct eeh_ops {
void* (*probe)(struct pci_dn *pdn, void *data);
int (*set_option)(struct eeh_pe *pe, int option);
int (*get_pe_addr)(struct eeh_pe *pe);
- int (*get_state)(struct eeh_pe *pe, int *state);
+ int (*get_state)(struct eeh_pe *pe, int *delay);
int (*reset)(struct eeh_pe *pe, int option);
- int (*wait_state)(struct eeh_pe *pe, int max_wait);
int (*get_log)(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len);
int (*configure_bridge)(struct eeh_pe *pe);
int (*err_inject)(struct eeh_pe *pe, int type, int func,
@@ -243,11 +240,7 @@ static inline bool eeh_has_flag(int flag)
static inline bool eeh_enabled(void)
{
- if (eeh_has_flag(EEH_FORCE_DISABLED) ||
- !eeh_has_flag(EEH_ENABLED))
- return false;
-
- return true;
+ return eeh_has_flag(EEH_ENABLED) && !eeh_has_flag(EEH_FORCE_DISABLED);
}
static inline void eeh_serialize_lock(unsigned long *flags)
@@ -270,6 +263,7 @@ typedef void *(*eeh_edev_traverse_func)(struct eeh_dev *edev, void *flag);
typedef void *(*eeh_pe_traverse_func)(struct eeh_pe *pe, void *flag);
void eeh_set_pe_aux_size(int size);
int eeh_phb_pe_create(struct pci_controller *phb);
+int eeh_wait_state(struct eeh_pe *pe, int max_wait);
struct eeh_pe *eeh_phb_pe_get(struct pci_controller *phb);
struct eeh_pe *eeh_pe_next(struct eeh_pe *pe, struct eeh_pe *root);
struct eeh_pe *eeh_pe_get(struct pci_controller *phb,
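
With struct eeh_dev's membership field renamed from list to entry, the
eeh_pe_for_each_dev() macro keeps its caller-facing shape. A usage sketch; the function
name and message are illustrative:

static void example_dump_pe_devs(struct eeh_pe *pe)
{
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (edev->pdev)
			pr_info("EEH: %s is a member of this PE\n",
				pci_name(edev->pdev));
	}
}
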
diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h
new file mode 100644
index 000000000000..62fd24739852
--- /dev/null
+++ b/arch/powerpc/include/asm/error-injection.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _ASM_ERROR_INJECTION_H
+#define _ASM_ERROR_INJECTION_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <asm/ptrace.h>
+#include <asm-generic/error-injection.h>
+
+void override_function_with_return(struct pt_regs *regs);
+
+#endif /* _ASM_ERROR_INJECTION_H */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index a86feddddad0..3b4767ed3ec5 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -61,14 +61,6 @@
#define MAX_MCE_DEPTH 4
/*
- * EX_LR is only used in EXSLB and where it does not overlap with EX_DAR
- * EX_CCR similarly with DSISR, but being 4 byte registers there is a hole
- * in the save area so it's not necessary to overlap them. Could be used
- * for future savings though if another 4 byte register was to be saved.
- */
-#define EX_LR EX_DAR
-
-/*
* EX_R3 is only used by the bad_stack handler. bad_stack reloads and
* saves DAR from SPRN_DAR, and EX_DAR is not used. So EX_R3 can overlap
* with EX_DAR.
@@ -236,11 +228,10 @@
* PPR save/restore macros used in exceptions_64s.S
* Used for P7 or later processors
*/
-#define SAVE_PPR(area, ra, rb) \
+#define SAVE_PPR(area, ra) \
BEGIN_FTR_SECTION_NESTED(940) \
- ld ra,PACACURRENT(r13); \
- ld rb,area+EX_PPR(r13); /* Read PPR from paca */ \
- std rb,TASKTHREADPPR(ra); \
+ ld ra,area+EX_PPR(r13); /* Read PPR from paca */ \
+ std ra,_PPR(r1); \
END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
#define RESTORE_PPR_PACA(area, ra) \
@@ -508,7 +499,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
3: EXCEPTION_PROLOG_COMMON_1(); \
beq 4f; /* if from kernel mode */ \
ACCOUNT_CPU_USER_ENTRY(r13, r9, r10); \
- SAVE_PPR(area, r9, r10); \
+ SAVE_PPR(area, r9); \
4: EXCEPTION_PROLOG_COMMON_2(area) \
EXCEPTION_PROLOG_COMMON_3(n) \
ACCOUNT_STOLEN_TIME
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 7a051bd21f87..00bc42d95679 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,6 +52,8 @@
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#define FW_FEATURE_DRMEM_V2 ASM_CONST(0x0000000400000000)
#define FW_FEATURE_DRC_INFO ASM_CONST(0x0000000800000000)
+#define FW_FEATURE_BLOCK_REMOVE ASM_CONST(0x0000001000000000)
+#define FW_FEATURE_PAPR_SCM ASM_CONST(0x0000002000000000)
#ifndef __ASSEMBLY__
@@ -69,7 +71,8 @@ enum {
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN |
FW_FEATURE_HPT_RESIZE | FW_FEATURE_DRMEM_V2 |
- FW_FEATURE_DRC_INFO,
+ FW_FEATURE_DRC_INFO | FW_FEATURE_BLOCK_REMOVE |
+ FW_FEATURE_PAPR_SCM,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
FW_FEATURE_POWERNV_ALWAYS = 0,
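
Both new feature bits are negotiated like the rest of FW_FEATURE_PSERIES_POSSIBLE, so
runtime code guards on them with the existing firmware_has_feature() test. A hedged
sketch; the helper name and the fallback note are illustrative:

static bool example_use_block_remove(void)
{
	/* true only when the hypervisor advertised the hcall; callers
	 * would fall back to H_BULK_REMOVE otherwise */
	return firmware_has_feature(FW_FEATURE_BLOCK_REMOVE);
}
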
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 41cc15c14eee..b9fbed84ddca 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -72,7 +72,7 @@ enum fixed_addresses {
static inline void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
- map_kernel_page(fix_to_virt(idx), phys, pgprot_val(flags));
+ map_kernel_page(fix_to_virt(idx), phys, flags);
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index a0b17f9f1ea4..33a4fc891947 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -278,6 +278,7 @@
#define H_COP 0x304
#define H_GET_MPP_X 0x314
#define H_SET_MODE 0x31C
+#define H_BLOCK_REMOVE 0x328
#define H_CLEAR_HPT 0x358
#define H_REQUEST_VMC 0x360
#define H_RESIZE_HPT_PREPARE 0x36C
@@ -295,7 +296,15 @@
#define H_INT_ESB 0x3C8
#define H_INT_SYNC 0x3CC
#define H_INT_RESET 0x3D0
-#define MAX_HCALL_OPCODE H_INT_RESET
+#define H_SCM_READ_METADATA 0x3E4
+#define H_SCM_WRITE_METADATA 0x3E8
+#define H_SCM_BIND_MEM 0x3EC
+#define H_SCM_UNBIND_MEM 0x3F0
+#define H_SCM_QUERY_BLOCK_MEM_BINDING 0x3F4
+#define H_SCM_QUERY_LOGICAL_MEM_BINDING 0x3F8
+#define H_SCM_MEM_QUERY 0x3FC
+#define H_SCM_BLOCK_CLEAR 0x400
+#define MAX_HCALL_OPCODE H_SCM_BLOCK_CLEAR
/* H_VIOCTL functions */
#define H_GET_VIOA_DUMP_SIZE 0x01
@@ -322,6 +331,11 @@
#define H_GET_24X7_DATA 0xF07C
#define H_GET_PERF_COUNTER_INFO 0xF080
+/* Platform-specific hcalls used for nested HV KVM */
+#define H_SET_PARTITION_TABLE 0xF800
+#define H_ENTER_NESTED 0xF804
+#define H_TLB_INVALIDATE 0xF808
+
/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
#define H_SET_MODE_RESOURCE_SET_DAWR 2
@@ -461,6 +475,42 @@ struct h_cpu_char_result {
u64 behaviour;
};
+/* Register state for entering a nested guest with H_ENTER_NESTED */
+struct hv_guest_state {
+ u64 version; /* version of this structure layout */
+ u32 lpid;
+ u32 vcpu_token;
+ /* These registers are hypervisor privileged (at least for writing) */
+ u64 lpcr;
+ u64 pcr;
+ u64 amor;
+ u64 dpdes;
+ u64 hfscr;
+ s64 tb_offset;
+ u64 dawr0;
+ u64 dawrx0;
+ u64 ciabr;
+ u64 hdec_expiry;
+ u64 purr;
+ u64 spurr;
+ u64 ic;
+ u64 vtb;
+ u64 hdar;
+ u64 hdsisr;
+ u64 heir;
+ u64 asdr;
+ /* These are OS privileged but need to be set late in guest entry */
+ u64 srr0;
+ u64 srr1;
+ u64 sprg[4];
+ u64 pidr;
+ u64 cfar;
+ u64 ppr;
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION 1
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_HVCALL_H */
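
The version field exists so an L0 hypervisor can reject a hv_guest_state layout it does
not understand. A sketch of how an L1 might stamp the structure before H_ENTER_NESTED;
the function and the l1_lpid parameter are hypothetical:

static void example_fill_guest_state(struct hv_guest_state *hvs, u32 l1_lpid)
{
	memset(hvs, 0, sizeof(*hvs));
	hvs->version = HV_GUEST_STATE_VERSION;	/* layout version handshake */
	hvs->lpid = l1_lpid;			/* L1's id for the L2 guest */
	/* ... remaining register state copied from the vcpu ... */
}
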
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index e0331e754568..3ef40b703c4a 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -3,6 +3,9 @@
#ifdef __KERNEL__
#define ARCH_HAS_IOREMAP_WC
+#ifdef CONFIG_PPC32
+#define ARCH_HAS_IOREMAP_WT
+#endif
/*
* This program is free software; you can redistribute it and/or
@@ -108,25 +111,6 @@ extern bool isa_io_special;
#define IO_SET_SYNC_FLAG()
#endif
-/* gcc 4.0 and older doesn't have 'Z' constraint */
-#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_X(name, size, insn) \
-static inline u##size name(const volatile u##size __iomem *addr) \
-{ \
- u##size ret; \
- __asm__ __volatile__("sync;"#insn" %0,0,%1;twi 0,%0,0;isync" \
- : "=r" (ret) : "r" (addr), "m" (*addr) : "memory"); \
- return ret; \
-}
-
-#define DEF_MMIO_OUT_X(name, size, insn) \
-static inline void name(volatile u##size __iomem *addr, u##size val) \
-{ \
- __asm__ __volatile__("sync;"#insn" %1,0,%2" \
- : "=m" (*addr) : "r" (val), "r" (addr) : "memory"); \
- IO_SET_SYNC_FLAG(); \
-}
-#else /* newer gcc */
#define DEF_MMIO_IN_X(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
{ \
@@ -143,7 +127,6 @@ static inline void name(volatile u##size __iomem *addr, u##size val) \
: "=Z" (*addr) : "r" (val) : "memory"); \
IO_SET_SYNC_FLAG(); \
}
-#endif
#define DEF_MMIO_IN_D(name, size, insn) \
static inline u##size name(const volatile u##size __iomem *addr) \
@@ -746,6 +729,10 @@ static inline void iosync(void)
*
* * ioremap_wc enables write combining
*
+ * * ioremap_wt enables write through
+ *
+ * * ioremap_coherent maps coherent cached memory
+ *
* * iounmap undoes such a mapping and can be hooked
*
* * __ioremap_at (and the pending __iounmap_at) are low level functions to
@@ -767,6 +754,8 @@ extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size,
unsigned long flags);
extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size);
+void __iomem *ioremap_wt(phys_addr_t address, unsigned long size);
+void __iomem *ioremap_coherent(phys_addr_t address, unsigned long size);
#define ioremap_nocache(addr, size) ioremap((addr), (size))
#define ioremap_uc(addr, size) ioremap((addr), (size))
#define ioremap_cache(addr, size) \
@@ -777,12 +766,12 @@ extern void iounmap(volatile void __iomem *addr);
extern void __iomem *__ioremap(phys_addr_t, unsigned long size,
unsigned long flags);
extern void __iomem *__ioremap_caller(phys_addr_t, unsigned long size,
- unsigned long flags, void *caller);
+ pgprot_t prot, void *caller);
extern void __iounmap(volatile void __iomem *addr);
extern void __iomem * __ioremap_at(phys_addr_t pa, void *ea,
- unsigned long size, unsigned long flags);
+ unsigned long size, pgprot_t prot);
extern void __iounmap_at(void *ea, unsigned long size);
/*
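
ioremap_wt() and ioremap_coherent() slot into the table of ioremap variants documented
above. A hedged driver-style fragment; the helper name and the frame-buffer use case are
invented for illustration:

static void __iomem *example_map_framebuffer(phys_addr_t base, unsigned long size)
{
	/* write-through: reads can be served from the cache while writes
	 * go straight to the device, a typical frame-buffer trade-off */
	return ioremap_wt(base, size);
}
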
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 3d4b88cb8599..35db0cbc9222 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -126,7 +126,7 @@ struct iommu_table {
int it_nid;
};
-#define IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry) \
+#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
((tbl)->it_ops->useraddrptr((tbl), (entry), true))
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
index 9db24e77b9f4..a9e098a3b881 100644
--- a/arch/powerpc/include/asm/kgdb.h
+++ b/arch/powerpc/include/asm/kgdb.h
@@ -26,9 +26,12 @@
#define BREAK_INSTR_SIZE 4
#define BUFMAX ((NUMREGBYTES * 2) + 512)
#define OUTBUFMAX ((NUMREGBYTES * 2) + 512)
+
+#define BREAK_INSTR 0x7d821008 /* twge r2, r2 */
+
static inline void arch_kgdb_breakpoint(void)
{
- asm(".long 0x7d821008"); /* twge r2, r2 */
+ asm(stringify_in_c(.long BREAK_INSTR));
}
#define CACHE_FLUSH_IS_SAFE 1
#define DBG_MAX_REG_NUM 70
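
Lifting the opcode into BREAK_INSTR lets C code compare against it without repeating the
magic literal, while stringify_in_c() reuses the same constant inside the inline asm. An
illustrative check; the helper name is hypothetical:

static inline bool example_is_kgdb_break(u32 insn)
{
	return insn == BREAK_INSTR;	/* twge r2, r2 */
}
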
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index a790d5cf6ea3..1f321914676d 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -84,7 +84,6 @@
#define BOOK3S_INTERRUPT_INST_STORAGE 0x400
#define BOOK3S_INTERRUPT_INST_SEGMENT 0x480
#define BOOK3S_INTERRUPT_EXTERNAL 0x500
-#define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501
#define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502
#define BOOK3S_INTERRUPT_ALIGNMENT 0x600
#define BOOK3S_INTERRUPT_PROGRAM 0x700
@@ -134,8 +133,7 @@
#define BOOK3S_IRQPRIO_EXTERNAL 14
#define BOOK3S_IRQPRIO_DECREMENTER 15
#define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 16
-#define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 17
-#define BOOK3S_IRQPRIO_MAX 18
+#define BOOK3S_IRQPRIO_MAX 17
#define BOOK3S_HFLAG_DCBZ32 0x1
#define BOOK3S_HFLAG_SLB 0x2
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 83a9aa3cf689..09f8e9ba69bc 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -188,14 +188,37 @@ extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);
extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
struct kvm_vcpu *vcpu,
unsigned long ea, unsigned long dsisr);
+extern int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p);
+extern int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
struct kvmppc_pte *gpte, bool data, bool iswrite);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int lpid);
+extern bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable,
+ bool writing, unsigned long gpa,
+ unsigned int lpid);
+extern int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
+extern void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd,
+ unsigned int lpid);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
+extern void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
+ unsigned long gpa, unsigned int shift,
+ struct kvm_memory_slot *memslot,
+ unsigned int lpid);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
@@ -271,6 +294,21 @@ static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif
+long kvmhv_nested_init(void);
+void kvmhv_nested_exit(void);
+void kvmhv_vm_nested_init(struct kvm *kvm);
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu);
+void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
+void kvmhv_release_all_nested(struct kvm *kvm);
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
+ u64 time_limit, unsigned long lpcr);
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr);
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu);
+
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
extern int kvm_irq_bypass;
@@ -301,12 +339,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- vcpu->arch.cr = val;
+ vcpu->arch.regs.ccr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr;
+ return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
@@ -384,9 +422,6 @@ extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);
/* TO = 31 for unconditional trap */
#define INS_TW 0x7fe00008
-/* LPIDs we support with this build -- runtime limit may be lower */
-#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
-
#define SPLIT_HACK_MASK 0xff000000
#define SPLIT_HACK_OFFS 0xfb000000
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index dc435a5af7d6..6d298145d564 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -23,6 +23,108 @@
#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/cpu_has_feature.h>
+#include <asm/ppc-opcode.h>
+
+#ifdef CONFIG_PPC_PSERIES
+static inline bool kvmhv_on_pseries(void)
+{
+ return !cpu_has_feature(CPU_FTR_HVMODE);
+}
+#else
+static inline bool kvmhv_on_pseries(void)
+{
+ return false;
+}
+#endif
+
+/*
+ * Structure for a nested guest, that is, for a guest that is managed by
+ * one of our guests.
+ */
+struct kvm_nested_guest {
+ struct kvm *l1_host; /* L1 VM that owns this nested guest */
+ int l1_lpid; /* lpid L1 guest thinks this guest is */
+ int shadow_lpid; /* real lpid of this nested guest */
+ pgd_t *shadow_pgtable; /* our page table for this guest */
+ u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
+ u64 process_table; /* process table entry for this guest */
+ long refcnt; /* number of pointers to this struct */
+ struct mutex tlb_lock; /* serialize page faults and tlbies */
+ struct kvm_nested_guest *next;
+ cpumask_t need_tlb_flush;
+ cpumask_t cpu_in_guest;
+ short prev_cpu[NR_CPUS];
+};
+
+/*
+ * We define a nested rmap entry as a single 64-bit quantity
+ * 0xFFF0000000000000 12-bit lpid field
+ * 0x000FFFFFFFFFF000 40-bit guest 4k page frame number
+ * 0x0000000000000001 1-bit single entry flag
+ */
+#define RMAP_NESTED_LPID_MASK 0xFFF0000000000000UL
+#define RMAP_NESTED_LPID_SHIFT (52)
+#define RMAP_NESTED_GPA_MASK 0x000FFFFFFFFFF000UL
+#define RMAP_NESTED_IS_SINGLE_ENTRY 0x0000000000000001UL
+
+/* Structure for a nested guest rmap entry */
+struct rmap_nested {
+ struct llist_node list;
+ u64 rmap;
+};
+
+/*
+ * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
+ * safe against removal of the list entry or NULL list
+ * @pos: a (struct rmap_nested *) to use as a loop cursor
+ * @node: pointer to the first entry
+ * NOTE: this can be NULL
+ * @rmapp: an (unsigned long *) in which to return the rmap entries on each
+ * iteration
+ * NOTE: this must point to already allocated memory
+ *
+ * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
+ * rmap entry in the memslot. The list is always terminated by a "single entry"
+ * stored in the list element of the final entry of the llist. If there is ONLY
+ * a single entry then this is itself in the rmap entry of the memslot, not a
+ * llist head pointer.
+ *
+ * Note that the iterator below assumes that a nested rmap entry is always
+ * non-zero. This is true for our usage because the LPID field is always
+ * non-zero (zero is reserved for the host).
+ *
+ * This should be used to iterate over the list of rmap_nested entries with
+ * processing done on the u64 rmap value given by each iteration. This is safe
+ * against removal of list entries and it is always safe to call free on (pos).
+ *
+ * e.g.
+ * struct rmap_nested *cursor;
+ * struct llist_node *first;
+ * unsigned long rmap;
+ * for_each_nest_rmap_safe(cursor, first, &rmap) {
+ * do_something(rmap);
+ * free(cursor);
+ * }
+ */
+#define for_each_nest_rmap_safe(pos, node, rmapp) \
+ for ((pos) = llist_entry((node), typeof(*(pos)), list); \
+ (node) && \
+ (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((u64) (node)) : ((pos)->rmap))) && \
+ (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ? \
+ ((struct llist_node *) ((pos) = NULL)) : \
+ (pos)->list.next)), true); \
+ (pos) = llist_entry((node), typeof(*(pos)), list))
+
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+ bool create);
+void kvmhv_put_nested(struct kvm_nested_guest *gp);
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);
+
+/* Encoding of first parameter for H_TLB_INVALIDATE */
+#define H_TLBIE_P1_ENC(ric, prs, r) (___PPC_RIC(ric) | ___PPC_PRS(prs) | \
+ ___PPC_R(r))
/* Power architecture requires HPT is at least 256kiB, at most 64TiB */
#define PPC_MIN_HPT_ORDER 18
@@ -435,6 +537,7 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
}
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+extern void kvmhv_radix_debugfs_init(struct kvm *kvm);
extern void kvmhv_rm_send_ipi(int cpu);
@@ -482,7 +585,7 @@ static inline u64 sanitize_msr(u64 msr)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr = vcpu->arch.cr_tm;
+ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
@@ -499,7 +602,7 @@ static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
- vcpu->arch.cr_tm = vcpu->arch.cr;
+ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
@@ -515,6 +618,17 @@ static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
+extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap);
+extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+ struct rmap_nested **n_rmap);
+extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ unsigned long gpa, unsigned long hpa,
+ unsigned long nbytes);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
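A quick illustration of the nested rmap encoding defined in the hunk above: it packs a 12-bit LPID, a 40-bit guest page frame number and a single-entry flag into one 64-bit word. Below is a minimal sketch of encode/decode helpers built only from the RMAP_NESTED_* masks in that hunk; the helper names are hypothetical and not part of the patch.

/* Illustrative helpers only; masks and shift come from the hunk above. */
static inline u64 nest_rmap_encode(unsigned int lpid, unsigned long gpa)
{
	return (((u64)lpid << RMAP_NESTED_LPID_SHIFT) & RMAP_NESTED_LPID_MASK) |
	       (gpa & RMAP_NESTED_GPA_MASK);
}

static inline unsigned int nest_rmap_lpid(u64 rmap)
{
	return (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
}

static inline unsigned long nest_rmap_gpa(u64 rmap)
{
	return rmap & RMAP_NESTED_GPA_MASK;
}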
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index d978fdf698af..eb3ba6390108 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -25,6 +25,9 @@
#define XICS_MFRR 0xc
#define XICS_IPI 2 /* interrupt source # for IPIs */
+/* LPIDs we support with this build -- runtime limit may be lower */
+#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
+
/* Maximum number of threads per physical core */
#define MAX_SMT_THREADS 8
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d513e3ed1c65..f0cef625f17c 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -46,12 +46,12 @@ static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
- vcpu->arch.cr = val;
+ vcpu->arch.regs.ccr = val;
}
static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cr;
+ return vcpu->arch.regs.ccr;
}
static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 906bcbdfd2a1..fac6f631ed29 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -46,6 +46,7 @@
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#include <asm/kvm_book3s_asm.h> /* for MAX_SMT_THREADS */
#define KVM_MAX_VCPU_ID (MAX_SMT_THREADS * KVM_MAX_VCORES)
+#define KVM_MAX_NESTED_GUESTS KVMPPC_NR_LPIDS
#else
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
@@ -94,6 +95,7 @@ struct dtl_entry;
struct kvmppc_vcpu_book3s;
struct kvmppc_book3s_shadow_vcpu;
+struct kvm_nested_guest;
struct kvm_vm_stat {
ulong remote_tlb_flush;
@@ -287,10 +289,12 @@ struct kvm_arch {
u8 radix;
u8 fwnmi_enabled;
bool threads_indep;
+ bool nested_enable;
pgd_t *pgtable;
u64 process_table;
struct dentry *debugfs_dir;
struct dentry *htab_dentry;
+ struct dentry *radix_dentry;
struct kvm_resize_hpt *resize_hpt; /* protected by kvm->lock */
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
@@ -311,6 +315,9 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ u64 l1_ptcr;
+ int max_nested_lpid;
+ struct kvm_nested_guest *nested_guests[KVM_MAX_NESTED_GUESTS];
/* This array can grow quite large, keep it at the end */
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
#endif
@@ -360,7 +367,9 @@ struct kvmppc_pte {
bool may_write : 1;
bool may_execute : 1;
unsigned long wimg;
+ unsigned long rc;
u8 page_size; /* MMU_PAGE_xxx */
+ u8 page_shift;
};
struct kvmppc_mmu {
@@ -537,8 +546,6 @@ struct kvm_vcpu_arch {
ulong tar;
#endif
- u32 cr;
-
#ifdef CONFIG_PPC_BOOK3S
ulong hflags;
ulong guest_owned_ext;
@@ -707,6 +714,7 @@ struct kvm_vcpu_arch {
u8 hcall_needed;
u8 epr_flags; /* KVMPPC_EPR_xxx */
u8 epr_needed;
+ u8 external_oneshot; /* clear external irq after delivery */
u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
@@ -781,6 +789,10 @@ struct kvm_vcpu_arch {
u32 emul_inst;
u32 online;
+
+ /* For support of nested guests */
+ struct kvm_nested_guest *nested;
+ u32 nested_vcpu_id;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index e991821dd7fa..9b89b1918dfc 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -194,9 +194,7 @@ extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
(stt)->size, (ioba), (npages)) ? \
H_PARAMETER : H_SUCCESS)
-extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
- unsigned long tce);
-extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
unsigned long idx, unsigned long tce);
@@ -327,6 +325,7 @@ struct kvmppc_ops {
int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
unsigned long flags);
void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
+ int (*enable_nested)(struct kvm *kvm);
};
extern struct kvmppc_ops *kvmppc_hv_ops;
@@ -585,6 +584,7 @@ extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status);
+extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
u32 priority) { return -1; }
@@ -607,6 +607,7 @@ static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { retur
static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
int level, bool line_status) { return -ENODEV; }
+static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */
/*
@@ -652,6 +653,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);
/*
* Host-side operations we want to set up while running in real
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index a47de82fb8e2..8311869005fa 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -35,7 +35,7 @@ struct machdep_calls {
char *name;
#ifdef CONFIG_PPC64
void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
- unsigned long flags, void *caller);
+ pgprot_t prot, void *caller);
void (*iounmap)(volatile void __iomem *token);
#ifdef CONFIG_PM
@@ -108,6 +108,7 @@ struct machdep_calls {
/* Early exception handlers called in realmode */
int (*hmi_exception_early)(struct pt_regs *regs);
+ long (*machine_check_early)(struct pt_regs *regs);
/* Called during machine check exception to retrieve fixup address. */
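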
bool (*mce_check_early_recovery)(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h
index 3a1226e9b465..a8b8903e1844 100644
--- a/arch/powerpc/include/asm/mce.h
+++ b/arch/powerpc/include/asm/mce.h
@@ -210,4 +210,7 @@ extern void release_mce_event(void);
extern void machine_check_queue_event(void);
extern void machine_check_print_event_info(struct machine_check_event *evt,
bool user_mode);
+#ifdef CONFIG_PPC_BOOK3S_64
+void flush_and_reload_slb(void);
+#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* __ASM_PPC64_MCE_H__ */
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 13ea441ac531..eb20eb3b8fb0 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -309,6 +309,21 @@ static inline u16 get_mm_addr_key(struct mm_struct *mm, unsigned long address)
*/
#define MMU_PAGE_COUNT 16
+/*
+ * If we store section details in page->flags, we can't increase MAX_PHYSMEM_BITS:
+ * if we increase SECTIONS_WIDTH, we will not store node details in page->flags,
+ * and page_to_nid() has to do a page->section->node lookup instead.
+ * Hence only increase it for VMEMMAP. Further, depending on SPARSEMEM_EXTREME,
+ * reduce the memory requirements of the larger number of sections.
+ * 51 bits is the max physical real address on POWER9.
+ */
+#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
+	defined(CONFIG_PPC_64K_PAGES)
+#define MAX_PHYSMEM_BITS 51
+#else
+#define MAX_PHYSMEM_BITS 46
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */
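To see why the hunk above pairs the 51-bit MAX_PHYSMEM_BITS with SPARSEMEM_EXTREME: with SECTION_SIZE_BITS = 24 (see the sparsemem.h hunk further down), the number of mem_section entries is 1 << (MAX_PHYSMEM_BITS - SECTION_SIZE_BITS). A sketch of the arithmetic, assuming only those two constants:

/* Section-count arithmetic (sketch, assumes SECTION_SIZE_BITS == 24). */
#define NR_SECTIONS(physmem_bits)	(1UL << ((physmem_bits) - 24))
/* NR_SECTIONS(46) is 4M sections; NR_SECTIONS(51) is 128M sections, which
 * is why the two-level, dynamically allocated SPARSEMEM_EXTREME table is
 * needed before the limit can be raised. */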
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index b694d6af1150..0381394a425b 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -82,7 +82,7 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
{
int context_id;
- context_id = get_ea_context(&mm->context, ea);
+ context_id = get_user_context(&mm->context, ea);
if (!context_id)
return true;
return false;
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index fad8ddd697ac..0abf2e7fd222 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -393,7 +393,14 @@ extern struct bus_type mpic_subsys;
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
/* Get the version of primary MPIC */
+#ifdef CONFIG_MPIC
extern u32 fsl_mpic_primary_get_version(void);
+#else
+static inline u32 fsl_mpic_primary_get_version(void)
+{
+ return 0;
+}
+#endif
/* Allocate the controller structure and set up the linux irq descs
 * for the range of interrupts passed in. No HW initialization is
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index a507a65b0866..f7b129a83054 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -128,14 +128,65 @@ extern int icache_44x_need_flush;
#include <asm/nohash/32/pte-8xx.h>
#endif
-/* And here we include common definitions */
-#include <asm/pte-common.h>
+/*
+ * Location of the PFN in the PTE. Most 32-bit platforms use the same
+ * value as PAGE_SHIFT here (ie, naturally aligned). Platforms that use
+ * a different value pre-define it themselves, so we don't override it here.
+ */
+#ifndef PTE_RPN_SHIFT
+#define PTE_RPN_SHIFT (PAGE_SHIFT)
+#endif
+
+/*
+ * The mask covered by the RPN must be a ULL on 32-bit platforms with
+ * 64-bit PTEs.
+ */
+#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
+#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1))
+#else
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+#endif
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
#ifndef __ASSEMBLY__
#define pte_clear(mm, addr, ptep) \
do { pte_update(ptep, ~0, 0); } while (0)
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+#endif
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+#ifndef pte_wrprotect
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+#endif
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK)
@@ -244,7 +295,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
- pte_update(ptep, (_PAGE_RW | _PAGE_HWWRITE), _PAGE_RO);
+ unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
+ unsigned long set = pte_val(pte_wrprotect(__pte(0)));
+
+ pte_update(ptep, clr, set);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -258,9 +312,10 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address,
int psize)
{
- unsigned long set = pte_val(entry) &
- (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
- unsigned long clr = ~pte_val(entry) & (_PAGE_RO | _PAGE_NA);
+ pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
+ pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
+ unsigned long set = pte_val(entry) & pte_val(pte_set);
+ unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
pte_update(ptep, clr, set);
@@ -323,7 +378,7 @@ static inline int pte_young(pte_t pte)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 })
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags);
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
#endif /* !__ASSEMBLY__ */
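The rewritten ptep_set_wrprotect() above no longer hard-codes its masks: it derives them by applying pte_wrprotect() to an all-ones and an all-zeroes PTE, so whichever platform definition of pte_wrprotect() is in scope automatically supplies the right bits. A standalone sketch of the trick; the wrapper name is made up:

/* Sketch: derive clear/set masks from any PTE-modifying helper.
 * Bits the helper clears show up as zeroes in helper(~0);
 * bits it sets show up as ones in helper(0). */
static inline void wrprotect_via_masks(pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	/* e.g. 40x: clr == _PAGE_RW | _PAGE_HWWRITE, set == 0;
	 *      8xx: clr == 0, set == _PAGE_RO */
	pte_update(ptep, clr, set);
}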
diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h
index bb4b3a4b92a0..661f4599f2fc 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-40x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h
@@ -50,13 +50,56 @@
#define _PAGE_EXEC 0x200 /* hardware: EX permission */
#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+/* cache related flags non existing on 40x */
+#define _PAGE_COHERENT 0
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE | _PAGE_EXEC)
+
#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */
+#define _PMD_PRESENT_MASK _PMD_PRESENT
#define _PMD_BAD 0x802
#define _PMD_SIZE_4M 0x0c0
#define _PMD_SIZE_16M 0x0e0
+#define _PMD_USER 0
+
+#define _PTE_NONE_MASK 0
/* Until my rework is finished, 40x still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_BASE (_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE));
+}
+
+#define pte_wrprotect pte_wrprotect
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+}
+
+#define pte_mkclean pte_mkclean
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h
index f812c0272364..78bc304f750e 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h
@@ -85,14 +85,44 @@
#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */
#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
/* TODO: Add large page lowmem mapping support */
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
/* ERPN in a PTE never gets cleared, ignore it */
#define _PTE_NONE_MASK 0xffffffff00000000ULL
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#if defined(CONFIG_SMP)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
index f04cb46ae8a1..6bfe041ef59d 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h
@@ -29,10 +29,10 @@
*/
/* Definitions for 8xx embedded chips. */
-#define _PAGE_PRESENT 0x0001 /* Page is valid */
-#define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
-#define _PAGE_PRIVILEGED 0x0004 /* No ASID (context) compare */
-#define _PAGE_HUGE 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
+#define _PAGE_PRESENT 0x0001 /* V: Page is valid */
+#define _PAGE_NO_CACHE 0x0002 /* CI: cache inhibit */
+#define _PAGE_SH 0x0004 /* SH: No ASID (context) compare */
+#define _PAGE_SPS 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/
#define _PAGE_DIRTY 0x0100 /* C: page changed */
/* These 4 software bits must be masked out when the L2 entry is loaded
@@ -46,18 +46,95 @@
#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */
#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
+/* cache related flags non existing on 8xx */
+#define _PAGE_COHERENT 0
+#define _PAGE_WRITETHRU 0
+
+#define _PAGE_KERNEL_RO (_PAGE_SH | _PAGE_RO)
+#define _PAGE_KERNEL_ROX (_PAGE_SH | _PAGE_RO | _PAGE_EXEC)
+#define _PAGE_KERNEL_RW (_PAGE_SH | _PAGE_DIRTY)
+#define _PAGE_KERNEL_RWX (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC)
+
#define _PMD_PRESENT 0x0001
+#define _PMD_PRESENT_MASK _PMD_PRESENT
#define _PMD_BAD 0x0fd0
#define _PMD_PAGE_MASK 0x000c
#define _PMD_PAGE_8M 0x000c
#define _PMD_PAGE_512K 0x0004
#define _PMD_USER 0x0020 /* APG 1 */
+#define _PTE_NONE_MASK 0
+
/* Until my rework is finished, 8xx still needs atomic PTE updates */
#define PTE_ATOMIC_UPDATES 1
#ifdef CONFIG_PPC_16K_PAGES
-#define _PAGE_PSIZE _PAGE_HUGE
+#define _PAGE_PSIZE _PAGE_SPS
+#else
+#define _PAGE_PSIZE 0
+#endif
+
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE (_PAGE_BASE_NC)
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
+#define PAGE_SHARED __pgprot(_PAGE_BASE)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RO);
+}
+
+#define pte_wrprotect pte_wrprotect
+
+static inline int pte_write(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_RO);
+}
+
+#define pte_write pte_write
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RO);
+}
+
+#define pte_mkwrite pte_mkwrite
+
+static inline bool pte_user(pte_t pte)
+{
+ return !(pte_val(pte) & _PAGE_SH);
+}
+
+#define pte_user pte_user
+
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SH);
+}
+
+#define pte_mkprivileged pte_mkprivileged
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SH);
+}
+
+#define pte_mkuser pte_mkuser
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPS);
+}
+
+#define pte_mkhuge pte_mkhuge
#endif
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
index d1ee24e9e137..0fc1bd42bb3e 100644
--- a/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
+++ b/arch/powerpc/include/asm/nohash/32/pte-fsl-booke.h
@@ -31,11 +31,44 @@
#define _PAGE_WRITETHRU 0x00400 /* H: W bit */
#define _PAGE_SPECIAL 0x00800 /* S: Special page */
+#define _PAGE_KERNEL_RO 0
+#define _PAGE_KERNEL_ROX _PAGE_EXEC
+#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW)
+#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)
+
+/* No page size encoding in the linux PTE */
+#define _PAGE_PSIZE 0
+
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
+
+#define _PTE_NONE_MASK 0
#define PTE_WIMGE_SHIFT (6)
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED)
+#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
+#endif
+
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_BOOKE_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 7cd6809f4d33..dc6bb9da3f23 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -89,11 +89,47 @@
* Include the PTE bits definitions
*/
#include <asm/nohash/pte-book3e.h>
-#include <asm/pte-common.h>
+
+#define _PAGE_SAO 0
+
+#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1))
+
+/*
+ * _PAGE_CHG_MASK masks of bits that are to be preserved across
+ * pgprot changes.
+ */
+#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
+
+#define H_PAGE_4K_PFN 0
#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_EXEC);
+}
+
#define PMD_BAD_BITS (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS (PMD_TABLE_SIZE-1)
@@ -327,8 +363,7 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x) __pte((x).val)
-extern int map_kernel_page(unsigned long ea, unsigned long pa,
- unsigned long flags);
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index b321c82b3624..70ff23974b59 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -8,18 +8,50 @@
#include <asm/nohash/32/pgtable.h>
#endif
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+ _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
+
+/*
+ * Protection used for kernel text. We want the debuggers to be able to
+ * set breakpoints anywhere, so don't write protect the kernel text
+ * on platforms where such control is possible.
+ */
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
+ defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
+#else
+#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
+#endif
+
+/* Make module code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
+
+/* Advertise special mapping type for AGP */
+#define PAGE_AGP (PAGE_KERNEL_NC)
+#define HAVE_PAGE_AGP
+
#ifndef __ASSEMBLY__
/* Generic accessors to PTE bits */
+#ifndef pte_write
static inline int pte_write(pte_t pte)
{
- return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO;
+ return pte_val(pte) & _PAGE_RW;
}
+#endif
static inline int pte_read(pte_t pte) { return 1; }
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
-static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
+static inline bool pte_hashpte(pte_t pte) { return false; }
+static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; }
+static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; }
#ifdef CONFIG_NUMA_BALANCING
/*
@@ -29,8 +61,7 @@ static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PA
*/
static inline int pte_protnone(pte_t pte)
{
- return (pte_val(pte) &
- (_PAGE_PRESENT | _PAGE_USER)) == _PAGE_PRESENT;
+ return pte_present(pte) && !pte_user(pte);
}
static inline int pmd_protnone(pmd_t pmd)
@@ -44,6 +75,23 @@ static inline int pte_present(pte_t pte)
return pte_val(pte) & _PAGE_PRESENT;
}
+static inline bool pte_hw_valid(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_PRESENT;
+}
+
+/*
+ * Don't just check for any non-zero bits in _PAGE_USER, since for book3e
+ * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
+ * _PAGE_USER. Need to explicitly match the _PAGE_BAP_UR bit in that case too.
+ */
+#ifndef pte_user
+static inline bool pte_user(pte_t pte)
+{
+ return (pte_val(pte) & _PAGE_USER) == _PAGE_USER;
+}
+#endif
+
/*
* We only find page table entry in the last level
* Hence no need for other accessors
@@ -77,53 +125,53 @@ static inline unsigned long pte_pfn(pte_t pte) {
return pte_val(pte) >> PTE_RPN_SHIFT; }
/* Generic modifiers for PTE bits */
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_exprotect(pte_t pte)
{
- pte_basic_t ptev;
-
- ptev = pte_val(pte) & ~(_PAGE_RW | _PAGE_HWWRITE);
- ptev |= _PAGE_RO;
- return __pte(ptev);
+ return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
+#ifndef pte_mkclean
static inline pte_t pte_mkclean(pte_t pte)
{
- return __pte(pte_val(pte) & ~(_PAGE_DIRTY | _PAGE_HWWRITE));
+ return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}
+#endif
static inline pte_t pte_mkold(pte_t pte)
{
return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t pte_mkpte(pte_t pte)
{
- pte_basic_t ptev;
-
- ptev = pte_val(pte) & ~_PAGE_RO;
- ptev |= _PAGE_RW;
- return __pte(ptev);
+ return pte;
}
-static inline pte_t pte_mkdirty(pte_t pte)
+static inline pte_t pte_mkspecial(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_DIRTY);
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
-static inline pte_t pte_mkyoung(pte_t pte)
+#ifndef pte_mkhuge
+static inline pte_t pte_mkhuge(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_ACCESSED);
+ return __pte(pte_val(pte));
}
+#endif
-static inline pte_t pte_mkspecial(pte_t pte)
+#ifndef pte_mkprivileged
+static inline pte_t pte_mkprivileged(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_SPECIAL);
+ return __pte(pte_val(pte) & ~_PAGE_USER);
}
+#endif
-static inline pte_t pte_mkhuge(pte_t pte)
+#ifndef pte_mkuser
+static inline pte_t pte_mkuser(pte_t pte)
{
- return __pte(pte_val(pte) | _PAGE_HUGE);
+ return __pte(pte_val(pte) | _PAGE_USER);
}
+#endif
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
@@ -197,6 +245,8 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre
#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
_PAGE_COHERENT | _PAGE_WRITETHRU))
+#else
+#define pgprot_cached_wthru(prot) pgprot_noncached(prot)
#endif
#define pgprot_cached_noncoherent(prot) \
diff --git a/arch/powerpc/include/asm/nohash/pte-book3e.h b/arch/powerpc/include/asm/nohash/pte-book3e.h
index 12730b81cd98..dd40d200f274 100644
--- a/arch/powerpc/include/asm/nohash/pte-book3e.h
+++ b/arch/powerpc/include/asm/nohash/pte-book3e.h
@@ -77,7 +77,48 @@
#define _PMD_PRESENT 0
#define _PMD_PRESENT_MASK (PAGE_MASK)
#define _PMD_BAD (~PAGE_MASK)
+#define _PMD_USER 0
+#else
+#define _PTE_NONE_MASK 0
+#endif
+
+/*
+ * We define 2 sets of base prot bits, one for basic pages (ie,
+ * cacheable kernel and user pages) and one for non cacheable
+ * pages. We always set _PAGE_COHERENT when SMP is enabled or
+ * the processor might need it for DMA coherency.
+ */
+#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#if defined(CONFIG_SMP)
+#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
+#else
+#define _PAGE_BASE (_PAGE_BASE_NC)
#endif
+/* Permission masks used to generate the __P and __S table */
+#define PAGE_NONE __pgprot(_PAGE_BASE)
+#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
+#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+
+#ifndef __ASSEMBLY__
+static inline pte_t pte_mkprivileged(pte_t pte)
+{
+ return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED);
+}
+
+#define pte_mkprivileged pte_mkprivileged
+
+static inline pte_t pte_mkuser(pte_t pte)
+{
+ return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
+}
+
+#define pte_mkuser pte_mkuser
+#endif /* __ASSEMBLY__ */
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_NOHASH_PTE_BOOK3E_H */
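The pte_mkprivileged()/pte_mkuser() pair above, together with the guarded generics added to nohash/pgtable.h earlier, shows the override convention that replaces pte-common.h: a platform header defines the inline plus a same-named macro, and the common header only supplies its version when the macro is absent. Reduced to a skeleton, with pte_mkuser() as the example:

/* Platform header (e.g. pte-book3e.h), included first: */
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER);
}
#define pte_mkuser pte_mkuser	/* suppress the generic definition */

/* Common header (nohash/pgtable.h), included afterwards: */
#ifndef pte_mkuser
static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}
#endif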
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 8365353330b4..870fb7b239ea 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1050,6 +1050,7 @@ enum OpalSysCooling {
enum {
OPAL_REBOOT_NORMAL = 0,
OPAL_REBOOT_PLATFORM_ERROR = 1,
+ OPAL_REBOOT_FULL_IPL = 2,
};
/* Argument to OPAL_PCI_TCE_KILL */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index ad4f16164619..e843bc5d1a0f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -113,7 +113,13 @@ struct paca_struct {
* on the linear mapping */
/* SLB related definitions */
u16 vmalloc_sllp;
- u16 slb_cache_ptr;
+ u8 slb_cache_ptr;
+ u8 stab_rr; /* stab/slb round-robin counter */
+#ifdef CONFIG_DEBUG_VM
+ u8 in_kernel_slb_handler;
+#endif
+ u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */
+ u32 slb_kern_bitmap;
u32 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -160,7 +166,6 @@ struct paca_struct {
*/
struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
- u64 stab_rr; /* stab/slb round-robin counter */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
@@ -250,6 +255,15 @@ struct paca_struct {
#ifdef CONFIG_PPC_PSERIES
u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */
#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Capture SLB related old contents in MCE handler. */
+ struct slb_entry *mce_faulty_slbs;
+ u16 slb_save_cache_ptr;
+#endif /* CONFIG_PPC_BOOK3S_64 */
+#ifdef CONFIG_STACKPROTECTOR
+ unsigned long canary;
+#endif
} ____cacheline_aligned;
extern void copy_mm_to_paca(struct mm_struct *mm);
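The comment in the paca hunk above only says the two u32 fields are "Bitmaps for first 32 SLB entries". A sketch of the bookkeeping those names suggest (slot i in use, optionally kernel-owned); this helper is a guess at the usage, not code from the patch:

/* Hypothetical sketch: mark SLB slot 'idx' as in use. */
static inline void slb_mark_used(struct paca_struct *paca, int idx, bool kernel)
{
	if (idx >= 32)
		return;		/* the bitmaps cover only the first 32 slots */
	paca->slb_used_bitmap |= 1u << idx;
	if (kernel)
		paca->slb_kern_bitmap |= 1u << idx;
}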
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 14c79a7dc855..9679b7519a35 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -20,6 +20,25 @@ struct mm_struct;
#include <asm/nohash/pgtable.h>
#endif /* !CONFIG_PPC_BOOK3S */
+/* Note due to the way vm flags are laid out, the bits are XWR */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_X
+#define __P101 PAGE_READONLY_X
+#define __P110 PAGE_COPY_X
+#define __P111 PAGE_COPY_X
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_X
+#define __S101 PAGE_READONLY_X
+#define __S110 PAGE_SHARED_X
+#define __S111 PAGE_SHARED_X
+
#ifndef __ASSEMBLY__
#include <asm/tlbflush.h>
@@ -27,6 +46,16 @@ struct mm_struct;
/* Keep these as a macros to avoid include dependency mess */
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
+/*
+ * Select all bits except the pfn
+ */
+static inline pgprot_t pte_pgprot(pte_t pte)
+{
+ unsigned long pte_flags;
+
+ pte_flags = pte_val(pte) & ~PTE_RPN_MASK;
+ return __pgprot(pte_flags);
+}
/*
* ZERO_PAGE is a global shared page that is always zero: used
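A short usage sketch for the pte_pgprot() just added: because it masks off only PTE_RPN_MASK, the remaining protection bits can be reattached to a different page frame. The function name below is hypothetical:

/* Sketch: clone the protections of 'old' onto a new pfn. */
static inline pte_t clone_prot_example(pte_t old, unsigned long new_pfn)
{
	return pfn_pte(new_pfn, pte_pgprot(old));
}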
diff --git a/arch/powerpc/include/asm/pnv-pci.h b/arch/powerpc/include/asm/pnv-pci.h
index 7f627e3f4da4..630eb8b1b7ed 100644
--- a/arch/powerpc/include/asm/pnv-pci.h
+++ b/arch/powerpc/include/asm/pnv-pci.h
@@ -54,7 +54,6 @@ void pnv_cxl_release_hwirq_ranges(struct cxl_irq_ranges *irqs,
struct pnv_php_slot {
struct hotplug_slot slot;
- struct hotplug_slot_info slot_info;
uint64_t id;
char *name;
int slot_no;
@@ -72,6 +71,7 @@ struct pnv_php_slot {
struct pci_dev *pdev;
struct pci_bus *bus;
bool power_state_check;
+ u8 attention_state;
void *fdt;
void *dt;
struct of_changeset ocs;
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 665af14850e4..6093bc8f74e5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -104,6 +104,7 @@
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MSGSNDP 142
#define OP_31_XOP_MSGCLRP 174
+#define OP_31_XOP_TLBIE 306
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LWAX 341
#define OP_31_XOP_LHAX 343
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 726288048652..f67da277d652 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -58,6 +58,7 @@ void eeh_save_bars(struct eeh_dev *edev);
int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
void eeh_pe_state_mark(struct eeh_pe *pe, int state);
+void eeh_pe_mark_isolated(struct eeh_pe *pe);
void eeh_pe_state_clear(struct eeh_pe *pe, int state);
void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state);
void eeh_pe_dev_mode_mark(struct eeh_pe *pe, int mode);
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 52fadded5c1e..7d04d60a39c9 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -32,9 +32,9 @@
/* Default SMT priority is set to 3. Use 11- 13bits to save priority. */
#define PPR_PRIORITY 3
#ifdef __ASSEMBLY__
-#define INIT_PPR (PPR_PRIORITY << 50)
+#define DEFAULT_PPR (PPR_PRIORITY << 50)
#else
-#define INIT_PPR ((u64)PPR_PRIORITY << 50)
+#define DEFAULT_PPR ((u64)PPR_PRIORITY << 50)
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PPC64 */
@@ -273,6 +273,7 @@ struct thread_struct {
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */
unsigned long trap_nr; /* last trap # on this thread */
+ u8 load_slb; /* Ages out SLB preload cache entries */
u8 load_fp;
#ifdef CONFIG_ALTIVEC
u8 load_vec;
@@ -341,7 +342,6 @@ struct thread_struct {
* onwards.
*/
int dscr_inherit;
- unsigned long ppr; /* used to save/restore SMT priority */
unsigned long tidr;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
@@ -389,7 +389,6 @@ struct thread_struct {
.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
.addr_limit = KERNEL_DS, \
.fpexc_mode = 0, \
- .ppr = INIT_PPR, \
.fscr = FSCR_TAR | FSCR_EBB \
}
#endif
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
deleted file mode 100644
index bef56141a549..000000000000
--- a/arch/powerpc/include/asm/pte-common.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Included from asm/pgtable-*.h only ! */
-
-/*
- * Some bits are only used on some cpu families... Make sure that all
- * the undefined gets a sensible default
- */
-#ifndef _PAGE_HASHPTE
-#define _PAGE_HASHPTE 0
-#endif
-#ifndef _PAGE_HWWRITE
-#define _PAGE_HWWRITE 0
-#endif
-#ifndef _PAGE_EXEC
-#define _PAGE_EXEC 0
-#endif
-#ifndef _PAGE_ENDIAN
-#define _PAGE_ENDIAN 0
-#endif
-#ifndef _PAGE_COHERENT
-#define _PAGE_COHERENT 0
-#endif
-#ifndef _PAGE_WRITETHRU
-#define _PAGE_WRITETHRU 0
-#endif
-#ifndef _PAGE_4K_PFN
-#define _PAGE_4K_PFN 0
-#endif
-#ifndef _PAGE_SAO
-#define _PAGE_SAO 0
-#endif
-#ifndef _PAGE_PSIZE
-#define _PAGE_PSIZE 0
-#endif
-/* _PAGE_RO and _PAGE_RW shall not be defined at the same time */
-#ifndef _PAGE_RO
-#define _PAGE_RO 0
-#else
-#define _PAGE_RW 0
-#endif
-
-#ifndef _PAGE_PTE
-#define _PAGE_PTE 0
-#endif
-/* At least one of _PAGE_PRIVILEGED or _PAGE_USER must be defined */
-#ifndef _PAGE_PRIVILEGED
-#define _PAGE_PRIVILEGED 0
-#else
-#ifndef _PAGE_USER
-#define _PAGE_USER 0
-#endif
-#endif
-#ifndef _PAGE_NA
-#define _PAGE_NA 0
-#endif
-#ifndef _PAGE_HUGE
-#define _PAGE_HUGE 0
-#endif
-
-#ifndef _PMD_PRESENT_MASK
-#define _PMD_PRESENT_MASK _PMD_PRESENT
-#endif
-#ifndef _PMD_USER
-#define _PMD_USER 0
-#endif
-#ifndef _PAGE_KERNEL_RO
-#define _PAGE_KERNEL_RO (_PAGE_PRIVILEGED | _PAGE_RO)
-#endif
-#ifndef _PAGE_KERNEL_ROX
-#define _PAGE_KERNEL_ROX (_PAGE_PRIVILEGED | _PAGE_RO | _PAGE_EXEC)
-#endif
-#ifndef _PAGE_KERNEL_RW
-#define _PAGE_KERNEL_RW (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
- _PAGE_HWWRITE)
-#endif
-#ifndef _PAGE_KERNEL_RWX
-#define _PAGE_KERNEL_RWX (_PAGE_PRIVILEGED | _PAGE_DIRTY | _PAGE_RW | \
- _PAGE_HWWRITE | _PAGE_EXEC)
-#endif
-#ifndef _PAGE_HPTEFLAGS
-#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
-#endif
-#ifndef _PTE_NONE_MASK
-#define _PTE_NONE_MASK _PAGE_HPTEFLAGS
-#endif
-
-#ifndef __ASSEMBLY__
-
-/*
- * Don't just check for any non zero bits in __PAGE_USER, since for book3e
- * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
- * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too.
- */
-static inline bool pte_user(pte_t pte)
-{
- return (pte_val(pte) & (_PAGE_USER | _PAGE_PRIVILEGED)) == _PAGE_USER;
-}
-#endif /* __ASSEMBLY__ */
-
-/* Location of the PFN in the PTE. Most 32-bit platforms use the same
- * as _PAGE_SHIFT here (ie, naturally aligned).
- * Platform who don't just pre-define the value so we don't override it here
- */
-#ifndef PTE_RPN_SHIFT
-#define PTE_RPN_SHIFT (PAGE_SHIFT)
-#endif
-
-/* The mask covered by the RPN must be a ULL on 32-bit platforms with
- * 64-bit PTEs
- */
-#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
-#define PTE_RPN_MASK (~((1ULL<<PTE_RPN_SHIFT)-1))
-#else
-#define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1))
-#endif
-
-/* _PAGE_CHG_MASK masks of bits that are to be preserved across
- * pgprot changes
- */
-#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
- _PAGE_ACCESSED | _PAGE_SPECIAL)
-
-/* Mask of bits returned by pte_pgprot() */
-#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
- _PAGE_WRITETHRU | _PAGE_ENDIAN | _PAGE_4K_PFN | \
- _PAGE_USER | _PAGE_ACCESSED | _PAGE_RO | _PAGE_NA | \
- _PAGE_PRIVILEGED | \
- _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
-
-/*
- * We define 2 sets of base prot bits, one for basic pages (ie,
- * cacheable kernel and user pages) and one for non cacheable
- * pages. We always set _PAGE_COHERENT when SMP is enabled or
- * the processor might need it for DMA coherency.
- */
-#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
-#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU) || \
- defined(CONFIG_PPC_E500MC)
-#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT)
-#else
-#define _PAGE_BASE (_PAGE_BASE_NC)
-#endif
-
-/* Permission masks used to generate the __P and __S table,
- *
- * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
- *
- * Write permissions imply read permissions for now (we could make write-only
- * pages on BookE but we don't bother for now). Execute permission control is
- * possible on platforms that define _PAGE_EXEC
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA)
-#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | \
- _PAGE_EXEC)
-#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
- _PAGE_EXEC)
-#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO)
-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RO | \
- _PAGE_EXEC)
-
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_X
-#define __P101 PAGE_READONLY_X
-#define __P110 PAGE_COPY_X
-#define __P111 PAGE_COPY_X
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_X
-#define __S101 PAGE_READONLY_X
-#define __S110 PAGE_SHARED_X
-#define __S111 PAGE_SHARED_X
-
-/* Permission masks used for kernel mappings */
-#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
-#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE)
-#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
- _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
-#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
-
-/* Protection used for kernel text. We want the debuggers to be able to
- * set breakpoints anywhere, so don't write protect the kernel text
- * on platforms where such control is possible.
- */
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
- defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_X
-#else
-#define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX
-#endif
-
-/* Make modules code happy. We don't set RO yet */
-#define PAGE_KERNEL_EXEC PAGE_KERNEL_X
-
-/* Advertise special mapping type for AGP */
-#define PAGE_AGP (PAGE_KERNEL_NC)
-#define HAVE_PAGE_AGP
-
-#ifndef _PAGE_READ
-/* if not defined, we should not find _PAGE_WRITE too */
-#define _PAGE_READ 0
-#define _PAGE_WRITE _PAGE_RW
-#endif
-
-#ifndef H_PAGE_4K_PFN
-#define H_PAGE_4K_PFN 0
-#endif
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 5b480e1d5909..f73886a1a7f5 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -26,6 +26,37 @@
#include <uapi/asm/ptrace.h>
#include <asm/asm-const.h>
+#ifndef __ASSEMBLY__
+struct pt_regs
+{
+ union {
+ struct user_pt_regs user_regs;
+ struct {
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3;
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+#ifdef CONFIG_PPC64
+ unsigned long softe;
+#else
+ unsigned long mq;
+#endif
+ unsigned long trap;
+ unsigned long dar;
+ unsigned long dsisr;
+ unsigned long result;
+ };
+ };
+
+#ifdef CONFIG_PPC64
+ unsigned long ppr;
+#endif
+};
+#endif
#ifdef __powerpc64__
@@ -102,6 +133,11 @@ static inline long regs_return_value(struct pt_regs *regs)
return -regs->gpr[3];
}
+static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
+{
+ regs->gpr[3] = rc;
+}
+
#ifdef __powerpc64__
#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
#else
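The anonymous union added to pt_regs above means the kernel's traditional field names and the uapi user_pt_regs view alias the same storage; nothing moves, only a second naming becomes available. A tiny demonstration, with a hypothetical helper name:

/* Sketch: both names reach the same bytes. */
static inline void pt_regs_alias_demo(struct pt_regs *regs)
{
	regs->gpr[3] = 1;			/* kernel-internal view */
	BUG_ON(regs->user_regs.gpr[3] != 1);	/* uapi view, same storage */
}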
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e5b314ed054e..de52c3166ba4 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -118,11 +118,16 @@
#define MSR_TS_S __MASK(MSR_TS_S_LG) /* Transaction Suspended */
#define MSR_TS_T __MASK(MSR_TS_T_LG) /* Transaction Transactional */
#define MSR_TS_MASK (MSR_TS_T | MSR_TS_S) /* Transaction State bits */
-#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
#define MSR_TM_RESV(x) (((x) & MSR_TS_MASK) == MSR_TS_MASK) /* Reserved */
#define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T)
#define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S)
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+#define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0) /* Transaction active? */
+#else
+#define MSR_TM_ACTIVE(x) 0
+#endif
+
#if defined(CONFIG_PPC_BOOK3S_64)
#define MSR_64BIT MSR_SF
@@ -415,6 +420,7 @@
#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
#define HFSCR_FP __MASK(FSCR_FP_LG)
+#define HFSCR_INTR_CAUSE (ASM_CONST(0xFF) << 56) /* interrupt cause */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 ASM_CONST(0x8000000000000000)
@@ -766,6 +772,7 @@
#define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */
#define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */
#define HSRR1_DENORM 0x00100000 /* Denorm exception */
+#define HSRR1_HISI_WRITE 0x00010000 /* HISI because it couldn't update memory */
#define SPRN_TBCTL 0x35f /* PA6T Timebase control register */
#define TBCTL_FREEZE 0x0000000000000000ull /* Freeze all tbs */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 71e393c46a49..bb38dd67d47d 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -125,6 +125,7 @@ struct rtas_suspend_me_data {
#define RTAS_TYPE_INFO 0xE2
#define RTAS_TYPE_DEALLOC 0xE3
#define RTAS_TYPE_DUMP 0xE4
+#define RTAS_TYPE_HOTPLUG 0xE5
/* I don't add PowerMGM events right now, this is a different topic */
#define RTAS_TYPE_PMGM_POWER_SW_ON 0x60
#define RTAS_TYPE_PMGM_POWER_SW_OFF 0x61
@@ -185,11 +186,23 @@ static inline uint8_t rtas_error_disposition(const struct rtas_error_log *elog)
return (elog->byte1 & 0x18) >> 3;
}
+static inline
+void rtas_set_disposition_recovered(struct rtas_error_log *elog)
+{
+ elog->byte1 &= ~0x18;
+ elog->byte1 |= (RTAS_DISP_FULLY_RECOVERED << 3);
+}
+
static inline uint8_t rtas_error_extended(const struct rtas_error_log *elog)
{
return (elog->byte1 & 0x04) >> 2;
}
+static inline uint8_t rtas_error_initiator(const struct rtas_error_log *elog)
+{
+ return (elog->byte2 & 0xf0) >> 4;
+}
+
#define rtas_error_type(x) ((x)->byte3)
static inline
@@ -275,6 +288,7 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
+#define PSERIES_ELOG_SECT_ID_MCE (('M' << 8) | 'C')
/* Vendor specific Platform Event Log Format, Version 6, section header */
struct pseries_errorlog {
@@ -316,6 +330,7 @@ struct pseries_hp_errorlog {
#define PSERIES_HP_ELOG_RESOURCE_MEM 2
#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
#define PSERIES_HP_ELOG_RESOURCE_PHB 4
+#define PSERIES_HP_ELOG_RESOURCE_PMEM 6
#define PSERIES_HP_ELOG_ACTION_ADD 1
#define PSERIES_HP_ELOG_ACTION_REMOVE 2
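rtas_set_disposition_recovered() above is a read-modify-write of the packed byte1 field, mirroring the (byte1 & 0x18) >> 3 accessor: clear the two disposition bits, then OR in the new value shifted into place. A generalized setter, sketched under the same 0x18/3 layout (hypothetical, not in the patch):

/* Sketch: set any RTAS_DISP_* value (2 bits at offset 3) in byte1. */
static inline void rtas_set_disposition(struct rtas_error_log *elog, u8 disp)
{
	elog->byte1 = (elog->byte1 & ~0x18) | ((disp & 0x3) << 3);
}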
diff --git a/arch/powerpc/include/asm/slice.h b/arch/powerpc/include/asm/slice.h
index e40406cf5628..a595461c9cb0 100644
--- a/arch/powerpc/include/asm/slice.h
+++ b/arch/powerpc/include/asm/slice.h
@@ -32,6 +32,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize);
void slice_init_new_context_exec(struct mm_struct *mm);
+void slice_setup_new_exec(void);
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 95b66a0c639b..41695745032c 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -100,6 +100,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
+DECLARE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
static inline struct cpumask *cpu_sibling_mask(int cpu)
{
@@ -116,6 +117,11 @@ static inline struct cpumask *cpu_l2_cache_mask(int cpu)
return per_cpu(cpu_l2_cache_map, cpu);
}
+static inline struct cpumask *cpu_smallcore_mask(int cpu)
+{
+ return per_cpu(cpu_smallcore_map, cpu);
+}
+
extern int cpu_to_core_id(int cpu);
/* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
@@ -166,6 +172,11 @@ static inline const struct cpumask *cpu_sibling_mask(int cpu)
return cpumask_of(cpu);
}
+static inline const struct cpumask *cpu_smallcore_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
+
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC64
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 28f5dae25db6..68da49320592 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -9,17 +9,6 @@
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
#define SECTION_SIZE_BITS 24
-/*
- * If we store section details in page->flags we can't increase the MAX_PHYSMEM_BITS
- * if we increase SECTIONS_WIDTH we will not store node details in page->flags and
- * page_to_nid does a page->section->node lookup
- * Hence only increase for VMEMMAP.
- */
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#define MAX_PHYSMEM_BITS 47
-#else
-#define MAX_PHYSMEM_BITS 46
-#endif
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
new file mode 100644
index 000000000000..1c8460e23583
--- /dev/null
+++ b/arch/powerpc/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GCC stack protector support.
+ *
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+#include <asm/reg.h>
+#include <asm/current.h>
+#include <asm/paca.h>
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi-random initial value. */
+ canary = get_random_canary();
+ canary ^= mftb();
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+#ifdef CONFIG_PPC64
+ get_paca()->canary = canary;
+#endif
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
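
The canary seeding above layers several weak entropy sources because very early boot has little true randomness to offer. As a rough userspace analogue — a sketch only, where CANARY_MASK's value is invented, get_random_canary() is a kernel internal, and clock_gettime() stands in for the timebase read mftb():

/* Hedged sketch of mixing entropy sources into a stack canary. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define CANARY_MASK 0xffffffffffffff00UL /* assumed: zero low byte */

static uint64_t init_canary(void)
{
	uint64_t canary = ((uint64_t)random() << 32) | (uint64_t)random();
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts); /* stands in for mftb() */
	canary ^= (uint64_t)ts.tv_nsec;
	return canary & CANARY_MASK;
}

int main(void)
{
	srandom((unsigned)time(NULL));
	printf("canary = %#llx\n", (unsigned long long)init_canary());
	return 0;
}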
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 3c0002044bc9..544cac0474cb 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -29,6 +29,7 @@
#include <asm/page.h>
#include <asm/accounting.h>
+#define SLB_PRELOAD_NR 16U
/*
* low level task data.
*/
@@ -44,6 +45,10 @@ struct thread_info {
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC32)
struct cpu_accounting_data accounting;
#endif
+ unsigned char slb_preload_nr;
+ unsigned char slb_preload_tail;
+ u32 slb_preload_esid[SLB_PRELOAD_NR];
+
/* low level flags - has atomic operations done on it */
unsigned long flags ____cacheline_aligned_in_smp;
};
@@ -72,6 +77,12 @@ static inline struct thread_info *current_thread_info(void)
}
extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void arch_setup_new_exec(void);
+#define arch_setup_new_exec arch_setup_new_exec
+#endif
+
#endif /* __ASSEMBLY__ */
/*
@@ -81,7 +92,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_FSCHECK 3 /* Check FS is USER_DS on return */
-#define TIF_32BIT 4 /* 32 bit binary */
+#define TIF_SYSCALL_EMU 4 /* syscall emulation active */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING 6 /* pending live patching update */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
@@ -100,6 +111,7 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */
+#define TIF_32BIT 20 /* 32 bit binary */
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -120,9 +132,10 @@ extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
#define _TIF_FSCHECK (1<<TIF_FSCHECK)
+#define _TIF_SYSCALL_EMU (1<<TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
- _TIF_NOHZ)
+ _TIF_NOHZ | _TIF_SYSCALL_EMU)
#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
_TIF_NOTIFY_RESUME | _TIF_UPROBE | \
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index d018e8602694..58ef8c43a89d 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -201,6 +201,21 @@ TRACE_EVENT(tlbie,
__entry->r)
);
+TRACE_EVENT(tlbia,
+
+ TP_PROTO(unsigned long id),
+ TP_ARGS(id),
+ TP_STRUCT__entry(
+ __field(unsigned long, id)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ ),
+
+ TP_printk("ctx.id=0x%lx", __entry->id)
+);
+
#endif /* _TRACE_POWERPC_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bac225bb7f64..15bea9a0f260 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -260,7 +260,7 @@ do { \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
if (!is_kernel_addr((unsigned long)__gu_addr)) \
might_fault(); \
@@ -274,7 +274,7 @@ do { \
({ \
long __gu_err = -EFAULT; \
__long_type(*(ptr)) __gu_val = 0; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
might_fault(); \
if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
barrier_nospec(); \
@@ -288,7 +288,7 @@ do { \
({ \
long __gu_err; \
__long_type(*(ptr)) __gu_val; \
- const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
__chk_user_ptr(ptr); \
barrier_nospec(); \
__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index c19379f0a32e..b0de85b477e1 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -22,6 +22,7 @@
#include <linux/compiler.h>
#include <linux/linkage.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -35,7 +36,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_UNAME
@@ -47,6 +47,7 @@
#endif
#ifdef CONFIG_PPC64
#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_SYS_NEWFSTATAT
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
diff --git a/arch/powerpc/include/asm/user.h b/arch/powerpc/include/asm/user.h
index 5c0e082eae7b..99443b8594e7 100644
--- a/arch/powerpc/include/asm/user.h
+++ b/arch/powerpc/include/asm/user.h
@@ -31,7 +31,7 @@
* to write an integer number of pages.
*/
struct user {
- struct pt_regs regs; /* entire machine state */
+ struct user_pt_regs regs; /* entire machine state */
size_t u_tsize; /* text size (pages) */
size_t u_dsize; /* data size (pages) */
size_t u_ssize; /* stack size (pages) */
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index 1b32b56a03d3..8c876c166ef2 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -634,6 +634,7 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
+#define KVM_REG_PPC_PTCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc0)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index 5e3edc2a7634..f5f1ccc740fc 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -29,7 +29,12 @@
#ifndef __ASSEMBLY__
-struct pt_regs {
+#ifdef __KERNEL__
+struct user_pt_regs
+#else
+struct pt_regs
+#endif
+{
unsigned long gpr[32];
unsigned long nip;
unsigned long msr;
@@ -160,6 +165,10 @@ struct pt_regs {
#define PTRACE_GETVSRREGS 0x1b
#define PTRACE_SETVSRREGS 0x1c
+/* Syscall emulation defines */
+#define PTRACE_SYSEMU 0x1d
+#define PTRACE_SYSEMU_SINGLESTEP 0x1e
+
/*
* Get or set a debug register. The first 16 are DABR registers and the
* second 16 are IABR registers.
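
PTRACE_SYSEMU stops the tracee at each syscall entry and skips execution of the syscall itself, which is what user-mode emulators rely on. Below is a hedged userspace sketch of how a tracer might drive it; the 0x1d fallback is the powerpc value added by this patch (other architectures use different numbers), and the demo simply kills the child after a few stops rather than emulating anything:

/* Hedged sketch: intercepting (and skipping) syscalls with PTRACE_SYSEMU. */
#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef PTRACE_SYSEMU
#define PTRACE_SYSEMU 0x1d /* powerpc value from this patch; arch-specific */
#endif

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* let the tracer attach */
		for (;;)
			getpid();		/* every syscall now traps */
	}

	int stops = 0;
	waitpid(child, NULL, 0);		/* initial SIGSTOP */
	while (ptrace(PTRACE_SYSEMU, child, NULL, NULL) == 0) {
		int status;

		if (waitpid(child, &status, 0) < 0 || WIFEXITED(status))
			break;
		printf("syscall entry %d intercepted, not executed\n", ++stops);
		if (stops == 3) {		/* demo only: stop emulating */
			kill(child, SIGKILL);
			waitpid(child, NULL, 0);
			break;
		}
	}
	return 0;
}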
diff --git a/arch/powerpc/include/uapi/asm/sigcontext.h b/arch/powerpc/include/uapi/asm/sigcontext.h
index 2fbe485acdb4..630aeda56d59 100644
--- a/arch/powerpc/include/uapi/asm/sigcontext.h
+++ b/arch/powerpc/include/uapi/asm/sigcontext.h
@@ -22,7 +22,11 @@ struct sigcontext {
#endif
unsigned long handler;
unsigned long oldmask;
- struct pt_regs __user *regs;
+#ifdef __KERNEL__
+ struct user_pt_regs __user *regs;
+#else
+ struct pt_regs *regs;
+#endif
#ifdef __powerpc64__
elf_gregset_t gp_regs;
elf_fpregset_t fp_regs;
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 3b66f2c19c84..53d4b8d5b54d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -5,7 +5,8 @@
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+# Disable clang warning for using setjmp without setjmp.h header
+CFLAGS_crash.o += $(call cc-disable-warning, builtin-requires-header)
ifdef CONFIG_PPC64
CFLAGS_prom_init.o += $(NO_MINIMAL_TOC)
@@ -20,12 +21,14 @@ CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)
+
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
-CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_btext.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_prom.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cputable.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_btext.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_prom.o = $(CC_FLAGS_FTRACE)
endif
obj-y := cputable.o ptrace.o syscalls.o \
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 89cf15566c4e..9ffc72ded73a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -79,11 +79,16 @@ int main(void)
{
OFFSET(THREAD, task_struct, thread);
OFFSET(MM, task_struct, mm);
+#ifdef CONFIG_STACKPROTECTOR
+ OFFSET(TASK_CANARY, task_struct, stack_canary);
+#ifdef CONFIG_PPC64
+ OFFSET(PACA_CANARY, paca_struct, canary);
+#endif
+#endif
OFFSET(MMCONTEXTID, mm_struct, context.id);
#ifdef CONFIG_PPC64
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
- OFFSET(TASKTHREADPPR, task_struct, thread.ppr);
#else
OFFSET(THREAD_INFO, task_struct, stack);
DEFINE(THREAD_INFO_GAP, _ALIGN_UP(sizeof(struct thread_info), 16));
@@ -173,7 +178,6 @@ int main(void)
OFFSET(PACAKSAVE, paca_struct, kstack);
OFFSET(PACACURRENT, paca_struct, __current);
OFFSET(PACASAVEDMSR, paca_struct, saved_msr);
- OFFSET(PACASTABRR, paca_struct, stab_rr);
OFFSET(PACAR1, paca_struct, saved_r1);
OFFSET(PACATOC, paca_struct, kernel_toc);
OFFSET(PACAKBASE, paca_struct, kernelbase);
@@ -212,6 +216,7 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S_64
OFFSET(PACASLBCACHE, paca_struct, slb_cache);
OFFSET(PACASLBCACHEPTR, paca_struct, slb_cache_ptr);
+ OFFSET(PACASTABRR, paca_struct, stab_rr);
OFFSET(PACAVMALLOCSLLP, paca_struct, vmalloc_sllp);
#ifdef CONFIG_PPC_MM_SLICES
OFFSET(MMUPSIZESLLP, mmu_psize_def, sllp);
@@ -274,11 +279,6 @@ int main(void)
/* Interrupt register frame */
DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
-#ifdef CONFIG_PPC64
- /* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
- DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
- DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
-#endif /* CONFIG_PPC64 */
STACK_PT_REGS_OFFSET(GPR0, gpr[0]);
STACK_PT_REGS_OFFSET(GPR1, gpr[1]);
STACK_PT_REGS_OFFSET(GPR2, gpr[2]);
@@ -322,10 +322,7 @@ int main(void)
STACK_PT_REGS_OFFSET(_ESR, dsisr);
#else /* CONFIG_PPC64 */
STACK_PT_REGS_OFFSET(SOFTE, softe);
-
- /* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
- DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
- DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
+ STACK_PT_REGS_OFFSET(_PPR, ppr);
#endif /* CONFIG_PPC64 */
#if defined(CONFIG_PPC32)
@@ -387,12 +384,12 @@ int main(void)
OFFSET(CFG_SYSCALL_MAP64, vdso_data, syscall_map_64);
OFFSET(TVAL64_TV_SEC, timeval, tv_sec);
OFFSET(TVAL64_TV_USEC, timeval, tv_usec);
- OFFSET(TVAL32_TV_SEC, compat_timeval, tv_sec);
- OFFSET(TVAL32_TV_USEC, compat_timeval, tv_usec);
+ OFFSET(TVAL32_TV_SEC, old_timeval32, tv_sec);
+ OFFSET(TVAL32_TV_USEC, old_timeval32, tv_usec);
OFFSET(TSPC64_TV_SEC, timespec, tv_sec);
OFFSET(TSPC64_TV_NSEC, timespec, tv_nsec);
- OFFSET(TSPC32_TV_SEC, compat_timespec, tv_sec);
- OFFSET(TSPC32_TV_NSEC, compat_timespec, tv_nsec);
+ OFFSET(TSPC32_TV_SEC, old_timespec32, tv_sec);
+ OFFSET(TSPC32_TV_NSEC, old_timespec32, tv_nsec);
#else
OFFSET(TVAL32_TV_SEC, timeval, tv_sec);
OFFSET(TVAL32_TV_USEC, timeval, tv_usec);
@@ -438,7 +435,7 @@ int main(void)
#ifdef CONFIG_PPC_BOOK3S
OFFSET(VCPU_TAR, kvm_vcpu, arch.tar);
#endif
- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_PC, kvm_vcpu, arch.regs.nip);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(VCPU_MSR, kvm_vcpu, arch.shregs.msr);
@@ -503,6 +500,7 @@ int main(void)
OFFSET(VCPU_VPA, kvm_vcpu, arch.vpa.pinned_addr);
OFFSET(VCPU_VPA_DIRTY, kvm_vcpu, arch.vpa.dirty);
OFFSET(VCPU_HEIR, kvm_vcpu, arch.emul_inst);
+ OFFSET(VCPU_NESTED, kvm_vcpu, arch.nested);
OFFSET(VCPU_CPU, kvm_vcpu, cpu);
OFFSET(VCPU_THREAD_CPU, kvm_vcpu, arch.thread_cpu);
#endif
@@ -695,7 +693,7 @@ int main(void)
#endif /* CONFIG_PPC_BOOK3S_64 */
#else /* CONFIG_PPC_BOOK3S */
- OFFSET(VCPU_CR, kvm_vcpu, arch.cr);
+ OFFSET(VCPU_CR, kvm_vcpu, arch.regs.ccr);
OFFSET(VCPU_XER, kvm_vcpu, arch.regs.xer);
OFFSET(VCPU_LR, kvm_vcpu, arch.regs.link);
OFFSET(VCPU_CTR, kvm_vcpu, arch.regs.ctr);
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index b2072d5bbf2b..b4241ed1456e 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -163,7 +163,7 @@ void btext_map(void)
offset = ((unsigned long) dispDeviceBase) - base;
size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+ dispDeviceRect[0];
- vbase = __ioremap(base, size, pgprot_val(pgprot_noncached_wc(__pgprot(0))));
+ vbase = ioremap_wc(base, size);
if (!vbase)
return;
logicalDisplayBase = vbase + offset;
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index a8f20e5928e1..be57bd07596d 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -20,6 +20,8 @@
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/prom.h>
+#include <asm/cputhreads.h>
+#include <asm/smp.h>
#include "cacheinfo.h"
@@ -627,17 +629,48 @@ static ssize_t level_show(struct kobject *k, struct kobj_attribute *attr, char *
static struct kobj_attribute cache_level_attr =
__ATTR(level, 0444, level_show, NULL);
+static unsigned int index_dir_to_cpu(struct cache_index_dir *index)
+{
+ struct kobject *index_dir_kobj = &index->kobj;
+ struct kobject *cache_dir_kobj = index_dir_kobj->parent;
+ struct kobject *cpu_dev_kobj = cache_dir_kobj->parent;
+ struct device *dev = kobj_to_dev(cpu_dev_kobj);
+
+ return dev->id;
+}
+
+/*
+ * On big-core systems, each core has two groups of CPUs, each of which
+ * has its own L1 cache. The thread siblings that share the L1 cache with
+ * @cpu can be obtained via cpu_smallcore_mask().
+ */
+static const struct cpumask *get_big_core_shared_cpu_map(int cpu, struct cache *cache)
+{
+ if (cache->level == 1)
+ return cpu_smallcore_mask(cpu);
+
+ return &cache->shared_cpu_map;
+}
+
static ssize_t shared_cpu_map_show(struct kobject *k, struct kobj_attribute *attr, char *buf)
{
struct cache_index_dir *index;
struct cache *cache;
- int ret;
+ const struct cpumask *mask;
+ int ret, cpu;
index = kobj_to_cache_index_dir(k);
cache = index->cache;
+ if (has_big_cores) {
+ cpu = index_dir_to_cpu(index);
+ mask = get_big_core_shared_cpu_map(cpu, cache);
+ } else {
+ mask = &cache->shared_cpu_map;
+ }
+
ret = scnprintf(buf, PAGE_SIZE - 1, "%*pb\n",
- cpumask_pr_args(&cache->shared_cpu_map));
+ cpumask_pr_args(mask));
buf[ret++] = '\n';
buf[ret] = '\0';
return ret;
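
The attribute adjusted here is consumed from userspace through sysfs. A minimal sketch of reading it follows; the path is assumed, index0 is conventionally the L1 data cache, and on big-core systems the map now reflects the small-core siblings:

/* Hedged sketch: reading the shared_cpu_map attribute shown above. */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("cpu0 L1 shared_cpu_map: %s", buf);
	fclose(f);
	return 0;
}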
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 458b928dbd84..c317080db771 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -147,8 +147,8 @@ __init_hvmode_206:
rldicl. r0,r3,4,63
bnelr
ld r5,CPU_SPEC_FEATURES(r4)
- LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE)
- xor r5,r5,r6
+ LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST)
+ andc r5,r5,r6
std r5,CPU_SPEC_FEATURES(r4)
blr
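
The switch from xor to andc matters: xor toggles the mask bits, so a bit that happened to be clear would be set instead, while andc unconditionally clears them — which is required now that the mask also covers CPU_FTR_P9_TM_HV_ASSIST. A small illustration, with feature bit values invented for the demo:

/* Hedged note: clearing (andc) vs. toggling (xor) feature bits. */
#include <assert.h>

#define CPU_FTR_HVMODE		0x1UL /* illustrative values only */
#define CPU_FTR_P9_TM_HV_ASSIST	0x2UL

int main(void)
{
	unsigned long features = CPU_FTR_HVMODE;
	unsigned long mask = CPU_FTR_HVMODE | CPU_FTR_P9_TM_HV_ASSIST;

	/* andc: unconditionally clears the mask bits */
	assert((features & ~mask) == 0);
	/* xor would *set* P9_TM_HV_ASSIST, since it wasn't set before */
	assert((features ^ mask) == CPU_FTR_P9_TM_HV_ASSIST);
	return 0;
}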
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index d10ad258d41a..bbdc4706c159 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -110,7 +110,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(paddr, PAGE_SIZE, 0);
+ vaddr = ioremap_cache(paddr, PAGE_SIZE);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 88f3963ca30f..5fc335f4d9cd 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -11,7 +11,7 @@
*
*/
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
@@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device,
- .mapping_error = swiotlb_dma_mapping_error,
+ .mapping_error = dma_direct_mapping_error,
.get_required_mask = swiotlb_powerpc_get_required,
};
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 6ebba3e48b01..6cae6b56ffd6 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -169,6 +169,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
int n = 0, l = 0;
char buffer[128];
+ if (!pdn) {
+ pr_warn("EEH: Note: No error log for absent device.\n");
+ return 0;
+ }
+
n += scnprintf(buf+n, len-n, "%04x:%02x:%02x.%01x\n",
pdn->phb->global_number, pdn->busno,
PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
@@ -399,7 +404,7 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
}
/* Isolate the PHB and send event */
- eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(phb_pe);
eeh_serialize_unlock(flags);
pr_err("EEH: PHB#%x failure detected, location: %s\n",
@@ -558,7 +563,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
* with other functions on this device, and functions under
* bridges.
*/
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
/* Most EEH events are due to device driver bugs. Having
@@ -676,7 +681,7 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
/* Check if the request is finished successfully */
if (active_flag) {
- rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
+ rc = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (rc < 0)
return rc;
@@ -825,7 +830,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
break;
case pcie_hot_reset:
- eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
+ eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
if (!(pe->type & EEH_PE_VF))
@@ -833,7 +839,8 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
- eeh_pe_state_mark_with_cfg(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
+ eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
if (!(pe->type & EEH_PE_VF))
@@ -913,16 +920,15 @@ int eeh_pe_reset_full(struct eeh_pe *pe)
break;
/* Wait until the PE is in a functioning state */
- state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
- if (eeh_state_active(state))
- break;
-
+ state = eeh_wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
if (state < 0) {
pr_warn("%s: Unrecoverable slot failure on PHB#%x-PE#%x",
__func__, pe->phb->global_number, pe->addr);
ret = -ENOTRECOVERABLE;
break;
}
+ if (eeh_state_active(state))
+ break;
/* Set error in case this is our last attempt */
ret = -EIO;
@@ -1036,6 +1042,11 @@ void eeh_probe_devices(void)
pdn = hose->pci_data;
traverse_pci_dn(pdn, eeh_ops->probe, NULL);
}
+ if (eeh_enabled())
+ pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
+ else
+ pr_info("EEH: No capable adapters found\n");
+
}
/**
@@ -1079,18 +1090,7 @@ static int eeh_init(void)
eeh_dev_phb_init_dynamic(hose);
/* Initialize EEH event */
- ret = eeh_event_init();
- if (ret)
- return ret;
-
- eeh_probe_devices();
-
- if (eeh_enabled())
- pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
- else if (!eeh_has_flag(EEH_POSTPONED_PROBE))
- pr_info("EEH: No capable adapters found\n");
-
- return ret;
+ return eeh_event_init();
}
core_initcall_sync(eeh_init);
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index a34e6912c15e..d8c90f3284b5 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -60,8 +60,6 @@ struct eeh_dev *eeh_dev_init(struct pci_dn *pdn)
/* Associate EEH device with OF node */
pdn->edev = edev;
edev->pdn = pdn;
- INIT_LIST_HEAD(&edev->list);
- INIT_LIST_HEAD(&edev->rmv_list);
return edev;
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 67619b4b3f96..9446248eb6b8 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -35,8 +35,8 @@
#include <asm/rtas.h>
struct eeh_rmv_data {
- struct list_head edev_list;
- int removed;
+ struct list_head removed_vf_list;
+ int removed_dev_count;
};
static int eeh_result_priority(enum pci_ers_result result)
@@ -281,6 +281,10 @@ static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
struct pci_driver *driver;
enum pci_ers_result new_result;
+ if (!edev->pdev) {
+ eeh_edev_info(edev, "no device");
+ return;
+ }
device_lock(&edev->pdev->dev);
if (eeh_edev_actionable(edev)) {
driver = eeh_pcid_get(edev->pdev);
@@ -400,7 +404,7 @@ static void *eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
* EEH device is created.
*/
if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
- if (list_is_last(&edev->list, &edev->pe->edevs))
+ if (list_is_last(&edev->entry, &edev->pe->edevs))
eeh_pe_restore_bars(edev->pe);
return NULL;
@@ -465,10 +469,9 @@ static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
return rc;
}
-static void *eeh_add_virt_device(void *data, void *userdata)
+static void *eeh_add_virt_device(struct eeh_dev *edev)
{
struct pci_driver *driver;
- struct eeh_dev *edev = (struct eeh_dev *)data;
struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
@@ -499,7 +502,6 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata)
struct pci_driver *driver;
struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;
- int *removed = rmv_data ? &rmv_data->removed : NULL;
/*
* Actually, we should remove the PCI bridges as well.
@@ -521,7 +523,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata)
if (eeh_dev_removed(edev))
return NULL;
- if (removed) {
+ if (rmv_data) {
if (eeh_pe_passed(edev->pe))
return NULL;
driver = eeh_pcid_get(dev);
@@ -539,10 +541,9 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata)
/* Remove it from PCI subsystem */
pr_debug("EEH: Removing %s without EEH sensitive driver\n",
pci_name(dev));
- edev->bus = dev->bus;
edev->mode |= EEH_DEV_DISCONNECTED;
- if (removed)
- (*removed)++;
+ if (rmv_data)
+ rmv_data->removed_dev_count++;
if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
@@ -558,7 +559,7 @@ static void *eeh_rmv_device(struct eeh_dev *edev, void *userdata)
pdn->pe_number = IODA_INVALID_PE;
#endif
if (rmv_data)
- list_add(&edev->rmv_list, &rmv_data->edev_list);
+ list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
} else {
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(dev);
@@ -727,7 +728,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
* the device up before the scripts have taken it down,
* potentially weird things happen.
*/
- if (!driver_eeh_aware || rmv_data->removed) {
+ if (!driver_eeh_aware || rmv_data->removed_dev_count) {
pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
(driver_eeh_aware ? "partial" : "complete"));
ssleep(5);
@@ -737,10 +738,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
* PE. We should disconnect it so the binding can be
* rebuilt when adding PCI devices.
*/
- edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
+ edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
if (pe->type & EEH_PE_VF) {
- eeh_add_virt_device(edev, NULL);
+ eeh_add_virt_device(edev);
} else {
if (!driver_eeh_aware)
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
@@ -789,7 +790,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
struct eeh_pe *tmp_pe;
int rc = 0;
enum pci_ers_result result = PCI_ERS_RESULT_NONE;
- struct eeh_rmv_data rmv_data = {LIST_HEAD_INIT(rmv_data.edev_list), 0};
+ struct eeh_rmv_data rmv_data =
+ {LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
bus = eeh_pe_bus_get(pe);
if (!bus) {
@@ -806,10 +808,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
- goto hard_fail;
+ result = PCI_ERS_RESULT_DISCONNECT;
}
- pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
- pe->freeze_count, eeh_max_freezes);
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@@ -821,31 +821,39 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
* the error. Override the result if necessary to have partially
* hotplug for this case.
*/
- pr_info("EEH: Notify device drivers to shutdown\n");
- eeh_set_channel_state(pe, pci_channel_io_frozen);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(IO frozen)", pe, eeh_report_error,
- &result);
- if ((pe->type & EEH_PE_PHB) &&
- result != PCI_ERS_RESULT_NONE &&
- result != PCI_ERS_RESULT_NEED_RESET)
- result = PCI_ERS_RESULT_NEED_RESET;
+ if (result != PCI_ERS_RESULT_DISCONNECT) {
+ pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
+ pe->freeze_count, eeh_max_freezes);
+ pr_info("EEH: Notify device drivers to shutdown\n");
+ eeh_set_channel_state(pe, pci_channel_io_frozen);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(IO frozen)", pe,
+ eeh_report_error, &result);
+ if ((pe->type & EEH_PE_PHB) &&
+ result != PCI_ERS_RESULT_NONE &&
+ result != PCI_ERS_RESULT_NEED_RESET)
+ result = PCI_ERS_RESULT_NEED_RESET;
+ }
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
*/
- rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
- if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
- pr_warn("EEH: Permanent failure\n");
- goto hard_fail;
+ if (result != PCI_ERS_RESULT_DISCONNECT) {
+ rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
+ if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
+ pr_warn("EEH: Permanent failure\n");
+ result = PCI_ERS_RESULT_DISCONNECT;
+ }
}
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed.
*/
- pr_info("EEH: Collect temporary log\n");
- eeh_slot_error_detail(pe, EEH_LOG_TEMP);
+ if (result != PCI_ERS_RESULT_DISCONNECT) {
+ pr_info("EEH: Collect temporary log\n");
+ eeh_slot_error_detail(pe, EEH_LOG_TEMP);
+ }
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
@@ -857,7 +865,7 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
if (rc) {
pr_warn("%s: Unable to reset, err=%d\n",
__func__, rc);
- goto hard_fail;
+ result = PCI_ERS_RESULT_DISCONNECT;
}
}
@@ -866,9 +874,9 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
- if (rc < 0)
- goto hard_fail;
- if (rc) {
+ if (rc < 0) {
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
@@ -882,9 +890,9 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
- if (rc < 0)
- goto hard_fail;
- if (rc) {
+ if (rc < 0) {
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
/*
@@ -897,12 +905,6 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
}
- /* If any device has a hard failure, then shut off everything. */
- if (result == PCI_ERS_RESULT_DISCONNECT) {
- pr_warn("EEH: Device driver gave up\n");
- goto hard_fail;
- }
-
/* If any device called out for a reset, then reset the slot */
if (result == PCI_ERS_RESULT_NEED_RESET) {
pr_info("EEH: Reset without hotplug activity\n");
@@ -910,88 +912,81 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
if (rc) {
pr_warn("%s: Cannot reset, err=%d\n",
__func__, rc);
- goto hard_fail;
+ result = PCI_ERS_RESULT_DISCONNECT;
+ } else {
+ result = PCI_ERS_RESULT_NONE;
+ eeh_set_channel_state(pe, pci_channel_io_normal);
+ eeh_set_irq_state(pe, true);
+ eeh_pe_report("slot_reset", pe, eeh_report_reset,
+ &result);
}
-
- pr_info("EEH: Notify device drivers "
- "the completion of reset\n");
- result = PCI_ERS_RESULT_NONE;
- eeh_set_channel_state(pe, pci_channel_io_normal);
- eeh_set_irq_state(pe, true);
- eeh_pe_report("slot_reset", pe, eeh_report_reset, &result);
- }
-
- /* All devices should claim they have recovered by now. */
- if ((result != PCI_ERS_RESULT_RECOVERED) &&
- (result != PCI_ERS_RESULT_NONE)) {
- pr_warn("EEH: Not recovered\n");
- goto hard_fail;
- }
-
- /*
- * For those hot removed VFs, we should add back them after PF get
- * recovered properly.
- */
- list_for_each_entry_safe(edev, tmp, &rmv_data.edev_list, rmv_list) {
- eeh_add_virt_device(edev, NULL);
- list_del(&edev->rmv_list);
}
- /* Tell all device drivers that they can resume operations */
- pr_info("EEH: Notify device driver to resume\n");
- eeh_set_channel_state(pe, pci_channel_io_normal);
- eeh_set_irq_state(pe, true);
- eeh_pe_report("resume", pe, eeh_report_resume, NULL);
- eeh_for_each_pe(pe, tmp_pe) {
- eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
- edev->mode &= ~EEH_DEV_NO_HANDLER;
- edev->in_error = false;
+ if ((result == PCI_ERS_RESULT_RECOVERED) ||
+ (result == PCI_ERS_RESULT_NONE)) {
+ /*
+ * For those hot removed VFs, we should add back them after PF
+ * get recovered properly.
+ */
+ list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
+ rmv_entry) {
+ eeh_add_virt_device(edev);
+ list_del(&edev->rmv_entry);
}
- }
- pr_info("EEH: Recovery successful.\n");
- goto final;
+ /* Tell all device drivers that they can resume operations */
+ pr_info("EEH: Notify device driver to resume\n");
+ eeh_set_channel_state(pe, pci_channel_io_normal);
+ eeh_set_irq_state(pe, true);
+ eeh_pe_report("resume", pe, eeh_report_resume, NULL);
+ eeh_for_each_pe(pe, tmp_pe) {
+ eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
+ edev->in_error = false;
+ }
+ }
-hard_fail:
- /*
- * About 90% of all real-life EEH failures in the field
- * are due to poorly seated PCI cards. Only 10% or so are
- * due to actual, failed cards.
- */
- pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
- "Please try reseating or replacing it\n",
- pe->phb->global_number, pe->addr);
+ pr_info("EEH: Recovery successful.\n");
+ } else {
+ /*
+ * About 90% of all real-life EEH failures in the field
+ * are due to poorly seated PCI cards. Only 10% or so are
+ * due to actual, failed cards.
+ */
+ pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
+ "Please try reseating or replacing it\n",
+ pe->phb->global_number, pe->addr);
- eeh_slot_error_detail(pe, EEH_LOG_PERM);
+ eeh_slot_error_detail(pe, EEH_LOG_PERM);
- /* Notify all devices that they're about to go down. */
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(permanent failure)", pe,
- eeh_report_failure, NULL);
+ /* Notify all devices that they're about to go down. */
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(permanent failure)", pe,
+ eeh_report_failure, NULL);
- /* Mark the PE to be removed permanently */
- eeh_pe_state_mark(pe, EEH_PE_REMOVED);
+ /* Mark the PE to be removed permanently */
+ eeh_pe_state_mark(pe, EEH_PE_REMOVED);
- /*
- * Shut down the device drivers for good. We mark
- * all removed devices correctly to avoid access
- * the their PCI config any more.
- */
- if (pe->type & EEH_PE_VF) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- } else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ /*
+ * Shut down the device drivers for good. We mark
+ * all removed devices correctly to avoid accessing
+ * their PCI config space any more.
+ */
+ if (pe->type & EEH_PE_VF) {
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
- /* The passed PE should no longer be used */
- return;
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bus);
+ pci_unlock_rescan_remove();
+ /* The passed PE should no longer be used */
+ return;
+ }
}
-final:
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
}
@@ -1026,7 +1021,7 @@ void eeh_handle_special_event(void)
phb_pe = eeh_phb_pe_get(hose);
if (!phb_pe) continue;
- eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(phb_pe);
}
eeh_serialize_unlock(flags);
@@ -1041,11 +1036,9 @@ void eeh_handle_special_event(void)
/* Purge all events of the PHB */
eeh_remove_event(pe, true);
- if (rc == EEH_NEXT_ERR_DEAD_PHB)
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- else
- eeh_pe_state_mark(pe,
- EEH_PE_ISOLATED | EEH_PE_RECOVERING);
+ if (rc != EEH_NEXT_ERR_DEAD_PHB)
+ eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
+ eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1b238ecc553e..6fa2032e0594 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -75,7 +75,6 @@ static struct eeh_pe *eeh_pe_alloc(struct pci_controller *phb, int type)
pe->type = type;
pe->phb = phb;
INIT_LIST_HEAD(&pe->child_list);
- INIT_LIST_HEAD(&pe->child);
INIT_LIST_HEAD(&pe->edevs);
pe->data = (void *)pe + ALIGN(sizeof(struct eeh_pe),
@@ -110,6 +109,57 @@ int eeh_phb_pe_create(struct pci_controller *phb)
}
/**
+ * eeh_wait_state - Wait for PE state
+ * @pe: EEH PE
+ * @max_wait: maximum wait period in milliseconds
+ *
+ * Wait for the state of the associated PE. It might take some time
+ * to retrieve the PE's state.
+ */
+int eeh_wait_state(struct eeh_pe *pe, int max_wait)
+{
+ int ret;
+ int mwait;
+
+ /*
+ * According to PAPR, the state of a PE might be temporarily
+ * unavailable. In that case we have to wait for the amount of
+ * time indicated by the firmware. The maximum wait time is
+ * 5 minutes, taken from the original EEH implementation, which
+ * also defined the minimum wait time as 1 second.
+ */
+#define EEH_STATE_MIN_WAIT_TIME (1000)
+#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
+
+ while (1) {
+ ret = eeh_ops->get_state(pe, &mwait);
+
+ if (ret != EEH_STATE_UNAVAILABLE)
+ return ret;
+
+ if (max_wait <= 0) {
+ pr_warn("%s: Timeout when getting PE's state (%d)\n",
+ __func__, max_wait);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ if (mwait < EEH_STATE_MIN_WAIT_TIME) {
+ pr_warn("%s: Firmware returned bad wait value %d\n",
+ __func__, mwait);
+ mwait = EEH_STATE_MIN_WAIT_TIME;
+ } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
+ pr_warn("%s: Firmware returned too long wait value %d\n",
+ __func__, mwait);
+ mwait = EEH_STATE_MAX_WAIT_TIME;
+ }
+
+ msleep(min(mwait, max_wait));
+ max_wait -= mwait;
+ }
+}
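
The loop above clamps the firmware-supplied wait hint into [1s, 5min], never sleeps longer than the remaining budget, and charges the hint against that budget. A self-contained userspace sketch of the same shape — get_state() here is a stub standing in for eeh_ops->get_state():

/* Hedged analogue of the clamped polling loop in eeh_wait_state(). */
#include <stdio.h>
#include <time.h>

#define MIN_WAIT_MS 1000
#define MAX_WAIT_MS (300 * 1000)
#define STATE_UNAVAILABLE (-1)

/* Stub for eeh_ops->get_state(): unavailable twice, then state 0. */
static int get_state(int *hint_ms)
{
	static int calls;

	*hint_ms = 500; /* deliberately below the minimum, to show clamping */
	return ++calls < 3 ? STATE_UNAVAILABLE : 0;
}

static int poll_state(int max_wait_ms)
{
	for (;;) {
		int hint_ms, sleep_ms;
		int ret = get_state(&hint_ms);

		if (ret != STATE_UNAVAILABLE)
			return ret;
		if (max_wait_ms <= 0)
			return -2; /* timed out, like EEH_STATE_NOT_SUPPORT */
		if (hint_ms < MIN_WAIT_MS)
			hint_ms = MIN_WAIT_MS;
		else if (hint_ms > MAX_WAIT_MS)
			hint_ms = MAX_WAIT_MS;

		/* msleep(min(mwait, max_wait)); max_wait -= mwait; */
		sleep_ms = hint_ms < max_wait_ms ? hint_ms : max_wait_ms;
		nanosleep(&(struct timespec){ .tv_sec = sleep_ms / 1000,
			.tv_nsec = (sleep_ms % 1000) * 1000000L }, NULL);
		max_wait_ms -= hint_ms;
	}
}

int main(void)
{
	printf("state = %d\n", poll_state(5000));
	return 0;
}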
+
+/**
* eeh_phb_pe_get - Retrieve PHB PE based on the given PHB
* @phb: PCI controller
*
@@ -360,7 +410,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
edev->pe = pe;
/* Put the edev to PE */
- list_add_tail(&edev->list, &pe->edevs);
+ list_add_tail(&edev->entry, &pe->edevs);
pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
pdn->phb->global_number,
pdn->busno,
@@ -369,7 +419,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
pe->addr);
return 0;
} else if (pe && (pe->type & EEH_PE_INVALID)) {
- list_add_tail(&edev->list, &pe->edevs);
+ list_add_tail(&edev->entry, &pe->edevs);
edev->pe = pe;
/*
* We're running to here because of PCI hotplug caused by
@@ -379,7 +429,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
while (parent) {
if (!(parent->type & EEH_PE_INVALID))
break;
- parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
+ parent->type &= ~EEH_PE_INVALID;
parent = parent->parent;
}
@@ -429,7 +479,7 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
* link the EEH device accordingly.
*/
list_add_tail(&pe->child, &parent->child_list);
- list_add_tail(&edev->list, &pe->edevs);
+ list_add_tail(&edev->entry, &pe->edevs);
edev->pe = pe;
pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
"Device PE#%x, Parent PE#%x\n",
@@ -457,7 +507,8 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
int cnt;
struct pci_dn *pdn = eeh_dev_to_pdn(edev);
- if (!edev->pe) {
+ pe = eeh_dev_to_pe(edev);
+ if (!pe) {
pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
__func__, pdn->phb->global_number,
pdn->busno,
@@ -467,9 +518,8 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
}
/* Remove the EEH device */
- pe = eeh_dev_to_pe(edev);
edev->pe = NULL;
- list_del(&edev->list);
+ list_del(&edev->entry);
/*
* Check if the parent PE includes any EEH devices.
@@ -541,56 +591,50 @@ void eeh_pe_update_time_stamp(struct eeh_pe *pe)
}
/**
- * __eeh_pe_state_mark - Mark the state for the PE
- * @data: EEH PE
- * @flag: state
+ * eeh_pe_state_mark - Mark specified state for PE and its associated device
+ * @pe: EEH PE
*
- * The function is used to mark the indicated state for the given
- * PE. Also, the associated PCI devices will be put into IO frozen
- * state as well.
+ * EEH error affects the current PE and its child PEs. The function
+ * is used to mark appropriate state for the affected PEs and the
+ * associated devices.
*/
-static void *__eeh_pe_state_mark(struct eeh_pe *pe, void *flag)
+void eeh_pe_state_mark(struct eeh_pe *root, int state)
{
- int state = *((int *)flag);
- struct eeh_dev *edev, *tmp;
- struct pci_dev *pdev;
-
- /* Keep the state of permanently removed PE intact */
- if (pe->state & EEH_PE_REMOVED)
- return NULL;
-
- pe->state |= state;
-
- /* Offline PCI devices if applicable */
- if (!(state & EEH_PE_ISOLATED))
- return NULL;
-
- eeh_pe_for_each_dev(pe, edev, tmp) {
- pdev = eeh_dev_to_pci_dev(edev);
- if (pdev)
- pdev->error_state = pci_channel_io_frozen;
- }
-
- /* Block PCI config access if required */
- if (pe->state & EEH_PE_CFG_RESTRICTED)
- pe->state |= EEH_PE_CFG_BLOCKED;
+ struct eeh_pe *pe;
- return NULL;
+ eeh_for_each_pe(root, pe)
+ if (!(pe->state & EEH_PE_REMOVED))
+ pe->state |= state;
}
+EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
/**
- * eeh_pe_state_mark - Mark specified state for PE and its associated device
+ * eeh_pe_mark_isolated - Mark a PE and its devices as isolated
* @pe: EEH PE
*
- * EEH error affects the current PE and its child PEs. The function
- * is used to mark appropriate state for the affected PEs and the
- * associated devices.
+ * Record that a PE has been isolated by marking the PE and its children as
+ * EEH_PE_ISOLATED (and EEH_PE_CFG_BLOCKED, if required) and their PCI devices
+ * as pci_channel_io_frozen.
*/
-void eeh_pe_state_mark(struct eeh_pe *pe, int state)
+void eeh_pe_mark_isolated(struct eeh_pe *root)
{
- eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
+ struct eeh_pe *pe;
+ struct eeh_dev *edev;
+ struct pci_dev *pdev;
+
+ eeh_pe_state_mark(root, EEH_PE_ISOLATED);
+ eeh_for_each_pe(root, pe) {
+ list_for_each_entry(edev, &pe->edevs, entry) {
+ pdev = eeh_dev_to_pci_dev(edev);
+ if (pdev)
+ pdev->error_state = pci_channel_io_frozen;
+ }
+ /* Block PCI config access if required */
+ if (pe->state & EEH_PE_CFG_RESTRICTED)
+ pe->state |= EEH_PE_CFG_BLOCKED;
+ }
}
-EXPORT_SYMBOL_GPL(eeh_pe_state_mark);
+EXPORT_SYMBOL_GPL(eeh_pe_mark_isolated);
static void *__eeh_pe_dev_mode_mark(struct eeh_dev *edev, void *flag)
{
@@ -671,28 +715,6 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
}
-/**
- * eeh_pe_state_mark_with_cfg - Mark PE state with unblocked config space
- * @pe: PE
- * @state: PE state to be set
- *
- * Set specified flag to PE and its child PEs. The PCI config space
- * of some PEs is blocked automatically when EEH_PE_ISOLATED is set,
- * which isn't needed in some situations. The function allows to set
- * the specified flag to indicated PEs without blocking their PCI
- * config space.
- */
-void eeh_pe_state_mark_with_cfg(struct eeh_pe *pe, int state)
-{
- eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
- if (!(state & EEH_PE_ISOLATED))
- return;
-
- /* Clear EEH_PE_CFG_BLOCKED, which might be set just now */
- state = EEH_PE_CFG_BLOCKED;
- eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
-}
-
/*
* Some PCI bridges (e.g. PLX bridges) have primary/secondary
* buses assigned explicitly by firmware, and we probably have
@@ -945,7 +967,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
return pe->bus;
/* Retrieve the parent PCI bus of first (top) PCI device */
- edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
+ edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
pdev = eeh_dev_to_pci_dev(edev);
if (pdev)
return pdev->bus;
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e58c3f467db5..77decded1175 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -794,7 +794,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
lis r10,MSR_KERNEL@h
ori r10,r10,MSR_KERNEL@l
bl transfer_to_handler_full
- .long nonrecoverable_exception
+ .long unrecoverable_exception
.long ret_from_except
#endif
@@ -1297,7 +1297,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_601)
rlwinm r3,r3,0,0,30
stw r3,_TRAP(r1)
4: addi r3,r1,STACK_FRAME_OVERHEAD
- bl nonrecoverable_exception
+ bl unrecoverable_exception
/* shouldn't return */
b 4b
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2206912ea4f0..7b1693adff2a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -171,7 +171,7 @@ system_call: /* label this so stack traces look sane */
* based on caller's run-mode / personality.
*/
ld r11,SYS_CALL_TABLE@toc(2)
- andi. r10,r10,_TIF_32BIT
+ andis. r10,r10,_TIF_32BIT@h
beq 15f
addi r11,r11,8 /* use 32-bit syscall entries */
clrldi r3,r3,32
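
The change from andi. to andis. with _TIF_32BIT@h is forced by the flag's new position: TIF_32BIT now lives at bit 20, and andi. can only test a 16-bit immediate against the low halfword, while andis. shifts the immediate into bits 16-31. A quick check of the arithmetic:

/* Hedged illustration of why bit 20 needs the high-halfword test. */
#include <assert.h>
#include <stdint.h>

#define TIF_32BIT	20
#define _TIF_32BIT	(1UL << TIF_32BIT)

int main(void)
{
	/* andi. only sees the low 16 bits of the mask... */
	uint16_t lo = _TIF_32BIT & 0xffff;
	/* ...andis. tests against the high 16 bits (_TIF_32BIT@h) */
	uint16_t hi = _TIF_32BIT >> 16;

	assert(lo == 0);
	assert(hi == 0x10); /* bit 20 lands in the upper halfword */
	return 0;
}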
@@ -386,10 +386,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
4: /* Anything else left to do? */
BEGIN_FTR_SECTION
- lis r3,INIT_PPR@highest /* Set thread.ppr = 3 */
- ld r10,PACACURRENT(r13)
+ lis r3,DEFAULT_PPR@highest /* Set default PPR */
sldi r3,r3,32 /* bits 11-13 are used for ppr */
- std r3,TASKTHREADPPR(r10)
+ std r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
@@ -624,6 +623,10 @@ _GLOBAL(_switch)
addi r6,r4,-THREAD /* Convert THREAD to 'current' */
std r6,PACACURRENT(r13) /* Set new 'current' */
+#if defined(CONFIG_STACKPROTECTOR)
+ ld r6, TASK_CANARY(r6)
+ std r6, PACA_CANARY(r13)
+#endif
ld r8,KSP(r4) /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
@@ -672,7 +675,9 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
isync
slbie r6
+BEGIN_FTR_SECTION
slbie r6 /* Workaround POWER5 < DD2.1 issue */
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
slbmte r7,r0
isync
2:
@@ -936,12 +941,6 @@ fast_exception_return:
andi. r0,r3,MSR_RI
beq- .Lunrecov_restore
- /* Load PPR from thread struct before we clear MSR:RI */
-BEGIN_FTR_SECTION
- ld r2,PACACURRENT(r13)
- ld r2,TASKTHREADPPR(r2)
-END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
-
/*
* Clear RI before restoring r13. If we are returning to
* userspace and we take an exception after restoring r13,
@@ -962,7 +961,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
andi. r0,r3,MSR_PR
beq 1f
BEGIN_FTR_SECTION
- mtspr SPRN_PPR,r2 /* Restore PPR */
+ /* Restore PPR */
+ ld r2,_PPR(r1)
+ mtspr SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
REST_GPR(13, r1)
@@ -1118,7 +1119,7 @@ _ASM_NOKPROBE_SYMBOL(fast_exception_return);
_GLOBAL(enter_rtas)
mflr r0
std r0,16(r1)
- stdu r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */
/* Because RTAS is running in 32b mode, it clobbers the high order half
* of all registers that it saves. We therefore save those registers
@@ -1250,7 +1251,7 @@ rtas_restore_regs:
ld r8,_DSISR(r1)
mtdsisr r8
- addi r1,r1,RTAS_FRAME_SIZE /* Unstack our frame */
+ addi r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
ld r0,16(r1) /* get return address */
mtlr r0
@@ -1261,7 +1262,7 @@ rtas_restore_regs:
_GLOBAL(enter_prom)
mflr r0
std r0,16(r1)
- stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
+ stdu r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */
/* Because PROM is running in 32b mode, it clobbers the high order half
* of all registers that it saves. We therefore save those registers
@@ -1318,8 +1319,8 @@ _GLOBAL(enter_prom)
REST_10GPRS(22, r1)
ld r4,_CCR(r1)
mtcr r4
-
- addi r1,r1,PROM_FRAME_SIZE
+
+ addi r1,r1,SWITCH_FRAME_SIZE
ld r0,16(r1)
mtlr r0
blr
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 2d8fc8c9da7a..89d32bb79d5e 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -244,14 +244,13 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_0(PACA_EXMC)
BEGIN_FTR_SECTION
- b machine_check_powernv_early
+ b machine_check_common_early
FTR_SECTION_ELSE
b machine_check_pSeries_0
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)
-TRAMP_REAL_BEGIN(machine_check_powernv_early)
-BEGIN_FTR_SECTION
+TRAMP_REAL_BEGIN(machine_check_common_early)
EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
/*
* Register contents:
@@ -305,7 +304,9 @@ BEGIN_FTR_SECTION
/* Save r9 through r13 from EXMC save area to stack frame. */
EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
mfmsr r11 /* get MSR value */
+BEGIN_FTR_SECTION
ori r11,r11,MSR_ME /* turn on ME bit */
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
ori r11,r11,MSR_RI /* turn on RI bit */
LOAD_HANDLER(r12, machine_check_handle_early)
1: mtspr SPRN_SRR0,r12
@@ -324,13 +325,15 @@ BEGIN_FTR_SECTION
andc r11,r11,r10 /* Turn off MSR_ME */
b 1b
b . /* prevent speculative execution */
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
TRAMP_REAL_BEGIN(machine_check_pSeries)
.globl machine_check_fwnmi
machine_check_fwnmi:
SET_SCRATCH0(r13) /* save r13 */
EXCEPTION_PROLOG_0(PACA_EXMC)
+BEGIN_FTR_SECTION
+ b machine_check_common_early
+END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
machine_check_pSeries_0:
EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
/*
@@ -440,6 +443,9 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
bl machine_check_early
std r3,RESULT(r1) /* Save result */
ld r12,_MSR(r1)
+BEGIN_FTR_SECTION
+ b 4f
+END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
#ifdef CONFIG_PPC_P7_NAP
/*
@@ -463,11 +469,12 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
*/
rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
beq 5f
- andi. r11,r12,MSR_PR /* See if coming from user. */
+4: andi. r11,r12,MSR_PR /* See if coming from user. */
bne 9f /* continue in V mode if we are. */
5:
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+BEGIN_FTR_SECTION
/*
* We are coming from kernel context. Check if we are coming from
* guest. if yes, then we can continue. We will fall through
@@ -476,6 +483,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
lbz r11,HSTATE_IN_GUEST(r13)
cmpwi r11,0 /* Check if coming from guest */
bne 9f /* continue if we are. */
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
/*
* At this point we are not sure about what context we come from.
@@ -510,6 +518,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
cmpdi r3,0 /* see if we handled MCE successfully */
beq 1b /* if !handled then panic */
+BEGIN_FTR_SECTION
/*
* Return from MC interrupt.
* Queue up the MCE event so that we can log it later, while
@@ -518,10 +527,24 @@ EXC_COMMON_BEGIN(machine_check_handle_early)
bl machine_check_queue_event
MACHINE_CHECK_HANDLER_WINDUP
RFI_TO_USER_OR_KERNEL
+FTR_SECTION_ELSE
+ /*
+ * pSeries: Return from MC interrupt. Before that, stay on the
+ * emergency stack and call machine_check_exception to log the MCE event.
+ */
+ LOAD_HANDLER(r10,mce_return)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ RFI_TO_KERNEL
+ b .
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
9:
/* Deliver the machine check to host kernel in V mode. */
MACHINE_CHECK_HANDLER_WINDUP
- b machine_check_pSeries
+ SET_SCRATCH0(r13) /* save r13 */
+ EXCEPTION_PROLOG_0(PACA_EXMC)
+ b machine_check_pSeries_0
EXC_COMMON_BEGIN(unrecover_mce)
/* Invoke machine_check_exception to print MCE event and panic. */
@@ -535,6 +558,13 @@ EXC_COMMON_BEGIN(unrecover_mce)
bl unrecoverable_exception
b 1b
+EXC_COMMON_BEGIN(mce_return)
+ /* Invoke machine_check_exception to print MCE event and return. */
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl machine_check_exception
+ MACHINE_CHECK_HANDLER_WINDUP
+ RFI_TO_KERNEL
+ b .
EXC_REAL(data_access, 0x300, 0x80)
EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
@@ -566,28 +596,36 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
- mr r12,r3 /* save r3 */
- mfspr r3,SPRN_DAR
- mfspr r11,SPRN_SRR1
- crset 4*cr6+eq
- BRANCH_TO_COMMON(r10, slb_miss_common)
+EXCEPTION_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, KVMTEST_PR, 0x380);
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
- mr r12,r3 /* save r3 */
- mfspr r3,SPRN_DAR
- mfspr r11,SPRN_SRR1
- crset 4*cr6+eq
- BRANCH_TO_COMMON(r10, slb_miss_common)
+EXCEPTION_RELON_PROLOG(PACA_EXSLB, data_access_slb_common, EXC_STD, NOTEST, 0x380);
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
+
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
+EXC_COMMON_BEGIN(data_access_slb_common)
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXSLB+EX_DAR(r13)
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
+ ld r4,PACA_EXSLB+EX_DAR(r13)
+ std r4,_DAR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_slb_fault
+ cmpdi r3,0
+ bne- 1f
+ b fast_exception_return
+1: /* Error case */
+ std r3,RESULT(r1)
+ bl save_nvgprs
+ RECONCILE_IRQ_STATE(r10, r11)
+ ld r4,_DAR(r1)
+ ld r5,RESULT(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_bad_slb_fault
+ b ret_from_except
+
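
The replacement fast path is now mostly C: the assembly saves state, calls do_slb_fault(), and either takes fast_exception_return on success or funnels into do_bad_slb_fault(). A hedged C-level sketch of that dispatch — the return values and address check are illustrative, not the real ESID lookup:

/* Hedged sketch of the branch structure set up by the asm above. */
#include <stdio.h>

/* Assumed contract: 0 = SLB entry installed, negative = report the fault. */
static long do_slb_fault(unsigned long long ea)
{
	/* illustrative check only; the real code looks up the ESID */
	return ea >= 0xc000000000000000ULL ? 0 : -14;
}

static void do_bad_slb_fault(unsigned long long ea, long err)
{
	printf("bad SLB fault at %#llx, err=%ld\n", ea, err);
}

static void slb_miss(unsigned long long ea)
{
	long err = do_slb_fault(ea);	/* "bl do_slb_fault" */

	if (err)			/* "cmpdi r3,0; bne- 1f" */
		do_bad_slb_fault(ea, err);
	/* else: the asm takes fast_exception_return */
}

int main(void)
{
	slb_miss(0xc000000000001000ULL);	/* kernel-looking address */
	slb_miss(0x1000ULL);			/* drives the error path */
	return 0;
}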
EXC_REAL(instruction_access, 0x400, 0x80)
EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
@@ -610,160 +648,34 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
- mr r12,r3 /* save r3 */
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r11,SPRN_SRR1
- crclr 4*cr6+eq
- BRANCH_TO_COMMON(r10, slb_miss_common)
+EXCEPTION_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, KVMTEST_PR, 0x480);
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
- SET_SCRATCH0(r13)
- EXCEPTION_PROLOG_0(PACA_EXSLB)
- EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
- mr r12,r3 /* save r3 */
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- mfspr r11,SPRN_SRR1
- crclr 4*cr6+eq
- BRANCH_TO_COMMON(r10, slb_miss_common)
+EXCEPTION_RELON_PROLOG(PACA_EXSLB, instruction_access_slb_common, EXC_STD, NOTEST, 0x480);
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
-TRAMP_KVM(PACA_EXSLB, 0x480)
-
-
-/*
- * This handler is used by the 0x380 and 0x480 SLB miss interrupts, as well as
- * the virtual mode 0x4380 and 0x4480 interrupts if AIL is enabled.
- */
-EXC_COMMON_BEGIN(slb_miss_common)
- /*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contains the saved r3,
- * r11 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * cr6.eq is set for a D-SLB miss, clear for a I-SLB miss
- * We assume we aren't going to take any exceptions during this
- * procedure.
- */
- mflr r10
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- andi. r9,r11,MSR_PR // Check for exception from userspace
- cmpdi cr4,r9,MSR_PR // And save the result in CR4 for later
-
- /*
- * Test MSR_RI before calling slb_allocate_realmode, because the
- * MSR in r11 gets clobbered. However we still want to allocate
- * SLB in case MSR_RI=0, to minimise the risk of getting stuck in
- * recursive SLB faults. So use cr5 for this, which is preserved.
- */
- andi. r11,r11,MSR_RI /* check for unrecoverable exception */
- cmpdi cr5,r11,MSR_RI
-
- crset 4*cr0+eq
-#ifdef CONFIG_PPC_BOOK3S_64
-BEGIN_MMU_FTR_SECTION
- bl slb_allocate
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
-#endif
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
- mtlr r10
-
- /*
- * Large address, check whether we have to allocate new contexts.
- */
- beq- 8f
- bne- cr5,2f /* if unrecoverable exception, oops */
-
- /* All done -- return from exception. */
-
- bne cr4,1f /* returning to kernel */
-
- mtcrf 0x80,r9
- mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
- mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
- mtcrf 0x02,r9 /* I/D indication is in cr6 */
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-
- RESTORE_CTR(r9, PACA_EXSLB)
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
- mr r3,r12
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- RFI_TO_USER
- b . /* prevent speculative execution */
-1:
- mtcrf 0x80,r9
- mtcrf 0x08,r9 /* MSR[PR] indication is in cr4 */
- mtcrf 0x04,r9 /* MSR[RI] indication is in cr5 */
- mtcrf 0x02,r9 /* I/D indication is in cr6 */
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-
- RESTORE_CTR(r9, PACA_EXSLB)
- RESTORE_PPR_PACA(PACA_EXSLB, r9)
- mr r3,r12
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- RFI_TO_KERNEL
- b . /* prevent speculative execution */
-
-
-2: std r3,PACA_EXSLB+EX_DAR(r13)
- mr r3,r12
- mfspr r11,SPRN_SRR0
- mfspr r12,SPRN_SRR1
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b .
-
-8: std r3,PACA_EXSLB+EX_DAR(r13)
- mr r3,r12
- mfspr r11,SPRN_SRR0
- mfspr r12,SPRN_SRR1
- LOAD_HANDLER(r10, large_addr_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- RFI_TO_KERNEL
- b .
+TRAMP_KVM(PACA_EXSLB, 0x480)
-EXC_COMMON_BEGIN(unrecov_slb)
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- RECONCILE_IRQ_STATE(r10, r11)
+EXC_COMMON_BEGIN(instruction_access_slb_common)
+ EXCEPTION_PROLOG_COMMON(0x480, PACA_EXSLB)
+ ld r4,_NIP(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_slb_fault
+ cmpdi r3,0
+ bne- 1f
+ b fast_exception_return
+1: /* Error case */
+ std r3,RESULT(r1)
bl save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl unrecoverable_exception
- b 1b
-
-EXC_COMMON_BEGIN(large_addr_slb)
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
RECONCILE_IRQ_STATE(r10, r11)
- ld r3, PACA_EXSLB+EX_DAR(r13)
- std r3, _DAR(r1)
- beq cr6, 2f
- li r10, 0x481 /* fix trap number for I-SLB miss */
- std r10, _TRAP(r1)
-2: bl save_nvgprs
- addi r3, r1, STACK_FRAME_OVERHEAD
- bl slb_miss_large_addr
+ ld r4,_NIP(r1)
+ ld r5,RESULT(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl do_bad_slb_fault
b ret_from_except
+
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
.globl hardware_interrupt_hv;
hardware_interrupt_hv:
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index a711d22339ea..761b28b1427d 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -1444,8 +1444,8 @@ static ssize_t fadump_register_store(struct kobject *kobj,
break;
case 1:
if (fw_dump.dump_registered == 1) {
- ret = -EEXIST;
- goto unlock_out;
+ /* Un-register Firmware-assisted dump */
+ fadump_unregister_dump(&fdm);
}
/* Register Firmware-assisted dump */
ret = register_fadump();
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 6582f824d620..134a573a9f2d 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -642,7 +642,7 @@ DTLBMissIMMR:
mtspr SPRN_MD_TWC, r10
mfspr r10, SPRN_IMMR /* Get current IMMR */
rlwinm r10, r10, 0, 0xfff80000 /* Get 512 kbytes boundary */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT | _PAGE_NO_CACHE
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
@@ -660,7 +660,7 @@ DTLBMissLinear:
li r11, MD_PS8MEG | MD_SVALID | M_APG2
mtspr SPRN_MD_TWC, r11
rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
- ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
+ ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT
mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
@@ -679,7 +679,7 @@ ITLBMissLinear:
li r11, MI_PS8MEG | MI_SVALID | M_APG2
mtspr SPRN_MI_TWC, r11
rlwinm r10, r10, 0, 0x0f800000 /* 8xx supports max 256Mb RAM */
- ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY | \
+ ori r10, r10, 0xf0 | MI_SPS16K | _PAGE_SH | _PAGE_DIRTY | \
_PAGE_PRESENT
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
diff --git a/arch/powerpc/kernel/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c
index aa9f1b8261db..7e89d02a84e1 100644
--- a/arch/powerpc/kernel/io-workarounds.c
+++ b/arch/powerpc/kernel/io-workarounds.c
@@ -153,10 +153,10 @@ static const struct ppc_pci_io iowa_pci_io = {
#ifdef CONFIG_PPC_INDIRECT_MMIO
static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size,
- unsigned long flags, void *caller)
+ pgprot_t prot, void *caller)
{
struct iowa_bus *bus;
- void __iomem *res = __ioremap_caller(addr, size, flags, caller);
+ void __iomem *res = __ioremap_caller(addr, size, prot, caller);
int busno;
bus = iowa_pci_find(0, (unsigned long)addr);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 19b4c628f3be..f0dc680e659a 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -785,9 +785,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
vaddr = page_address(page) + offset;
uaddr = (unsigned long)vaddr;
- npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
if (tbl) {
+ npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
align = 0;
if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
((unsigned long)vaddr & ~PAGE_MASK) == 0)
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 1df6c74aa731..fda3ae48480c 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -110,14 +110,14 @@ static void pci_process_ISA_OF_ranges(struct device_node *isa_node,
size = 0x10000;
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- size, pgprot_val(pgprot_noncached(__pgprot(0))));
+ size, pgprot_noncached(PAGE_KERNEL));
return;
inval_range:
printk(KERN_ERR "no ISA IO ranges or unexpected isa range, "
"mapping 64k\n");
__ioremap_at(phb_io_base_phys, (void *)ISA_IO_BASE,
- 0x10000, pgprot_val(pgprot_noncached(__pgprot(0))));
+ 0x10000, pgprot_noncached(PAGE_KERNEL));
}
@@ -253,7 +253,7 @@ void __init isa_bridge_init_non_pci(struct device_node *np)
*/
isa_io_base = ISA_IO_BASE;
__ioremap_at(pbase, (void *)ISA_IO_BASE,
- size, pgprot_val(pgprot_noncached(__pgprot(0))));
+ size, pgprot_noncached(PAGE_KERNEL));
pr_debug("ISA: Non-PCI bridge is %pOF\n", np);
}
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 35e240a0a408..59c578f865aa 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -24,6 +24,7 @@
#include <asm/processor.h>
#include <asm/machdep.h>
#include <asm/debug.h>
+#include <asm/code-patching.h>
#include <linux/slab.h>
/*
@@ -144,7 +145,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
return 0;
- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+ if (*(u32 *)regs->nip == BREAK_INSTR)
regs->nip += BREAK_INSTR_SIZE;
return 1;
@@ -441,16 +442,42 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
return -1;
}
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+ unsigned int instr;
+ unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+
+ err = probe_kernel_address(addr, instr);
+ if (err)
+ return err;
+
+ err = patch_instruction(addr, BREAK_INSTR);
+ if (err)
+ return -EFAULT;
+
+ *(unsigned int *)bpt->saved_instr = instr;
+
+ return 0;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+ int err;
+ unsigned int instr = *(unsigned int *)bpt->saved_instr;
+ unsigned int *addr = (unsigned int *)bpt->bpt_addr;
+
+ err = patch_instruction(addr, instr);
+ if (err)
+ return -EFAULT;
+
+ return 0;
+}
+
/*
* Global data
*/
-struct kgdb_arch arch_kgdb_ops = {
-#ifdef __LITTLE_ENDIAN__
- .gdb_bpt_instr = {0x08, 0x10, 0x82, 0x7d},
-#else
- .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
-#endif
-};
+struct kgdb_arch arch_kgdb_ops;
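The byte arrays deleted above are the little- and big-endian encodings of one and the same 32-bit trap instruction; the new set/remove hooks now patch it in as a single integer via patch_instruction(). Assuming BREAK_INSTR carries the conventional encoding from asm/kgdb.h, the value in question is:

/* Hypothetical illustration of the constant replacing the byte arrays: */
#define PPC_KGDB_BREAK_INSN 0x7d821008u /* "twge r2,r2"; bytes 7d 82 10 08 in BE */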
static int kgdb_not_implemented(struct pt_regs *regs)
{
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index efdd16a79075..bd933a75f0bc 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -488,10 +488,11 @@ long machine_check_early(struct pt_regs *regs)
{
long handled = 0;
- __this_cpu_inc(irq_stat.mce_exceptions);
-
- if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
- handled = cur_cpu_spec->machine_check_early(regs);
+ /*
+ * See if platform is capable of handling machine check.
+ */
+ if (ppc_md.machine_check_early)
+ handled = ppc_md.machine_check_early(regs);
return handled;
}
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 3497c8329c1d..6b800eec31f2 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -60,7 +60,7 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
/* flush SLBs and reload */
#ifdef CONFIG_PPC_BOOK3S_64
-static void flush_and_reload_slb(void)
+void flush_and_reload_slb(void)
{
/* Invalidate all SLBs */
slb_flush_all_realmode();
@@ -89,6 +89,13 @@ static void flush_and_reload_slb(void)
static void flush_erat(void)
{
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+ flush_and_reload_slb();
+ return;
+ }
+#endif
+ /* PPC_INVALIDATE_ERAT can only be used on ISA v3 and newer */
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 77371c9ef3d8..2d861a36662e 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -74,6 +74,14 @@ int module_finalize(const Elf_Ehdr *hdr,
(void *)sect->sh_addr + sect->sh_size);
#endif /* CONFIG_PPC64 */
+#ifdef PPC64_ELF_ABI_v1
+ sect = find_section(hdr, sechdrs, ".opd");
+ if (sect != NULL) {
+ me->arch.start_opd = sect->sh_addr;
+ me->arch.end_opd = sect->sh_addr + sect->sh_size;
+ }
+#endif /* PPC64_ELF_ABI_v1 */
+
#ifdef CONFIG_PPC_BARRIER_NOSPEC
sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
if (sect != NULL)
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index b8d61e019d06..8661eea78503 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -360,11 +360,6 @@ int module_frob_arch_sections(Elf64_Ehdr *hdr,
else if (strcmp(secstrings+sechdrs[i].sh_name,"__versions")==0)
dedotify_versions((void *)hdr + sechdrs[i].sh_offset,
sechdrs[i].sh_size);
- else if (!strcmp(secstrings + sechdrs[i].sh_name, ".opd")) {
- me->arch.start_opd = sechdrs[i].sh_addr;
- me->arch.end_opd = sechdrs[i].sh_addr +
- sechdrs[i].sh_size;
- }
/* We don't handle .init for the moment: rename to _init */
while ((p = strstr(secstrings + sechdrs[i].sh_name, ".init")))
@@ -685,7 +680,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
case R_PPC64_REL32:
/* 32 bits relative (used by relative exception tables) */
- *(u32 *)location = value - (unsigned long)location;
+ /* Convert value to relative */
+ value -= (unsigned long)location;
+ if (value + 0x80000000 > 0xffffffff) {
+ pr_err("%s: REL32 %li out of range!\n",
+ me->name, (long int)value);
+ return -ENOEXEC;
+ }
+ *(u32 *)location = value;
break;
case R_PPC64_TOCSAVE:
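The guard added to the R_PPC64_REL32 case is the usual unsigned trick for testing whether a displacement fits a signed 32-bit field. In isolation (a standalone sketch):

#include <stdbool.h>

/* True when (long)value lies in [-2^31, 2^31 - 1]: adding 2^31 maps
 * that signed range onto the unsigned range [0, 2^32 - 1]. */
static bool fits_rel32(unsigned long value)
{
	return value + 0x80000000UL <= 0xffffffffUL;
}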
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index d63b488d34d7..4da8ed576229 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -17,7 +17,6 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/syscalls.h>
#include <asm/processor.h>
#include <asm/io.h>
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index dff28f903512..9d8c10d55407 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -159,7 +159,7 @@ static int pcibios_map_phb_io_space(struct pci_controller *hose)
/* Establish the mapping */
if (__ioremap_at(phys_page, area->addr, size_page,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)
+ pgprot_noncached(PAGE_KERNEL)) == NULL)
return -ENOMEM;
/* Fixup hose IO resource */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 5d983d8bac27..4d5322cfad25 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -43,6 +43,7 @@
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
+#include <linux/seq_buf.h>
#include <asm/pgtable.h>
#include <asm/io.h>
@@ -65,6 +66,7 @@
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
+#include <asm/stacktrace.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
@@ -102,24 +104,18 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
}
-static inline bool msr_tm_active(unsigned long msr)
-{
- return MSR_TM_ACTIVE(msr);
-}
-
static bool tm_active_with_fp(struct task_struct *tsk)
{
- return msr_tm_active(tsk->thread.regs->msr) &&
+ return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_FP);
}
static bool tm_active_with_altivec(struct task_struct *tsk)
{
- return msr_tm_active(tsk->thread.regs->msr) &&
+ return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
-static inline bool msr_tm_active(unsigned long msr) { return false; }
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
@@ -247,7 +243,8 @@ void enable_kernel_fp(void)
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ if (!MSR_TM_ACTIVE(cpumsr) &&
+ MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_fpu(current);
}
@@ -311,7 +308,8 @@ void enable_kernel_altivec(void)
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ if (!MSR_TM_ACTIVE(cpumsr) &&
+ MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_altivec(current);
}
@@ -397,7 +395,8 @@ void enable_kernel_vsx(void)
* giveup as this would save to the 'live' structure not the
* checkpointed structure.
*/
- if(!msr_tm_active(cpumsr) && msr_tm_active(current->thread.regs->msr))
+ if (!MSR_TM_ACTIVE(cpumsr) &&
+ MSR_TM_ACTIVE(current->thread.regs->msr))
return;
__giveup_vsx(current);
}
@@ -530,7 +529,7 @@ void restore_math(struct pt_regs *regs)
{
unsigned long msr;
- if (!msr_tm_active(regs->msr) &&
+ if (!MSR_TM_ACTIVE(regs->msr) &&
!current->thread.load_fp && !loadvec(current->thread))
return;
@@ -1252,17 +1251,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
return last;
}
-static int instructions_to_print = 16;
+#define NR_INSN_TO_PRINT 16
static void show_instructions(struct pt_regs *regs)
{
int i;
- unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
- sizeof(int));
+ unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
printk("Instruction dump:");
- for (i = 0; i < instructions_to_print; i++) {
+ for (i = 0; i < NR_INSN_TO_PRINT; i++) {
int instr;
if (!(i % 8))
@@ -1277,7 +1275,7 @@ static void show_instructions(struct pt_regs *regs)
#endif
if (!__kernel_text_address(pc) ||
- probe_kernel_address((unsigned int __user *)pc, instr)) {
+ probe_kernel_address((const void *)pc, instr)) {
pr_cont("XXXXXXXX ");
} else {
if (regs->nip == pc)
@@ -1295,43 +1293,43 @@ static void show_instructions(struct pt_regs *regs)
void show_user_instructions(struct pt_regs *regs)
{
unsigned long pc;
- int i;
+ int n = NR_INSN_TO_PRINT;
+ struct seq_buf s;
+ char buf[96]; /* enough for 8 times 9 + 2 chars */
- pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));
+ pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
/*
* Make sure the NIP points at userspace, not kernel text/data or
* elsewhere.
*/
- if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
+ if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
current->comm, current->pid);
return;
}
- pr_info("%s[%d]: code: ", current->comm, current->pid);
+ seq_buf_init(&s, buf, sizeof(buf));
- for (i = 0; i < instructions_to_print; i++) {
- int instr;
+ while (n) {
+ int i;
- if (!(i % 8) && (i > 0)) {
- pr_cont("\n");
- pr_info("%s[%d]: code: ", current->comm, current->pid);
- }
+ seq_buf_clear(&s);
- if (probe_kernel_address((unsigned int __user *)pc, instr)) {
- pr_cont("XXXXXXXX ");
- } else {
- if (regs->nip == pc)
- pr_cont("<%08x> ", instr);
- else
- pr_cont("%08x ", instr);
+ for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
+ int instr;
+
+ if (probe_kernel_address((const void *)pc, instr)) {
+ seq_buf_printf(&s, "XXXXXXXX ");
+ continue;
+ }
+ seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
}
- pc += sizeof(int);
+ if (!seq_buf_has_overflowed(&s))
+ pr_info("%s[%d]: code: %s\n", current->comm,
+ current->pid, s.buffer);
}
-
- pr_cont("\n");
}
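The rework batches one line's worth of output into a seq_buf and emits it with a single pr_info() call, so partial lines from concurrent CPUs are far less likely to interleave than with the chained pr_cont() calls it replaces. The pattern in isolation (a minimal sketch using the seq_buf API seen above):

#include <linux/kernel.h>
#include <linux/seq_buf.h>

static void emit_one_line(const u32 *words, int n)
{
	char buf[96];
	struct seq_buf s;
	int i;

	seq_buf_init(&s, buf, sizeof(buf));
	for (i = 0; i < n; i++)
		seq_buf_printf(&s, "%08x ", words[i]);
	if (!seq_buf_has_overflowed(&s))
		pr_info("code: %s\n", s.buffer); /* one atomic printk per line */
}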
struct regbit {
@@ -1485,6 +1483,15 @@ void flush_thread(void)
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}
+#ifdef CONFIG_PPC_BOOK3S_64
+void arch_setup_new_exec(void)
+{
+ if (radix_enabled())
+ return;
+ hash__setup_new_exec();
+}
+#endif
+
int set_thread_uses_vas(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
@@ -1705,7 +1712,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.dscr = mfspr(SPRN_DSCR);
}
if (cpu_has_feature(CPU_FTR_HAS_PPR))
- p->thread.ppr = INIT_PPR;
+ childregs->ppr = DEFAULT_PPR;
p->thread.tidr = 0;
#endif
@@ -1713,6 +1720,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
+void preload_new_slb_context(unsigned long start, unsigned long sp);
+
/*
* Set up a thread for executing a new program
*/
@@ -1720,6 +1729,10 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ preload_new_slb_context(start, sp);
+#endif
#endif
/*
@@ -1810,6 +1823,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
#ifdef CONFIG_VSX
current->thread.used_vsr = 0;
#endif
+ current->thread.load_slb = 0;
current->thread.load_fp = 0;
memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
current->thread.fp_save_area = NULL;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 9b38a2e5dd35..f33ff4163a51 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -43,11 +43,13 @@
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
-#include <asm/opal.h>
#include <asm/asm-prototypes.h>
#include <linux/linux_logo.h>
+/* All of prom_init bss lives here */
+#define __prombss __section(.bss.prominit)
+
/*
* Eventually bump that one up
*/
@@ -87,7 +89,7 @@
#define OF_WORKAROUNDS 0
#else
#define OF_WORKAROUNDS of_workarounds
-int of_workarounds;
+static int of_workarounds __prombss;
#endif
#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
@@ -148,29 +150,31 @@ extern void copy_and_flush(unsigned long dest, unsigned long src,
unsigned long size, unsigned long offset);
/* prom structure */
-static struct prom_t __initdata prom;
+static struct prom_t __prombss prom;
-static unsigned long prom_entry __initdata;
+static unsigned long __prombss prom_entry;
#define PROM_SCRATCH_SIZE 256
-static char __initdata of_stdout_device[256];
-static char __initdata prom_scratch[PROM_SCRATCH_SIZE];
+static char __prombss of_stdout_device[256];
+static char __prombss prom_scratch[PROM_SCRATCH_SIZE];
-static unsigned long __initdata dt_header_start;
-static unsigned long __initdata dt_struct_start, dt_struct_end;
-static unsigned long __initdata dt_string_start, dt_string_end;
+static unsigned long __prombss dt_header_start;
+static unsigned long __prombss dt_struct_start, dt_struct_end;
+static unsigned long __prombss dt_string_start, dt_string_end;
-static unsigned long __initdata prom_initrd_start, prom_initrd_end;
+static unsigned long __prombss prom_initrd_start, prom_initrd_end;
#ifdef CONFIG_PPC64
-static int __initdata prom_iommu_force_on;
-static int __initdata prom_iommu_off;
-static unsigned long __initdata prom_tce_alloc_start;
-static unsigned long __initdata prom_tce_alloc_end;
+static int __prombss prom_iommu_force_on;
+static int __prombss prom_iommu_off;
+static unsigned long __prombss prom_tce_alloc_start;
+static unsigned long __prombss prom_tce_alloc_end;
#endif
-static bool prom_radix_disable __initdata = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
+#ifdef CONFIG_PPC_PSERIES
+static bool __prombss prom_radix_disable;
+#endif
struct platform_support {
bool hash_mmu;
@@ -188,26 +192,25 @@ struct platform_support {
#define PLATFORM_LPAR 0x0001
#define PLATFORM_POWERMAC 0x0400
#define PLATFORM_GENERIC 0x0500
-#define PLATFORM_OPAL 0x0600
-static int __initdata of_platform;
+static int __prombss of_platform;
-static char __initdata prom_cmd_line[COMMAND_LINE_SIZE];
+static char __prombss prom_cmd_line[COMMAND_LINE_SIZE];
-static unsigned long __initdata prom_memory_limit;
+static unsigned long __prombss prom_memory_limit;
-static unsigned long __initdata alloc_top;
-static unsigned long __initdata alloc_top_high;
-static unsigned long __initdata alloc_bottom;
-static unsigned long __initdata rmo_top;
-static unsigned long __initdata ram_top;
+static unsigned long __prombss alloc_top;
+static unsigned long __prombss alloc_top_high;
+static unsigned long __prombss alloc_bottom;
+static unsigned long __prombss rmo_top;
+static unsigned long __prombss ram_top;
-static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE];
-static int __initdata mem_reserve_cnt;
+static struct mem_map_entry __prombss mem_reserve_map[MEM_RESERVE_MAP_SIZE];
+static int __prombss mem_reserve_cnt;
-static cell_t __initdata regbuf[1024];
+static cell_t __prombss regbuf[1024];
-static bool rtas_has_query_cpu_stopped;
+static bool __prombss rtas_has_query_cpu_stopped;
/*
@@ -522,8 +525,8 @@ static void add_string(char **str, const char *q)
static char *tohex(unsigned int x)
{
- static char digits[] = "0123456789abcdef";
- static char result[9];
+ static const char digits[] __initconst = "0123456789abcdef";
+ static char result[9] __prombss;
int i;
result[8] = 0;
@@ -664,6 +667,8 @@ static void __init early_cmdline_parse(void)
#endif
}
+#ifdef CONFIG_PPC_PSERIES
+ prom_radix_disable = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
opt = strstr(prom_cmd_line, "disable_radix");
if (opt) {
opt += 13;
@@ -679,9 +684,10 @@ static void __init early_cmdline_parse(void)
}
if (prom_radix_disable)
prom_debug("Radix disabled from cmdline\n");
+#endif /* CONFIG_PPC_PSERIES */
}
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+#ifdef CONFIG_PPC_PSERIES
/*
* The architecture vector has an array of PVR mask/value pairs,
* followed by # option vectors - 1, followed by the option vectors.
@@ -782,7 +788,7 @@ struct ibm_arch_vec {
struct option_vector6 vec6;
} __packed;
-struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
+static const struct ibm_arch_vec ibm_architecture_vec_template __initconst = {
.pvrs = {
{
.mask = cpu_to_be32(0xfffe0000), /* POWER5/POWER5+ */
@@ -920,9 +926,11 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
},
};
+static struct ibm_arch_vec __prombss ibm_architecture_vec ____cacheline_aligned;
+
/* Old method - ELF header with PT_NOTE sections only works on BE */
#ifdef __BIG_ENDIAN__
-static struct fake_elf {
+static const struct fake_elf {
Elf32_Ehdr elfhdr;
Elf32_Phdr phdr[2];
struct chrpnote {
@@ -955,7 +963,7 @@ static struct fake_elf {
u32 ignore_me;
} rpadesc;
} rpanote;
-} fake_elf = {
+} fake_elf __initconst = {
.elfhdr = {
.e_ident = { 0x7f, 'E', 'L', 'F',
ELFCLASS32, ELFDATA2MSB, EV_CURRENT },
@@ -1129,14 +1137,21 @@ static void __init prom_check_platform_support(void)
};
int prop_len = prom_getproplen(prom.chosen,
"ibm,arch-vec-5-platform-support");
+
+ /* First copy the architecture vec template */
+ ibm_architecture_vec = ibm_architecture_vec_template;
+
if (prop_len > 1) {
int i;
- u8 vec[prop_len];
+ u8 vec[8];
prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n",
prop_len);
+ if (prop_len > sizeof(vec))
+ prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
+ prop_len);
prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
&vec, sizeof(vec));
- for (i = 0; i < prop_len; i += 2) {
+ for (i = 0; i < sizeof(vec); i += 2) {
prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
, vec[i]
, vec[i + 1]);
@@ -1225,7 +1240,7 @@ static void __init prom_send_capabilities(void)
}
#endif /* __BIG_ENDIAN__ */
}
-#endif /* #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
+#endif /* CONFIG_PPC_PSERIES */
/*
* Memory allocation strategy... our layout is normally:
@@ -1562,88 +1577,6 @@ static void __init prom_close_stdin(void)
}
}
-#ifdef CONFIG_PPC_POWERNV
-
-#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
-static u64 __initdata prom_opal_base;
-static u64 __initdata prom_opal_entry;
-#endif
-
-/*
- * Allocate room for and instantiate OPAL
- */
-static void __init prom_instantiate_opal(void)
-{
- phandle opal_node;
- ihandle opal_inst;
- u64 base, entry;
- u64 size = 0, align = 0x10000;
- __be64 val64;
- u32 rets[2];
-
- prom_debug("prom_instantiate_opal: start...\n");
-
- opal_node = call_prom("finddevice", 1, 1, ADDR("/ibm,opal"));
- prom_debug("opal_node: %x\n", opal_node);
- if (!PHANDLE_VALID(opal_node))
- return;
-
- val64 = 0;
- prom_getprop(opal_node, "opal-runtime-size", &val64, sizeof(val64));
- size = be64_to_cpu(val64);
- if (size == 0)
- return;
- val64 = 0;
- prom_getprop(opal_node, "opal-runtime-alignment", &val64,sizeof(val64));
- align = be64_to_cpu(val64);
-
- base = alloc_down(size, align, 0);
- if (base == 0) {
- prom_printf("OPAL allocation failed !\n");
- return;
- }
-
- opal_inst = call_prom("open", 1, 1, ADDR("/ibm,opal"));
- if (!IHANDLE_VALID(opal_inst)) {
- prom_printf("opening opal package failed (%x)\n", opal_inst);
- return;
- }
-
- prom_printf("instantiating opal at 0x%llx...", base);
-
- if (call_prom_ret("call-method", 4, 3, rets,
- ADDR("load-opal-runtime"),
- opal_inst,
- base >> 32, base & 0xffffffff) != 0
- || (rets[0] == 0 && rets[1] == 0)) {
- prom_printf(" failed\n");
- return;
- }
- entry = (((u64)rets[0]) << 32) | rets[1];
-
- prom_printf(" done\n");
-
- reserve_mem(base, size);
-
- prom_debug("opal base = 0x%llx\n", base);
- prom_debug("opal align = 0x%llx\n", align);
- prom_debug("opal entry = 0x%llx\n", entry);
- prom_debug("opal size = 0x%llx\n", size);
-
- prom_setprop(opal_node, "/ibm,opal", "opal-base-address",
- &base, sizeof(base));
- prom_setprop(opal_node, "/ibm,opal", "opal-entry-address",
- &entry, sizeof(entry));
-
-#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
- prom_opal_base = base;
- prom_opal_entry = entry;
-#endif
- prom_debug("prom_instantiate_opal: end...\n");
-}
-
-#endif /* CONFIG_PPC_POWERNV */
-
/*
* Allocate room for and instantiate RTAS
*/
@@ -2150,10 +2083,6 @@ static int __init prom_find_machine_type(void)
}
}
#ifdef CONFIG_PPC64
- /* Try to detect OPAL */
- if (PHANDLE_VALID(call_prom("finddevice", 1, 1, ADDR("/ibm,opal"))))
- return PLATFORM_OPAL;
-
/* Try to figure out if it's an IBM pSeries or any other
* PAPR compliant platform. We assume it is if :
* - /device_type is "chrp" (please, do NOT use that for future
@@ -2202,7 +2131,7 @@ static void __init prom_check_displays(void)
ihandle ih;
int i;
- static unsigned char default_colors[] = {
+ static const unsigned char default_colors[] __initconst = {
0x00, 0x00, 0x00,
0x00, 0x00, 0xaa,
0x00, 0xaa, 0x00,
@@ -2398,7 +2327,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
unsigned long soff;
unsigned char *valp;
- static char pname[MAX_PROPERTY_NAME];
+ static char pname[MAX_PROPERTY_NAME] __prombss;
int l, room, has_phandle = 0;
dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);
@@ -2481,14 +2410,11 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
has_phandle = 1;
}
- /* Add a "linux,phandle" property if no "phandle" property already
- * existed (can happen with OPAL)
- */
+ /* Add a "phandle" property if none already exist */
if (!has_phandle) {
- soff = dt_find_string("linux,phandle");
+ soff = dt_find_string("phandle");
if (soff == 0)
- prom_printf("WARNING: Can't find string index for"
- " <linux-phandle> node %s\n", path);
+ prom_printf("WARNING: Can't find string index for <phandle> node %s\n", path);
else {
dt_push_token(OF_DT_PROP, mem_start, mem_end);
dt_push_token(4, mem_start, mem_end);
@@ -2548,9 +2474,9 @@ static void __init flatten_device_tree(void)
dt_string_start = mem_start;
mem_start += 4; /* hole */
- /* Add "linux,phandle" in there, we'll need it */
+ /* Add "phandle" in there, we'll need it */
namep = make_room(&mem_start, &mem_end, 16, 1);
- strcpy(namep, "linux,phandle");
+ strcpy(namep, "phandle");
mem_start = (unsigned long)namep + strlen(namep) + 1;
/* Build string array */
@@ -3172,7 +3098,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
early_cmdline_parse();
-#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
+#ifdef CONFIG_PPC_PSERIES
/*
* On pSeries, inform the firmware about our capabilities
*/
@@ -3216,15 +3142,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* On non-powermacs, try to instantiate RTAS. PowerMacs don't
* have a usable RTAS implementation.
*/
- if (of_platform != PLATFORM_POWERMAC &&
- of_platform != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC)
prom_instantiate_rtas();
-#ifdef CONFIG_PPC_POWERNV
- if (of_platform == PLATFORM_OPAL)
- prom_instantiate_opal();
-#endif /* CONFIG_PPC_POWERNV */
-
#ifdef CONFIG_PPC64
/* instantiate sml */
prom_instantiate_sml();
@@ -3237,8 +3157,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*
 * (This must be done after instantiating RTAS)
*/
- if (of_platform != PLATFORM_POWERMAC &&
- of_platform != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC)
prom_hold_cpus();
/*
@@ -3282,11 +3201,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/*
* in case stdin is USB and still active on IBM machines...
* Unfortunately quiesce crashes on some powermacs if we have
- * closed stdin already (in particular the powerbook 101). It
- * appears that the OPAL version of OFW doesn't like it either.
+ * closed stdin already (in particular the powerbook 101).
*/
- if (of_platform != PLATFORM_POWERMAC &&
- of_platform != PLATFORM_OPAL)
+ if (of_platform != PLATFORM_POWERMAC)
prom_close_stdin();
/*
@@ -3304,10 +3221,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
hdr = dt_header_start;
/* Don't print anything after quiesce under OPAL, it crashes OFW */
- if (of_platform != PLATFORM_OPAL) {
- prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
- prom_debug("->dt_header_start=0x%lx\n", hdr);
- }
+ prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase);
+ prom_debug("->dt_header_start=0x%lx\n", hdr);
#ifdef CONFIG_PPC32
reloc_got2(-offset);
@@ -3315,13 +3230,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
unreloc_toc();
#endif
-#ifdef CONFIG_PPC_EARLY_DEBUG_OPAL
- /* OPAL early debug gets the OPAL base & entry in r8 and r9 */
- __start(hdr, kbase, 0, 0, 0,
- prom_opal_base, prom_opal_entry);
-#else
__start(hdr, kbase, 0, 0, 0, 0, 0);
-#endif
return 0;
}
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index acb6b9226352..667df97d2595 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -28,6 +28,18 @@ OBJ="$2"
ERROR=0
+check_section()
+{
+ file=$1
+ section=$2
+ size=$(objdump -h -j $section $file 2>/dev/null | awk "\$2 == \"$section\" {print \$3}")
+ size=${size:-0}
+ if [ $size -ne 0 ]; then
+ ERROR=1
+ echo "Error: Section $section not empty in prom_init.c" >&2
+ fi
+}
+
for UNDEF in $($NM -u $OBJ | awk '{print $2}')
do
# On 64-bit nm gives us the function descriptors, which have
@@ -66,4 +78,8 @@ do
fi
done
+check_section $OBJ .data
+check_section $OBJ .bss
+check_section $OBJ .init.data
+
exit $ERROR
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9667666eb18e..afb819f4ca68 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -297,7 +297,7 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
}
#endif
- if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
+ if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) {
*data = ((unsigned long *)task->thread.regs)[regno];
return 0;
}
@@ -360,10 +360,10 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset,
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.regs->orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct pt_regs));
+ sizeof(struct user_pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
+ sizeof(struct user_pt_regs), -1);
return ret;
}
@@ -853,10 +853,10 @@ static int tm_cgpr_get(struct task_struct *target,
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.ckpt_regs.orig_gpr3,
offsetof(struct pt_regs, orig_gpr3),
- sizeof(struct pt_regs));
+ sizeof(struct user_pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
- sizeof(struct pt_regs), -1);
+ sizeof(struct user_pt_regs), -1);
return ret;
}
@@ -1609,7 +1609,7 @@ static int ppr_get(struct task_struct *target,
void *kbuf, void __user *ubuf)
{
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.ppr, 0, sizeof(u64));
+ &target->thread.regs->ppr, 0, sizeof(u64));
}
static int ppr_set(struct task_struct *target,
@@ -1618,7 +1618,7 @@ static int ppr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.ppr, 0, sizeof(u64));
+ &target->thread.regs->ppr, 0, sizeof(u64));
}
static int dscr_get(struct task_struct *target,
@@ -2508,6 +2508,7 @@ void ptrace_disable(struct task_struct *child)
{
/* make sure the single step bit is not set. */
user_disable_single_step(child);
+ clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
}
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
@@ -3130,7 +3131,7 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_GETREGS: /* Get all pt_regs from the child. */
return copy_regset_to_user(child, &user_ppc_native_view,
REGSET_GPR,
- 0, sizeof(struct pt_regs),
+ 0, sizeof(struct user_pt_regs),
datavp);
#ifdef CONFIG_PPC64
@@ -3139,7 +3140,7 @@ long arch_ptrace(struct task_struct *child, long request,
case PTRACE_SETREGS: /* Set all gp regs in the child. */
return copy_regset_from_user(child, &user_ppc_native_view,
REGSET_GPR,
- 0, sizeof(struct pt_regs),
+ 0, sizeof(struct user_pt_regs),
datavp);
case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
@@ -3264,6 +3265,16 @@ long do_syscall_trace_enter(struct pt_regs *regs)
{
user_exit();
+ if (test_thread_flag(TIF_SYSCALL_EMU)) {
+ ptrace_report_syscall(regs);
+ /*
+ * Returning -1 will skip the syscall execution. We also want
+ * to avoid clobbering any registers, so we don't jump to the
+ * skip label.
+ */
+ return -1;
+ }
+
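From userspace this implements PTRACE_SYSEMU semantics: the tracee stops at syscall entry, the kernel skips executing the syscall, and no registers are clobbered, so the tracer can emulate the call itself. A minimal sketch of the tracer side (assuming a Linux glibc environment):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void sysemu_step(pid_t child)
{
	ptrace(PTRACE_SYSEMU, child, 0, 0); /* resume until syscall entry */
	waitpid(child, NULL, 0);            /* stopped: emulate, fix regs */
}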
/*
* The tracer may decide to abort the syscall, if so tracehook
* will return !0. Note that the tracer may also just change
@@ -3324,3 +3335,42 @@ void do_syscall_trace_leave(struct pt_regs *regs)
user_enter();
}
+
+void __init pt_regs_check(void)
+{
+ BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
+ offsetof(struct user_pt_regs, gpr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
+ offsetof(struct user_pt_regs, nip));
+ BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
+ offsetof(struct user_pt_regs, msr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
+ offsetof(struct user_pt_regs, orig_gpr3));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
+ offsetof(struct user_pt_regs, ctr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
+ offsetof(struct user_pt_regs, link));
+ BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
+ offsetof(struct user_pt_regs, xer));
+ BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
+ offsetof(struct user_pt_regs, ccr));
+#ifdef __powerpc64__
+ BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
+ offsetof(struct user_pt_regs, softe));
+#else
+ BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
+ offsetof(struct user_pt_regs, mq));
+#endif
+ BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
+ offsetof(struct user_pt_regs, trap));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
+ offsetof(struct user_pt_regs, dar));
+ BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
+ offsetof(struct user_pt_regs, dsisr));
+ BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
+ offsetof(struct user_pt_regs, result));
+
+ BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
+}
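pt_regs_check() pins down the invariant the rest of this patch relies on: the exported user_pt_regs must be a layout-identical prefix of the kernel's pt_regs, so regset and PTRACE_GETREGS code can copy the common prefix directly. Schematically (hypothetical toy types, not the real definitions):

#include <linux/build_bug.h>
#include <linux/stddef.h>

struct user_regs_example { unsigned long gpr[32], nip, msr; };

struct kernel_regs_example {
	unsigned long gpr[32], nip, msr; /* must mirror the user view */
	unsigned long kernel_only;       /* private state may follow */
};

static inline void example_check(void)
{
	BUILD_BUG_ON(offsetof(struct kernel_regs_example, nip) !=
		     offsetof(struct user_regs_example, nip));
	BUILD_BUG_ON(sizeof(struct user_regs_example) >
		     sizeof(struct kernel_regs_example));
}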
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8afd146bc9c7..de35bd8f047f 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -981,7 +981,15 @@ int rtas_ibm_suspend_me(u64 handle)
goto out;
}
- stop_topology_update();
+ cpu_hotplug_disable();
+
+ /* Check if we raced with a CPU-Offline Operation */
+ if (unlikely(!cpumask_equal(cpu_present_mask, cpu_online_mask))) {
+ pr_err("%s: Raced against a concurrent CPU-Offline\n",
+ __func__);
+ atomic_set(&data.error, -EBUSY);
+ goto out_hotplug_enable;
+ }
/* Call function on all CPUs. One of us will make the
* rtas call
@@ -994,7 +1002,8 @@ int rtas_ibm_suspend_me(u64 handle)
if (atomic_read(&data.error) != 0)
printk(KERN_ERR "Error doing global join\n");
- start_topology_update();
+out_hotplug_enable:
+ cpu_hotplug_enable();
/* Take down CPUs not online prior to suspend */
cpuret = rtas_offline_cpus_mask(offline_mask);
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 44d66c33d59d..38cadae4ca4f 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -91,6 +91,8 @@ static char *rtas_event_type(int type)
return "Dump Notification Event";
case RTAS_TYPE_PRRN:
return "Platform Resource Reassignment Event";
+ case RTAS_TYPE_HOTPLUG:
+ return "Hotplug Event";
}
return rtas_type[0];
@@ -150,8 +152,10 @@ static void printk_log_rtas(char *buf, int len)
} else {
struct rtas_error_log *errlog = (struct rtas_error_log *)buf;
- printk(RTAS_DEBUG "event: %d, Type: %s, Severity: %d\n",
- error_log_cnt, rtas_event_type(rtas_error_type(errlog)),
+ printk(RTAS_DEBUG "event: %d, Type: %s (%d), Severity: %d\n",
+ error_log_cnt,
+ rtas_event_type(rtas_error_type(errlog)),
+ rtas_error_type(errlog),
rtas_error_severity(errlog));
}
}
@@ -274,27 +278,16 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
}
#ifdef CONFIG_PPC_PSERIES
-static s32 prrn_update_scope;
-
-static void prrn_work_fn(struct work_struct *work)
+static void handle_prrn_event(s32 scope)
{
/*
* For PRRN, we must pass the negative of the scope value in
* the RTAS event.
*/
- pseries_devicetree_update(-prrn_update_scope);
+ pseries_devicetree_update(-scope);
numa_update_cpu_topology(false);
}
-static DECLARE_WORK(prrn_work, prrn_work_fn);
-
-static void prrn_schedule_update(u32 scope)
-{
- flush_work(&prrn_work);
- prrn_update_scope = scope;
- schedule_work(&prrn_work);
-}
-
static void handle_rtas_event(const struct rtas_error_log *log)
{
if (rtas_error_type(log) != RTAS_TYPE_PRRN || !prrn_is_enabled())
@@ -303,7 +296,7 @@ static void handle_rtas_event(const struct rtas_error_log *log)
/* For PRRN Events the extended log length is used to denote
* the scope for calling rtas update-nodes.
*/
- prrn_schedule_update(rtas_error_extended_log_length(log));
+ handle_prrn_event(rtas_error_extended_log_length(log));
}
#else
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 93fa0c99681e..9ca9db707bcb 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
#include <linux/serial_8250.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
+#include <linux/bootmem.h>
#include <linux/of_platform.h>
#include <linux/hugetlb.h>
#include <asm/debugfs.h>
@@ -966,6 +967,8 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
+ early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
+
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6a501b25dd85..faf00222b324 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -243,13 +243,19 @@ static void cpu_ready_for_interrupts(void)
}
/*
- * Fixup HFSCR:TM based on CPU features. The bit is set by our
- * early asm init because at that point we haven't updated our
- * CPU features from firmware and device-tree. Here we have,
- * so let's do it.
+ * Set HFSCR:TM based on CPU features:
+ * In the special case of TM no suspend (P9N DD2.1), Linux is
+ * told TM is off via the dt-ftrs but told to (partially) use
+ * it via OPAL_REINIT_CPUS_TM_SUSPEND_DISABLED. So HFSCR[TM]
+ * will be off from dt-ftrs but we need to turn it on for the
+ * no suspend case.
*/
- if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
- mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) | HFSCR_TM);
+ else
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
+ }
/* Set IR and DR in PACA MSR */
get_paca()->kernel_msr = MSR_KERNEL;
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 61c1fadbc644..3f15edf25a0d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -34,6 +34,8 @@
#include <linux/topology.h>
#include <linux/profile.h>
#include <linux/processor.h>
+#include <linux/random.h>
+#include <linux/stackprotector.h>
#include <asm/ptrace.h>
#include <linux/atomic.h>
@@ -74,14 +76,32 @@ static DEFINE_PER_CPU(int, cpu_state) = { 0 };
#endif
struct thread_info *secondary_ti;
+bool has_big_cores;
DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map);
DEFINE_PER_CPU(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL_GPL(has_big_cores);
+
+#define MAX_THREAD_LIST_SIZE 8
+#define THREAD_GROUP_SHARE_L1 1
+struct thread_groups {
+ unsigned int property;
+ unsigned int nr_groups;
+ unsigned int threads_per_group;
+ unsigned int thread_list[MAX_THREAD_LIST_SIZE];
+};
+
+/*
+ * On big-core systems, cpu_l1_cache_map for each CPU corresponds to
+ * the set of its siblings that share the L1-cache.
+ */
+DEFINE_PER_CPU(cpumask_var_t, cpu_l1_cache_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
@@ -674,6 +694,185 @@ static void set_cpus_unrelated(int i, int j,
}
#endif
+/*
+ * parse_thread_groups: Parses the "ibm,thread-groups" device tree
+ * property for the CPU device node @dn and stores
+ * the parsed output in the thread_groups
+ * structure @tg if the ibm,thread-groups[0]
+ * matches @property.
+ *
+ * @dn: The device node of the CPU device.
+ * @tg: Pointer to a thread group structure into which the parsed
+ * output of "ibm,thread-groups" is stored.
+ * @property: The property of the thread-group that the caller is
+ * interested in.
+ *
+ * ibm,thread-groups[0..N-1] array defines which group of threads in
+ * the CPU-device node can be grouped together based on the property.
+ *
+ * ibm,thread-groups[0] tells us the property based on which the
+ * threads are being grouped together. If this value is 1, it implies
+ * that the threads in the same group share the L1 and translation
+ * cache.
+ *
+ * ibm,thread-groups[1] tells us how many such thread groups exist.
+ *
+ * ibm,thread-groups[2] tells us the number of threads in each such
+ * group.
+ *
+ * ibm,thread-groups[3..N-1] is the list of threads identified by
+ * "ibm,ppc-interrupt-server#s" arranged as per their membership in
+ * the grouping.
+ *
+ * Example: If ibm,thread-groups = [1,2,4,5,6,7,8,9,10,11,12] it
+ * implies that there are 2 groups of 4 threads each, where each group
+ * of threads shares the L1 and translation cache.
+ *
+ * The "ibm,ppc-interrupt-server#s" of the first group is {5,6,7,8}
+ * and the "ibm,ppc-interrupt-server#s" of the second group is
+ * {9, 10, 11, 12}.
+ *
+ * Returns 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ */
+static int parse_thread_groups(struct device_node *dn,
+ struct thread_groups *tg,
+ unsigned int property)
+{
+ int i;
+ u32 thread_group_array[3 + MAX_THREAD_LIST_SIZE];
+ u32 *thread_list;
+ size_t total_threads;
+ int ret;
+
+ ret = of_property_read_u32_array(dn, "ibm,thread-groups",
+ thread_group_array, 3);
+ if (ret)
+ return ret;
+
+ tg->property = thread_group_array[0];
+ tg->nr_groups = thread_group_array[1];
+ tg->threads_per_group = thread_group_array[2];
+ if (tg->property != property ||
+ tg->nr_groups < 1 ||
+ tg->threads_per_group < 1)
+ return -ENODATA;
+
+ total_threads = tg->nr_groups * tg->threads_per_group;
+
+ ret = of_property_read_u32_array(dn, "ibm,thread-groups",
+ thread_group_array,
+ 3 + total_threads);
+ if (ret)
+ return ret;
+
+ thread_list = &thread_group_array[3];
+
+ for (i = 0 ; i < total_threads; i++)
+ tg->thread_list[i] = thread_list[i];
+
+ return 0;
+}
+
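Decoding the worked example from the comment block above, as standalone C (a sketch, not kernel code):

#include <stdio.h>

int main(void)
{
	/* property=1 (L1 sharing), 2 groups, 4 threads per group */
	unsigned int tg[] = {1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12};
	unsigned int nr_groups = tg[1], per_group = tg[2];

	for (unsigned int g = 0; g < nr_groups; g++) {
		printf("group %u:", g);
		for (unsigned int t = 0; t < per_group; t++)
			printf(" %u", tg[3 + g * per_group + t]);
		printf("\n"); /* -> "group 0: 5 6 7 8", "group 1: 9 10 11 12" */
	}
	return 0;
}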
+/*
+ * get_cpu_thread_group_start : Searches the thread group in tg->thread_list
+ * that @cpu belongs to.
+ *
+ * @cpu : The logical CPU whose thread group is being searched.
+ * @tg : The thread-group structure of the CPU node which @cpu belongs
+ * to.
+ *
+ * Returns the index into tg->thread_list that points to the start
+ * of the thread_group that @cpu belongs to.
+ *
+ * Returns -1 if cpu doesn't belong to any of the groups pointed to by
+ * tg->thread_list.
+ */
+static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg)
+{
+ int hw_cpu_id = get_hard_smp_processor_id(cpu);
+ int i, j;
+
+ for (i = 0; i < tg->nr_groups; i++) {
+ int group_start = i * tg->threads_per_group;
+
+ for (j = 0; j < tg->threads_per_group; j++) {
+ int idx = group_start + j;
+
+ if (tg->thread_list[idx] == hw_cpu_id)
+ return group_start;
+ }
+ }
+
+ return -1;
+}
+
+static int init_cpu_l1_cache_map(int cpu)
+
+{
+ struct device_node *dn = of_get_cpu_node(cpu, NULL);
+ struct thread_groups tg = {.property = 0,
+ .nr_groups = 0,
+ .threads_per_group = 0};
+ int first_thread = cpu_first_thread_sibling(cpu);
+ int i, cpu_group_start = -1, err = 0;
+
+ if (!dn)
+ return -ENODATA;
+
+ err = parse_thread_groups(dn, &tg, THREAD_GROUP_SHARE_L1);
+ if (err)
+ goto out;
+
+ zalloc_cpumask_var_node(&per_cpu(cpu_l1_cache_map, cpu),
+ GFP_KERNEL,
+ cpu_to_node(cpu));
+
+ cpu_group_start = get_cpu_thread_group_start(cpu, &tg);
+
+ if (unlikely(cpu_group_start == -1)) {
+ WARN_ON_ONCE(1);
+ err = -ENODATA;
+ goto out;
+ }
+
+ for (i = first_thread; i < first_thread + threads_per_core; i++) {
+ int i_group_start = get_cpu_thread_group_start(i, &tg);
+
+ if (unlikely(i_group_start == -1)) {
+ WARN_ON_ONCE(1);
+ err = -ENODATA;
+ goto out;
+ }
+
+ if (i_group_start == cpu_group_start)
+ cpumask_set_cpu(i, per_cpu(cpu_l1_cache_map, cpu));
+ }
+
+out:
+ of_node_put(dn);
+ return err;
+}
+
+static int init_big_cores(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ int err = init_cpu_l1_cache_map(cpu);
+
+ if (err)
+ return err;
+
+ zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
+ GFP_KERNEL,
+ cpu_to_node(cpu));
+ }
+
+ has_big_cores = true;
+ return 0;
+}
+
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned int cpu;
@@ -712,6 +911,12 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));
+ init_big_cores();
+ if (has_big_cores) {
+ cpumask_set_cpu(boot_cpuid,
+ cpu_smallcore_mask(boot_cpuid));
+ }
+
if (smp_ops && smp_ops->probe)
smp_ops->probe();
}
@@ -995,10 +1200,28 @@ static void remove_cpu_from_masks(int cpu)
set_cpus_unrelated(cpu, i, cpu_core_mask);
set_cpus_unrelated(cpu, i, cpu_l2_cache_mask);
set_cpus_unrelated(cpu, i, cpu_sibling_mask);
+ if (has_big_cores)
+ set_cpus_unrelated(cpu, i, cpu_smallcore_mask);
}
}
#endif
+static inline void add_cpu_to_smallcore_masks(int cpu)
+{
+ struct cpumask *this_l1_cache_map = per_cpu(cpu_l1_cache_map, cpu);
+ int i, first_thread = cpu_first_thread_sibling(cpu);
+
+ if (!has_big_cores)
+ return;
+
+ cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));
+
+ for (i = first_thread; i < first_thread + threads_per_core; i++) {
+ if (cpu_online(i) && cpumask_test_cpu(i, this_l1_cache_map))
+ set_cpus_related(i, cpu, cpu_smallcore_mask);
+ }
+}
+
static void add_cpu_to_masks(int cpu)
{
int first_thread = cpu_first_thread_sibling(cpu);
@@ -1015,6 +1238,7 @@ static void add_cpu_to_masks(int cpu)
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_sibling_mask);
+ add_cpu_to_smallcore_masks(cpu);
/*
* Copy the thread sibling mask into the cache sibling mask
* and mark any CPUs that share an L2 with this CPU.
@@ -1044,6 +1268,7 @@ static bool shared_caches;
void start_secondary(void *unused)
{
unsigned int cpu = smp_processor_id();
+ struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask;
mmgrab(&init_mm);
current->active_mm = &init_mm;
@@ -1069,11 +1294,13 @@ void start_secondary(void *unused)
/* Update topology CPU masks */
add_cpu_to_masks(cpu);
+ if (has_big_cores)
+ sibling_mask = cpu_smallcore_mask;
/*
* Check for any shared caches. Note that this must be done on a
* per-core basis because one core in the pair might be disabled.
*/
- if (!cpumask_equal(cpu_l2_cache_mask(cpu), cpu_sibling_mask(cpu)))
+ if (!cpumask_equal(cpu_l2_cache_mask(cpu), sibling_mask(cpu)))
shared_caches = true;
set_numa_node(numa_cpu_lookup_table[cpu]);
@@ -1083,6 +1310,8 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
+ boot_init_stack_canary();
+
local_irq_enable();
/* We can enable ftrace for secondary cpus now */
@@ -1140,6 +1369,13 @@ static const struct cpumask *shared_cache_mask(int cpu)
return cpu_l2_cache_mask(cpu);
}
+#ifdef CONFIG_SCHED_SMT
+static const struct cpumask *smallcore_smt_mask(int cpu)
+{
+ return cpu_smallcore_mask(cpu);
+}
+#endif
+
static struct sched_domain_topology_level power9_topology[] = {
#ifdef CONFIG_SCHED_SMT
{ cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
@@ -1167,6 +1403,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
shared_proc_topology_init();
dump_numa_cpu_topology();
+#ifdef CONFIG_SCHED_SMT
+ if (has_big_cores) {
+ pr_info("Using small cores at SMT level\n");
+ power9_topology[0].mask = smallcore_smt_mask;
+ powerpc_topology[0].mask = smallcore_smt_mask;
+ }
+#endif
/*
* If any CPU detects that it's sharing a cache with another CPU then
* use the deeper topology that is aware of this sharing.
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index f83bf6f72cb0..185216becb8b 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -262,7 +262,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
addi r1,r1,-128
#ifdef CONFIG_PPC_BOOK3S_64
- bl slb_flush_and_rebolt
+ bl slb_flush_and_restore_bolted
#endif
bl do_after_copyback
addi r1,r1,128
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 70f145e02487..3646affae963 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -111,6 +111,7 @@ struct clock_event_device decrementer_clockevent = {
.rating = 200,
.irq = 0,
.set_next_event = decrementer_set_next_event,
+ .set_state_oneshot_stopped = decrementer_shutdown,
.set_state_shutdown = decrementer_shutdown,
.tick_resume = decrementer_shutdown,
.features = CLOCK_EVT_FEAT_ONESHOT |
@@ -175,7 +176,7 @@ static void calc_cputime_factors(void)
* Read the SPURR on systems that have it, otherwise the PURR,
* or if that doesn't exist return the timebase value passed in.
*/
-static unsigned long read_spurr(unsigned long tb)
+static inline unsigned long read_spurr(unsigned long tb)
{
if (cpu_has_feature(CPU_FTR_SPURR))
return mfspr(SPRN_SPURR);
@@ -281,26 +282,17 @@ static inline u64 calculate_stolen_time(u64 stop_tb)
* Account time for a transition between system, hard irq
* or soft irq state.
*/
-static unsigned long vtime_delta(struct task_struct *tsk,
- unsigned long *stime_scaled,
- unsigned long *steal_time)
+static unsigned long vtime_delta_scaled(struct cpu_accounting_data *acct,
+ unsigned long now, unsigned long stime)
{
- unsigned long now, nowscaled, deltascaled;
- unsigned long stime;
+ unsigned long stime_scaled = 0;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ unsigned long nowscaled, deltascaled;
unsigned long utime, utime_scaled;
- struct cpu_accounting_data *acct = get_accounting(tsk);
- WARN_ON_ONCE(!irqs_disabled());
-
- now = mftb();
nowscaled = read_spurr(now);
- stime = now - acct->starttime;
- acct->starttime = now;
deltascaled = nowscaled - acct->startspurr;
acct->startspurr = nowscaled;
-
- *steal_time = calculate_stolen_time(now);
-
utime = acct->utime - acct->utime_sspurr;
acct->utime_sspurr = acct->utime;
@@ -314,17 +306,38 @@ static unsigned long vtime_delta(struct task_struct *tsk,
* the user ticks get saved up in paca->user_time_scaled to be
* used by account_process_tick.
*/
- *stime_scaled = stime;
+ stime_scaled = stime;
utime_scaled = utime;
if (deltascaled != stime + utime) {
if (utime) {
- *stime_scaled = deltascaled * stime / (stime + utime);
- utime_scaled = deltascaled - *stime_scaled;
+ stime_scaled = deltascaled * stime / (stime + utime);
+ utime_scaled = deltascaled - stime_scaled;
} else {
- *stime_scaled = deltascaled;
+ stime_scaled = deltascaled;
}
}
acct->utime_scaled += utime_scaled;
+#endif
+
+ return stime_scaled;
+}
+
+static unsigned long vtime_delta(struct task_struct *tsk,
+ unsigned long *stime_scaled,
+ unsigned long *steal_time)
+{
+ unsigned long now, stime;
+ struct cpu_accounting_data *acct = get_accounting(tsk);
+
+ WARN_ON_ONCE(!irqs_disabled());
+
+ now = mftb();
+ stime = now - acct->starttime;
+ acct->starttime = now;
+
+ *stime_scaled = vtime_delta_scaled(acct, now, stime);
+
+ *steal_time = calculate_stolen_time(now);
return stime;
}
@@ -341,7 +354,9 @@ void vtime_account_system(struct task_struct *tsk)
if ((tsk->flags & PF_VCPU) && !irq_count()) {
acct->gtime += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->utime_scaled += stime_scaled;
+#endif
} else {
if (hardirq_count())
acct->hardirq_time += stime;
@@ -350,7 +365,9 @@ void vtime_account_system(struct task_struct *tsk)
else
acct->stime += stime;
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
acct->stime_scaled += stime_scaled;
+#endif
}
}
EXPORT_SYMBOL_GPL(vtime_account_system);
@@ -364,6 +381,21 @@ void vtime_account_idle(struct task_struct *tsk)
acct->idle_time += stime + steal_time;
}
+static void vtime_flush_scaled(struct task_struct *tsk,
+ struct cpu_accounting_data *acct)
+{
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
+ if (acct->utime_scaled)
+ tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
+ if (acct->stime_scaled)
+ tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
+
+ acct->utime_scaled = 0;
+ acct->utime_sspurr = 0;
+ acct->stime_scaled = 0;
+#endif
+}
+
/*
* Account the whole cputime accumulated in the paca
* Must be called with interrupts disabled.
@@ -378,14 +410,13 @@ void vtime_flush(struct task_struct *tsk)
if (acct->utime)
account_user_time(tsk, cputime_to_nsecs(acct->utime));
- if (acct->utime_scaled)
- tsk->utimescaled += cputime_to_nsecs(acct->utime_scaled);
-
if (acct->gtime)
account_guest_time(tsk, cputime_to_nsecs(acct->gtime));
- if (acct->steal_time)
+ if (IS_ENABLED(CONFIG_PPC_SPLPAR) && acct->steal_time) {
account_steal_time(cputime_to_nsecs(acct->steal_time));
+ acct->steal_time = 0;
+ }
if (acct->idle_time)
account_idle_time(cputime_to_nsecs(acct->idle_time));
@@ -393,8 +424,6 @@ void vtime_flush(struct task_struct *tsk)
if (acct->stime)
account_system_index_time(tsk, cputime_to_nsecs(acct->stime),
CPUTIME_SYSTEM);
- if (acct->stime_scaled)
- tsk->stimescaled += cputime_to_nsecs(acct->stime_scaled);
if (acct->hardirq_time)
account_system_index_time(tsk, cputime_to_nsecs(acct->hardirq_time),
@@ -403,14 +432,12 @@ void vtime_flush(struct task_struct *tsk)
account_system_index_time(tsk, cputime_to_nsecs(acct->softirq_time),
CPUTIME_SOFTIRQ);
+ vtime_flush_scaled(tsk, acct);
+
acct->utime = 0;
- acct->utime_scaled = 0;
- acct->utime_sspurr = 0;
acct->gtime = 0;
- acct->steal_time = 0;
acct->idle_time = 0;
acct->stime = 0;
- acct->stime_scaled = 0;
acct->hardirq_time = 0;
acct->softirq_time = 0;
}
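The proportional split that vtime_delta_scaled() performs is easiest to see in isolation: the SPURR delta for the interval is apportioned between system and user time in the same ratio as the raw timebase split. A sketch:

/* Given the scaled (SPURR) delta for an interval and the raw
 * system/user split, return the system share of the scaled delta. */
static unsigned long scaled_stime_share(unsigned long deltascaled,
					unsigned long stime,
					unsigned long utime)
{
	if (!utime)
		return deltascaled; /* all of it is system time */
	return deltascaled * stime / (stime + utime);
}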
@@ -984,10 +1011,14 @@ static void register_decrementer_clockevent(int cpu)
*dec = decrementer_clockevent;
dec->cpumask = cpumask_of(cpu);
+ clockevents_config_and_register(dec, ppc_tb_freq, 2, decrementer_max);
+
printk_once(KERN_DEBUG "clockevent: %s mult[%x] shift[%d] cpu[%d]\n",
dec->name, dec->mult, dec->shift, cpu);
- clockevents_register_device(dec);
+ /* Set values for KVM, see kvm_emulate_dec() */
+ decrementer_clockevent.mult = dec->mult;
+ decrementer_clockevent.shift = dec->shift;
}
static void enable_large_decrementer(void)
@@ -1035,18 +1066,7 @@ static void __init set_decrementer_max(void)
static void __init init_decrementer_clockevent(void)
{
- int cpu = smp_processor_id();
-
- clockevents_calc_mult_shift(&decrementer_clockevent, ppc_tb_freq, 4);
-
- decrementer_clockevent.max_delta_ns =
- clockevent_delta2ns(decrementer_max, &decrementer_clockevent);
- decrementer_clockevent.max_delta_ticks = decrementer_max;
- decrementer_clockevent.min_delta_ns =
- clockevent_delta2ns(2, &decrementer_clockevent);
- decrementer_clockevent.min_delta_ticks = 2;
-
- register_decrementer_clockevent(cpu);
+ register_decrementer_clockevent(smp_processor_id());
}
void secondary_cpu_time_init(void)
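clockevents_config_and_register() computes the device's mult/shift pair from ppc_tb_freq and the 2..decrementer_max tick range, replacing the hand-rolled setup removed from init_decrementer_clockevent(); copying mult/shift back into the global decrementer_clockevent keeps KVM's decrementer emulation using the same conversion. The conversion itself follows the standard clockevents convention (stated here as an assumption):

#include <linux/types.h>

/* Ticks to program for a nanosecond delta, given the mult/shift pair
 * computed by clockevents_config_and_register(). */
static inline u64 delta_ns_to_ticks(u64 ns, u32 mult, u32 shift)
{
	return (ns * mult) >> shift;
}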
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7716374786bd..9fabdce255cd 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -92,13 +92,14 @@ _GLOBAL(tm_abort)
blr
EXPORT_SYMBOL_GPL(tm_abort);
-/* void tm_reclaim(struct thread_struct *thread,
+/*
+ * void tm_reclaim(struct thread_struct *thread,
* uint8_t cause)
*
* - Performs a full reclaim. This destroys outstanding
- * transactions and updates thread->regs.tm_ckpt_* with the
- * original checkpointed state. Note that thread->regs is
- * unchanged.
+ * transactions and updates thread.ckpt_regs, thread.ckfp_state and
+ * thread.ckvr_state with the original checkpointed state. Note that
+ * thread->regs is unchanged.
*
* Purpose is to both abort transactions of, and preserve the state of,
 * a transaction at a context switch. We preserve/restore both sets of process
@@ -163,15 +164,16 @@ _GLOBAL(tm_reclaim)
*/
TRECLAIM(R4) /* Cause in r4 */
- /* ******************** GPRs ******************** */
- /* Stash the checkpointed r13 away in the scratch SPR and get the real
- * paca
+ /*
+ * ******************** GPRs ********************
+ * Stash the checkpointed r13 in the scratch SPR and get the real paca.
*/
SET_SCRATCH0(r13)
GET_PACA(r13)
- /* Stash the checkpointed r1 away in paca tm_scratch and get the real
- * stack pointer back
+ /*
+ * Stash the checkpointed r1 away in paca->tm_scratch and get the real
+ * stack pointer back into r1.
*/
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
@@ -209,14 +211,15 @@ _GLOBAL(tm_reclaim)
addi r7, r12, PT_CKPT_REGS /* Thread's ckpt_regs */
- /* Make r7 look like an exception frame so that we
- * can use the neat GPRx(n) macros. r7 is NOT a pt_regs ptr!
+ /*
+ * Make r7 look like an exception frame so that we can use the neat
+ * GPRx(n) macros. r7 is NOT a pt_regs ptr!
*/
subi r7, r7, STACK_FRAME_OVERHEAD
/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
SAVE_GPR(0, r7) /* user r0 */
- SAVE_GPR(2, r7) /* user r2 */
+ SAVE_GPR(2, r7) /* user r2 */
SAVE_4GPRS(3, r7) /* user r3-r6 */
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
@@ -237,7 +240,8 @@ _GLOBAL(tm_reclaim)
/* ******************** NIP ******************** */
mfspr r3, SPRN_TFHAR
std r3, _NIP(r7) /* Returns to failhandler */
- /* The checkpointed NIP is ignored when rescheduling/rechkpting,
+ /*
+ * The checkpointed NIP is ignored when rescheduling/rechkpting,
* but is used in signal return to 'wind back' to the abort handler.
*/
@@ -260,12 +264,13 @@ _GLOBAL(tm_reclaim)
std r3, THREAD_TM_TAR(r12)
std r4, THREAD_TM_DSCR(r12)
- /* MSR and flags: We don't change CRs, and we don't need to alter
- * MSR.
+ /*
+ * MSR and flags: We don't change CRs, and we don't need to alter MSR.
*/
- /* ******************** FPR/VR/VSRs ************
+ /*
+ * ******************** FPR/VR/VSRs ************
* After reclaiming, capture the checkpointed FPRs/VRs.
*
* We enabled VEC/FP/VSX in the msr above, so we can execute these
@@ -275,7 +280,7 @@ _GLOBAL(tm_reclaim)
/* Altivec (VEC/VMX/VR)*/
addi r7, r3, THREAD_CKVRSTATE
- SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
+ SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 ckvr_state */
mfvscr v0
li r6, VRSTATE_VSCR
stvx v0, r7, r6
@@ -286,12 +291,13 @@ _GLOBAL(tm_reclaim)
/* Floating Point (FP) */
addi r7, r3, THREAD_CKFPSTATE
- SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 transact fp state */
+ SAVE_32FPRS_VSRS(0, R6, R7) /* r6 scratch, r7 ckfp_state */
mffs fr0
stfd fr0,FPSTATE_FPSCR(r7)
- /* TM regs, incl TEXASR -- these live in thread_struct. Note they've
+ /*
+ * TM regs, incl TEXASR -- these live in thread_struct. Note they've
* been updated by the treclaim, to explain to userland the failure
* cause (aborted).
*/
@@ -327,7 +333,7 @@ _GLOBAL(tm_reclaim)
blr
- /*
+ /*
* void __tm_recheckpoint(struct thread_struct *thread)
* - Restore the checkpointed register state saved by tm_reclaim
* when we switch_to a process.
@@ -343,7 +349,8 @@ _GLOBAL(__tm_recheckpoint)
std r2, STK_GOT(r1)
stdu r1, -TM_FRAME_SIZE(r1)
- /* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
+ /*
+ * We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
* This is used for backing up the NVGPRs:
*/
SAVE_NVGPRS(r1)
@@ -352,8 +359,9 @@ _GLOBAL(__tm_recheckpoint)
addi r7, r3, PT_CKPT_REGS /* Thread's ckpt_regs */
- /* Make r7 look like an exception frame so that we
- * can use the neat GPRx(n) macros. r7 is now NOT a pt_regs ptr!
+ /*
+ * Make r7 look like an exception frame so that we can use the neat
+ * GPRx(n) macros. r7 is now NOT a pt_regs ptr!
*/
subi r7, r7, STACK_FRAME_OVERHEAD
@@ -421,14 +429,15 @@ restore_gprs:
REST_NVGPRS(r7) /* GPR14-31 */
- /* Load up PPR and DSCR here so we don't run with user values for long
- */
+ /* Load up PPR and DSCR here so we don't run with user values for long */
mtspr SPRN_DSCR, r5
mtspr SPRN_PPR, r6
- /* Do final sanity check on TEXASR to make sure FS is set. Do this
+ /*
+ * Do final sanity check on TEXASR to make sure FS is set. Do this
* here before we load up the userspace r1 so any bugs we hit will get
- * a call chain */
+ * a call chain.
+ */
mfspr r5, SPRN_TEXASR
srdi r5, r5, 16
li r6, (TEXASR_FS)@h
@@ -436,8 +445,9 @@ restore_gprs:
1: tdeqi r6, 0
EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,0
- /* Do final sanity check on MSR to make sure we are not transactional
- * or suspended
+ /*
+ * Do final sanity check on MSR to make sure we are not transactional
+ * or suspended.
*/
mfmsr r6
li r5, (MSR_TS_MASK)@higher
@@ -453,8 +463,8 @@ restore_gprs:
REST_GPR(6, r7)
/*
- * Store r1 and r5 on the stack so that we can access them
- * after we clear MSR RI.
+ * Store r1 and r5 on the stack so that we can access them after we
+ * clear MSR RI.
*/
REST_GPR(5, r7)
@@ -484,7 +494,8 @@ restore_gprs:
HMT_MEDIUM
- /* Our transactional state has now changed.
+ /*
+ * Our transactional state has now changed.
*
* Now just get out of here. Transactional (current) state will be
* updated once restore is called on the return path in the _switch-ed
diff --git a/arch/powerpc/kernel/trace/Makefile b/arch/powerpc/kernel/trace/Makefile
index d22d8bafb643..b1725ad3e13d 100644
--- a/arch/powerpc/kernel/trace/Makefile
+++ b/arch/powerpc/kernel/trace/Makefile
@@ -3,11 +3,9 @@
# Makefile for the powerpc trace subsystem
#
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
ifdef CONFIG_FUNCTION_TRACER
# do not trace tracer code
-CFLAGS_REMOVE_ftrace.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
obj32-$(CONFIG_FUNCTION_TRACER) += ftrace_32.o
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index 4bfbb54dee51..4bf051d3e21e 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -30,6 +30,16 @@
#ifdef CONFIG_DYNAMIC_FTRACE
+
+/*
+ * We generally only have a single long_branch tramp and at most 2 or 3 plt
+ * tramps generated. But, we don't use the plt tramps currently. We also allot
+ * 2 tramps after .text and .init.text. So, we only end up with around 3 usable
+ * tramps in total. Set aside 8 just to be sure.
+ */
+#define NUM_FTRACE_TRAMPS 8
+static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
+
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
@@ -85,13 +95,16 @@ static int test_24bit_addr(unsigned long ip, unsigned long addr)
return create_branch((unsigned int *)ip, addr, 0);
}
-#ifdef CONFIG_MODULES
-
static int is_bl_op(unsigned int op)
{
return (op & 0xfc000003) == 0x48000001;
}
+static int is_b_op(unsigned int op)
+{
+ return (op & 0xfc000003) == 0x48000000;
+}
+
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
static int offset;
@@ -104,6 +117,7 @@ static unsigned long find_bl_target(unsigned long ip, unsigned int op)
return ip + (long)offset;
}
+#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
@@ -270,6 +284,146 @@ __ftrace_make_nop(struct module *mod,
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
+static unsigned long find_ftrace_tramp(unsigned long ip)
+{
+ int i;
+
+ /*
+	 * We have the compiler-generated long_branch tramps at the end,
+	 * and we prefer those.
+ */
+ for (i = NUM_FTRACE_TRAMPS - 1; i >= 0; i--)
+ if (!ftrace_tramps[i])
+ continue;
+ else if (create_branch((void *)ip, ftrace_tramps[i], 0))
+ return ftrace_tramps[i];
+
+ return 0;
+}
+
+static int add_ftrace_tramp(unsigned long tramp)
+{
+ int i;
+
+ for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
+ if (!ftrace_tramps[i]) {
+ ftrace_tramps[i] = tramp;
+ return 0;
+ }
+
+ return -1;
+}
+
+/*
+ * If this is a compiler generated long_branch trampoline (essentially, a
+ * trampoline that has a branch to _mcount()), we re-write the branch to
+ * instead go to ftrace_[regs_]caller() and note down the location of this
+ * trampoline.
+ */
+static int setup_mcount_compiler_tramp(unsigned long tramp)
+{
+ int i, op;
+ unsigned long ptr;
+ static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
+
+ /* Is this a known long jump tramp? */
+ for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
+ if (!ftrace_tramps[i])
+ break;
+ else if (ftrace_tramps[i] == tramp)
+ return 0;
+
+ /* Is this a known plt tramp? */
+ for (i = 0; i < NUM_FTRACE_TRAMPS; i++)
+ if (!ftrace_plt_tramps[i])
+ break;
+ else if (ftrace_plt_tramps[i] == tramp)
+ return -1;
+
+ /* New trampoline -- read where this goes */
+ if (probe_kernel_read(&op, (void *)tramp, sizeof(int))) {
+ pr_debug("Fetching opcode failed.\n");
+ return -1;
+ }
+
+ /* Is this a 24 bit branch? */
+ if (!is_b_op(op)) {
+ pr_debug("Trampoline is not a long branch tramp.\n");
+ return -1;
+ }
+
+	/* Let's find where the pointer goes */
+ ptr = find_bl_target(tramp, op);
+
+ if (ptr != ppc_global_function_entry((void *)_mcount)) {
+ pr_debug("Trampoline target %p is not _mcount\n", (void *)ptr);
+ return -1;
+ }
+
+ /* Let's re-write the tramp to go to ftrace_[regs_]caller */
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ ptr = ppc_global_function_entry((void *)ftrace_regs_caller);
+#else
+ ptr = ppc_global_function_entry((void *)ftrace_caller);
+#endif
+ if (!create_branch((void *)tramp, ptr, 0)) {
+ pr_debug("%ps is not reachable from existing mcount tramp\n",
+ (void *)ptr);
+ return -1;
+ }
+
+ if (patch_branch((unsigned int *)tramp, ptr, 0)) {
+ pr_debug("REL24 out of range!\n");
+ return -1;
+ }
+
+ if (add_ftrace_tramp(tramp)) {
+ pr_debug("No tramp locations left\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned long tramp, ip = rec->ip;
+ unsigned int op;
+
+ /* Read where this goes */
+ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+ pr_err("Fetching opcode failed.\n");
+ return -EFAULT;
+ }
+
+	/* Make sure this is still a 24-bit jump */
+ if (!is_bl_op(op)) {
+ pr_err("Not expected bl: opcode is %x\n", op);
+ return -EINVAL;
+ }
+
+ /* Let's find where the pointer goes */
+ tramp = find_bl_target(ip, op);
+
+ pr_devel("ip:%lx jumps to %lx", ip, tramp);
+
+ if (setup_mcount_compiler_tramp(tramp)) {
+ /* Are other trampolines reachable? */
+ if (!find_ftrace_tramp(ip)) {
+ pr_err("No ftrace trampolines reachable from %ps\n",
+ (void *)ip);
+ return -EINVAL;
+ }
+ }
+
+ if (patch_instruction((unsigned int *)ip, PPC_INST_NOP)) {
+ pr_err("Patching NOP failed.\n");
+ return -EPERM;
+ }
+
+ return 0;
+}
+
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -286,7 +440,8 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, addr, 1);
new = PPC_INST_NOP;
return ftrace_modify_code(ip, old, new);
- }
+ } else if (core_kernel_text(ip))
+ return __ftrace_make_nop_kernel(rec, addr);
#ifdef CONFIG_MODULES
/*
@@ -456,6 +611,53 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */
+static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int op;
+ void *ip = (void *)rec->ip;
+ unsigned long tramp, entry, ptr;
+
+	/* Make sure we're being asked to patch a branch to a known ftrace addr */
+ entry = ppc_global_function_entry((void *)ftrace_caller);
+ ptr = ppc_global_function_entry((void *)addr);
+
+ if (ptr != entry) {
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ entry = ppc_global_function_entry((void *)ftrace_regs_caller);
+ if (ptr != entry) {
+#endif
+ pr_err("Unknown ftrace addr to patch: %ps\n", (void *)ptr);
+ return -EINVAL;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ }
+#endif
+ }
+
+ /* Make sure we have a nop */
+ if (probe_kernel_read(&op, ip, sizeof(op))) {
+ pr_err("Unable to read ftrace location %p\n", ip);
+ return -EFAULT;
+ }
+
+ if (op != PPC_INST_NOP) {
+ pr_err("Unexpected call sequence at %p: %x\n", ip, op);
+ return -EINVAL;
+ }
+
+ tramp = find_ftrace_tramp((unsigned long)ip);
+ if (!tramp) {
+ pr_err("No ftrace trampolines reachable from %ps\n", ip);
+ return -EINVAL;
+ }
+
+ if (patch_branch(ip, tramp, BRANCH_SET_LINK)) {
+ pr_err("Error patching branch to ftrace tramp!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
@@ -471,7 +673,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
old = PPC_INST_NOP;
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
- }
+ } else if (core_kernel_text(ip))
+ return __ftrace_make_call_kernel(rec, addr);
#ifdef CONFIG_MODULES
/*
@@ -603,6 +806,12 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
old = ftrace_call_replace(ip, old_addr, 1);
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
+ } else if (core_kernel_text(ip)) {
+ /*
+		 * We always patch out-of-range locations to go to the regs
+		 * variant, so there is nothing to do here.
+ */
+ return 0;
}
#ifdef CONFIG_MODULES
@@ -654,10 +863,54 @@ void arch_ftrace_update_code(int command)
ftrace_modify_all_code(command);
}
+#ifdef CONFIG_PPC64
+#define PACATOC offsetof(struct paca_struct, kernel_toc)
+
+#define PPC_LO(v) ((v) & 0xffff)
+#define PPC_HI(v) (((v) >> 16) & 0xffff)
+#define PPC_HA(v) PPC_HI ((v) + 0x8000)
+
+extern unsigned int ftrace_tramp_text[], ftrace_tramp_init[];
+
+int __init ftrace_dyn_arch_init(void)
+{
+ int i;
+ unsigned int *tramp[] = { ftrace_tramp_text, ftrace_tramp_init };
+ u32 stub_insns[] = {
+ 0xe98d0000 | PACATOC, /* ld r12,PACATOC(r13) */
+ 0x3d8c0000, /* addis r12,r12,<high> */
+ 0x398c0000, /* addi r12,r12,<low> */
+ 0x7d8903a6, /* mtctr r12 */
+ 0x4e800420, /* bctr */
+ };
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ unsigned long addr = ppc_global_function_entry((void *)ftrace_regs_caller);
+#else
+ unsigned long addr = ppc_global_function_entry((void *)ftrace_caller);
+#endif
+ long reladdr = addr - kernel_toc_addr();
+
+ if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) {
+ pr_err("Address of %ps out of range of kernel_toc.\n",
+ (void *)addr);
+ return -1;
+ }
+
+ for (i = 0; i < 2; i++) {
+ memcpy(tramp[i], stub_insns, sizeof(stub_insns));
+ tramp[i][1] |= PPC_HA(reladdr);
+ tramp[i][2] |= PPC_LO(reladdr);
+ add_ftrace_tramp((unsigned long)tramp[i]);
+ }
+
+ return 0;
+}
+#else
int __init ftrace_dyn_arch_init(void)
{
return 0;
}
+#endif
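
[Editor's note: the stub above splits the TOC-relative offset across addis/addi. Because addi sign-extends its 16-bit immediate, PPC_HA() pre-biases the high half by 0x8000 so the sign extension cancels out. A standalone userspace check of that arithmetic, with illustrative offsets only:]

#include <assert.h>
#include <stdint.h>

#define PPC_LO(v) ((v) & 0xffff)
#define PPC_HI(v) (((v) >> 16) & 0xffff)
#define PPC_HA(v) PPC_HI((v) + 0x8000)

int main(void)
{
	/* both have a low half >= 0x8000, the case HA exists for */
	long offsets[] = { 0x1234f000L, -0x12349000L };

	for (int i = 0; i < 2; i++) {
		long v = offsets[i];
		/* addis contributes HA << 16; addi adds LO, sign-extended */
		long rebuilt = ((long)(int16_t)PPC_HA(v) << 16) +
			       (long)(int16_t)PPC_LO(v);
		assert(rebuilt == v);
	}
	return 0;
}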
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --git a/arch/powerpc/kernel/trace/ftrace_64.S b/arch/powerpc/kernel/trace/ftrace_64.S
index e25f77c10a72..1782af2d1496 100644
--- a/arch/powerpc/kernel/trace/ftrace_64.S
+++ b/arch/powerpc/kernel/trace/ftrace_64.S
@@ -14,6 +14,18 @@
#include <asm/ppc-opcode.h>
#include <asm/export.h>
+.pushsection ".tramp.ftrace.text","aw",@progbits;
+.globl ftrace_tramp_text
+ftrace_tramp_text:
+ .space 64
+.popsection
+
+.pushsection ".tramp.ftrace.init","aw",@progbits;
+.globl ftrace_tramp_init
+ftrace_tramp_init:
+ .space 64
+.popsection
+
_GLOBAL(mcount)
_GLOBAL(_mcount)
EXPORT_SYMBOL(_mcount)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index ab1bd06d7c44..9a86572db1ef 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -247,8 +247,6 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
mdelay(MSEC_PER_SEC);
}
- if (in_interrupt())
- panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(signr);
@@ -535,10 +533,10 @@ int machine_check_e500mc(struct pt_regs *regs)
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
- printk("Machine Check Signal\n");
+ pr_cont("Machine Check Signal\n");
if (reason & MCSR_ICPERR) {
- printk("Instruction Cache Parity Error\n");
+ pr_cont("Instruction Cache Parity Error\n");
/*
* This is recoverable by invalidating the i-cache.
@@ -556,7 +554,7 @@ int machine_check_e500mc(struct pt_regs *regs)
}
if (reason & MCSR_DCPERR_MC) {
- printk("Data Cache Parity Error\n");
+ pr_cont("Data Cache Parity Error\n");
/*
* In write shadow mode we auto-recover from the error, but it
@@ -575,38 +573,38 @@ int machine_check_e500mc(struct pt_regs *regs)
}
if (reason & MCSR_L2MMU_MHIT) {
- printk("Hit on multiple TLB entries\n");
+ pr_cont("Hit on multiple TLB entries\n");
recoverable = 0;
}
if (reason & MCSR_NMI)
- printk("Non-maskable interrupt\n");
+ pr_cont("Non-maskable interrupt\n");
if (reason & MCSR_IF) {
- printk("Instruction Fetch Error Report\n");
+ pr_cont("Instruction Fetch Error Report\n");
recoverable = 0;
}
if (reason & MCSR_LD) {
- printk("Load Error Report\n");
+ pr_cont("Load Error Report\n");
recoverable = 0;
}
if (reason & MCSR_ST) {
- printk("Store Error Report\n");
+ pr_cont("Store Error Report\n");
recoverable = 0;
}
if (reason & MCSR_LDG) {
- printk("Guarded Load Error Report\n");
+ pr_cont("Guarded Load Error Report\n");
recoverable = 0;
}
if (reason & MCSR_TLBSYNC)
- printk("Simultaneous tlbsync operations\n");
+ pr_cont("Simultaneous tlbsync operations\n");
if (reason & MCSR_BSL2_ERR) {
- printk("Level 2 Cache Error\n");
+ pr_cont("Level 2 Cache Error\n");
recoverable = 0;
}
@@ -616,7 +614,7 @@ int machine_check_e500mc(struct pt_regs *regs)
addr = mfspr(SPRN_MCAR);
addr |= (u64)mfspr(SPRN_MCARU) << 32;
- printk("Machine Check %s Address: %#llx\n",
+ pr_cont("Machine Check %s Address: %#llx\n",
reason & MCSR_MEA ? "Effective" : "Physical", addr);
}
@@ -640,29 +638,29 @@ int machine_check_e500(struct pt_regs *regs)
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
- printk("Machine Check Signal\n");
+ pr_cont("Machine Check Signal\n");
if (reason & MCSR_ICPERR)
- printk("Instruction Cache Parity Error\n");
+ pr_cont("Instruction Cache Parity Error\n");
if (reason & MCSR_DCP_PERR)
- printk("Data Cache Push Parity Error\n");
+ pr_cont("Data Cache Push Parity Error\n");
if (reason & MCSR_DCPERR)
- printk("Data Cache Parity Error\n");
+ pr_cont("Data Cache Parity Error\n");
if (reason & MCSR_BUS_IAERR)
- printk("Bus - Instruction Address Error\n");
+ pr_cont("Bus - Instruction Address Error\n");
if (reason & MCSR_BUS_RAERR)
- printk("Bus - Read Address Error\n");
+ pr_cont("Bus - Read Address Error\n");
if (reason & MCSR_BUS_WAERR)
- printk("Bus - Write Address Error\n");
+ pr_cont("Bus - Write Address Error\n");
if (reason & MCSR_BUS_IBERR)
- printk("Bus - Instruction Data Error\n");
+ pr_cont("Bus - Instruction Data Error\n");
if (reason & MCSR_BUS_RBERR)
- printk("Bus - Read Data Bus Error\n");
+ pr_cont("Bus - Read Data Bus Error\n");
if (reason & MCSR_BUS_WBERR)
- printk("Bus - Write Data Bus Error\n");
+ pr_cont("Bus - Write Data Bus Error\n");
if (reason & MCSR_BUS_IPERR)
- printk("Bus - Instruction Parity Error\n");
+ pr_cont("Bus - Instruction Parity Error\n");
if (reason & MCSR_BUS_RPERR)
- printk("Bus - Read Parity Error\n");
+ pr_cont("Bus - Read Parity Error\n");
return 0;
}
@@ -680,19 +678,19 @@ int machine_check_e200(struct pt_regs *regs)
printk("Caused by (from MCSR=%lx): ", reason);
if (reason & MCSR_MCP)
- printk("Machine Check Signal\n");
+ pr_cont("Machine Check Signal\n");
if (reason & MCSR_CP_PERR)
- printk("Cache Push Parity Error\n");
+ pr_cont("Cache Push Parity Error\n");
if (reason & MCSR_CPERR)
- printk("Cache Parity Error\n");
+ pr_cont("Cache Parity Error\n");
if (reason & MCSR_EXCP_ERR)
- printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
+ pr_cont("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
if (reason & MCSR_BUS_IRERR)
- printk("Bus - Read Bus Error on instruction fetch\n");
+ pr_cont("Bus - Read Bus Error on instruction fetch\n");
if (reason & MCSR_BUS_DRERR)
- printk("Bus - Read Bus Error on data load\n");
+ pr_cont("Bus - Read Bus Error on data load\n");
if (reason & MCSR_BUS_WRERR)
- printk("Bus - Write Bus Error on buffered store or cache line push\n");
+ pr_cont("Bus - Write Bus Error on buffered store or cache line push\n");
return 0;
}
@@ -705,30 +703,30 @@ int machine_check_generic(struct pt_regs *regs)
printk("Caused by (from SRR1=%lx): ", reason);
switch (reason & 0x601F0000) {
case 0x80000:
- printk("Machine check signal\n");
+ pr_cont("Machine check signal\n");
break;
case 0: /* for 601 */
case 0x40000:
case 0x140000: /* 7450 MSS error and TEA */
- printk("Transfer error ack signal\n");
+ pr_cont("Transfer error ack signal\n");
break;
case 0x20000:
- printk("Data parity error signal\n");
+ pr_cont("Data parity error signal\n");
break;
case 0x10000:
- printk("Address parity error signal\n");
+ pr_cont("Address parity error signal\n");
break;
case 0x20000000:
- printk("L1 Data Cache error\n");
+ pr_cont("L1 Data Cache error\n");
break;
case 0x40000000:
- printk("L1 Instruction Cache error\n");
+ pr_cont("L1 Instruction Cache error\n");
break;
case 0x00100000:
- printk("L2 data cache parity error\n");
+ pr_cont("L2 data cache parity error\n");
break;
default:
- printk("Unknown values in msr\n");
+ pr_cont("Unknown values in msr\n");
}
return 0;
}
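
[Editor's note: these printk()-to-pr_cont() conversions are behavioural fixes, not cosmetics. Since each plain printk() starts a new log record, the "Caused by" prefix and its cause would otherwise land on separate lines. A kernel-context fragment illustrating the pattern, not part of the patch:]

/* Builds one log record across two calls; a bare printk() in place of
 * pr_cont() would begin a new record instead. */
static void report_mcsr_cause(unsigned long reason)
{
	printk("Caused by (from MCSR=%lx): ", reason);	/* opens the record */
	pr_cont("Machine Check Signal\n");		/* appends to it */
}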
@@ -741,9 +739,7 @@ void machine_check_exception(struct pt_regs *regs)
if (!nested)
nmi_enter();
- /* 64s accounts the mce in machine_check_early when in HVMODE */
- if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
- __this_cpu_inc(irq_stat.mce_exceptions);
+ __this_cpu_inc(irq_stat.mce_exceptions);
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -767,12 +763,17 @@ void machine_check_exception(struct pt_regs *regs)
if (check_io_access(regs))
goto bail;
- die("Machine check", regs, SIGBUS);
-
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
nmi_panic(regs, "Unrecoverable Machine check");
+ if (!nested)
+ nmi_exit();
+
+ die("Machine check", regs, SIGBUS);
+
+ return;
+
bail:
if (!nested)
nmi_exit();
@@ -1433,7 +1434,7 @@ void program_check_exception(struct pt_regs *regs)
goto bail;
} else {
printk(KERN_EMERG "Unexpected TM Bad Thing exception "
- "at %lx (msr 0x%x)\n", regs->nip, reason);
+ "at %lx (msr 0x%lx)\n", regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
}
}
@@ -1547,14 +1548,6 @@ void StackOverflow(struct pt_regs *regs)
panic("kernel stack overflow");
}
-void nonrecoverable_exception(struct pt_regs *regs)
-{
- printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
- regs->nip, regs->msr);
- debugger(regs);
- die("nonrecoverable exception", regs, SIGKILL);
-}
-
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
enum ctx_state prev_state = exception_enter();
@@ -1750,16 +1743,20 @@ void fp_unavailable_tm(struct pt_regs *regs)
* checkpointed FP registers need to be loaded.
*/
tm_reclaim_current(TM_CAUSE_FAC_UNAV);
- /* Reclaim didn't save out any FPRs to transact_fprs. */
+
+ /*
+	 * Reclaim initially saved out bogus (lazy) FPRs to ckfp_state, which
+	 * was then overwritten with thr->fp_state by tm_reclaim_thread().
+ *
+ * At this point, ck{fp,vr}_state contains the exact values we want to
+ * recheckpoint.
+ */
/* Enable FP for the task: */
current->thread.load_fp = 1;
- /* This loads and recheckpoints the FP registers from
- * thread.fpr[]. They will remain in registers after the
- * checkpoint so we don't need to reload them after.
- * If VMX is in use, the VRs now hold checkpointed values,
- * so we don't want to load the VRs from the thread_struct.
+ /*
+	 * Recheckpoint the registers from ckpt_regs and ck{fp, vr}_state.
*/
tm_recheckpoint(&current->thread);
}
@@ -2086,8 +2083,8 @@ void SPEFloatingPointRoundException(struct pt_regs *regs)
*/
void unrecoverable_exception(struct pt_regs *regs)
{
- printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
- regs->trap, regs->nip);
+ pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
+ regs->trap, regs->nip, regs->msr);
die("Unrecoverable exception", regs, SIGABRT);
}
NOKPROBE_SYMBOL(unrecoverable_exception);
diff --git a/arch/powerpc/kernel/vdso32/datapage.S b/arch/powerpc/kernel/vdso32/datapage.S
index 3745113fcc65..2a7eb5452aba 100644
--- a/arch/powerpc/kernel/vdso32/datapage.S
+++ b/arch/powerpc/kernel/vdso32/datapage.S
@@ -37,6 +37,7 @@ data_page_branch:
mtlr r0
addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
+ .cfi_restore lr
add r3,r0,r3
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vdso32/gettimeofday.S b/arch/powerpc/kernel/vdso32/gettimeofday.S
index 769c2624e0a6..1e0bc5955a40 100644
--- a/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -139,6 +139,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
*/
99:
li r0,__NR_clock_gettime
+ .cfi_restore lr
sc
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vdso64/datapage.S b/arch/powerpc/kernel/vdso64/datapage.S
index abf17feffe40..bf9668691511 100644
--- a/arch/powerpc/kernel/vdso64/datapage.S
+++ b/arch/powerpc/kernel/vdso64/datapage.S
@@ -37,6 +37,7 @@ data_page_branch:
mtlr r0
addi r3, r3, __kernel_datapage_offset-data_page_branch
lwz r0,0(r3)
+ .cfi_restore lr
add r3,r0,r3
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vdso64/gettimeofday.S b/arch/powerpc/kernel/vdso64/gettimeofday.S
index c002adcc694c..a4ed9edfd5f0 100644
--- a/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -169,6 +169,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
*/
99:
li r0,__NR_clock_gettime
+ .cfi_restore lr
sc
blr
.cfi_endproc
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 105a976323aa..434581bcd5b4 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -4,6 +4,9 @@
#else
#define PROVIDE32(x) PROVIDE(x)
#endif
+
+#define BSS_FIRST_SECTIONS *(.bss.prominit)
+
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
@@ -99,6 +102,9 @@ SECTIONS
#endif
/* careful! __ftr_alt_* sections need to be close to .text */
*(.text.hot TEXT_MAIN .text.fixup .text.unlikely .fixup __ftr_alt_* .ref.text);
+#ifdef CONFIG_PPC64
+ *(.tramp.ftrace.text);
+#endif
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
@@ -181,7 +187,15 @@ SECTIONS
*/
. = ALIGN(STRICT_ALIGN_SIZE);
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE) :kernel
+ . = ALIGN(PAGE_SIZE);
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
+ _sinittext = .;
+ INIT_TEXT
+ _einittext = .;
+#ifdef CONFIG_PPC64
+ *(.tramp.ftrace.init);
+#endif
+ } :kernel
/* .exit.text is discarded at runtime, not link time,
* to deal with references from __bug_table
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index f872c04bb5b1..64f1135e7732 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -3,8 +3,6 @@
# Makefile for Kernel-based Virtual Machine module
#
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
KVM := ../../../virt/kvm
@@ -75,7 +73,8 @@ kvm-hv-y += \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o \
- book3s_64_mmu_radix.o
+ book3s_64_mmu_radix.o \
+ book3s_hv_nested.o
kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 87348e498c89..fd9893bc7aa1 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -78,8 +78,11 @@ void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
ulong pc = kvmppc_get_pc(vcpu);
+ ulong lr = kvmppc_get_lr(vcpu);
if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
+ if ((lr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
+ kvmppc_set_lr(vcpu, lr & ~SPLIT_HACK_MASK);
vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
}
}
@@ -150,7 +153,6 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec)
case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
- case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
@@ -236,18 +238,35 @@ EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
- unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
-
- if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
- vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
+ /*
+ * This case (KVM_INTERRUPT_SET) should never actually arise for
+ * a pseries guest (because pseries guests expect their interrupt
+ * controllers to continue asserting an external interrupt request
+ * until it is acknowledged at the interrupt controller), but is
+ * included to avoid ABI breakage and potentially for other
+ * sorts of guest.
+ *
+ * There is a subtlety here: HV KVM does not test the
+ * external_oneshot flag in the code that synthesizes
+ * external interrupts for the guest just before entering
+ * the guest. That is OK even if userspace did do a
+ * KVM_INTERRUPT_SET on a pseries guest vcpu, because the
+ * caller (kvm_vcpu_ioctl_interrupt) does a kvm_vcpu_kick()
+ * which ends up doing a smp_send_reschedule(), which will
+ * pull the guest all the way out to the host, meaning that
+ * we will call kvmppc_core_prepare_to_enter() before entering
+ * the guest again, and that will handle the external_oneshot
+ * flag correctly.
+ */
+ if (irq->irq == KVM_INTERRUPT_SET)
+ vcpu->arch.external_oneshot = 1;
- kvmppc_book3s_queue_irqprio(vcpu, vec);
+ kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}
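
[Editor's note: for context, this path is reached from userspace through the standard KVM_INTERRUPT vcpu ioctl; the ioctl and constants below come from the uapi headers, with error handling elided in this sketch:]

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Assert an external interrupt on a vcpu. With this patch, a plain
 * KVM_INTERRUPT_SET is delivered once and then cleared via the new
 * external_oneshot flag. */
static int assert_external_irq(int vcpu_fd)
{
	struct kvm_interrupt irq = { .irq = KVM_INTERRUPT_SET };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}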
void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
- kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}
void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
@@ -278,7 +297,6 @@ static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
vec = BOOK3S_INTERRUPT_DECREMENTER;
break;
case BOOK3S_IRQPRIO_EXTERNAL:
- case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
vec = BOOK3S_INTERRUPT_EXTERNAL;
break;
@@ -352,8 +370,16 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
case BOOK3S_IRQPRIO_DECREMENTER:
/* DEC interrupts get cleared by mtdec */
return false;
- case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
- /* External interrupts get cleared by userspace */
+ case BOOK3S_IRQPRIO_EXTERNAL:
+ /*
+ * External interrupts get cleared by userspace
+ * except when set by the KVM_INTERRUPT ioctl with
+ * KVM_INTERRUPT_SET (not KVM_INTERRUPT_SET_LEVEL).
+ */
+ if (vcpu->arch.external_oneshot) {
+ vcpu->arch.external_oneshot = 0;
+ return true;
+ }
return false;
}
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 68e14afecac8..c615617e78ac 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -268,14 +268,13 @@ int kvmppc_mmu_hv_init(void)
{
unsigned long host_lpid, rsvd_lpid;
- if (!cpu_has_feature(CPU_FTR_HVMODE))
- return -EINVAL;
-
if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
return -EINVAL;
/* POWER7 has 10-bit LPIDs (12-bit in POWER8) */
- host_lpid = mfspr(SPRN_LPID);
+ host_lpid = 0;
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ host_lpid = mfspr(SPRN_LPID);
rsvd_lpid = LPID_RSVD;
kvmppc_init_lpid(rsvd_lpid + 1);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 998f8d089ac7..d68162ee159b 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -10,6 +10,9 @@
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/debugfs.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
@@ -26,87 +29,74 @@
*/
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
-int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
- struct kvmppc_pte *gpte, bool data, bool iswrite)
+int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 root,
+ u64 *pte_ret_p)
{
struct kvm *kvm = vcpu->kvm;
- u32 pid;
int ret, level, ps;
- __be64 prte, rpte;
- unsigned long ptbl;
- unsigned long root, pte, index;
- unsigned long rts, bits, offset;
- unsigned long gpa;
- unsigned long proc_tbl_size;
-
- /* Work out effective PID */
- switch (eaddr >> 62) {
- case 0:
- pid = vcpu->arch.pid;
- break;
- case 3:
- pid = 0;
- break;
- default:
- return -EINVAL;
- }
- proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
- if (pid * 16 >= proc_tbl_size)
- return -EINVAL;
-
- /* Read partition table to find root of tree for effective PID */
- ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
- ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
- if (ret)
- return ret;
+ unsigned long rts, bits, offset, index;
+ u64 pte, base, gpa;
+ __be64 rpte;
- root = be64_to_cpu(prte);
rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
((root & RTS2_MASK) >> RTS2_SHIFT);
bits = root & RPDS_MASK;
- root = root & RPDB_MASK;
+ base = root & RPDB_MASK;
offset = rts + 31;
- /* current implementations only support 52-bit space */
+ /* Current implementations only support 52-bit space */
if (offset != 52)
return -EINVAL;
+ /* Walk each level of the radix tree */
for (level = 3; level >= 0; --level) {
+ u64 addr;
+ /* Check a valid size */
if (level && bits != p9_supported_radix_bits[level])
return -EINVAL;
if (level == 0 && !(bits == 5 || bits == 9))
return -EINVAL;
offset -= bits;
index = (eaddr >> offset) & ((1UL << bits) - 1);
- /* check that low bits of page table base are zero */
- if (root & ((1UL << (bits + 3)) - 1))
+ /* Check that low bits of page table base are zero */
+ if (base & ((1UL << (bits + 3)) - 1))
return -EINVAL;
- ret = kvm_read_guest(kvm, root + index * 8,
- &rpte, sizeof(rpte));
- if (ret)
+ /* Read the entry from guest memory */
+ addr = base + (index * sizeof(rpte));
+ ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+ if (ret) {
+ if (pte_ret_p)
+ *pte_ret_p = addr;
return ret;
+ }
pte = __be64_to_cpu(rpte);
if (!(pte & _PAGE_PRESENT))
return -ENOENT;
+ /* Check if a leaf entry */
if (pte & _PAGE_PTE)
break;
- bits = pte & 0x1f;
- root = pte & 0x0fffffffffffff00ul;
+ /* Get ready to walk the next level */
+ base = pte & RPDB_MASK;
+ bits = pte & RPDS_MASK;
}
- /* need a leaf at lowest level; 512GB pages not supported */
+
+ /* Need a leaf at lowest level; 512GB pages not supported */
if (level < 0 || level == 3)
return -EINVAL;
- /* offset is now log base 2 of the page size */
+ /* We found a valid leaf PTE */
+ /* Offset is now log base 2 of the page size */
gpa = pte & 0x01fffffffffff000ul;
if (gpa & ((1ul << offset) - 1))
return -EINVAL;
- gpa += eaddr & ((1ul << offset) - 1);
+ gpa |= eaddr & ((1ul << offset) - 1);
for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
if (offset == mmu_psize_defs[ps].shift)
break;
gpte->page_size = ps;
+ gpte->page_shift = offset;
gpte->eaddr = eaddr;
gpte->raddr = gpa;
@@ -115,6 +105,77 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_read = !!(pte & _PAGE_READ);
gpte->may_write = !!(pte & _PAGE_WRITE);
gpte->may_execute = !!(pte & _PAGE_EXEC);
+
+ gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
+
+ if (pte_ret_p)
+ *pte_ret_p = pte;
+
+ return 0;
+}
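
[Editor's note: to make the walk's arithmetic concrete — with the 52-bit space enforced above and the {13, 9, 9, 9-or-5} level sizes checked against p9_supported_radix_bits, each iteration peels off one level's index until offset is the page-size log2. A standalone sketch with illustrative values:]

#include <stdio.h>

int main(void)
{
	unsigned long eaddr = 0x00000000deadb000UL;	/* illustrative */
	int bits[4] = { 13, 9, 9, 9 };	/* levels 3..0; level 0 may also be 5 */
	unsigned long offset = 52;	/* 52-bit space, as checked above */

	for (int i = 0; i < 4; i++) {
		offset -= bits[i];
		unsigned long index = (eaddr >> offset) & ((1UL << bits[i]) - 1);
		printf("level %d index: 0x%lx\n", 3 - i, index);
	}
	printf("page shift: %lu\n", offset);	/* 12, i.e. 4K pages */
	return 0;
}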
+
+/*
+ * Used to walk a partition or process table radix tree in guest memory.
+ * Note: We exploit the fact that a partition table and a process
+ * table have the same layout, a partition-scoped page table and a
+ * process-scoped page table have the same layout, and the 2nd
+ * doubleword of a partition table entry has the same layout as
+ * the PTCR register.
+ */
+int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, u64 table,
+ int table_index, u64 *pte_ret_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ int ret;
+ unsigned long size, ptbl, root;
+ struct prtb_entry entry;
+
+ if ((table & PRTS_MASK) > 24)
+ return -EINVAL;
+ size = 1ul << ((table & PRTS_MASK) + 12);
+
+ /* Is the table big enough to contain this entry? */
+ if ((table_index * sizeof(entry)) >= size)
+ return -EINVAL;
+
+ /* Read the table to find the root of the radix tree */
+ ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+ ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+ if (ret)
+ return ret;
+
+ /* Root is stored in the first double word */
+ root = be64_to_cpu(entry.prtb0);
+
+ return kvmppc_mmu_walk_radix_tree(vcpu, eaddr, gpte, root, pte_ret_p);
+}
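
[Editor's note: the size check above follows the encoding of the PRTS field: the table is 2^(PRTS+12) bytes, from 4KB (PRTS = 0) up to the 64GB cap enforced by the PRTS > 24 test, with 16-byte entries (struct prtb_entry is two doublewords). A quick standalone check of that arithmetic:]

#include <assert.h>

int main(void)
{
	unsigned long prts = 0;			/* smallest table */
	unsigned long size = 1UL << (prts + 12);

	assert(size == 4096);
	assert(size / 16 == 256);		/* 16-byte entries */

	prts = 24;				/* largest value accepted above */
	assert((1UL << (prts + 12)) == (64UL << 30));	/* 64GB */
	return 0;
}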
+
+int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
+ struct kvmppc_pte *gpte, bool data, bool iswrite)
+{
+ u32 pid;
+ u64 pte;
+ int ret;
+
+ /* Work out effective PID */
+ switch (eaddr >> 62) {
+ case 0:
+ pid = vcpu->arch.pid;
+ break;
+ case 3:
+ pid = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = kvmppc_mmu_radix_translate_table(vcpu, eaddr, gpte,
+ vcpu->kvm->arch.process_table, pid, &pte);
+ if (ret)
+ return ret;
+
+ /* Check privilege (applies only to process scoped translations) */
if (kvmppc_get_msr(vcpu) & MSR_PR) {
if (pte & _PAGE_PRIVILEGED) {
gpte->may_read = 0;
@@ -137,20 +198,46 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
}
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift)
+ unsigned int pshift, unsigned int lpid)
{
unsigned long psize = PAGE_SIZE;
+ int psi;
+ long rc;
+ unsigned long rb;
if (pshift)
psize = 1UL << pshift;
+ else
+ pshift = PAGE_SHIFT;
addr &= ~(psize - 1);
- radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid_page(lpid, addr, psize);
+ return;
+ }
+
+ psi = shift_to_mmu_psize(pshift);
+ rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+ lpid, rb);
+ if (rc)
+ pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
}
-static void kvmppc_radix_flush_pwc(struct kvm *kvm)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
{
- radix__flush_pwc_lpid(kvm->arch.lpid);
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_pwc_lpid(lpid);
+ return;
+ }
+
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
}
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
@@ -195,23 +282,38 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
kmem_cache_free(kvm_pmd_cache, pmdp);
}
-static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
- unsigned long gpa, unsigned int shift)
+/* Called with kvm->mmu_lock held */
+void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
+ unsigned int shift, struct kvm_memory_slot *memslot,
+ unsigned int lpid)
{
- unsigned long page_size = 1ul << shift;
unsigned long old;
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ unsigned long page_size = PAGE_SIZE;
+ unsigned long hpa;
old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if (old & _PAGE_DIRTY) {
- unsigned long gfn = gpa >> PAGE_SHIFT;
- struct kvm_memory_slot *memslot;
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
+
+ /* The following only applies to L1 entries */
+ if (lpid != kvm->arch.lpid)
+ return;
+ if (!memslot) {
memslot = gfn_to_memslot(kvm, gfn);
- if (memslot && memslot->dirty_bitmap)
- kvmppc_update_dirty_map(memslot, gfn, page_size);
+ if (!memslot)
+ return;
}
+ if (shift)
+ page_size = 1ul << shift;
+
+ gpa &= ~(page_size - 1);
+ hpa = old & PTE_RPN_MASK;
+ kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size);
+
+ if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap)
+ kvmppc_update_dirty_map(memslot, gfn, page_size);
}
/*
@@ -224,7 +326,8 @@ static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
* and emit a warning if encountered, but there may already be data
* corruption due to the unexpected mappings.
*/
-static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
+static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
+ unsigned int lpid)
{
if (full) {
memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
@@ -238,14 +341,15 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, p,
pte_pfn(*p) << PAGE_SHIFT,
- PAGE_SHIFT);
+ PAGE_SHIFT, NULL, lpid);
}
}
kvmppc_pte_free(pte);
}
-static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
+static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
+ unsigned int lpid)
{
unsigned long im;
pmd_t *p = pmd;
@@ -260,20 +364,21 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
WARN_ON_ONCE(1);
kvmppc_unmap_pte(kvm, (pte_t *)p,
pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
- PMD_SHIFT);
+ PMD_SHIFT, NULL, lpid);
}
} else {
pte_t *pte;
pte = pte_offset_map(p, 0);
- kvmppc_unmap_free_pte(kvm, pte, full);
+ kvmppc_unmap_free_pte(kvm, pte, full, lpid);
pmd_clear(p);
}
}
kvmppc_pmd_free(pmd);
}
-static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
+static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
+ unsigned int lpid)
{
unsigned long iu;
pud_t *p = pud;
@@ -287,36 +392,40 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
pmd_t *pmd;
pmd = pmd_offset(p, 0);
- kvmppc_unmap_free_pmd(kvm, pmd, true);
+ kvmppc_unmap_free_pmd(kvm, pmd, true, lpid);
pud_clear(p);
}
}
pud_free(kvm->mm, pud);
}
-void kvmppc_free_radix(struct kvm *kvm)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
{
unsigned long ig;
- pgd_t *pgd;
- if (!kvm->arch.pgtable)
- return;
- pgd = kvm->arch.pgtable;
for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
pud_t *pud;
if (!pgd_present(*pgd))
continue;
pud = pud_offset(pgd, 0);
- kvmppc_unmap_free_pud(kvm, pud);
+ kvmppc_unmap_free_pud(kvm, pud, lpid);
pgd_clear(pgd);
}
- pgd_free(kvm->mm, kvm->arch.pgtable);
- kvm->arch.pgtable = NULL;
+}
+
+void kvmppc_free_radix(struct kvm *kvm)
+{
+ if (kvm->arch.pgtable) {
+ kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable,
+ kvm->arch.lpid);
+ pgd_free(kvm->mm, kvm->arch.pgtable);
+ kvm->arch.pgtable = NULL;
+ }
}
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
{
pte_t *pte = pte_offset_kernel(pmd, 0);
@@ -326,13 +435,13 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
* flushing the PWC again.
*/
pmd_clear(pmd);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);
- kvmppc_unmap_free_pte(kvm, pte, false);
+ kvmppc_unmap_free_pte(kvm, pte, false, lpid);
}
static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
- unsigned long gpa)
+ unsigned long gpa, unsigned int lpid)
{
pmd_t *pmd = pmd_offset(pud, 0);
@@ -342,9 +451,9 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
* so can be freed without flushing the PWC again.
*/
pud_clear(pud);
- kvmppc_radix_flush_pwc(kvm);
+ kvmppc_radix_flush_pwc(kvm, lpid);
- kvmppc_unmap_free_pmd(kvm, pmd, false);
+ kvmppc_unmap_free_pmd(kvm, pmd, false, lpid);
}
/*
@@ -356,8 +465,10 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
*/
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
-static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
- unsigned int level, unsigned long mmu_seq)
+int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
+ unsigned long gpa, unsigned int level,
+ unsigned long mmu_seq, unsigned int lpid,
+ unsigned long *rmapp, struct rmap_nested **n_rmap)
{
pgd_t *pgd;
pud_t *pud, *new_pud = NULL;
@@ -366,7 +477,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
int ret;
/* Traverse the guest's 2nd-level tree, allocate new levels needed */
- pgd = kvm->arch.pgtable + pgd_index(gpa);
+ pgd = pgtable + pgd_index(gpa);
pud = NULL;
if (pgd_present(*pgd))
pud = pud_offset(pgd, gpa);
@@ -423,7 +534,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
/* Valid 1GB page here already, remove it */
- kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
+ kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL,
+ lpid);
}
if (level == 2) {
if (!pud_none(*pud)) {
@@ -432,9 +544,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
* install a large page, so remove and free the page
* table page.
*/
- kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
+ kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid);
}
kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
goto out_unlock;
}
@@ -458,7 +572,7 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
PTE_BITS_MUST_MATCH);
kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
- 0, pte_val(pte), lgpa, PMD_SHIFT);
+ 0, pte_val(pte), lgpa, PMD_SHIFT);
ret = 0;
goto out_unlock;
}
@@ -472,7 +586,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
/* Valid 2MB page here already, remove it */
- kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
+ kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL,
+ lpid);
}
if (level == 1) {
if (!pmd_none(*pmd)) {
@@ -481,9 +596,11 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
* install a large page, so remove and free the page
* table page.
*/
- kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
+ kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid);
}
kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
goto out_unlock;
}
@@ -508,6 +625,8 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
goto out_unlock;
}
kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
+ if (rmapp && n_rmap)
+ kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap);
ret = 0;
out_unlock:
@@ -521,95 +640,49 @@ static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
return ret;
}
-int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
- unsigned long ea, unsigned long dsisr)
+bool kvmppc_hv_handle_set_rc(struct kvm *kvm, pgd_t *pgtable, bool writing,
+ unsigned long gpa, unsigned int lpid)
+{
+ unsigned long pgflags;
+ unsigned int shift;
+ pte_t *ptep;
+
+ /*
+ * Need to set an R or C bit in the 2nd-level tables;
+ * since we are just helping out the hardware here,
+ * it is sufficient to do what the hardware does.
+ */
+ pgflags = _PAGE_ACCESSED;
+ if (writing)
+ pgflags |= _PAGE_DIRTY;
+ /*
+ * We are walking the secondary (partition-scoped) page table here.
+ * We can do this without disabling irq because the Linux MM
+ * subsystem doesn't do THP splits and collapses on this tree.
+ */
+ ptep = __find_linux_pte(pgtable, gpa, NULL, &shift);
+ if (ptep && pte_present(*ptep) && (!writing || pte_write(*ptep))) {
+ kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift);
+ return true;
+ }
+ return false;
+}
+
+int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
+ unsigned long gpa,
+ struct kvm_memory_slot *memslot,
+ bool writing, bool kvm_ro,
+ pte_t *inserted_pte, unsigned int *levelp)
{
struct kvm *kvm = vcpu->kvm;
- unsigned long mmu_seq;
- unsigned long gpa, gfn, hva;
- struct kvm_memory_slot *memslot;
struct page *page = NULL;
- long ret;
- bool writing;
+ unsigned long mmu_seq;
+ unsigned long hva, gfn = gpa >> PAGE_SHIFT;
bool upgrade_write = false;
bool *upgrade_p = &upgrade_write;
pte_t pte, *ptep;
- unsigned long pgflags;
unsigned int shift, level;
-
- /* Check for unusual errors */
- if (dsisr & DSISR_UNSUPP_MMU) {
- pr_err("KVM: Got unsupported MMU fault\n");
- return -EFAULT;
- }
- if (dsisr & DSISR_BADACCESS) {
- /* Reflect to the guest as DSI */
- pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
-
- /* Translate the logical address and get the page */
- gpa = vcpu->arch.fault_gpa & ~0xfffUL;
- gpa &= ~0xF000000000000000ul;
- gfn = gpa >> PAGE_SHIFT;
- if (!(dsisr & DSISR_PRTABLE_FAULT))
- gpa |= ea & 0xfff;
- memslot = gfn_to_memslot(kvm, gfn);
-
- /* No memslot means it's an emulated MMIO region */
- if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
- if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
- DSISR_SET_RC)) {
- /*
- * Bad address in guest page table tree, or other
- * unusual error - reflect it to the guest as DSI.
- */
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
- return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
- dsisr & DSISR_ISSTORE);
- }
-
- writing = (dsisr & DSISR_ISSTORE) != 0;
- if (memslot->flags & KVM_MEM_READONLY) {
- if (writing) {
- /* give the guest a DSI */
- dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
- kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
- return RESUME_GUEST;
- }
- upgrade_p = NULL;
- }
-
- if (dsisr & DSISR_SET_RC) {
- /*
- * Need to set an R or C bit in the 2nd-level tables;
- * since we are just helping out the hardware here,
- * it is sufficient to do what the hardware does.
- */
- pgflags = _PAGE_ACCESSED;
- if (writing)
- pgflags |= _PAGE_DIRTY;
- /*
- * We are walking the secondary page table here. We can do this
- * without disabling irq.
- */
- spin_lock(&kvm->mmu_lock);
- ptep = __find_linux_pte(kvm->arch.pgtable,
- gpa, NULL, &shift);
- if (ptep && pte_present(*ptep) &&
- (!writing || pte_write(*ptep))) {
- kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
- gpa, shift);
- dsisr &= ~DSISR_SET_RC;
- }
- spin_unlock(&kvm->mmu_lock);
- if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
- DSISR_PROTFAULT | DSISR_SET_RC)))
- return RESUME_GUEST;
- }
+ int ret;
/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
@@ -622,7 +695,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
* is that the page is writable.
*/
hva = gfn_to_hva_memslot(memslot, gfn);
- if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
+ if (!kvm_ro && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
upgrade_write = true;
} else {
unsigned long pfn;
@@ -690,7 +763,12 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
/* Allocate space in the tree and write the PTE */
- ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);
+ ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level,
+ mmu_seq, kvm->arch.lpid, NULL, NULL);
+ if (inserted_pte)
+ *inserted_pte = pte;
+ if (levelp)
+ *levelp = level;
if (page) {
if (!ret && (pte_val(pte) & _PAGE_WRITE))
@@ -698,6 +776,82 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
put_page(page);
}
+ return ret;
+}
+
+int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned long ea, unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long gpa, gfn;
+ struct kvm_memory_slot *memslot;
+ long ret;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ bool kvm_ro = false;
+
+ /* Check for unusual errors */
+ if (dsisr & DSISR_UNSUPP_MMU) {
+ pr_err("KVM: Got unsupported MMU fault\n");
+ return -EFAULT;
+ }
+ if (dsisr & DSISR_BADACCESS) {
+ /* Reflect to the guest as DSI */
+ pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+
+ /* Translate the logical address */
+ gpa = vcpu->arch.fault_gpa & ~0xfffUL;
+ gpa &= ~0xF000000000000000ul;
+ gfn = gpa >> PAGE_SHIFT;
+ if (!(dsisr & DSISR_PRTABLE_FAULT))
+ gpa |= ea & 0xfff;
+
+ /* Get the corresponding memslot */
+ memslot = gfn_to_memslot(kvm, gfn);
+
+ /* No memslot means it's an emulated MMIO region */
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
+ DSISR_SET_RC)) {
+ /*
+ * Bad address in guest page table tree, or other
+ * unusual error - reflect it to the guest as DSI.
+ */
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+ return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea, writing);
+ }
+
+ if (memslot->flags & KVM_MEM_READONLY) {
+ if (writing) {
+ /* give the guest a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
+ DSISR_PROTFAULT);
+ return RESUME_GUEST;
+ }
+ kvm_ro = true;
+ }
+
+ /* Failed to set the reference/change bits */
+ if (dsisr & DSISR_SET_RC) {
+ spin_lock(&kvm->mmu_lock);
+ if (kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable,
+ writing, gpa, kvm->arch.lpid))
+ dsisr &= ~DSISR_SET_RC;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
+ DSISR_PROTFAULT | DSISR_SET_RC)))
+ return RESUME_GUEST;
+ }
+
+ /* Try to insert a pte */
+ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
+ kvm_ro, NULL, NULL);
+
if (ret == 0 || ret == -EAGAIN)
ret = RESUME_GUEST;
return ret;
@@ -710,20 +864,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
pte_t *ptep;
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
- unsigned long old;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
- if (ptep && pte_present(*ptep)) {
- old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
- gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
- if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
- unsigned long psize = PAGE_SIZE;
- if (shift)
- psize = 1ul << shift;
- kvmppc_update_dirty_map(memslot, gfn, psize);
- }
- }
+ if (ptep && pte_present(*ptep))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
+ kvm->arch.lpid);
return 0;
}
@@ -778,7 +923,7 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
ret = 1 << (shift - PAGE_SHIFT);
kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
gpa, shift);
- kvmppc_radix_tlbie_page(kvm, gpa, shift);
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
}
return ret;
}
@@ -863,6 +1008,215 @@ static void pmd_ctor(void *addr)
memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
+struct debugfs_radix_state {
+ struct kvm *kvm;
+ struct mutex mutex;
+ unsigned long gpa;
+ int lpid;
+ int chars_left;
+ int buf_index;
+ char buf[128];
+ u8 hdr;
+};
+
+static int debugfs_radix_open(struct inode *inode, struct file *file)
+{
+ struct kvm *kvm = inode->i_private;
+ struct debugfs_radix_state *p;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ kvm_get_kvm(kvm);
+ p->kvm = kvm;
+ mutex_init(&p->mutex);
+ file->private_data = p;
+
+ return nonseekable_open(inode, file);
+}
+
+static int debugfs_radix_release(struct inode *inode, struct file *file)
+{
+ struct debugfs_radix_state *p = file->private_data;
+
+ kvm_put_kvm(p->kvm);
+ kfree(p);
+ return 0;
+}
+
+static ssize_t debugfs_radix_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct debugfs_radix_state *p = file->private_data;
+ ssize_t ret, r;
+ unsigned long n;
+ struct kvm *kvm;
+ unsigned long gpa;
+ pgd_t *pgt;
+ struct kvm_nested_guest *nested;
+ pgd_t pgd, *pgdp;
+ pud_t pud, *pudp;
+ pmd_t pmd, *pmdp;
+ pte_t *ptep;
+ int shift;
+ unsigned long pte;
+
+ kvm = p->kvm;
+ if (!kvm_is_radix(kvm))
+ return 0;
+
+ ret = mutex_lock_interruptible(&p->mutex);
+ if (ret)
+ return ret;
+
+ if (p->chars_left) {
+ n = p->chars_left;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf + p->buf_index, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index += n;
+ buf += n;
+ len -= n;
+ ret = n;
+ if (r) {
+ if (!n)
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+
+ gpa = p->gpa;
+ nested = NULL;
+ pgt = NULL;
+ while (len != 0 && p->lpid >= 0) {
+ if (gpa >= RADIX_PGTABLE_RANGE) {
+ gpa = 0;
+ pgt = NULL;
+ if (nested) {
+ kvmhv_put_nested(nested);
+ nested = NULL;
+ }
+ p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid);
+ p->hdr = 0;
+ if (p->lpid < 0)
+ break;
+ }
+ if (!pgt) {
+ if (p->lpid == 0) {
+ pgt = kvm->arch.pgtable;
+ } else {
+ nested = kvmhv_get_nested(kvm, p->lpid, false);
+ if (!nested) {
+ gpa = RADIX_PGTABLE_RANGE;
+ continue;
+ }
+ pgt = nested->shadow_pgtable;
+ }
+ }
+ n = 0;
+ if (!p->hdr) {
+ if (p->lpid > 0)
+ n = scnprintf(p->buf, sizeof(p->buf),
+ "\nNested LPID %d: ", p->lpid);
+ n += scnprintf(p->buf + n, sizeof(p->buf) - n,
+ "pgdir: %lx\n", (unsigned long)pgt);
+ p->hdr = 1;
+ goto copy;
+ }
+
+ pgdp = pgt + pgd_index(gpa);
+ pgd = READ_ONCE(*pgdp);
+ if (!(pgd_val(pgd) & _PAGE_PRESENT)) {
+ gpa = (gpa & PGDIR_MASK) + PGDIR_SIZE;
+ continue;
+ }
+
+ pudp = pud_offset(&pgd, gpa);
+ pud = READ_ONCE(*pudp);
+ if (!(pud_val(pud) & _PAGE_PRESENT)) {
+ gpa = (gpa & PUD_MASK) + PUD_SIZE;
+ continue;
+ }
+ if (pud_val(pud) & _PAGE_PTE) {
+ pte = pud_val(pud);
+ shift = PUD_SHIFT;
+ goto leaf;
+ }
+
+ pmdp = pmd_offset(&pud, gpa);
+ pmd = READ_ONCE(*pmdp);
+ if (!(pmd_val(pmd) & _PAGE_PRESENT)) {
+ gpa = (gpa & PMD_MASK) + PMD_SIZE;
+ continue;
+ }
+ if (pmd_val(pmd) & _PAGE_PTE) {
+ pte = pmd_val(pmd);
+ shift = PMD_SHIFT;
+ goto leaf;
+ }
+
+ ptep = pte_offset_kernel(&pmd, gpa);
+ pte = pte_val(READ_ONCE(*ptep));
+ if (!(pte & _PAGE_PRESENT)) {
+ gpa += PAGE_SIZE;
+ continue;
+ }
+ shift = PAGE_SHIFT;
+ leaf:
+ n = scnprintf(p->buf, sizeof(p->buf),
+ " %lx: %lx %d\n", gpa, pte, shift);
+ gpa += 1ul << shift;
+ copy:
+ p->chars_left = n;
+ if (n > len)
+ n = len;
+ r = copy_to_user(buf, p->buf, n);
+ n -= r;
+ p->chars_left -= n;
+ p->buf_index = n;
+ buf += n;
+ len -= n;
+ ret += n;
+ if (r) {
+ if (!ret)
+ ret = -EFAULT;
+ break;
+ }
+ }
+ p->gpa = gpa;
+ if (nested)
+ kvmhv_put_nested(nested);
+
+ out:
+ mutex_unlock(&p->mutex);
+ return ret;
+}
+
+static ssize_t debugfs_radix_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ return -EACCES;
+}
+
+static const struct file_operations debugfs_radix_fops = {
+ .owner = THIS_MODULE,
+ .open = debugfs_radix_open,
+ .release = debugfs_radix_release,
+ .read = debugfs_radix_read,
+ .write = debugfs_radix_write,
+ .llseek = generic_file_llseek,
+};
+
+void kvmhv_radix_debugfs_init(struct kvm *kvm)
+{
+ kvm->arch.radix_dentry = debugfs_create_file("radix", 0400,
+ kvm->arch.debugfs_dir, kvm,
+ &debugfs_radix_fops);
+}
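
The debugfs file registered above emits a "pgdir:" header (plus a "Nested LPID %d:" header per nested guest) followed by one leaf PTE per line in the " %lx: %lx %d" (gpa, pte, shift) format produced by the scnprintf() calls in debugfs_radix_read(). A minimal userspace reader — a sketch only, with the path layout assumed from debugfs_create_dir("vm%d", ...) and debugfs_create_file("radix", ...) in this patch, and debugfs mounted at /sys/kernel/debug:

	/* Sketch: stream the per-VM radix page-table dump to stdout. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char **argv)
	{
		char path[128], buf[4096];
		ssize_t n;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <vm-pid>\n", argv[0]);
			return 1;
		}
		snprintf(path, sizeof(path),
			 "/sys/kernel/debug/kvm/vm%s/radix", argv[1]);
		fd = open(path, O_RDONLY);	/* file mode is 0400: root only */
		if (fd < 0) {
			perror(path);
			return 1;
		}
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}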
+
int kvmppc_radix_init(void)
{
unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 9a3f2646ecc7..62a8d03ba7e9 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -363,6 +363,40 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
return ret;
}
+static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long tce)
+{
+ unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
+ enum dma_data_direction dir = iommu_tce_direction(tce);
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long ua = 0;
+
+ /* Allow userspace to poison TCE table */
+ if (dir == DMA_NONE)
+ return H_SUCCESS;
+
+ if (iommu_tce_check_gpa(stt->page_shift, gpa))
+ return H_TOO_HARD;
+
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ return H_TOO_HARD;
+
+ list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
+ unsigned long hpa = 0;
+ struct mm_iommu_table_group_mem_t *mem;
+ long shift = stit->tbl->it_page_shift;
+
+ mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
+ if (!mem)
+ return H_TOO_HARD;
+
+ if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
+ return H_TOO_HARD;
+ }
+
+ return H_SUCCESS;
+}
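
kvmppc_tce_validate() relies on the PAPR TCE layout: the two low-order bits of a TCE are the PCI read/write permission bits, the remaining bits are the guest real address, and the permission pair maps onto a DMA direction (both bits clear is the "poison" case that is deliberately allowed through). A standalone decoding sketch — the bit values mirror the kernel's asm/tce.h definitions to the best of my reading, and the enum is a local stand-in for enum dma_data_direction, not the kernel type:

	#include <stdint.h>

	#define TCE_PCI_WRITE	0x2ULL	/* assumed from asm/tce.h */
	#define TCE_PCI_READ	0x1ULL

	enum tce_dir { TCE_NONE, TCE_TO_DEVICE, TCE_FROM_DEVICE, TCE_BIDIR };

	static enum tce_dir tce_direction(uint64_t tce)
	{
		int rd = !!(tce & TCE_PCI_READ);
		int wr = !!(tce & TCE_PCI_WRITE);

		if (rd && wr)
			return TCE_BIDIR;
		if (rd)
			return TCE_TO_DEVICE;	/* device reads guest memory */
		if (wr)
			return TCE_FROM_DEVICE;	/* device writes guest memory */
		return TCE_NONE;		/* poisoned entry: always valid */
	}

	static uint64_t tce_gpa(uint64_t tce)
	{
		return tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}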
+
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
unsigned long hpa = 0;
@@ -376,11 +410,10 @@ static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
- /* it_userspace allocation might be delayed */
- return H_TOO_HARD;
+ return H_SUCCESS;
mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
if (!mem)
@@ -401,7 +434,7 @@ static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
long ret;
if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (dir == DMA_NONE)
return H_SUCCESS;
@@ -449,15 +482,15 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
return H_TOO_HARD;
if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (mm_iommu_mapped_inc(mem))
- return H_CLOSED;
+ return H_TOO_HARD;
ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
if (WARN_ON_ONCE(ret)) {
mm_iommu_mapped_dec(mem);
- return H_HARDWARE;
+ return H_TOO_HARD;
}
if (dir != DMA_NONE)
@@ -517,8 +550,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
ret = H_PARAMETER;
goto unlock_exit;
}
@@ -533,14 +565,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
entry, ua, dir);
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_clear_tce(stit->tbl, entry);
goto unlock_exit;
-
- WARN_ON_ONCE(1);
- kvmppc_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry, tce);
@@ -583,7 +611,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
return ret;
idx = srcu_read_lock(&vcpu->kvm->srcu);
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
ret = H_TOO_HARD;
goto unlock_exit;
}
@@ -599,10 +627,26 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
ret = kvmppc_tce_validate(stt, tce);
if (ret != H_SUCCESS)
goto unlock_exit;
+ }
+
+ for (i = 0; i < npages; ++i) {
+ /*
+ * This looks unsafe, because we validate, then regrab
+ * the TCE from userspace which could have been changed by
+ * another thread.
+ *
+ * But it actually is safe, because the relevant checks will be
+ * re-executed in the following code. If userspace tries to
+ * change this dodgily it will result in a messier failure mode
+ * but won't threaten the host.
+ */
+ if (get_user(tce, tces + i)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
+ tce = be64_to_cpu(tce);
- if (kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
- &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -610,14 +654,10 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_clear_tce(stit->tbl, entry);
goto unlock_exit;
-
- WARN_ON_ONCE(1);
- kvmppc_clear_tce(stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry + i, tce);
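
The restructuring above splits H_PUT_TCE_INDIRECT into two passes: validate every TCE first, then re-fetch each entry with get_user() while applying it. The shape of that pattern in isolation — validate_one() and apply_one() are illustrative stand-ins, and the safety argument is exactly the in-code comment's: the second fetch may see a value userspace changed after validation, but only one that the apply path re-checks:

	/* Two-pass, double-fetch-tolerant update (sketch, not kernel code as-is). */
	static long put_list(u64 __user *tces, unsigned long npages)
	{
		unsigned long i;
		u64 tce;
		long ret;

		for (i = 0; i < npages; ++i) {	/* pass 1: validate all */
			if (get_user(tce, tces + i))
				return H_TOO_HARD;
			ret = validate_one(be64_to_cpu(tce));
			if (ret != H_SUCCESS)
				return ret;
		}
		for (i = 0; i < npages; ++i) {	/* pass 2: re-fetch and apply */
			if (get_user(tce, tces + i))
				return H_TOO_HARD;
			ret = apply_one(be64_to_cpu(tce));	/* re-checks everything */
			if (ret != H_SUCCESS)
				return ret;
		}
		return H_SUCCESS;
	}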
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 6821ead4b4eb..2206bc729b9a 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -87,6 +87,7 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
* Validates TCE address.
* At the moment flags and page mask are validated.
@@ -94,14 +95,14 @@ EXPORT_SYMBOL_GPL(kvmppc_find_table);
* to the table and user space is supposed to process them), we can skip
* checking other things (such as TCE is a guest RAM address or the page
* was actually allocated).
- *
- * WARNING: This will be called in real-mode on HV KVM and virtual
- * mode on PR KVM
*/
-long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
+static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long tce)
{
unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
enum dma_data_direction dir = iommu_tce_direction(tce);
+ struct kvmppc_spapr_tce_iommu_table *stit;
+ unsigned long ua = 0;
/* Allow userspace to poison TCE table */
if (dir == DMA_NONE)
@@ -110,9 +111,25 @@ long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
if (iommu_tce_check_gpa(stt->page_shift, gpa))
return H_PARAMETER;
+ if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
+ return H_TOO_HARD;
+
+ list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
+ unsigned long hpa = 0;
+ struct mm_iommu_table_group_mem_t *mem;
+ long shift = stit->tbl->it_page_shift;
+
+ mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
+ if (!mem)
+ return H_TOO_HARD;
+
+ if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
+ return H_TOO_HARD;
+ }
+
return H_SUCCESS;
}
-EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
/* Note on the use of page_address() in real mode,
*
@@ -164,10 +181,10 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
-long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
unsigned long *ua, unsigned long **prmap)
{
- unsigned long gfn = gpa >> PAGE_SHIFT;
+ unsigned long gfn = tce >> PAGE_SHIFT;
struct kvm_memory_slot *memslot;
memslot = search_memslots(kvm_memslots(kvm), gfn);
@@ -175,7 +192,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
return -EINVAL;
*ua = __gfn_to_hva_memslot(memslot, gfn) |
- (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+ (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
if (prmap)
@@ -184,7 +201,7 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
return 0;
}
-EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
@@ -197,7 +214,7 @@ static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
if (!ret && ((*direction == DMA_FROM_DEVICE) ||
(*direction == DMA_BIDIRECTIONAL))) {
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
/*
* kvmppc_rm_tce_iommu_do_map() updates the UA cache after
* calling this so we still get here a valid UA.
@@ -223,7 +240,7 @@ static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
{
struct mm_iommu_table_group_mem_t *mem = NULL;
const unsigned long pgsize = 1ULL << tbl->it_page_shift;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
/* it_userspace allocation might be delayed */
@@ -287,7 +304,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
{
long ret;
unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
struct mm_iommu_table_group_mem_t *mem;
if (!pua)
@@ -300,10 +317,10 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
&hpa)))
- return H_HARDWARE;
+ return H_TOO_HARD;
if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
- return H_CLOSED;
+ return H_TOO_HARD;
ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
if (ret) {
@@ -368,13 +385,12 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
if (ret != H_SUCCESS)
return ret;
- ret = kvmppc_tce_validate(stt, tce);
+ ret = kvmppc_rm_tce_validate(stt, tce);
if (ret != H_SUCCESS)
return ret;
dir = iommu_tce_direction(tce);
- if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
+ if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
entry = ioba >> stt->page_shift;
@@ -387,14 +403,10 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
stit->tbl, entry, ua, dir);
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
return ret;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry, tce);
@@ -480,7 +492,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
*/
struct mm_iommu_table_group_mem_t *mem;
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
return H_TOO_HARD;
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
@@ -496,12 +508,12 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
* We do not require memory to be preregistered in this case
* so lock rmap and do __find_linux_pte_or_hugepte().
*/
- if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
return H_TOO_HARD;
rmap = (void *) vmalloc_to_phys(rmap);
if (WARN_ON_ONCE_RM(!rmap))
- return H_HARDWARE;
+ return H_TOO_HARD;
/*
* Synchronize with the MMU notifier callbacks in
@@ -521,14 +533,16 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
for (i = 0; i < npages; ++i) {
unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
- ret = kvmppc_tce_validate(stt, tce);
+ ret = kvmppc_rm_tce_validate(stt, tce);
if (ret != H_SUCCESS)
goto unlock_exit;
+ }
+
+ for (i = 0; i < npages; ++i) {
+ unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
ua = 0;
- if (kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
- &ua, NULL))
+ if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
return H_PARAMETER;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
@@ -536,14 +550,11 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
stit->tbl, entry + i, ua,
iommu_tce_direction(tce));
- if (ret == H_SUCCESS)
- continue;
-
- if (ret == H_TOO_HARD)
+ if (ret != H_SUCCESS) {
+ kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
+ entry);
goto unlock_exit;
-
- WARN_ON_ONCE_RM(1);
- kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
+ }
}
kvmppc_tce_put(stt, entry + i, tce);
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 36b11c5a0dbb..8c7e933e942e 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -36,7 +36,6 @@
#define OP_31_XOP_MTSR 210
#define OP_31_XOP_MTSRIN 242
#define OP_31_XOP_TLBIEL 274
-#define OP_31_XOP_TLBIE 306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1 308
#define OP_31_XOP_SLBMTE 402
@@ -110,7 +109,7 @@ static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
vcpu->arch.tar_tm = vcpu->arch.tar;
vcpu->arch.lr_tm = vcpu->arch.regs.link;
- vcpu->arch.cr_tm = vcpu->arch.cr;
+ vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
vcpu->arch.xer_tm = vcpu->arch.regs.xer;
vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
@@ -129,7 +128,7 @@ static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
vcpu->arch.tar = vcpu->arch.tar_tm;
vcpu->arch.regs.link = vcpu->arch.lr_tm;
- vcpu->arch.cr = vcpu->arch.cr_tm;
+ vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
vcpu->arch.regs.xer = vcpu->arch.xer_tm;
vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}
@@ -141,7 +140,7 @@ static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
uint64_t texasr;
/* CR0 = 0 | MSR[TS] | 0 */
- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
@@ -220,7 +219,7 @@ void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
tm_abort(ra_val);
/* CR0 = 0 | MSR[TS] | 0 */
- vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)) |
(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
<< CR0_SHIFT);
@@ -494,8 +493,8 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
preempt_disable();
- vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
- (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));
+ vcpu->arch.regs.ccr = (CR0_TBEGIN_FAILURE |
+ (vcpu->arch.regs.ccr & ~(CR0_MASK << CR0_SHIFT)));
vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3e3a71594e63..bf8def2159c3 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -50,6 +50,7 @@
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
+#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
@@ -104,6 +105,10 @@ static bool indep_threads_mode = true;
module_param(indep_threads_mode, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(indep_threads_mode, "Independent-threads mode (only on POWER9)");
+static bool one_vm_per_core;
+module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires indep_threads_mode=N)");
+
#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
.set = param_set_int,
@@ -117,6 +122,16 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif
+/* If set, guests are allowed to create and control nested guests */
+static bool nested = true;
+module_param(nested, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
+
+static inline bool nesting_enabled(struct kvm *kvm)
+{
+ return kvm->arch.nested_enable && kvm_is_radix(kvm);
+}
+
/* If set, the threads on each CPU core have to be in the same MMU mode */
static bool no_mixing_hpt_and_radix;
@@ -173,6 +188,10 @@ static bool kvmppc_ipi_thread(int cpu)
{
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ /* If we're a nested hypervisor, fall back to ordinary IPIs for now */
+ if (kvmhv_on_pseries())
+ return false;
+
/* On POWER9 we can use msgsnd to IPI any cpu */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -410,8 +429,8 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
- pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
- vcpu->arch.cr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
+ pr_err("cr = %.8lx xer = %.16lx dsisr = %.8x\n",
+ vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
pr_err("fault dar = %.16lx dsisr = %.8x\n",
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
@@ -730,8 +749,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
- * lwsync in book3s_hv_rmhandlers.S just before the
- * fast_guest_return label.
+ * smp_wmb() in kvmppc_guest_entry_inject_int().
*/
smp_rmb();
vc = vcpu->arch.vcore;
@@ -912,6 +930,19 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
break;
}
return RESUME_HOST;
+ case H_SET_DABR:
+ ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
+ break;
+ case H_SET_XDABR:
+ ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ break;
+ case H_GET_TCE:
+ ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
case H_PUT_TCE:
ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
@@ -935,6 +966,32 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
+ case H_RANDOM:
+ if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
+ ret = H_HARDWARE;
+ break;
+
+ case H_SET_PARTITION_TABLE:
+ ret = H_FUNCTION;
+ if (nesting_enabled(vcpu->kvm))
+ ret = kvmhv_set_partition_table(vcpu);
+ break;
+ case H_ENTER_NESTED:
+ ret = H_FUNCTION;
+ if (!nesting_enabled(vcpu->kvm))
+ break;
+ ret = kvmhv_enter_nested_guest(vcpu);
+ if (ret == H_INTERRUPT) {
+ kvmppc_set_gpr(vcpu, 3, 0);
+ return -EINTR;
+ }
+ break;
+ case H_TLB_INVALIDATE:
+ ret = H_FUNCTION;
+ if (nesting_enabled(vcpu->kvm))
+ ret = kvmhv_do_nested_tlbie(vcpu);
+ break;
+
default:
return RESUME_HOST;
}
@@ -943,6 +1000,24 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+/*
+ * Handle H_CEDE in the nested virtualization case where we haven't
+ * called the real-mode hcall handlers in book3s_hv_rmhandlers.S.
+ * This has to be done early, not in kvmppc_pseries_do_hcall(), so
+ * that the cede logic in kvmhv_run_single_vcpu() works properly.
+ */
+static void kvmppc_nested_cede(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.shregs.msr |= MSR_EE;
+ vcpu->arch.ceded = 1;
+ smp_mb();
+ if (vcpu->arch.prodded) {
+ vcpu->arch.prodded = 0;
+ smp_mb();
+ vcpu->arch.ceded = 0;
+ }
+}
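
kvmppc_nested_cede() is one half of a store-fence-load handshake with the prod side (H_PROD): each side stores its own flag, issues a full barrier, then reads the other side's flag, so at least one side is guaranteed to observe the other and the vcpu cannot sleep while a prod is pending. A self-contained model using C11 atomics, with illustrative names:

	#include <stdatomic.h>

	struct vcpu_flags { atomic_int ceded, prodded; };

	/* cede side: declare intent to sleep, then look for a prod */
	static void cede(struct vcpu_flags *v)
	{
		atomic_store(&v->ceded, 1);
		atomic_thread_fence(memory_order_seq_cst);
		if (atomic_load(&v->prodded)) {
			atomic_store(&v->prodded, 0);
			atomic_store(&v->ceded, 0);	/* stay runnable */
		}
	}

	/* prod side: set the flag, then check whether a sleeper needs waking */
	static int prod(struct vcpu_flags *v)
	{
		atomic_store(&v->prodded, 1);
		atomic_thread_fence(memory_order_seq_cst);
		return atomic_load(&v->ceded);	/* nonzero: wake the vcpu */
	}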
+
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
switch (cmd) {
@@ -1085,7 +1160,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-/* Called with vcpu->arch.vcore->lock held */
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1190,7 +1264,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOK3S_INTERRUPT_H_INST_STORAGE:
vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
- vcpu->arch.fault_dsisr = 0;
+ vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
+ DSISR_SRR1_MATCH_64S;
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
/*
@@ -1206,10 +1283,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_debug_inst(run, vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1225,12 +1299,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = EMULATE_FAIL;
if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300)) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
+ cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmppc_emulate_doorbell_instr(vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
- }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1265,6 +1335,104 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
return r;
}
+static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
+{
+ int r;
+ int srcu_idx;
+
+ vcpu->stat.sum_exits++;
+
+ /*
+ * This can happen if an interrupt occurs in the last stages
+ * of guest entry or the first stages of guest exit (i.e. after
+ * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+ * and before setting it to KVM_GUEST_MODE_HOST_HV).
+ * That can happen due to a bug, or due to a machine check
+ * occurring at just the wrong time.
+ */
+ if (vcpu->arch.shregs.msr & MSR_HV) {
+ pr_emerg("KVM trap in HV mode while nested!\n");
+ pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+ vcpu->arch.trap, kvmppc_get_pc(vcpu),
+ vcpu->arch.shregs.msr);
+ kvmppc_dump_regs(vcpu);
+ return RESUME_HOST;
+ }
+ switch (vcpu->arch.trap) {
+ /* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_HV_DECREMENTER:
+ vcpu->stat.dec_exits++;
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_EXTERNAL:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_HOST;
+ break;
+ case BOOK3S_INTERRUPT_H_DOORBELL:
+ case BOOK3S_INTERRUPT_H_VIRT:
+ vcpu->stat.ext_intr_exits++;
+ r = RESUME_GUEST;
+ break;
+ /* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */
+ case BOOK3S_INTERRUPT_HMI:
+ case BOOK3S_INTERRUPT_PERFMON:
+ case BOOK3S_INTERRUPT_SYSTEM_RESET:
+ r = RESUME_GUEST;
+ break;
+ case BOOK3S_INTERRUPT_MACHINE_CHECK:
+ /* Pass the machine check to the L1 guest */
+ r = RESUME_HOST;
+ /* Print the MCE event to host console. */
+ machine_check_print_event_info(&vcpu->arch.mce_evt, false);
+ break;
+ /*
+ * We get these next two if the guest accesses a page which it thinks
+ * it has mapped but which is not actually present, either because
+ * it is for an emulated I/O device or because the corresponding
+ * host page has been paged out.
+ */
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmhv_nested_page_fault(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
+ vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) &
+ DSISR_SRR1_MATCH_64S;
+ if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
+ vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
+ srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvmhv_nested_page_fault(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
+ break;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ case BOOK3S_INTERRUPT_HV_SOFTPATCH:
+ /*
+ * This occurs for various TM-related instructions that
+ * we need to emulate on POWER9 DD2.2. We have already
+ * handled the cases where the guest was in real-suspend
+ * mode and was transitioning to transactional state.
+ */
+ r = kvmhv_p9_tm_emulation(vcpu);
+ break;
+#endif
+
+ case BOOK3S_INTERRUPT_HV_RM_HARD:
+ vcpu->arch.trap = 0;
+ r = RESUME_GUEST;
+ if (!xive_enabled())
+ kvmppc_xics_rm_complete(vcpu, 0);
+ break;
+ default:
+ r = RESUME_HOST;
+ break;
+ }
+
+ return r;
+}
+
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
@@ -1555,6 +1723,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
break;
+ case KVM_REG_PPC_PTCR:
+ *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr);
+ break;
default:
r = -EINVAL;
break;
@@ -1786,6 +1957,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
atomic_dec(&vcpu->arch.vcore->online_count);
vcpu->arch.online = i;
break;
+ case KVM_REG_PPC_PTCR:
+ vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val);
+ break;
default:
r = -EINVAL;
break;
@@ -2019,15 +2193,18 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
* Set the default HFSCR for the guest from the host value.
* This value is only used on POWER9.
* On POWER9, we want to virtualize the doorbell facility, so we
- * turn off the HFSCR bit, which causes those instructions to trap.
+ * don't set the HFSCR_MSGP bit; those instructions then trap
+ * and we emulate them.
*/
- vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
- if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB |
+ HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ vcpu->arch.hfscr &= mfspr(SPRN_HFSCR);
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ vcpu->arch.hfscr |= HFSCR_TM;
+ }
+ if (cpu_has_feature(CPU_FTR_TM_COMP))
vcpu->arch.hfscr |= HFSCR_TM;
- else if (!cpu_has_feature(CPU_FTR_TM_COMP))
- vcpu->arch.hfscr &= ~HFSCR_TM;
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- vcpu->arch.hfscr &= ~HFSCR_MSGP;
kvmppc_mmu_book3s_hv_init(vcpu);
@@ -2242,10 +2419,18 @@ static void kvmppc_release_hwthread(int cpu)
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+ cpumask_t *cpu_in_guest;
int i;
cpu = cpu_first_thread_sibling(cpu);
- cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ if (nested) {
+ cpumask_set_cpu(cpu, &nested->need_tlb_flush);
+ cpu_in_guest = &nested->cpu_in_guest;
+ } else {
+ cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
+ cpu_in_guest = &kvm->arch.cpu_in_guest;
+ }
/*
* Make sure setting of bit in need_tlb_flush precedes
* testing of cpu_in_guest bits. The matching barrier on
@@ -2253,13 +2438,23 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
*/
smp_mb();
for (i = 0; i < threads_per_core; ++i)
- if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
+ if (cpumask_test_cpu(cpu + i, cpu_in_guest))
smp_call_function_single(cpu + i, do_nothing, NULL, 1);
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
{
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvm *kvm = vcpu->kvm;
+ int prev_cpu;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (nested)
+ prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id];
+ else
+ prev_cpu = vcpu->arch.prev_cpu;
/*
* With radix, the guest can do TLB invalidations itself,
@@ -2273,12 +2468,46 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
* ran to flush the TLB. The TLB is shared between threads,
* so we use a single bit in .need_tlb_flush for all 4 threads.
*/
- if (vcpu->arch.prev_cpu != pcpu) {
- if (vcpu->arch.prev_cpu >= 0 &&
- cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
+ if (prev_cpu != pcpu) {
+ if (prev_cpu >= 0 &&
+ cpu_first_thread_sibling(prev_cpu) !=
cpu_first_thread_sibling(pcpu))
- radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
- vcpu->arch.prev_cpu = pcpu;
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+ if (nested)
+ nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
+ else
+ vcpu->arch.prev_cpu = pcpu;
+ }
+}
+
+static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+ int lpid;
+
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ pcpu &= ~0x3UL;
+
+ if (nested) {
+ lpid = nested->shadow_lpid;
+ need_tlb_flush = &nested->need_tlb_flush;
+ } else {
+ lpid = kvm->arch.lpid;
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+ }
+
+ mtspr(SPRN_LPID, lpid);
+ isync();
+ smp_mb();
+
+ if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ radix__local_flush_tlb_lpid_guest(lpid);
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
}
}
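
kvmppc_radix_check_need_tlb_flush() is the consumer half of the protocol whose producer is radix_flush_cpu() above: when a vcpu migrates off a core, the producer sets that core's bit (the first-thread CPU id) in need_tlb_flush and IPIs any sibling thread still in the guest; on the next entry the consumer flushes the guest's LPID locally and clears the bit. A compressed model of both halves — local_flush_guest_tlb() is an illustrative stand-in for radix__local_flush_tlb_lpid_guest():

	/* Sketch of the deferred per-core TLB-flush handshake. */
	static void producer_mark_stale(cpumask_t *need_tlb_flush, int prev_cpu)
	{
		int core = cpu_first_thread_sibling(prev_cpu);

		cpumask_set_cpu(core, need_tlb_flush);
		smp_mb();	/* order the bit set before the cpu_in_guest test */
		/* ... IPI any sibling thread still running in the guest ... */
	}

	static void consumer_flush_if_stale(cpumask_t *need_tlb_flush,
					    int pcpu, int lpid)
	{
		int core = cpu_first_thread_sibling(pcpu);

		if (cpumask_test_cpu(core, need_tlb_flush)) {
			local_flush_guest_tlb(lpid);	/* illustrative stand-in */
			cpumask_clear_cpu(core, need_tlb_flush);
		}
	}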
@@ -2493,6 +2722,10 @@ static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
return false;
+ /* In one_vm_per_core mode, require all vcores to be from the same vm */
+ if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm)
+ return false;
+
/* Some POWER9 chips require all threads to be in the same MMU mode */
if (no_mixing_hpt_and_radix &&
kvm_is_radix(vc->kvm) != kvm_is_radix(cip->vc[0]->kvm))
@@ -2600,6 +2833,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
spin_lock(&vc->lock);
now = get_tb();
for_each_runnable_thread(i, vcpu, vc) {
+ /*
+ * It's safe to unlock the vcore in the loop here, because
+ * for_each_runnable_thread() is safe against removal of
+ * the vcpu, and the vcore state is VCORE_EXITING here,
+ * so any vcpus becoming runnable will have their arch.trap
+ * set to zero and can't actually run in the guest.
+ */
+ spin_unlock(&vc->lock);
/* cancel pending dec exception if dec is positive */
if (now < vcpu->arch.dec_expires &&
kvmppc_core_pending_dec(vcpu))
@@ -2615,6 +2856,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
+ spin_lock(&vc->lock);
if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
if (vcpu->arch.pending_exceptions)
kvmppc_core_prepare_to_enter(vcpu);
@@ -2963,8 +3205,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
spin_unlock(&core_info.vc[sub]->lock);
if (kvm_is_radix(vc->kvm)) {
- int tmp = pcpu;
-
/*
* Do we need to flush the process scoped TLB for the LPAR?
*
@@ -2975,17 +3215,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
*
* Hash must be flushed in realmode in order to use tlbiel.
*/
- mtspr(SPRN_LPID, vc->kvm->arch.lpid);
- isync();
-
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- tmp &= ~0x3UL;
-
- if (cpumask_test_cpu(tmp, &vc->kvm->arch.need_tlb_flush)) {
- radix__local_flush_tlb_lpid_guest(vc->kvm->arch.lpid);
- /* Clear the bit after the TLB flush */
- cpumask_clear_cpu(tmp, &vc->kvm->arch.need_tlb_flush);
- }
+ kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL);
}
/*
@@ -3080,6 +3310,300 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
}
/*
+ * Load up hypervisor-mode registers on P9.
+ */
+static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ s64 hdec;
+ u64 tb, purr, spurr;
+ int trap;
+ unsigned long host_hfscr = mfspr(SPRN_HFSCR);
+ unsigned long host_ciabr = mfspr(SPRN_CIABR);
+ unsigned long host_dawr = mfspr(SPRN_DAWR);
+ unsigned long host_dawrx = mfspr(SPRN_DAWRX);
+ unsigned long host_psscr = mfspr(SPRN_PSSCR);
+ unsigned long host_pidr = mfspr(SPRN_PID);
+
+ hdec = time_limit - mftb();
+ if (hdec < 0)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+ mtspr(SPRN_HDEC, hdec);
+
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr);
+ mtspr(SPRN_DPDES, vc->dpdes);
+ mtspr(SPRN_VTB, vc->vtb);
+
+ local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
+ local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, vcpu->arch.purr);
+ mtspr(SPRN_SPURR, vcpu->arch.spurr);
+
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+ mtspr(SPRN_DAWR, vcpu->arch.dawr);
+ mtspr(SPRN_DAWRX, vcpu->arch.dawrx);
+ }
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
+ mtspr(SPRN_IC, vcpu->arch.ic);
+ mtspr(SPRN_PID, vcpu->arch.pid);
+
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
+
+ mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
+ mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
+ mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
+ mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
+
+ mtspr(SPRN_AMOR, ~0UL);
+
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+
+ kvmppc_xive_push_vcpu(vcpu);
+
+ mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
+ mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
+
+ trap = __kvmhv_vcpu_entry_p9(vcpu);
+
+ /* Advance host PURR/SPURR by the amount used by guest */
+ purr = mfspr(SPRN_PURR);
+ spurr = mfspr(SPRN_SPURR);
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
+ purr - vcpu->arch.purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
+ spurr - vcpu->arch.spurr);
+ vcpu->arch.purr = purr;
+ vcpu->arch.spurr = spurr;
+
+ vcpu->arch.ic = mfspr(SPRN_IC);
+ vcpu->arch.pid = mfspr(SPRN_PID);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+
+ vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
+ vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
+ vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
+ vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
+
+ mtspr(SPRN_PSSCR, host_psscr);
+ mtspr(SPRN_HFSCR, host_hfscr);
+ mtspr(SPRN_CIABR, host_ciabr);
+ mtspr(SPRN_DAWR, host_dawr);
+ mtspr(SPRN_DAWRX, host_dawrx);
+ mtspr(SPRN_PID, host_pidr);
+
+ /*
+ * Since this is radix, do an eieio; tlbsync; ptesync sequence in
+ * case we interrupted the guest between a tlbie and a ptesync.
+ */
+ asm volatile("eieio; tlbsync; ptesync");
+
+ mtspr(SPRN_LPID, vcpu->kvm->arch.host_lpid); /* restore host LPID */
+ isync();
+
+ vc->dpdes = mfspr(SPRN_DPDES);
+ vc->vtb = mfspr(SPRN_VTB);
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, 0);
+
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ tb = mftb();
+ if ((tb & 0xffffff) < (new_tb & 0xffffff))
+ mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ vc->tb_offset_applied = 0;
+ }
+
+ mtspr(SPRN_HDEC, 0x7fffffff);
+ mtspr(SPRN_LPCR, vcpu->kvm->arch.host_lpcr);
+
+ return trap;
+}
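
The tb_offset handling in kvmhv_load_hv_regs_and_go() leans on a property of SPRN_TBU40: a move-to writes only the upper 40 bits of the timebase while the low 24 bits keep free-running. If those running low bits are behind the target's low bits after the write, the timebase has effectively lost a 2^24 carry, so the code re-writes the upper bits one unit higher. A worked model of that carry fix, with the timebase simulated rather than read from hardware:

	#include <stdint.h>

	static uint64_t tb;	/* stand-in for the hardware timebase */

	/* Model of mtspr(SPRN_TBU40): upper 40 bits set, low 24 preserved. */
	static void write_tbu40(uint64_t val)
	{
		tb = (val & ~0xffffffULL) | (tb & 0xffffffULL);
	}

	static void apply_offset(int64_t offset)
	{
		uint64_t new_tb = tb + offset;

		write_tbu40(new_tb);
		/* If the running low 24 bits already passed new_tb's low bits,
		 * the effective value is 2^24 too small; bump the upper bits.
		 */
		if ((tb & 0xffffff) < (new_tb & 0xffffff))
			write_tbu40(new_tb + 0x1000000);
	}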
+
+/*
+ * Virtual-mode guest entry for POWER9 and later when the host and
+ * guest are both using the radix MMU. The LPIDR has already been set.
+ */
+int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long host_dscr = mfspr(SPRN_DSCR);
+ unsigned long host_tidr = mfspr(SPRN_TIDR);
+ unsigned long host_iamr = mfspr(SPRN_IAMR);
+ s64 dec;
+ u64 tb;
+ int trap, save_pmu;
+
+ dec = mfspr(SPRN_DEC);
+ tb = mftb();
+ if (dec < 512)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+ local_paca->kvm_hstate.dec_expires = dec + tb;
+ if (local_paca->kvm_hstate.dec_expires < time_limit)
+ time_limit = local_paca->kvm_hstate.dec_expires;
+
+ vcpu->arch.ceded = 0;
+
+ kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
+
+ kvmppc_subcore_enter_guest();
+
+ vc->entry_exit_map = 1;
+ vc->in_guest = 1;
+
+ if (vcpu->arch.vpa.pinned_addr) {
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
+ }
+
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
+ kvmhv_load_guest_pmu(vcpu);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+ load_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ load_vr_state(&vcpu->arch.vr);
+#endif
+
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ mtspr(SPRN_WORT, vcpu->arch.wort);
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
+
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+
+ if (kvmhv_on_pseries()) {
+ /* call our hypervisor to load up HV regs and go */
+ struct hv_guest_state hvregs;
+
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ hvregs.version = HV_GUEST_STATE_VERSION;
+ if (vcpu->arch.nested) {
+ hvregs.lpid = vcpu->arch.nested->shadow_lpid;
+ hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
+ } else {
+ hvregs.lpid = vcpu->kvm->arch.lpid;
+ hvregs.vcpu_token = vcpu->vcpu_id;
+ }
+ hvregs.hdec_expiry = time_limit;
+ trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+ __pa(&vcpu->arch.regs));
+ kvmhv_restore_hv_return_state(vcpu, &hvregs);
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+
+ /* H_CEDE has to be handled now, not later */
+ if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
+ kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
+ kvmppc_nested_cede(vcpu);
+ trap = 0;
+ }
+ } else {
+ trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+ }
+
+ vcpu->arch.slb_max = 0;
+ dec = mfspr(SPRN_DEC);
+ tb = mftb();
+ vcpu->arch.dec_expires = dec + tb;
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ vcpu->arch.wort = mfspr(SPRN_WORT);
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+
+ mtspr(SPRN_PSPB, 0);
+ mtspr(SPRN_WORT, 0);
+ mtspr(SPRN_AMR, 0);
+ mtspr(SPRN_UAMOR, 0);
+ mtspr(SPRN_DSCR, host_dscr);
+ mtspr(SPRN_TIDR, host_tidr);
+ mtspr(SPRN_IAMR, host_iamr);
+ mtspr(SPRN_PSPB, 0);
+
+ msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
+ store_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ store_vr_state(&vcpu->arch.vr);
+#endif
+
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+
+ save_pmu = 1;
+ if (vcpu->arch.vpa.pinned_addr) {
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
+ save_pmu = lp->pmcregs_in_use;
+ }
+
+ kvmhv_save_guest_pmu(vcpu, save_pmu);
+
+ vc->entry_exit_map = 0x101;
+ vc->in_guest = 0;
+
+ mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
+
+ kvmhv_load_host_pmu();
+
+ kvmppc_subcore_exit_guest();
+
+ return trap;
+}
+
+/*
* Wait for some other vcpu thread to execute us, and
* wake us up when we need to handle something in the host.
*/
@@ -3256,6 +3780,11 @@ out:
trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
}
+/*
+ * This never fails for a radix guest, as none of the operations it
+ * performs in that case can fail or report failure.
+ * kvmhv_run_single_vcpu() relies on this fact.
+ */
static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu)
{
int r = 0;
@@ -3405,6 +3934,171 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
return vcpu->arch.ret;
}
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
+ struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr)
+{
+ int trap, r, pcpu;
+ int srcu_idx;
+ struct kvmppc_vcore *vc;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *nested = vcpu->arch.nested;
+
+ trace_kvmppc_run_vcpu_enter(vcpu);
+
+ kvm_run->exit_reason = 0;
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+
+ vc = vcpu->arch.vcore;
+ vcpu->arch.ceded = 0;
+ vcpu->arch.run_task = current;
+ vcpu->arch.kvm_run = kvm_run;
+ vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
+ vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
+ vcpu->arch.busy_preempt = TB_NIL;
+ vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
+ vc->runnable_threads[0] = vcpu;
+ vc->n_runnable = 1;
+ vc->runner = vcpu;
+
+ /* See if the MMU is ready to go */
+ if (!kvm->arch.mmu_ready)
+ kvmhv_setup_mmu(vcpu);
+
+ if (need_resched())
+ cond_resched();
+
+ kvmppc_update_vpas(vcpu);
+
+ init_vcore_to_run(vc);
+ vc->preempt_tb = TB_NIL;
+
+ preempt_disable();
+ pcpu = smp_processor_id();
+ vc->pcpu = pcpu;
+ kvmppc_prepare_radix_vcpu(vcpu, pcpu);
+
+ local_irq_disable();
+ hard_irq_disable();
+ if (signal_pending(current))
+ goto sigpend;
+ if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+ goto out;
+
+ if (!nested) {
+ kvmppc_core_prepare_to_enter(vcpu);
+ if (vcpu->arch.doorbell_request) {
+ vc->dpdes = 1;
+ smp_wmb();
+ vcpu->arch.doorbell_request = 0;
+ }
+ if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
+ &vcpu->arch.pending_exceptions))
+ lpcr |= LPCR_MER;
+ } else if (vcpu->arch.pending_exceptions ||
+ vcpu->arch.doorbell_request ||
+ xive_interrupt_pending(vcpu)) {
+ vcpu->arch.ret = RESUME_HOST;
+ goto out;
+ }
+
+ kvmppc_clear_host_core(pcpu);
+
+ local_paca->kvm_hstate.tid = 0;
+ local_paca->kvm_hstate.napping = 0;
+ local_paca->kvm_hstate.kvm_split_mode = NULL;
+ kvmppc_start_thread(vcpu, vc);
+ kvmppc_create_dtl_entry(vcpu, vc);
+ trace_kvm_guest_enter(vcpu);
+
+ vc->vcore_state = VCORE_RUNNING;
+ trace_kvmppc_run_core(vc, 0);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested);
+
+ trace_hardirqs_on();
+ guest_enter_irqoff();
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+
+ this_cpu_disable_ftrace();
+
+ trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+ vcpu->arch.trap = trap;
+
+ this_cpu_enable_ftrace();
+
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ mtspr(SPRN_LPID, kvm->arch.host_lpid);
+ isync();
+ }
+
+ trace_hardirqs_off();
+ set_irq_happened(trap);
+
+ kvmppc_set_host_core(pcpu);
+
+ local_irq_enable();
+ guest_exit();
+
+ cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
+
+ preempt_enable();
+
+ /* cancel pending decrementer exception if DEC is now positive */
+ if (get_tb() < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
+ kvmppc_core_dequeue_dec(vcpu);
+
+ trace_kvm_guest_exit(vcpu);
+ r = RESUME_GUEST;
+ if (trap) {
+ if (!nested)
+ r = kvmppc_handle_exit_hv(kvm_run, vcpu, current);
+ else
+ r = kvmppc_handle_nested_exit(vcpu);
+ }
+ vcpu->arch.ret = r;
+
+ if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
+ !kvmppc_vcpu_woken(vcpu)) {
+ kvmppc_set_timer(vcpu);
+ while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+ if (signal_pending(current)) {
+ vcpu->stat.signal_exits++;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ break;
+ }
+ spin_lock(&vc->lock);
+ kvmppc_vcore_blocked(vc);
+ spin_unlock(&vc->lock);
+ }
+ }
+ vcpu->arch.ceded = 0;
+
+ vc->vcore_state = VCORE_INACTIVE;
+ trace_kvmppc_run_core(vc, 1);
+
+ done:
+ kvmppc_remove_runnable(vc, vcpu);
+ trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
+
+ return vcpu->arch.ret;
+
+ sigpend:
+ vcpu->stat.signal_exits++;
+ kvm_run->exit_reason = KVM_EXIT_INTR;
+ vcpu->arch.ret = -EINTR;
+ out:
+ local_irq_enable();
+ preempt_enable();
+ goto done;
+}
+
static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
int r;
@@ -3480,7 +4174,20 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
do {
- r = kvmppc_run_vcpu(run, vcpu);
+ /*
+ * The early POWER9 chips that can't mix radix and HPT threads
+ * on the same core also need the workaround for the problem
+ * where the TLB would prefetch entries in the guest exit path
+ * for radix guests using the guest PIDR value and LPID 0.
+ * The workaround is in the old path (kvmppc_run_vcpu())
+ * but not the new path (kvmhv_run_single_vcpu()).
+ */
+ if (kvm->arch.threads_indep && kvm_is_radix(kvm) &&
+ !no_mixing_hpt_and_radix)
+ r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
+ vcpu->arch.vcore->lpcr);
+ else
+ r = kvmppc_run_vcpu(run, vcpu);
if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
@@ -3559,6 +4266,10 @@ static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01);
kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L);
+ /* If running as a nested hypervisor, we don't support HPT guests */
+ if (kvmhv_on_pseries())
+ info->flags |= KVM_PPC_NO_HASH;
+
return 0;
}
@@ -3723,8 +4434,7 @@ void kvmppc_setup_partition_table(struct kvm *kvm)
__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
dw1 = PATB_GR | kvm->arch.process_table;
}
-
- mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1);
}
/*
@@ -3820,6 +4530,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* Must be called with kvm->lock held and mmu_ready = 0 and no vcpus running */
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
+ if (nesting_enabled(kvm))
+ kvmhv_release_all_nested(kvm);
kvmppc_free_radix(kvm);
kvmppc_update_lpcr(kvm, LPCR_VPM1,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
@@ -3841,6 +4553,7 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
kvmppc_free_hpt(&kvm->arch.hpt);
kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+ kvmppc_rmap_reset(kvm);
kvm->arch.radix = 1;
return 0;
}
@@ -3940,6 +4653,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvmppc_alloc_host_rm_ops();
+ kvmhv_vm_nested_init(kvm);
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -3958,9 +4673,13 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
/* Init LPCR for virtual RMA mode */
- kvm->arch.host_lpid = mfspr(SPRN_LPID);
- kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
- lpcr &= LPCR_PECE | LPCR_LPES;
+ if (cpu_has_feature(CPU_FTR_HVMODE)) {
+ kvm->arch.host_lpid = mfspr(SPRN_LPID);
+ kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+ lpcr &= LPCR_PECE | LPCR_LPES;
+ } else {
+ lpcr = 0;
+ }
lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
LPCR_VPM0 | LPCR_VPM1;
kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
@@ -4027,8 +4746,14 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
* On POWER9, we only need to do this if the "indep_threads_mode"
* module parameter has been set to N.
*/
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- kvm->arch.threads_indep = indep_threads_mode;
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (!indep_threads_mode && !cpu_has_feature(CPU_FTR_HVMODE)) {
+ pr_warn("KVM: Ignoring indep_threads_mode=N in nested hypervisor\n");
+ kvm->arch.threads_indep = true;
+ } else {
+ kvm->arch.threads_indep = indep_threads_mode;
+ }
+ }
if (!kvm->arch.threads_indep)
kvm_hv_vm_activated();
@@ -4051,6 +4776,8 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
snprintf(buf, sizeof(buf), "vm%d", current->pid);
kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
kvmppc_mmu_debugfs_init(kvm);
+ if (radix_enabled())
+ kvmhv_radix_debugfs_init(kvm);
return 0;
}
@@ -4073,13 +4800,21 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmppc_free_vcores(kvm);
- kvmppc_free_lpid(kvm->arch.lpid);
if (kvm_is_radix(kvm))
kvmppc_free_radix(kvm);
else
kvmppc_free_hpt(&kvm->arch.hpt);
+ /* Perform global invalidation and return lpid to the pool */
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (nesting_enabled(kvm))
+ kvmhv_release_all_nested(kvm);
+ kvm->arch.process_table = 0;
+ kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0);
+ }
+ kvmppc_free_lpid(kvm->arch.lpid);
+
kvmppc_free_pimap(kvm);
}
@@ -4104,11 +4839,15 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
static int kvmppc_core_check_processor_compat_hv(void)
{
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_206))
- return -EIO;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_206))
+ return 0;
- return 0;
+ /* POWER9 in radix mode is capable of being a nested hypervisor. */
+ if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled())
+ return 0;
+
+ return -EIO;
}
#ifdef CONFIG_KVM_XICS
@@ -4426,6 +5165,10 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
if (radix && !radix_enabled())
return -EINVAL;
+ /* If we're a nested hypervisor, we currently only support radix */
+ if (kvmhv_on_pseries() && !radix)
+ return -EINVAL;
+
mutex_lock(&kvm->lock);
if (radix != kvm_is_radix(kvm)) {
if (kvm->arch.mmu_ready) {
@@ -4458,6 +5201,19 @@ static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
return err;
}
+static int kvmhv_enable_nested(struct kvm *kvm)
+{
+ if (!nested)
+ return -EPERM;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300) || no_mixing_hpt_and_radix)
+ return -ENODEV;
+
+ /* kvm == NULL means the caller is testing if the capability exists */
+ if (kvm)
+ kvm->arch.nested_enable = true;
+ return 0;
+}
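
kvmhv_enable_nested() is the backend of a per-VM KVM capability, so userspace opts in through KVM_ENABLE_CAP. A hedged userspace sketch, assuming the capability constant exported for it is KVM_CAP_PPC_NESTED_HV and vm_fd is a KVM_CREATE_VM file descriptor:

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	int enable_nested_hv(int vm_fd)
	{
		struct kvm_enable_cap cap;

		memset(&cap, 0, sizeof(cap));
		cap.cap = KVM_CAP_PPC_NESTED_HV;	/* assumed constant name */

		/* -EPERM: module loaded with nested=0; -ENODEV: pre-POWER9 */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}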
+
static struct kvmppc_ops kvm_ops_hv = {
.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
@@ -4497,6 +5253,7 @@ static struct kvmppc_ops kvm_ops_hv = {
.configure_mmu = kvmhv_configure_mmu,
.get_rmmu_info = kvmhv_get_rmmu_info,
.set_smt_mode = kvmhv_set_smt_mode,
+ .enable_nested = kvmhv_enable_nested,
};
static int kvm_init_subcore_bitmap(void)
@@ -4547,6 +5304,10 @@ static int kvmppc_book3s_init_hv(void)
if (r < 0)
return -ENODEV;
+ r = kvmhv_nested_init();
+ if (r)
+ return r;
+
r = kvm_init_subcore_bitmap();
if (r)
return r;
@@ -4557,7 +5318,8 @@ static int kvmppc_book3s_init_hv(void)
* indirectly, via OPAL.
*/
#ifdef CONFIG_SMP
- if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
+ if (!xive_enabled() && !kvmhv_on_pseries() &&
+ !local_paca->kvm_hstate.xics_phys) {
struct device_node *np;
np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
@@ -4605,6 +5367,7 @@ static void kvmppc_book3s_exit_hv(void)
if (kvmppc_radix_possible())
kvmppc_radix_exit();
kvmppc_hv_ops = NULL;
+ kvmhv_nested_exit();
}
module_init(kvmppc_book3s_init_hv);
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fc6bb9630a9c..a71e2fc00a4e 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -231,6 +231,15 @@ void kvmhv_rm_send_ipi(int cpu)
void __iomem *xics_phys;
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
+ /* For a nested hypervisor, use the XICS via hcall */
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
+ IPI_PRIORITY);
+ return;
+ }
+
/* On POWER9 we can use msgsnd for any destination cpu. */
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
msg |= get_hard_smp_processor_id(cpu);
@@ -460,12 +469,19 @@ static long kvmppc_read_one_intr(bool *again)
return 1;
/* Now read the interrupt from the ICP */
- xics_phys = local_paca->kvm_hstate.xics_phys;
- rc = 0;
- if (!xics_phys)
- rc = opal_int_get_xirr(&xirr, false);
- else
- xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
+ xirr = cpu_to_be32(retbuf[0]);
+ } else {
+ xics_phys = local_paca->kvm_hstate.xics_phys;
+ rc = 0;
+ if (!xics_phys)
+ rc = opal_int_get_xirr(&xirr, false);
+ else
+ xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
+ }
if (rc < 0)
return 1;
@@ -494,7 +510,13 @@ static long kvmppc_read_one_intr(bool *again)
*/
if (xisr == XICS_IPI) {
rc = 0;
- if (xics_phys) {
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf,
+ hard_smp_processor_id(), 0xff);
+ plpar_hcall_raw(H_EOI, retbuf, h_xirr);
+ } else if (xics_phys) {
__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
} else {
@@ -520,7 +542,13 @@ static long kvmppc_read_one_intr(bool *again)
/* We raced with the host,
* we need to resend that IPI, bummer
*/
- if (xics_phys)
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ plpar_hcall_raw(H_IPI, retbuf,
+ hard_smp_processor_id(),
+ IPI_PRIORITY);
+ } else if (xics_phys)
__raw_rm_writeb(IPI_PRIORITY,
xics_phys + XICS_MFRR);
else
@@ -729,3 +757,51 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
smp_mb();
local_paca->kvm_hstate.kvm_split_mode = NULL;
}
+
+/*
+ * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
+ * Can we inject a Decrementer or an External interrupt?
+ */
+void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
+{
+ int ext;
+ unsigned long vec = 0;
+ unsigned long lpcr;
+
+ /* Insert EXTERNAL bit into LPCR at the MER bit position */
+ ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
+ lpcr = mfspr(SPRN_LPCR);
+ lpcr |= ext << LPCR_MER_SH;
+ mtspr(SPRN_LPCR, lpcr);
+ isync();
+
+ if (vcpu->arch.shregs.msr & MSR_EE) {
+ if (ext) {
+ vec = BOOK3S_INTERRUPT_EXTERNAL;
+ } else {
+ long int dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD))
+ dec = (int) dec;
+ if (dec < 0)
+ vec = BOOK3S_INTERRUPT_DECREMENTER;
+ }
+ }
+ if (vec) {
+ unsigned long msr, old_msr = vcpu->arch.shregs.msr;
+
+ kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
+ kvmppc_set_srr1(vcpu, old_msr);
+ kvmppc_set_pc(vcpu, vec);
+ msr = vcpu->arch.intr_msr;
+ if (MSR_TM_ACTIVE(old_msr))
+ msr |= MSR_TS_S;
+ vcpu->arch.shregs.msr = msr;
+ }
+
+ if (vcpu->arch.doorbell_request) {
+ mtspr(SPRN_DPDES, 1);
+ vcpu->arch.vcore->dpdes = 1;
+ smp_wmb();
+ vcpu->arch.doorbell_request = 0;
+ }
+}
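
The decrementer test in kvmppc_guest_entry_inject_int() hides a width subtlety: with LPCR[LD] (large decrementer) clear, DEC is architecturally a 32-bit register, so the 64-bit mfspr value must be narrowed and sign-extended before the sign test means anything — that is what the (int) cast does. A small self-contained model:

	#include <stdbool.h>
	#include <stdint.h>

	static bool dec_expired(uint64_t dec_spr, bool large_dec_mode)
	{
		int64_t dec = (int64_t)dec_spr;

		if (!large_dec_mode)
			dec = (int32_t)dec;	/* truncate and sign-extend bit 31 */
		return dec < 0;			/* negative means DEC has expired */
	}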
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 666b91c79eb4..a6d10010d9e8 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -64,52 +64,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Save host PMU registers */
-BEGIN_FTR_SECTION
- /* Work around P8 PMAE bug */
- li r3, -1
- clrrdi r3, r3, 10
- mfspr r8, SPRN_MMCR2
- mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mfspr r7, SPRN_MMCR0 /* save MMCR0 */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
- mfspr r6, SPRN_MMCRA
- /* Clear MMCRA in order to disable SDAR updates */
- li r5, 0
- mtspr SPRN_MMCRA, r5
- isync
- lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
- cmpwi r5, 0
- beq 31f /* skip if not */
- mfspr r5, SPRN_MMCR1
- mfspr r9, SPRN_SIAR
- mfspr r10, SPRN_SDAR
- std r7, HSTATE_MMCR0(r13)
- std r5, HSTATE_MMCR1(r13)
- std r6, HSTATE_MMCRA(r13)
- std r9, HSTATE_SIAR(r13)
- std r10, HSTATE_SDAR(r13)
-BEGIN_FTR_SECTION
- mfspr r9, SPRN_SIER
- std r8, HSTATE_MMCR2(r13)
- std r9, HSTATE_SIER(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mfspr r3, SPRN_PMC1
- mfspr r5, SPRN_PMC2
- mfspr r6, SPRN_PMC3
- mfspr r7, SPRN_PMC4
- mfspr r8, SPRN_PMC5
- mfspr r9, SPRN_PMC6
- stw r3, HSTATE_PMC1(r13)
- stw r5, HSTATE_PMC2(r13)
- stw r6, HSTATE_PMC3(r13)
- stw r7, HSTATE_PMC4(r13)
- stw r8, HSTATE_PMC5(r13)
- stw r9, HSTATE_PMC6(r13)
-31:
+ bl kvmhv_save_host_pmu
/*
* Put whatever is in the decrementer into the
@@ -161,3 +116,51 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
+
+_GLOBAL(kvmhv_save_host_pmu)
+BEGIN_FTR_SECTION
+ /* Work around P8 PMAE bug */
+ li r3, -1
+ clrrdi r3, r3, 10
+ mfspr r8, SPRN_MMCR2
+ mtspr SPRN_MMCR2, r3 /* freeze all counters using MMCR2 */
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r7, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
+ mfspr r6, SPRN_MMCRA
+ /* Clear MMCRA in order to disable SDAR updates */
+ li r5, 0
+ mtspr SPRN_MMCRA, r5
+ isync
+ lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
+ cmpwi r5, 0
+ beq 31f /* skip if not */
+ mfspr r5, SPRN_MMCR1
+ mfspr r9, SPRN_SIAR
+ mfspr r10, SPRN_SDAR
+ std r7, HSTATE_MMCR0(r13)
+ std r5, HSTATE_MMCR1(r13)
+ std r6, HSTATE_MMCRA(r13)
+ std r9, HSTATE_SIAR(r13)
+ std r10, HSTATE_SDAR(r13)
+BEGIN_FTR_SECTION
+ mfspr r9, SPRN_SIER
+ std r8, HSTATE_MMCR2(r13)
+ std r9, HSTATE_SIER(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mfspr r3, SPRN_PMC1
+ mfspr r5, SPRN_PMC2
+ mfspr r6, SPRN_PMC3
+ mfspr r7, SPRN_PMC4
+ mfspr r8, SPRN_PMC5
+ mfspr r9, SPRN_PMC6
+ stw r3, HSTATE_PMC1(r13)
+ stw r5, HSTATE_PMC2(r13)
+ stw r6, HSTATE_PMC3(r13)
+ stw r7, HSTATE_PMC4(r13)
+ stw r8, HSTATE_PMC5(r13)
+ stw r9, HSTATE_PMC6(r13)
+31: blr
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
new file mode 100644
index 000000000000..401d2ecbebc5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -0,0 +1,1291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corporation, 2018
+ * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
+ * Paul Mackerras <paulus@ozlabs.org>
+ *
+ * Description: KVM functions specific to running nested KVM-HV guests
+ * on Book3S processors (specifically POWER9 and later).
+ */
+
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/llist.h>
+
+#include <asm/kvm_ppc.h>
+#include <asm/kvm_book3s.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/pte-walk.h>
+#include <asm/reg.h>
+
+static struct patb_entry *pseries_partition_tb;
+
+static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);
+
+void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ hr->pcr = vc->pcr;
+ hr->dpdes = vc->dpdes;
+ hr->hfscr = vcpu->arch.hfscr;
+ hr->tb_offset = vc->tb_offset;
+ hr->dawr0 = vcpu->arch.dawr;
+ hr->dawrx0 = vcpu->arch.dawrx;
+ hr->ciabr = vcpu->arch.ciabr;
+ hr->purr = vcpu->arch.purr;
+ hr->spurr = vcpu->arch.spurr;
+ hr->ic = vcpu->arch.ic;
+ hr->vtb = vc->vtb;
+ hr->srr0 = vcpu->arch.shregs.srr0;
+ hr->srr1 = vcpu->arch.shregs.srr1;
+ hr->sprg[0] = vcpu->arch.shregs.sprg0;
+ hr->sprg[1] = vcpu->arch.shregs.sprg1;
+ hr->sprg[2] = vcpu->arch.shregs.sprg2;
+ hr->sprg[3] = vcpu->arch.shregs.sprg3;
+ hr->pidr = vcpu->arch.pid;
+ hr->cfar = vcpu->arch.cfar;
+ hr->ppr = vcpu->arch.ppr;
+}
+
+static void byteswap_pt_regs(struct pt_regs *regs)
+{
+ unsigned long *addr = (unsigned long *) regs;
+
+ for (; addr < ((unsigned long *) (regs + 1)); addr++)
+ *addr = swab64(*addr);
+}
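+
+/*
+ * pt_regs can be swapped wholesale because all of its fields are
+ * word-sized on 64-bit, whereas hv_guest_state mixes 32-bit and
+ * 64-bit fields and needs the per-field treatment below.  A
+ * build-time check along these lines (an illustrative sketch only)
+ * would document that assumption:
+ *
+ *	BUILD_BUG_ON(sizeof(struct pt_regs) % sizeof(unsigned long));
+ */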
+
+static void byteswap_hv_regs(struct hv_guest_state *hr)
+{
+ hr->version = swab64(hr->version);
+ hr->lpid = swab32(hr->lpid);
+ hr->vcpu_token = swab32(hr->vcpu_token);
+ hr->lpcr = swab64(hr->lpcr);
+ hr->pcr = swab64(hr->pcr);
+ hr->amor = swab64(hr->amor);
+ hr->dpdes = swab64(hr->dpdes);
+ hr->hfscr = swab64(hr->hfscr);
+ hr->tb_offset = swab64(hr->tb_offset);
+ hr->dawr0 = swab64(hr->dawr0);
+ hr->dawrx0 = swab64(hr->dawrx0);
+ hr->ciabr = swab64(hr->ciabr);
+ hr->hdec_expiry = swab64(hr->hdec_expiry);
+ hr->purr = swab64(hr->purr);
+ hr->spurr = swab64(hr->spurr);
+ hr->ic = swab64(hr->ic);
+ hr->vtb = swab64(hr->vtb);
+ hr->hdar = swab64(hr->hdar);
+ hr->hdsisr = swab64(hr->hdsisr);
+ hr->heir = swab64(hr->heir);
+ hr->asdr = swab64(hr->asdr);
+ hr->srr0 = swab64(hr->srr0);
+ hr->srr1 = swab64(hr->srr1);
+ hr->sprg[0] = swab64(hr->sprg[0]);
+ hr->sprg[1] = swab64(hr->sprg[1]);
+ hr->sprg[2] = swab64(hr->sprg[2]);
+ hr->sprg[3] = swab64(hr->sprg[3]);
+ hr->pidr = swab64(hr->pidr);
+ hr->cfar = swab64(hr->cfar);
+ hr->ppr = swab64(hr->ppr);
+}
+
+static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
+ struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ hr->dpdes = vc->dpdes;
+ hr->hfscr = vcpu->arch.hfscr;
+ hr->purr = vcpu->arch.purr;
+ hr->spurr = vcpu->arch.spurr;
+ hr->ic = vcpu->arch.ic;
+ hr->vtb = vc->vtb;
+ hr->srr0 = vcpu->arch.shregs.srr0;
+ hr->srr1 = vcpu->arch.shregs.srr1;
+ hr->sprg[0] = vcpu->arch.shregs.sprg0;
+ hr->sprg[1] = vcpu->arch.shregs.sprg1;
+ hr->sprg[2] = vcpu->arch.shregs.sprg2;
+ hr->sprg[3] = vcpu->arch.shregs.sprg3;
+ hr->pidr = vcpu->arch.pid;
+ hr->cfar = vcpu->arch.cfar;
+ hr->ppr = vcpu->arch.ppr;
+ switch (trap) {
+ case BOOK3S_INTERRUPT_H_DATA_STORAGE:
+ hr->hdar = vcpu->arch.fault_dar;
+ hr->hdsisr = vcpu->arch.fault_dsisr;
+ hr->asdr = vcpu->arch.fault_gpa;
+ break;
+ case BOOK3S_INTERRUPT_H_INST_STORAGE:
+ hr->asdr = vcpu->arch.fault_gpa;
+ break;
+ case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+ hr->heir = vcpu->arch.emul_inst;
+ break;
+ }
+}
+
+static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ /*
+ * Don't let L1 enable features for L2 which we've disabled for L1,
+ * but preserve the interrupt cause field.
+ */
+ hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
+
+ /* Don't let data address watchpoint match in hypervisor state */
+ hr->dawrx0 &= ~DAWRX_HYP;
+
+ /* Don't let completed instruction address breakpt match in HV state */
+ if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
+ hr->ciabr &= ~CIABR_PRIV;
+}
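+
+/*
+ * Illustration of the HFSCR rule above: if L1 itself runs with
+ * HFSCR_TM clear, a request of (HFSCR_TM | HFSCR_FP) for L2 is
+ * reduced to HFSCR_FP by the AND with vcpu->arch.hfscr, while the
+ * interrupt-cause field (HFSCR_INTR_CAUSE) passes through intact.
+ */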
+
+static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ vc->pcr = hr->pcr;
+ vc->dpdes = hr->dpdes;
+ vcpu->arch.hfscr = hr->hfscr;
+ vcpu->arch.dawr = hr->dawr0;
+ vcpu->arch.dawrx = hr->dawrx0;
+ vcpu->arch.ciabr = hr->ciabr;
+ vcpu->arch.purr = hr->purr;
+ vcpu->arch.spurr = hr->spurr;
+ vcpu->arch.ic = hr->ic;
+ vc->vtb = hr->vtb;
+ vcpu->arch.shregs.srr0 = hr->srr0;
+ vcpu->arch.shregs.srr1 = hr->srr1;
+ vcpu->arch.shregs.sprg0 = hr->sprg[0];
+ vcpu->arch.shregs.sprg1 = hr->sprg[1];
+ vcpu->arch.shregs.sprg2 = hr->sprg[2];
+ vcpu->arch.shregs.sprg3 = hr->sprg[3];
+ vcpu->arch.pid = hr->pidr;
+ vcpu->arch.cfar = hr->cfar;
+ vcpu->arch.ppr = hr->ppr;
+}
+
+void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
+ struct hv_guest_state *hr)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+
+ vc->dpdes = hr->dpdes;
+ vcpu->arch.hfscr = hr->hfscr;
+ vcpu->arch.purr = hr->purr;
+ vcpu->arch.spurr = hr->spurr;
+ vcpu->arch.ic = hr->ic;
+ vc->vtb = hr->vtb;
+ vcpu->arch.fault_dar = hr->hdar;
+ vcpu->arch.fault_dsisr = hr->hdsisr;
+ vcpu->arch.fault_gpa = hr->asdr;
+ vcpu->arch.emul_inst = hr->heir;
+ vcpu->arch.shregs.srr0 = hr->srr0;
+ vcpu->arch.shregs.srr1 = hr->srr1;
+ vcpu->arch.shregs.sprg0 = hr->sprg[0];
+ vcpu->arch.shregs.sprg1 = hr->sprg[1];
+ vcpu->arch.shregs.sprg2 = hr->sprg[2];
+ vcpu->arch.shregs.sprg3 = hr->sprg[3];
+ vcpu->arch.pid = hr->pidr;
+ vcpu->arch.cfar = hr->cfar;
+ vcpu->arch.ppr = hr->ppr;
+}
+
+long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
+{
+ long int err, r;
+ struct kvm_nested_guest *l2;
+ struct pt_regs l2_regs, saved_l1_regs;
+ struct hv_guest_state l2_hv, saved_l1_hv;
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ u64 hv_ptr, regs_ptr;
+ u64 hdec_exp;
+ s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
+ u64 mask;
+ unsigned long lpcr;
+
+ if (vcpu->kvm->arch.l1_ptcr == 0)
+ return H_NOT_AVAILABLE;
+
+ /* copy parameters in */
+ hv_ptr = kvmppc_get_gpr(vcpu, 4);
+ err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
+ sizeof(struct hv_guest_state));
+ if (err)
+ return H_PARAMETER;
+ if (kvmppc_need_byteswap(vcpu))
+ byteswap_hv_regs(&l2_hv);
+ if (l2_hv.version != HV_GUEST_STATE_VERSION)
+ return H_P2;
+
+ regs_ptr = kvmppc_get_gpr(vcpu, 5);
+ err = kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
+ sizeof(struct pt_regs));
+ if (err)
+ return H_PARAMETER;
+ if (kvmppc_need_byteswap(vcpu))
+ byteswap_pt_regs(&l2_regs);
+ if (l2_hv.vcpu_token >= NR_CPUS)
+ return H_PARAMETER;
+
+ /* translate lpid */
+ l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
+ if (!l2)
+ return H_PARAMETER;
+ if (!l2->l1_gr_to_hr) {
+ mutex_lock(&l2->tlb_lock);
+ kvmhv_update_ptbl_cache(l2);
+ mutex_unlock(&l2->tlb_lock);
+ }
+
+ /* save l1 values of things */
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ saved_l1_regs = vcpu->arch.regs;
+ kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
+
+ /* convert TB values/offsets to host (L0) values */
+ hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
+ vc->tb_offset += l2_hv.tb_offset;
+
+ /* set L1 state to L2 state */
+ vcpu->arch.nested = l2;
+ vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
+ vcpu->arch.regs = l2_regs;
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
+ LPCR_LPES | LPCR_MER;
+ lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
+ sanitise_hv_regs(vcpu, &l2_hv);
+ restore_hv_regs(vcpu, &l2_hv);
+
+ vcpu->arch.ret = RESUME_GUEST;
+ vcpu->arch.trap = 0;
+ do {
+ if (mftb() >= hdec_exp) {
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ r = RESUME_HOST;
+ break;
+ }
+ r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu, hdec_exp,
+ lpcr);
+ } while (is_kvmppc_resume_guest(r));
+
+ /* save L2 state for return */
+ l2_regs = vcpu->arch.regs;
+ l2_regs.msr = vcpu->arch.shregs.msr;
+ delta_purr = vcpu->arch.purr - l2_hv.purr;
+ delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
+ delta_ic = vcpu->arch.ic - l2_hv.ic;
+ delta_vtb = vc->vtb - l2_hv.vtb;
+ save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
+
+ /* restore L1 state */
+ vcpu->arch.nested = NULL;
+ vcpu->arch.regs = saved_l1_regs;
+ vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
+ /* set L1 MSR TS field according to L2 transaction state */
+ if (l2_regs.msr & MSR_TS_MASK)
+ vcpu->arch.shregs.msr |= MSR_TS_S;
+ vc->tb_offset = saved_l1_hv.tb_offset;
+ restore_hv_regs(vcpu, &saved_l1_hv);
+ vcpu->arch.purr += delta_purr;
+ vcpu->arch.spurr += delta_spurr;
+ vcpu->arch.ic += delta_ic;
+ vc->vtb += delta_vtb;
+
+ kvmhv_put_nested(l2);
+
+ /* copy l2_hv_state and regs back to guest */
+ if (kvmppc_need_byteswap(vcpu)) {
+ byteswap_hv_regs(&l2_hv);
+ byteswap_pt_regs(&l2_regs);
+ }
+ err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
+ sizeof(struct hv_guest_state));
+ if (err)
+ return H_AUTHORITY;
+ err = kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
+ sizeof(struct pt_regs));
+ if (err)
+ return H_AUTHORITY;
+
+ if (r == -EINTR)
+ return H_INTERRUPT;
+
+ return vcpu->arch.trap;
+}
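+
+/*
+ * For reference, the matching L1-side invocation looks roughly like
+ * this (a sketch; most of the hv_guest_state setup is elided):
+ *
+ *	hvregs.version = HV_GUEST_STATE_VERSION;
+ *	hvregs.lpid = l2_lpid;
+ *	trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+ *				  __pa(&l2_regs));
+ *
+ * The return value is either an hcall error (H_PARAMETER and friends)
+ * or the interrupt vector on which the L2 guest exited.
+ */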
+
+long kvmhv_nested_init(void)
+{
+ long int ptb_order;
+ unsigned long ptcr;
+ long rc;
+
+ if (!kvmhv_on_pseries())
+ return 0;
+ if (!radix_enabled())
+ return -ENODEV;
+
+ /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
+ ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
+ if (ptb_order < 8)
+ ptb_order = 8;
+ pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
+ GFP_KERNEL);
+ if (!pseries_partition_tb) {
+ pr_err("kvm-hv: failed to allocated nested partition table\n");
+ return -ENOMEM;
+ }
+
+ ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
+ rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
+ if (rc != H_SUCCESS) {
+ pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
+ rc);
+ kfree(pseries_partition_tb);
+ pseries_partition_tb = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
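+
+/*
+ * Worked example of the PTCR encoding above: with ptb_order = 8 (the
+ * minimum), the table is sizeof(struct patb_entry) << 8 = 4096 bytes,
+ * and the encoded size field is ptb_order - 8 = 0 = log_2(4096) - 12,
+ * which is the "base address | (log_2(size) - 12)" format the PTCR uses.
+ */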
+
+void kvmhv_nested_exit(void)
+{
+ /*
+ * N.B. the kvmhv_on_pseries() test is there because it enables
+ * the compiler to remove the call to plpar_hcall_norets()
+ * when CONFIG_PPC_PSERIES=n.
+ */
+ if (kvmhv_on_pseries() && pseries_partition_tb) {
+ plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
+ kfree(pseries_partition_tb);
+ pseries_partition_tb = NULL;
+ }
+}
+
+static void kvmhv_flush_lpid(unsigned int lpid)
+{
+ long rc;
+
+ if (!kvmhv_on_pseries()) {
+ radix__flush_tlb_lpid(lpid);
+ return;
+ }
+
+ rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
+ lpid, TLBIEL_INVAL_SET_LPID);
+ if (rc)
+ pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
+}
+
+void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
+{
+ if (!kvmhv_on_pseries()) {
+ mmu_partition_table_set_entry(lpid, dw0, dw1);
+ return;
+ }
+
+ pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+ pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+ /* L0 will do the necessary barriers */
+ kvmhv_flush_lpid(lpid);
+}
+
+static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
+{
+ unsigned long dw0;
+
+ dw0 = PATB_HR | radix__get_tree_size() |
+ __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
+ kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
+}
+
+void kvmhv_vm_nested_init(struct kvm *kvm)
+{
+ kvm->arch.max_nested_lpid = -1;
+}
+
+/*
+ * Handle the H_SET_PARTITION_TABLE hcall.
+ * r4 = guest real address of partition table + log_2(size) - 12
+ * (formatted as for the PTCR).
+ */
+long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
+ int srcu_idx;
+ long ret = H_SUCCESS;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ /*
+ * Limit the partition table to 4096 entries (because that's what
+ * hardware supports), and check the base address.
+ */
+ if ((ptcr & PRTS_MASK) > 12 - 8 ||
+ !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
+ ret = H_PARAMETER;
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ if (ret == H_SUCCESS)
+ kvm->arch.l1_ptcr = ptcr;
+ return ret;
+}
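+
+/*
+ * Arithmetic behind the size check above: PRTS = log_2(table size) - 12,
+ * so allowing PRTS values up to 4 caps the table at 2^16 bytes, i.e.
+ * 65536 / 16 bytes per entry = 4096 entries.
+ */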
+
+/*
+ * Reload the partition table entry for a guest.
+ * Caller must hold gp->tlb_lock.
+ */
+static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
+{
+ int ret;
+ struct patb_entry ptbl_entry;
+ unsigned long ptbl_addr;
+ struct kvm *kvm = gp->l1_host;
+
+ ret = -EFAULT;
+ ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
+ if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8)))
+ ret = kvm_read_guest(kvm, ptbl_addr,
+ &ptbl_entry, sizeof(ptbl_entry));
+ if (ret) {
+ gp->l1_gr_to_hr = 0;
+ gp->process_table = 0;
+ } else {
+ gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
+ gp->process_table = be64_to_cpu(ptbl_entry.patb1);
+ }
+ kvmhv_set_nested_ptbl(gp);
+}
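+
+/*
+ * The bound used above follows from the PTCR format: a table of
+ * 2^(PRTS + 12) bytes holds 2^(PRTS + 8) 16-byte entries, and the
+ * entry for a given lpid sits at offset lpid << 4 from the base.
+ */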
+
+struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
+{
+ struct kvm_nested_guest *gp;
+ long shadow_lpid;
+
+ gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+ if (!gp)
+ return NULL;
+ gp->l1_host = kvm;
+ gp->l1_lpid = lpid;
+ mutex_init(&gp->tlb_lock);
+ gp->shadow_pgtable = pgd_alloc(kvm->mm);
+ if (!gp->shadow_pgtable)
+ goto out_free;
+ shadow_lpid = kvmppc_alloc_lpid();
+ if (shadow_lpid < 0)
+ goto out_free2;
+ gp->shadow_lpid = shadow_lpid;
+
+ memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
+
+ return gp;
+
+ out_free2:
+ pgd_free(kvm->mm, gp->shadow_pgtable);
+ out_free:
+ kfree(gp);
+ return NULL;
+}
+
+/*
+ * Free up any resources allocated for a nested guest.
+ */
+static void kvmhv_release_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+
+ if (gp->shadow_pgtable) {
+ /*
+ * No vcpu is using this struct and no call to
+ * kvmhv_get_nested can find this struct,
+ * so we don't need to hold kvm->mmu_lock.
+ */
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
+ gp->shadow_lpid);
+ pgd_free(kvm->mm, gp->shadow_pgtable);
+ }
+ kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
+ kvmppc_free_lpid(gp->shadow_lpid);
+ kfree(gp);
+}
+
+static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+ int lpid = gp->l1_lpid;
+ long ref;
+
+ spin_lock(&kvm->mmu_lock);
+ if (gp == kvm->arch.nested_guests[lpid]) {
+ kvm->arch.nested_guests[lpid] = NULL;
+ if (lpid == kvm->arch.max_nested_lpid) {
+ while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
+ ;
+ kvm->arch.max_nested_lpid = lpid;
+ }
+ --gp->refcnt;
+ }
+ ref = gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+ if (ref == 0)
+ kvmhv_release_nested(gp);
+}
+
+/*
+ * Free up all nested resources allocated for this guest.
+ * This is called with no vcpus of the guest running, when
+ * switching the guest to HPT mode or when destroying the
+ * guest.
+ */
+void kvmhv_release_all_nested(struct kvm *kvm)
+{
+ int i;
+ struct kvm_nested_guest *gp;
+ struct kvm_nested_guest *freelist = NULL;
+ struct kvm_memory_slot *memslot;
+ int srcu_idx;
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
+ gp = kvm->arch.nested_guests[i];
+ if (!gp)
+ continue;
+ kvm->arch.nested_guests[i] = NULL;
+ if (--gp->refcnt == 0) {
+ gp->next = freelist;
+ freelist = gp;
+ }
+ }
+ kvm->arch.max_nested_lpid = -1;
+ spin_unlock(&kvm->mmu_lock);
+ while ((gp = freelist) != NULL) {
+ freelist = gp->next;
+ kvmhv_release_nested(gp);
+ }
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvmhv_free_memslot_nest_rmap(memslot);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+}
+
+/* caller must hold gp->tlb_lock */
+static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+
+ spin_lock(&kvm->mmu_lock);
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_flush_lpid(gp->shadow_lpid);
+ kvmhv_update_ptbl_cache(gp);
+ if (gp->l1_gr_to_hr == 0)
+ kvmhv_remove_nested(gp);
+}
+
+struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
+ bool create)
+{
+ struct kvm_nested_guest *gp, *newgp;
+
+ if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
+ l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
+ return NULL;
+
+ spin_lock(&kvm->mmu_lock);
+ gp = kvm->arch.nested_guests[l1_lpid];
+ if (gp)
+ ++gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (gp || !create)
+ return gp;
+
+ newgp = kvmhv_alloc_nested(kvm, l1_lpid);
+ if (!newgp)
+ return NULL;
+ spin_lock(&kvm->mmu_lock);
+ if (kvm->arch.nested_guests[l1_lpid]) {
+ /* someone else beat us to it */
+ gp = kvm->arch.nested_guests[l1_lpid];
+ } else {
+ kvm->arch.nested_guests[l1_lpid] = newgp;
+ ++newgp->refcnt;
+ gp = newgp;
+ newgp = NULL;
+ if (l1_lpid > kvm->arch.max_nested_lpid)
+ kvm->arch.max_nested_lpid = l1_lpid;
+ }
+ ++gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (newgp)
+ kvmhv_release_nested(newgp);
+
+ return gp;
+}
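+
+/*
+ * Note the reference counting above: a newly inserted guest ends up
+ * with refcnt == 2, one reference owned by the nested_guests[] table
+ * and one by the caller, so every successful kvmhv_get_nested() must
+ * be paired with a kvmhv_put_nested().
+ */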
+
+void kvmhv_put_nested(struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = gp->l1_host;
+ long ref;
+
+ spin_lock(&kvm->mmu_lock);
+ ref = --gp->refcnt;
+ spin_unlock(&kvm->mmu_lock);
+ if (ref == 0)
+ kvmhv_release_nested(gp);
+}
+
+static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
+{
+ if (lpid > kvm->arch.max_nested_lpid)
+ return NULL;
+ return kvm->arch.nested_guests[lpid];
+}
+
+static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
+{
+ return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
+ RMAP_NESTED_GPA_MASK));
+}
+
+void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
+ struct rmap_nested **n_rmap)
+{
+ struct llist_node *entry = ((struct llist_head *) rmapp)->first;
+ struct rmap_nested *cursor;
+ u64 rmap, new_rmap = (*n_rmap)->rmap;
+
+ /* Are there any existing entries? */
+ if (!(*rmapp)) {
+ /* No -> use the rmap as a single entry */
+ *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
+ return;
+ }
+
+ /* Do any entries match what we're trying to insert? */
+ for_each_nest_rmap_safe(cursor, entry, &rmap) {
+ if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
+ return;
+ }
+
+ /* Do we need to create a list or just add the new entry? */
+ rmap = *rmapp;
+ if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+ *rmapp = 0UL;
+ llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
+ if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
+ (*n_rmap)->list.next = (struct llist_node *) rmap;
+
+ /* Set NULL so not freed by caller */
+ *n_rmap = NULL;
+}
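+
+/*
+ * Walk-through of the encoding above: the first rmap entry is stored
+ * inline in *rmapp with RMAP_NESTED_IS_SINGLE_ENTRY set; on the second
+ * insertion that inline value becomes the ->next of the new llist node,
+ * turning the slot into an ordinary llist head.
+ */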
+
+static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
+ unsigned long hpa, unsigned long mask)
+{
+ struct kvm_nested_guest *gp;
+ unsigned long gpa;
+ unsigned int shift, lpid;
+ pte_t *ptep;
+
+ gpa = n_rmap & RMAP_NESTED_GPA_MASK;
+ lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
+ gp = kvmhv_find_nested(kvm, lpid);
+ if (!gp)
+ return;
+
+ /* Find and invalidate the pte */
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ /* Don't spuriously invalidate ptes if the pfn has changed */
+ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+}
+
+static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long hpa, unsigned long mask)
+{
+ struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
+ struct rmap_nested *cursor;
+ unsigned long rmap;
+
+ for_each_nest_rmap_safe(cursor, entry, &rmap) {
+ kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
+ kfree(cursor);
+ }
+}
+
+/* called with kvm->mmu_lock held */
+void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
+ struct kvm_memory_slot *memslot,
+ unsigned long gpa, unsigned long hpa,
+ unsigned long nbytes)
+{
+ unsigned long gfn, end_gfn;
+ unsigned long addr_mask;
+
+ if (!memslot)
+ return;
+ gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
+ end_gfn = gfn + (nbytes >> PAGE_SHIFT);
+
+ addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
+ hpa &= addr_mask;
+
+ for (; gfn < end_gfn; gfn++) {
+ unsigned long *rmap = &memslot->arch.rmap[gfn];
+ kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
+ }
+}
+
+static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
+{
+ unsigned long page;
+
+ for (page = 0; page < free->npages; page++) {
+ unsigned long rmap, *rmapp = &free->arch.rmap[page];
+ struct rmap_nested *cursor;
+ struct llist_node *entry;
+
+ entry = llist_del_all((struct llist_head *) rmapp);
+ for_each_nest_rmap_safe(cursor, entry, &rmap)
+ kfree(cursor);
+ }
+}
+
+static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ long gpa, int *shift_ret)
+{
+ struct kvm *kvm = vcpu->kvm;
+ bool ret = false;
+ pte_t *ptep;
+ int shift;
+
+ spin_lock(&kvm->mmu_lock);
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ if (!shift)
+ shift = PAGE_SHIFT;
+ if (ptep && pte_present(*ptep)) {
+ kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+ ret = true;
+ }
+ spin_unlock(&kvm->mmu_lock);
+
+ if (shift_ret)
+ *shift_ret = shift;
+ return ret;
+}
+
+static inline int get_ric(unsigned int instr)
+{
+ return (instr >> 18) & 0x3;
+}
+
+static inline int get_prs(unsigned int instr)
+{
+ return (instr >> 17) & 0x1;
+}
+
+static inline int get_r(unsigned int instr)
+{
+ return (instr >> 16) & 0x1;
+}
+
+static inline int get_lpid(unsigned long r_val)
+{
+ return r_val & 0xffffffff;
+}
+
+static inline int get_is(unsigned long r_val)
+{
+ return (r_val >> 10) & 0x3;
+}
+
+static inline int get_ap(unsigned long r_val)
+{
+ return (r_val >> 5) & 0x7;
+}
+
+static inline long get_epn(unsigned long r_val)
+{
+ return r_val >> 12;
+}
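+
+/*
+ * Example decode using the helpers above (value chosen purely for
+ * illustration): for rbval = 0x123450a0, get_epn() yields 0x12345,
+ * get_is() yields 0 (bits 10:11) and get_ap() yields 5 (bits 5:7);
+ * the meaning of a given ap value is the ISA's actual-page-size
+ * encoding.
+ */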
+
+static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
+ int ap, long epn)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ long npages;
+ int shift, shadow_shift;
+ unsigned long addr;
+
+ shift = ap_to_shift(ap);
+ addr = epn << 12;
+ if (shift < 0)
+ /* Invalid ap encoding */
+ return -EINVAL;
+
+ addr &= ~((1UL << shift) - 1);
+ npages = 1UL << (shift - PAGE_SHIFT);
+
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (!gp) /* No such guest -> nothing to do */
+ return 0;
+ mutex_lock(&gp->tlb_lock);
+
+ /* There may be more than one host page backing this single guest pte */
+ do {
+ kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
+
+ npages -= 1UL << (shadow_shift - PAGE_SHIFT);
+ addr += 1UL << shadow_shift;
+ } while (npages > 0);
+
+ mutex_unlock(&gp->tlb_lock);
+ kvmhv_put_nested(gp);
+ return 0;
+}
+
+static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp, int ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+
+ mutex_lock(&gp->tlb_lock);
+ switch (ric) {
+ case 0:
+ /* Invalidate TLB */
+ spin_lock(&kvm->mmu_lock);
+ kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
+ gp->shadow_lpid);
+ kvmhv_flush_lpid(gp->shadow_lpid);
+ spin_unlock(&kvm->mmu_lock);
+ break;
+ case 1:
+ /*
+ * Invalidate PWC
+ * We don't cache this -> nothing to do
+ */
+ break;
+ case 2:
+ /* Invalidate TLB, PWC and caching of partition table entries */
+ kvmhv_flush_nested(gp);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&gp->tlb_lock);
+}
+
+static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ int i;
+
+ spin_lock(&kvm->mmu_lock);
+ for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
+ gp = kvm->arch.nested_guests[i];
+ if (gp) {
+ spin_unlock(&kvm->mmu_lock);
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ spin_lock(&kvm->mmu_lock);
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+}
+
+static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
+ unsigned long rsval, unsigned long rbval)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_nested_guest *gp;
+ int r, ric, prs, is, ap;
+ int lpid;
+ long epn;
+ int ret = 0;
+
+ ric = get_ric(instr);
+ prs = get_prs(instr);
+ r = get_r(instr);
+ lpid = get_lpid(rsval);
+ is = get_is(rbval);
+
+ /*
+ * These cases are invalid and are not handled:
+ * r != 1 -> Only radix supported
+ * prs == 1 -> Not HV privileged
+ * ric == 3 -> No cluster bombs for radix
+ * is == 1 -> Partition scoped translations not associated with pid
+ * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
+ */
+ if ((!r) || (prs) || (ric == 3) || (is == 1) ||
+ ((!is) && (ric == 1 || ric == 2)))
+ return -EINVAL;
+
+ switch (is) {
+ case 0:
+ /*
+ * We know ric == 0
+ * Invalidate TLB for a given target address
+ */
+ epn = get_epn(rbval);
+ ap = get_ap(rbval);
+ ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
+ break;
+ case 2:
+ /* Invalidate matching LPID */
+ gp = kvmhv_get_nested(kvm, lpid, false);
+ if (gp) {
+ kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
+ kvmhv_put_nested(gp);
+ }
+ break;
+ case 3:
+ /* Invalidate ALL LPIDs */
+ kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * This handles the H_TLB_INVALIDATE hcall.
+ * Parameters are (r4) tlbie instruction code, (r5) rS contents,
+ * (r6) rB contents.
+ */
+long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
+ if (ret)
+ return H_PARAMETER;
+ return H_SUCCESS;
+}
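+
+/*
+ * For comparison, kvmhv_flush_lpid() above issues this same hcall when
+ * we ourselves run under an L0, with an instruction image built by
+ * H_TLBIE_P1_ENC(2, 0, 1) (ric = 2, prs = 0, r = 1) and the lpid in
+ * the rS slot - exactly the fields the helpers above decode back out.
+ */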
+
+/* Used to convert a nested guest real address to an L1 guest real address */
+static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ unsigned long n_gpa, unsigned long dsisr,
+ struct kvmppc_pte *gpte_p)
+{
+ u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
+ int ret;
+
+ ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
+ &fault_addr);
+
+ if (ret) {
+ /* We didn't find a pte */
+ if (ret == -EINVAL) {
+ /* Unsupported mmu config */
+ flags |= DSISR_UNSUPP_MMU;
+ } else if (ret == -ENOENT) {
+ /* No translation found */
+ flags |= DSISR_NOHPTE;
+ } else if (ret == -EFAULT) {
+ /* Couldn't access L1 real address */
+ flags |= DSISR_PRTABLE_FAULT;
+ vcpu->arch.fault_gpa = fault_addr;
+ } else {
+ /* Unknown error */
+ return ret;
+ }
+ goto forward_to_l1;
+ } else {
+ /* We found a pte -> check permissions */
+ if (dsisr & DSISR_ISSTORE) {
+ /* Can we write? */
+ if (!gpte_p->may_write) {
+ flags |= DSISR_PROTFAULT;
+ goto forward_to_l1;
+ }
+ } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ /* Can we execute? */
+ if (!gpte_p->may_execute) {
+ flags |= SRR1_ISI_N_OR_G;
+ goto forward_to_l1;
+ }
+ } else {
+ /* Can we read? */
+ if (!gpte_p->may_read && !gpte_p->may_write) {
+ flags |= DSISR_PROTFAULT;
+ goto forward_to_l1;
+ }
+ }
+ }
+
+ return 0;
+
+forward_to_l1:
+ vcpu->arch.fault_dsisr = flags;
+ if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
+ vcpu->arch.shregs.msr &= ~0x783f0000ul;
+ vcpu->arch.shregs.msr |= flags;
+ }
+ return RESUME_HOST;
+}
+
+static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp,
+ unsigned long n_gpa,
+ struct kvmppc_pte gpte,
+ unsigned long dsisr)
+{
+ struct kvm *kvm = vcpu->kvm;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ u64 pgflags;
+ bool ret;
+
+ /* Are the rc bits set in the L1 partition scoped pte? */
+ pgflags = _PAGE_ACCESSED;
+ if (writing)
+ pgflags |= _PAGE_DIRTY;
+ if (pgflags & ~gpte.rc)
+ return RESUME_HOST;
+
+ spin_lock(&kvm->mmu_lock);
+ /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
+ ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
+ gpte.raddr, kvm->arch.lpid);
+ spin_unlock(&kvm->mmu_lock);
+ if (!ret)
+ return -EINVAL;
+
+ /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
+ ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
+ gp->shadow_lpid);
+ if (!ret)
+ return -EINVAL;
+ return 0;
+}
+
+static inline int kvmppc_radix_level_to_shift(int level)
+{
+ switch (level) {
+ case 2:
+ return PUD_SHIFT;
+ case 1:
+ return PMD_SHIFT;
+ default:
+ return PAGE_SHIFT;
+ }
+}
+
+static inline int kvmppc_radix_shift_to_level(int shift)
+{
+ if (shift == PUD_SHIFT)
+ return 2;
+ if (shift == PMD_SHIFT)
+ return 1;
+ if (shift == PAGE_SHIFT)
+ return 0;
+ WARN_ON_ONCE(1);
+ return 0;
+}
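+
+/*
+ * Level <-> shift mapping used by the two helpers above: level 0 is a
+ * base page (PAGE_SHIFT), level 1 a PMD-sized page (2M under radix)
+ * and level 2 a PUD-sized page (1G under radix).
+ */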
+
+/* called with gp->tlb_lock held */
+static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
+ struct kvm_nested_guest *gp)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memory_slot *memslot;
+ struct rmap_nested *n_rmap;
+ struct kvmppc_pte gpte;
+ pte_t pte, *pte_p;
+ unsigned long mmu_seq;
+ unsigned long dsisr = vcpu->arch.fault_dsisr;
+ unsigned long ea = vcpu->arch.fault_dar;
+ unsigned long *rmapp;
+ unsigned long n_gpa, gpa, gfn, perm = 0UL;
+ unsigned int shift, l1_shift, level;
+ bool writing = !!(dsisr & DSISR_ISSTORE);
+ bool kvm_ro = false;
+ long int ret;
+
+ if (!gp->l1_gr_to_hr) {
+ kvmhv_update_ptbl_cache(gp);
+ if (!gp->l1_gr_to_hr)
+ return RESUME_HOST;
+ }
+
+ /* Convert the nested guest real address into an L1 guest real address */
+
+ n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
+ if (!(dsisr & DSISR_PRTABLE_FAULT))
+ n_gpa |= ea & 0xFFF;
+ ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
+
+ /*
+ * If the hardware found a translation but we don't now have a usable
+ * translation in the l1 partition-scoped tree, remove the shadow pte
+ * and let the guest retry.
+ */
+ if (ret == RESUME_HOST &&
+ (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
+ DSISR_BAD_COPYPASTE)))
+ goto inval;
+ if (ret)
+ return ret;
+
+ /* Failed to set the reference/change bits */
+ if (dsisr & DSISR_SET_RC) {
+ ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
+ if (ret == RESUME_HOST)
+ return ret;
+ if (ret)
+ goto inval;
+ dsisr &= ~DSISR_SET_RC;
+ if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
+ DSISR_PROTFAULT)))
+ return RESUME_GUEST;
+ }
+
+ /*
+ * We took an HISI or HDSI while we were running a nested guest, which
+ * means we have no partition-scoped translation for that address. We
+ * therefore need to insert a pte for the mapping into our shadow_pgtable.
+ */
+
+ l1_shift = gpte.page_shift;
+ if (l1_shift < PAGE_SHIFT) {
+ /* We don't support l1 using a page size smaller than our own */
+ pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
+ l1_shift, PAGE_SHIFT);
+ return -EINVAL;
+ }
+ gpa = gpte.raddr;
+ gfn = gpa >> PAGE_SHIFT;
+
+ /* 1. Get the corresponding host memslot */
+
+ memslot = gfn_to_memslot(kvm, gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
+ if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
+ /* unusual error -> reflect to the guest as a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+ return RESUME_GUEST;
+ }
+ /* passthrough of emulated MMIO case... */
+ pr_err("emulated MMIO passthrough?\n");
+ return -EINVAL;
+ }
+ if (memslot->flags & KVM_MEM_READONLY) {
+ if (writing) {
+ /* Give the guest a DSI */
+ kvmppc_core_queue_data_storage(vcpu, ea,
+ DSISR_ISSTORE | DSISR_PROTFAULT);
+ return RESUME_GUEST;
+ }
+ kvm_ro = true;
+ }
+
+ /* 2. Find the host pte for this L1 guest real address */
+
+ /* Used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_notifier_seq;
+ smp_rmb();
+
+ /* See if we can find a translation in our partition-scoped tables for L1 */
+ pte = __pte(0);
+ spin_lock(&kvm->mmu_lock);
+ pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
+ if (!shift)
+ shift = PAGE_SHIFT;
+ if (pte_p)
+ pte = *pte_p;
+ spin_unlock(&kvm->mmu_lock);
+
+ if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
+ /* No suitable pte found -> try to insert a mapping */
+ ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
+ writing, kvm_ro, &pte, &level);
+ if (ret == -EAGAIN)
+ return RESUME_GUEST;
+ else if (ret)
+ return ret;
+ shift = kvmppc_radix_level_to_shift(level);
+ }
+
+ /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
+
+ /* The permissions are the combination of the host and L1 guest ptes */
+ perm |= gpte.may_read ? 0UL : _PAGE_READ;
+ perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
+ perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
+ pte = __pte(pte_val(pte) & ~perm);
+
+ /* What size pte can we insert? */
+ if (shift > l1_shift) {
+ u64 mask;
+ unsigned int actual_shift = PAGE_SHIFT;
+ if (PMD_SHIFT < l1_shift)
+ actual_shift = PMD_SHIFT;
+ mask = (1UL << shift) - (1UL << actual_shift);
+ pte = __pte(pte_val(pte) | (gpa & mask));
+ shift = actual_shift;
+ }
+ level = kvmppc_radix_shift_to_level(shift);
+ n_gpa &= ~((1UL << shift) - 1);
+
+ /* 4. Insert the pte into our shadow_pgtable */
+
+ n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
+ if (!n_rmap)
+ return RESUME_GUEST; /* Let the guest try again */
+ n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
+ (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
+ mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
+ kfree(n_rmap); /* kfree() is a no-op on NULL, so no check is needed */
+ if (ret == -EAGAIN)
+ ret = RESUME_GUEST; /* Let the guest try again */
+
+ return ret;
+
+ inval:
+ kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
+ return RESUME_GUEST;
+}
+
+long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
+{
+ struct kvm_nested_guest *gp = vcpu->arch.nested;
+ long int ret;
+
+ mutex_lock(&gp->tlb_lock);
+ ret = __kvmhv_nested_page_fault(vcpu, gp);
+ mutex_unlock(&gp->tlb_lock);
+ return ret;
+}
+
+int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
+{
+ int ret = -1;
+
+ spin_lock(&kvm->mmu_lock);
+ while (++lpid <= kvm->arch.max_nested_lpid) {
+ if (kvm->arch.nested_guests[lpid]) {
+ ret = lpid;
+ break;
+ }
+ }
+ spin_unlock(&kvm->mmu_lock);
+ return ret;
+}
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index b11043b23c18..0787f12c1a1b 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -177,6 +177,7 @@ void kvmppc_subcore_enter_guest(void)
local_paca->sibling_subcore_state->in_guest[subcore_id] = 1;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_enter_guest);
void kvmppc_subcore_exit_guest(void)
{
@@ -187,6 +188,7 @@ void kvmppc_subcore_exit_guest(void)
local_paca->sibling_subcore_state->in_guest[subcore_id] = 0;
}
+EXPORT_SYMBOL_GPL(kvmppc_subcore_exit_guest);
static bool kvmppc_tb_resync_required(void)
{
@@ -331,5 +333,13 @@ long kvmppc_realmode_hmi_handler(void)
} else {
wait_for_tb_resync();
}
+
+ /*
+ * Reset tb_offset_applied so the guest exit code won't try
+ * to subtract the previous timebase offset from the timebase.
+ */
+ if (local_paca->kvm_hstate.kvm_vcore)
+ local_paca->kvm_hstate.kvm_vcore->tb_offset_applied = 0;
+
return 0;
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 758d1d23215e..b3f5786b20dc 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -136,7 +136,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
/* Mark the target VCPU as having an interrupt pending */
vcpu->stat.queue_intr++;
- set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
+ set_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
/* Kick self ? Just set MER and return */
if (vcpu == this_vcpu) {
@@ -170,8 +170,7 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
/* Note: Only called on self ! */
- clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
- &vcpu->arch.pending_exceptions);
+ clear_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}
@@ -768,6 +767,14 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
void __iomem *xics_phys;
int64_t rc;
+ if (kvmhv_on_pseries()) {
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+
+ iosync();
+ plpar_hcall_raw(H_EOI, retbuf, hwirq);
+ return;
+ }
+
rc = pnv_opal_pci_msi_eoi(c, hwirq);
if (rc)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1d14046124a0..9b8d50a7cbaf 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -28,6 +28,7 @@
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
+#include <asm/export.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
@@ -46,8 +47,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define NAPPING_NOVCPU 2
/* Stack frame offsets for kvmppc_hv_entry */
-#define SFS 160
+#define SFS 208
#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_SHORT_PATH (SFS-8)
#define STACK_SLOT_TID (SFS-16)
#define STACK_SLOT_PSSCR (SFS-24)
#define STACK_SLOT_PID (SFS-32)
@@ -56,6 +58,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
#define STACK_SLOT_DAWR (SFS-56)
#define STACK_SLOT_DAWRX (SFS-64)
#define STACK_SLOT_HFSCR (SFS-72)
+/* the following is used by the P9 short path */
+#define STACK_SLOT_NVGPRS (SFS-152) /* 18 gprs */
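+/*
+ * Save-area arithmetic: 18 GPRs * 8 bytes = 144 bytes, so the slots
+ * run from SFS-152 up through SFS-9, just below the short-path flag
+ * word at SFS-8.
+ */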
/*
* Call kvmppc_hv_entry in real mode.
@@ -113,45 +117,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_SPRG_VDSO_WRITE,r3
/* Reload the host's PMU registers */
- lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
- cmpwi r4, 0
- beq 23f /* skip if not */
-BEGIN_FTR_SECTION
- ld r3, HSTATE_MMCR0(r13)
- andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
- cmpwi r4, MMCR0_PMAO
- beql kvmppc_fix_pmao
-END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
- lwz r3, HSTATE_PMC1(r13)
- lwz r4, HSTATE_PMC2(r13)
- lwz r5, HSTATE_PMC3(r13)
- lwz r6, HSTATE_PMC4(r13)
- lwz r8, HSTATE_PMC5(r13)
- lwz r9, HSTATE_PMC6(r13)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r4
- mtspr SPRN_PMC3, r5
- mtspr SPRN_PMC4, r6
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
- ld r3, HSTATE_MMCR0(r13)
- ld r4, HSTATE_MMCR1(r13)
- ld r5, HSTATE_MMCRA(r13)
- ld r6, HSTATE_SIAR(r13)
- ld r7, HSTATE_SDAR(r13)
- mtspr SPRN_MMCR1, r4
- mtspr SPRN_MMCRA, r5
- mtspr SPRN_SIAR, r6
- mtspr SPRN_SDAR, r7
-BEGIN_FTR_SECTION
- ld r8, HSTATE_MMCR2(r13)
- ld r9, HSTATE_SIER(r13)
- mtspr SPRN_MMCR2, r8
- mtspr SPRN_SIER, r9
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mtspr SPRN_MMCR0, r3
- isync
-23:
+ bl kvmhv_load_host_pmu
/*
* Reload DEC. HDEC interrupts were disabled when
@@ -796,66 +762,23 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_restore_tm_hv
+ nop
ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
- /* Load guest PMU registers */
- /* R4 is live here (vcpu pointer) */
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- isync
-BEGIN_FTR_SECTION
- ld r3, VCPU_MMCR(r4)
- andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
- cmpwi r5, MMCR0_PMAO
- beql kvmppc_fix_pmao
-END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
- lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
- lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
- lwz r6, VCPU_PMC + 8(r4)
- lwz r7, VCPU_PMC + 12(r4)
- lwz r8, VCPU_PMC + 16(r4)
- lwz r9, VCPU_PMC + 20(r4)
- mtspr SPRN_PMC1, r3
- mtspr SPRN_PMC2, r5
- mtspr SPRN_PMC3, r6
- mtspr SPRN_PMC4, r7
- mtspr SPRN_PMC5, r8
- mtspr SPRN_PMC6, r9
- ld r3, VCPU_MMCR(r4)
- ld r5, VCPU_MMCR + 8(r4)
- ld r6, VCPU_MMCR + 16(r4)
- ld r7, VCPU_SIAR(r4)
- ld r8, VCPU_SDAR(r4)
- mtspr SPRN_MMCR1, r5
- mtspr SPRN_MMCRA, r6
- mtspr SPRN_SIAR, r7
- mtspr SPRN_SDAR, r8
-BEGIN_FTR_SECTION
- ld r5, VCPU_MMCR + 24(r4)
- ld r6, VCPU_SIER(r4)
- mtspr SPRN_MMCR2, r5
- mtspr SPRN_SIER, r6
-BEGIN_FTR_SECTION_NESTED(96)
- lwz r7, VCPU_PMC + 24(r4)
- lwz r8, VCPU_PMC + 28(r4)
- ld r9, VCPU_MMCR + 32(r4)
- mtspr SPRN_SPMC1, r7
- mtspr SPRN_SPMC2, r8
- mtspr SPRN_MMCRS, r9
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- mtspr SPRN_MMCR0, r3
- isync
+ /* Load guest PMU registers; r4 = vcpu pointer here */
+ mr r3, r4
+ bl kvmhv_load_guest_pmu
/* Load up FP, VMX and VSX registers */
+ ld r4, HSTATE_KVM_VCPU(r13)
bl kvmppc_load_fp
ld r14, VCPU_GPR(R14)(r4)
@@ -1100,73 +1023,40 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
no_xive:
#endif /* CONFIG_KVM_XICS */
-deliver_guest_interrupt:
- ld r6, VCPU_CTR(r4)
- ld r7, VCPU_XER(r4)
-
- mtctr r6
- mtxer r7
+ li r0, 0
+ stw r0, STACK_SLOT_SHORT_PATH(r1)
-kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
- ld r10, VCPU_PC(r4)
- ld r11, VCPU_MSR(r4)
+deliver_guest_interrupt: /* r4 = vcpu, r13 = paca */
+ /* Check if we can deliver an external or decrementer interrupt now */
+ ld r0, VCPU_PENDING_EXC(r4)
+BEGIN_FTR_SECTION
+ /* On POWER9, also check for emulated doorbell interrupt */
+ lbz r3, VCPU_DBELL_REQ(r4)
+ or r0, r0, r3
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
+ cmpdi r0, 0
+ beq 71f
+ mr r3, r4
+ bl kvmppc_guest_entry_inject_int
+ ld r4, HSTATE_KVM_VCPU(r13)
+71:
ld r6, VCPU_SRR0(r4)
ld r7, VCPU_SRR1(r4)
mtspr SPRN_SRR0, r6
mtspr SPRN_SRR1, r7
+fast_guest_entry_c:
+ ld r10, VCPU_PC(r4)
+ ld r11, VCPU_MSR(r4)
/* r11 = vcpu->arch.msr & ~MSR_HV */
rldicl r11, r11, 63 - MSR_HV_LG, 1
rotldi r11, r11, 1 + MSR_HV_LG
ori r11, r11, MSR_ME
- /* Check if we can deliver an external or decrementer interrupt now */
- ld r0, VCPU_PENDING_EXC(r4)
- rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
- cmpdi cr1, r0, 0
- andi. r8, r11, MSR_EE
- mfspr r8, SPRN_LPCR
- /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
- rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
- mtspr SPRN_LPCR, r8
- isync
- beq 5f
- li r0, BOOK3S_INTERRUPT_EXTERNAL
- bne cr1, 12f
- mfspr r0, SPRN_DEC
-BEGIN_FTR_SECTION
- /* On POWER9 check whether the guest has large decrementer enabled */
- andis. r8, r8, LPCR_LD@h
- bne 15f
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
- extsw r0, r0
-15: cmpdi r0, 0
- li r0, BOOK3S_INTERRUPT_DECREMENTER
- bge 5f
-
-12: mtspr SPRN_SRR0, r10
- mr r10,r0
- mtspr SPRN_SRR1, r11
- mr r9, r4
- bl kvmppc_msr_interrupt
-5:
-BEGIN_FTR_SECTION
- b fast_guest_return
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
- /* On POWER9, check for pending doorbell requests */
- lbz r0, VCPU_DBELL_REQ(r4)
- cmpwi r0, 0
- beq fast_guest_return
- ld r5, HSTATE_KVM_VCORE(r13)
- /* Set DPDES register so the CPU will take a doorbell interrupt */
- li r0, 1
- mtspr SPRN_DPDES, r0
- std r0, VCORE_DPDES(r5)
- /* Make sure other cpus see vcore->dpdes set before dbell req clear */
- lwsync
- /* Clear the pending doorbell request */
- li r0, 0
- stb r0, VCPU_DBELL_REQ(r4)
+ ld r6, VCPU_CTR(r4)
+ ld r7, VCPU_XER(r4)
+ mtctr r6
+ mtxer r7
/*
* Required state:
@@ -1202,7 +1092,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
ld r5, VCPU_LR(r4)
- lwz r6, VCPU_CR(r4)
+ ld r6, VCPU_CR(r4)
mtlr r5
mtcr r6
@@ -1234,6 +1124,83 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
HRFI_TO_GUEST
b .
+/*
+ * Enter the guest on a P9 or later system where we have exactly
+ * one vcpu per vcore and we don't need to go to real mode
+ * (which implies that host and guest are both using radix MMU mode).
+ * r3 = vcpu pointer
+ * Most SPRs and all the VSRs have been loaded already.
+ */
+_GLOBAL(__kvmhv_vcpu_entry_p9)
+EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -SFS(r1)
+
+ li r0, 1
+ stw r0, STACK_SLOT_SHORT_PATH(r1)
+
+ std r3, HSTATE_KVM_VCPU(r13)
+ mfcr r4
+ stw r4, SFS+8(r1)
+
+ std r1, HSTATE_HOST_R1(r13)
+
+ reg = 14
+ .rept 18
+ std reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ reg = 14
+ .rept 18
+ ld reg, __VCPU_GPR(reg)(r3)
+ reg = reg + 1
+ .endr
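+
+ /*
+ * The two .rept blocks above expand at assembly time into 18
+ * std/ld instructions each, covering r14-r31: the host's
+ * non-volatile GPRs are saved to the STACK_SLOT_NVGPRS area and
+ * the guest's values are loaded from the vcpu struct in their place.
+ */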
+
+ mfmsr r10
+ std r10, HSTATE_HOST_MSR(r13)
+
+ mr r4, r3
+ b fast_guest_entry_c
+guest_exit_short_path:
+
+ li r0, KVM_GUEST_MODE_NONE
+ stb r0, HSTATE_IN_GUEST(r13)
+
+ reg = 14
+ .rept 18
+ std reg, __VCPU_GPR(reg)(r9)
+ reg = reg + 1
+ .endr
+
+ reg = 14
+ .rept 18
+ ld reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
+ reg = reg + 1
+ .endr
+
+ lwz r4, SFS+8(r1)
+ mtcr r4
+
+ mr r3, r12 /* trap number */
+
+ addi r1, r1, SFS
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
+
+ /* If we are in real mode, do a rfid to get back to the caller */
+ mfmsr r4
+ andi. r5, r4, MSR_IR
+ bnelr
+ rldicl r5, r4, 64 - MSR_TS_S_LG, 62 /* extract TS field */
+ mtspr SPRN_SRR0, r0
+ ld r10, HSTATE_HOST_MSR(r13)
+ rldimi r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ mtspr SPRN_SRR1, r10
+ RFI_TO_KERNEL
+ b .
+
secondary_too_late:
li r12, 0
stw r12, STACK_SLOT_TRAP(r1)
@@ -1313,7 +1280,7 @@ kvmppc_interrupt_hv:
std r3, VCPU_GPR(R12)(r9)
/* CR is in the high half of r12 */
srdi r4, r12, 32
- stw r4, VCPU_CR(r9)
+ std r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
ld r3, HSTATE_CFAR(r13)
std r3, VCPU_CFAR(r9)
@@ -1387,18 +1354,26 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
std r3, VCPU_CTR(r9)
std r4, VCPU_XER(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- /* For softpatch interrupt, go off and do TM instruction emulation */
- cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
- beq kvmppc_tm_emul
-#endif
+ /* Save more register state */
+ mfdar r3
+ mfdsisr r4
+ std r3, VCPU_DAR(r9)
+ stw r4, VCPU_DSISR(r9)
/* If this is a page table miss then see if it's theirs or ours */
cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
beq kvmppc_hdsi
+ std r3, VCPU_FAULT_DAR(r9)
+ stw r4, VCPU_FAULT_DSISR(r9)
cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
beq kvmppc_hisi
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* For softpatch interrupt, go off and do TM instruction emulation */
+ cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
+ beq kvmppc_tm_emul
+#endif
+
/* See if this is a leftover HDEC interrupt */
cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
bne 2f
@@ -1418,10 +1393,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
PPC_MSGSYNC
lwsync
+ /* always exit if we're running a nested guest */
+ ld r0, VCPU_NESTED(r9)
+ cmpdi r0, 0
+ bne guest_exit_cont
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
- beq 4f
+ beq maybe_reenter_guest
b guest_exit_cont
3:
/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
@@ -1433,82 +1412,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
14:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- bne+ guest_exit_cont
-
- /* External interrupt, first check for host_ipi. If this is
- * set, we know the host wants us out so let's do it now
- */
- bl kvmppc_read_intr
-
- /*
- * Restore the active volatile registers after returning from
- * a C function.
- */
- ld r9, HSTATE_KVM_VCPU(r13)
- li r12, BOOK3S_INTERRUPT_EXTERNAL
-
- /*
- * kvmppc_read_intr return codes:
- *
- * Exit to host (r3 > 0)
- * 1 An interrupt is pending that needs to be handled by the host
- * Exit guest and return to host by branching to guest_exit_cont
- *
- * 2 Passthrough that needs completion in the host
- * Exit guest and return to host by branching to guest_exit_cont
- * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
- * to indicate to the host to complete handling the interrupt
- *
- * Before returning to guest, we check if any CPU is heading out
- * to the host and if so, we head out also. If no CPUs are heading
- * out, the return values <= 0 below apply.
- *
- * Return to guest (r3 <= 0)
- * 0 No external interrupt is pending
- * -1 A guest wakeup IPI (which has now been cleared)
- * In either case, we return to guest to deliver any pending
- * guest interrupts.
- *
- * -2 A PCI passthrough external interrupt was handled
- * (interrupt was delivered directly to guest)
- * Return to guest to deliver any pending guest interrupts.
- */
-
- cmpdi r3, 1
- ble 1f
-
- /* Return code = 2 */
- li r12, BOOK3S_INTERRUPT_HV_RM_HARD
- stw r12, VCPU_TRAP(r9)
- b guest_exit_cont
-
-1: /* Return code <= 1 */
- cmpdi r3, 0
- bgt guest_exit_cont
-
- /* Return code <= 0 */
-4: ld r5, HSTATE_KVM_VCORE(r13)
- lwz r0, VCORE_ENTRY_EXIT(r5)
- cmpwi r0, 0x100
- mr r4, r9
- blt deliver_guest_interrupt
-
-guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
- /* Save more register state */
- mfdar r6
- mfdsisr r7
- std r6, VCPU_DAR(r9)
- stw r7, VCPU_DSISR(r9)
- /* don't overwrite fault_dar/fault_dsisr if HDSI */
- cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
- beq mc_cont
- std r6, VCPU_FAULT_DAR(r9)
- stw r7, VCPU_FAULT_DSISR(r9)
-
+ beq kvmppc_guest_external
/* See if it is a machine check */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
-mc_cont:
+ /* Or a hypervisor maintenance interrupt */
+ cmpwi r12, BOOK3S_INTERRUPT_HMI
+ beq hmi_realmode
+
+guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
+
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
addi r3, r9, VCPU_TB_RMEXIT
mr r4, r9
@@ -1552,6 +1465,11 @@ mc_cont:
1:
#endif /* CONFIG_KVM_XICS */
+ /* If we came in through the P9 short path, go back out to C now */
+ lwz r0, STACK_SLOT_SHORT_PATH(r1)
+ cmpwi r0, 0
+ bne guest_exit_short_path
+
/* For hash guest, read the guest SLB and save it away */
ld r5, VCPU_KVM(r9)
lbz r0, KVM_RADIX(r5)
@@ -1780,11 +1698,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r9
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_save_tm_hv
+ nop
ld r9, HSTATE_KVM_VCPU(r13)
91:
#endif
@@ -1802,83 +1722,12 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
25:
/* Save PMU registers if requested */
/* r8 and cr0.eq are live here */
-BEGIN_FTR_SECTION
- /*
- * POWER8 seems to have a hardware bug where setting
- * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
- * when some counters are already negative doesn't seem
- * to cause a performance monitor alert (and hence interrupt).
- * The effect of this is that when saving the PMU state,
- * if there is no PMU alert pending when we read MMCR0
- * before freezing the counters, but one becomes pending
- * before we read the counters, we lose it.
- * To work around this, we need a way to freeze the counters
- * before reading MMCR0. Normally, freezing the counters
- * is done by writing MMCR0 (to set MMCR0[FC]) which
- * unavoidably writes MMCR0[PMAO] as well. On POWER8,
- * we can also freeze the counters using MMCR2, by writing
- * 1s to all the counter freeze condition bits (there are
- * 9 bits each for 6 counters).
- */
- li r3, -1 /* set all freeze bits */
- clrrdi r3, r3, 10
- mfspr r10, SPRN_MMCR2
- mtspr SPRN_MMCR2, r3
- isync
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- li r3, 1
- sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
- mfspr r4, SPRN_MMCR0 /* save MMCR0 */
- mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
- mfspr r6, SPRN_MMCRA
- /* Clear MMCRA in order to disable SDAR updates */
- li r7, 0
- mtspr SPRN_MMCRA, r7
- isync
+ mr r3, r9
+ li r4, 1
beq 21f /* if no VPA, save PMU stuff anyway */
- lbz r7, LPPACA_PMCINUSE(r8)
- cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
- bne 21f
- std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
- b 22f
-21: mfspr r5, SPRN_MMCR1
- mfspr r7, SPRN_SIAR
- mfspr r8, SPRN_SDAR
- std r4, VCPU_MMCR(r9)
- std r5, VCPU_MMCR + 8(r9)
- std r6, VCPU_MMCR + 16(r9)
-BEGIN_FTR_SECTION
- std r10, VCPU_MMCR + 24(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- std r7, VCPU_SIAR(r9)
- std r8, VCPU_SDAR(r9)
- mfspr r3, SPRN_PMC1
- mfspr r4, SPRN_PMC2
- mfspr r5, SPRN_PMC3
- mfspr r6, SPRN_PMC4
- mfspr r7, SPRN_PMC5
- mfspr r8, SPRN_PMC6
- stw r3, VCPU_PMC(r9)
- stw r4, VCPU_PMC + 4(r9)
- stw r5, VCPU_PMC + 8(r9)
- stw r6, VCPU_PMC + 12(r9)
- stw r7, VCPU_PMC + 16(r9)
- stw r8, VCPU_PMC + 20(r9)
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_SIER
- std r5, VCPU_SIER(r9)
-BEGIN_FTR_SECTION_NESTED(96)
- mfspr r6, SPRN_SPMC1
- mfspr r7, SPRN_SPMC2
- mfspr r8, SPRN_MMCRS
- stw r6, VCPU_PMC + 24(r9)
- stw r7, VCPU_PMC + 28(r9)
- std r8, VCPU_MMCR + 32(r9)
- lis r4, 0x8000
- mtspr SPRN_MMCRS, r4
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-22:
+ lbz r4, LPPACA_PMCINUSE(r8)
+21: bl kvmhv_save_guest_pmu
+ ld r9, HSTATE_KVM_VCPU(r13)
/* Restore host values of some registers */
BEGIN_FTR_SECTION
@@ -2010,24 +1859,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
- /* If HMI, call kvmppc_realmode_hmi_handler() */
- lwz r12, STACK_SLOT_TRAP(r1)
- cmpwi r12, BOOK3S_INTERRUPT_HMI
- bne 27f
- bl kvmppc_realmode_hmi_handler
- nop
- cmpdi r3, 0
- /*
- * At this point kvmppc_realmode_hmi_handler may have resync-ed
- * the TB, and if it has, we must not subtract the guest timebase
- * offset from the timebase. So, skip it.
- *
- * Also, do not call kvmppc_subcore_exit_guest() because it has
- * been invoked as part of kvmppc_realmode_hmi_handler().
- */
- beq 30f
-
-27:
/* Subtract timebase offset from timebase */
ld r8, VCORE_TB_OFFSET_APPL(r5)
cmpdi r8,0
@@ -2045,7 +1876,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
addis r8,r8,0x100 /* if so, increment upper 40 bits */
mtspr SPRN_TBU40,r8
-17: bl kvmppc_subcore_exit_guest
+17:
+ /*
+ * If this is an HMI, we called kvmppc_realmode_hmi_handler
+ * above, which may or may not have already called
+ * kvmppc_subcore_exit_guest. Fortunately, all that
+ * kvmppc_subcore_exit_guest does is clear a flag, so calling
+ * it again here is benign even if kvmppc_realmode_hmi_handler
+ * has already called it.
+ */
+ bl kvmppc_subcore_exit_guest
nop
30: ld r5,HSTATE_KVM_VCORE(r13)
ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
@@ -2099,6 +1939,67 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
mtlr r0
blr
+kvmppc_guest_external:
+ /* External interrupt, first check for host_ipi. If this is
+ * set, we know the host wants us out so let's do it now
+ */
+ bl kvmppc_read_intr
+
+ /*
+ * Restore the active volatile registers after returning from
+ * a C function.
+ */
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_EXTERNAL
+
+ /*
+ * kvmppc_read_intr return codes:
+ *
+ * Exit to host (r3 > 0)
+ * 1 An interrupt is pending that needs to be handled by the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ *
+ * 2 Passthrough that needs completion in the host
+ * Exit guest and return to host by branching to guest_exit_cont
+ * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
+ * to indicate to the host to complete handling the interrupt
+ *
+ * Before returning to guest, we check if any CPU is heading out
+ * to the host and if so, we head out also. If no CPUs are heading
+ * out, the return values <= 0 below apply.
+ *
+ * Return to guest (r3 <= 0)
+ * 0 No external interrupt is pending
+ * -1 A guest wakeup IPI (which has now been cleared)
+ * In either case, we return to guest to deliver any pending
+ * guest interrupts.
+ *
+ * -2 A PCI passthrough external interrupt was handled
+ * (interrupt was delivered directly to guest)
+ * Return to guest to deliver any pending guest interrupts.
+ */
+
+ cmpdi r3, 1
+ ble 1f
+
+ /* Return code = 2 */
+ li r12, BOOK3S_INTERRUPT_HV_RM_HARD
+ stw r12, VCPU_TRAP(r9)
+ b guest_exit_cont
+
+1: /* Return code <= 1 */
+ cmpdi r3, 0
+ bgt guest_exit_cont
+
+ /* Return code <= 0 */
+maybe_reenter_guest:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lwz r0, VCORE_ENTRY_EXIT(r5)
+ cmpwi r0, 0x100
+ mr r4, r9
+ blt deliver_guest_interrupt
+ b guest_exit_cont
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Softpatch interrupt for transactional memory emulation cases
@@ -2302,6 +2203,10 @@ hcall_try_real_mode:
andi. r0,r11,MSR_PR
/* sc 1 from userspace - reflect to guest syscall */
bne sc_1_fast_return
+ /* sc 1 from nested guest - give it to L1 to handle */
+ ld r0, VCPU_NESTED(r9)
+ cmpdi r0, 0
+ bne guest_exit_cont
clrrdi r3,r3,2
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
@@ -2561,6 +2466,7 @@ hcall_real_table:
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
+EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
@@ -2570,6 +2476,7 @@ _GLOBAL(kvmppc_h_set_xdabr)
blr
_GLOBAL(kvmppc_h_set_dabr)
+EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
li r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
@@ -2682,11 +2589,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
ld r3, HSTATE_KVM_VCPU(r13)
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_save_tm_hv
+ nop
91:
#endif
@@ -2802,11 +2711,13 @@ BEGIN_FTR_SECTION
b 91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
/*
- * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
+ * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
*/
mr r3, r4
ld r4, VCPU_MSR(r3)
+ li r5, 0 /* don't preserve non-vol regs */
bl kvmppc_restore_tm_hv
+ nop
ld r4, HSTATE_KVM_VCPU(r13)
91:
#endif
@@ -2874,13 +2785,7 @@ END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
mr r9, r4
cmpdi r3, 0
bgt guest_exit_cont
-
- /* see if any other thread is already exiting */
- lwz r0,VCORE_ENTRY_EXIT(r5)
- cmpwi r0,0x100
- bge guest_exit_cont
-
- b kvmppc_cede_reentry /* if not go back to guest */
+ b maybe_reenter_guest
/* cede when already previously prodded case */
kvm_cede_prodded:
@@ -2947,12 +2852,12 @@ machine_check_realmode:
*/
ld r11, VCPU_MSR(r9)
rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
- bne mc_cont /* if so, exit to host */
+ bne guest_exit_cont /* if so, exit to host */
/* Check if guest is capable of handling NMI exit */
ld r10, VCPU_KVM(r9)
lbz r10, KVM_FWNMI(r10)
cmpdi r10, 1 /* FWNMI capable? */
- beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
+ beq guest_exit_cont /* if so, exit with KVM_EXIT_NMI. */
/* if not, fall through for backward compatibility. */
andi. r10, r11, MSR_RI /* check for unrecoverable exception */
@@ -2966,6 +2871,21 @@ machine_check_realmode:
2: b fast_interrupt_c_return
/*
+ * Call C code to handle a HMI in real mode.
+ * Only the primary thread does the call, secondary threads are handled
+ * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
+ * r9 points to the vcpu on entry
+ */
+hmi_realmode:
+ lbz r0, HSTATE_PTID(r13)
+ cmpwi r0, 0
+ bne guest_exit_cont
+ bl kvmppc_realmode_hmi_handler
+ ld r9, HSTATE_KVM_VCPU(r13)
+ li r12, BOOK3S_INTERRUPT_HMI
+ b guest_exit_cont
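In C terms, the hmi_realmode path above is roughly the following; the paca field spelling is taken from the HSTATE_PTID accessor and is illustrative only.

	/* Sketch: only the primary thread (ptid == 0) calls the real-mode
	 * handler; secondaries exit and rely on hmi_exception_realmode()
	 * running after kvmppc_hv_entry returns. */
	if (local_paca->kvm_hstate.ptid == 0)
		kvmppc_realmode_hmi_handler();
	trap = BOOK3S_INTERRUPT_HMI;		/* then exit to the host */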
+
+/*
* Check the reason we woke from nap, and take appropriate action.
* Returns (in r3):
* 0 if nothing needs to be done
@@ -3130,10 +3050,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
* Save transactional state and TM-related registers.
* Called with r3 pointing to the vcpu struct and r4 containing
* the guest MSR value.
- * This can modify all checkpointed registers, but
+ * r5 is non-zero iff non-volatile register state needs to be maintained.
+ * If r5 == 0, this can modify all checkpointed registers, but
* restores r1 and r2 before exit.
*/
-kvmppc_save_tm_hv:
+_GLOBAL_TOC(kvmppc_save_tm_hv)
+EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
/* See if we need to handle fake suspend mode */
BEGIN_FTR_SECTION
b __kvmppc_save_tm
@@ -3161,12 +3083,6 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
- std r1, HSTATE_HOST_R1(r13)
-
- /* Clear the MSR RI since r1, r13 may be foobar. */
- li r5, 0
- mtmsrd r5, 1
-
/* We have to treclaim here because that's the only way to do S->N */
li r3, TM_CAUSE_KVM_RESCHED
TRECLAIM(R3)
@@ -3175,22 +3091,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
* We were in fake suspend, so we are not going to save the
* register state as the guest checkpointed state (since
* we already have it), therefore we can now use any volatile GPR.
+ * In fact treclaim in fake suspend state doesn't modify
+ * any registers.
*/
- /* Reload PACA pointer, stack pointer and TOC. */
- GET_PACA(r13)
- ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
-
- HMT_MEDIUM
- ld r6, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r6
-BEGIN_FTR_SECTION_NESTED(96)
+BEGIN_FTR_SECTION
bl pnv_power9_force_smt4_release
-END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
nop
4:
@@ -3216,10 +3123,12 @@ END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
* Restore transactional state and TM-related registers.
* Called with r3 pointing to the vcpu struct
* and r4 containing the guest MSR value.
+ * r5 is non-zero iff non-volatile register state needs to be maintained.
* This potentially modifies all checkpointed registers.
* It restores r1 and r2 from the PACA.
*/
-kvmppc_restore_tm_hv:
+_GLOBAL_TOC(kvmppc_restore_tm_hv)
+EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
/*
* If we are doing TM emulation for the guest on a POWER9 DD2,
* then we don't actually do a trechkpt -- we either set up
@@ -3424,6 +3333,194 @@ kvmppc_msr_interrupt:
blr
/*
+ * Load up guest PMU state. R3 points to the vcpu struct.
+ */
+_GLOBAL(kvmhv_load_guest_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
+ mr r4, r3
+ mflr r0
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ isync
+BEGIN_FTR_SECTION
+ ld r3, VCPU_MMCR(r4)
+ andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r5, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
+ lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
+ lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
+ lwz r6, VCPU_PMC + 8(r4)
+ lwz r7, VCPU_PMC + 12(r4)
+ lwz r8, VCPU_PMC + 16(r4)
+ lwz r9, VCPU_PMC + 20(r4)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r5
+ mtspr SPRN_PMC3, r6
+ mtspr SPRN_PMC4, r7
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+ ld r3, VCPU_MMCR(r4)
+ ld r5, VCPU_MMCR + 8(r4)
+ ld r6, VCPU_MMCR + 16(r4)
+ ld r7, VCPU_SIAR(r4)
+ ld r8, VCPU_SDAR(r4)
+ mtspr SPRN_MMCR1, r5
+ mtspr SPRN_MMCRA, r6
+ mtspr SPRN_SIAR, r7
+ mtspr SPRN_SDAR, r8
+BEGIN_FTR_SECTION
+ ld r5, VCPU_MMCR + 24(r4)
+ ld r6, VCPU_SIER(r4)
+ mtspr SPRN_MMCR2, r5
+ mtspr SPRN_SIER, r6
+BEGIN_FTR_SECTION_NESTED(96)
+ lwz r7, VCPU_PMC + 24(r4)
+ lwz r8, VCPU_PMC + 28(r4)
+ ld r9, VCPU_MMCR + 32(r4)
+ mtspr SPRN_SPMC1, r7
+ mtspr SPRN_SPMC2, r8
+ mtspr SPRN_MMCRS, r9
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mtspr SPRN_MMCR0, r3
+ isync
+ mtlr r0
+ blr
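The li/sldi pair at the top of this routine is just building the freeze bit; a one-line sketch of what it computes (mtspr() is the usual kernel wrapper):

	/* "li r3, 1; sldi r3, r3, 31" == MMCR0_FC: freeze all counters
	 * while the guest PMU values are being loaded. */
	unsigned long mmcr0_fc = 1UL << 31;	/* 0x80000000 */
	mtspr(SPRN_MMCR0, mmcr0_fc);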
+
+/*
+ * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
+ */
+_GLOBAL(kvmhv_load_host_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
+ mflr r0
+ lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
+ cmpwi r4, 0
+ beq 23f /* skip if not */
+BEGIN_FTR_SECTION
+ ld r3, HSTATE_MMCR0(r13)
+ andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
+ cmpwi r4, MMCR0_PMAO
+ beql kvmppc_fix_pmao
+END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
+ lwz r3, HSTATE_PMC1(r13)
+ lwz r4, HSTATE_PMC2(r13)
+ lwz r5, HSTATE_PMC3(r13)
+ lwz r6, HSTATE_PMC4(r13)
+ lwz r8, HSTATE_PMC5(r13)
+ lwz r9, HSTATE_PMC6(r13)
+ mtspr SPRN_PMC1, r3
+ mtspr SPRN_PMC2, r4
+ mtspr SPRN_PMC3, r5
+ mtspr SPRN_PMC4, r6
+ mtspr SPRN_PMC5, r8
+ mtspr SPRN_PMC6, r9
+ ld r3, HSTATE_MMCR0(r13)
+ ld r4, HSTATE_MMCR1(r13)
+ ld r5, HSTATE_MMCRA(r13)
+ ld r6, HSTATE_SIAR(r13)
+ ld r7, HSTATE_SDAR(r13)
+ mtspr SPRN_MMCR1, r4
+ mtspr SPRN_MMCRA, r5
+ mtspr SPRN_SIAR, r6
+ mtspr SPRN_SDAR, r7
+BEGIN_FTR_SECTION
+ ld r8, HSTATE_MMCR2(r13)
+ ld r9, HSTATE_SIER(r13)
+ mtspr SPRN_MMCR2, r8
+ mtspr SPRN_SIER, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ mtspr SPRN_MMCR0, r3
+ isync
+ mtlr r0
+23: blr
+
+/*
+ * Save guest PMU state into the vcpu struct.
+ * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
+ */
+_GLOBAL(kvmhv_save_guest_pmu)
+EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
+ mr r9, r3
+ mr r8, r4
+BEGIN_FTR_SECTION
+ /*
+ * POWER8 seems to have a hardware bug where setting
+ * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
+ * when some counters are already negative doesn't seem
+ * to cause a performance monitor alert (and hence interrupt).
+ * The effect of this is that when saving the PMU state,
+ * if there is no PMU alert pending when we read MMCR0
+ * before freezing the counters, but one becomes pending
+ * before we read the counters, we lose it.
+ * To work around this, we need a way to freeze the counters
+ * before reading MMCR0. Normally, freezing the counters
+ * is done by writing MMCR0 (to set MMCR0[FC]) which
+ * unavoidably writes MMCR0[PMAO] as well. On POWER8,
+ * we can also freeze the counters using MMCR2, by writing
+ * 1s to all the counter freeze condition bits (there are
+ * 9 bits each for 6 counters).
+ */
+ li r3, -1 /* set all freeze bits */
+ clrrdi r3, r3, 10
+ mfspr r10, SPRN_MMCR2
+ mtspr SPRN_MMCR2, r3
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
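The mask built by the two instructions above is easier to read in C; a sketch of the computation:

	/* "li r3, -1; clrrdi r3, r3, 10": all ones with the low 10 bits
	 * cleared, i.e. 1s in every MMCR2 freeze-condition bit
	 * (9 bits for each of the 6 counters = the top 54 bits). */
	u64 freeze_all = ~0ULL << 10;		/* 0xfffffffffffffc00 */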
+ li r3, 1
+ sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
+ mfspr r4, SPRN_MMCR0 /* save MMCR0 */
+ mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+ /* Clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+ isync
+ cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
+ bne 21f
+ std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
+ b 22f
+21: mfspr r5, SPRN_MMCR1
+ mfspr r7, SPRN_SIAR
+ mfspr r8, SPRN_SDAR
+ std r4, VCPU_MMCR(r9)
+ std r5, VCPU_MMCR + 8(r9)
+ std r6, VCPU_MMCR + 16(r9)
+BEGIN_FTR_SECTION
+ std r10, VCPU_MMCR + 24(r9)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ std r7, VCPU_SIAR(r9)
+ std r8, VCPU_SDAR(r9)
+ mfspr r3, SPRN_PMC1
+ mfspr r4, SPRN_PMC2
+ mfspr r5, SPRN_PMC3
+ mfspr r6, SPRN_PMC4
+ mfspr r7, SPRN_PMC5
+ mfspr r8, SPRN_PMC6
+ stw r3, VCPU_PMC(r9)
+ stw r4, VCPU_PMC + 4(r9)
+ stw r5, VCPU_PMC + 8(r9)
+ stw r6, VCPU_PMC + 12(r9)
+ stw r7, VCPU_PMC + 16(r9)
+ stw r8, VCPU_PMC + 20(r9)
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_SIER
+ std r5, VCPU_SIER(r9)
+BEGIN_FTR_SECTION_NESTED(96)
+ mfspr r6, SPRN_SPMC1
+ mfspr r7, SPRN_SPMC2
+ mfspr r8, SPRN_MMCRS
+ stw r6, VCPU_PMC + 24(r9)
+ stw r7, VCPU_PMC + 28(r9)
+ std r8, VCPU_MMCR + 32(r9)
+ lis r4, 0x8000
+ mtspr SPRN_MMCRS, r4
+END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+22: blr
+
+/*
* This works around a hardware bug on POWER8E processors, where
* writing a 1 to the MMCR0[PMAO] bit doesn't generate a
* performance monitor interrupt. Instead, when we need to have
diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 008285058f9b..888e2609e3f1 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -130,7 +130,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
/* L=1 => tresume, L=0 => tsuspend */
if (instr & (1 << 21)) {
@@ -174,7 +174,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_from_checkpoint(vcpu);
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
return RESUME_GUEST;
@@ -204,7 +204,7 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
copy_to_checkpoint(vcpu);
/* Set CR0 to indicate previous transactional state */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) |
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
vcpu->arch.shregs.msr = msr | MSR_TS_S;
return RESUME_GUEST;
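The same ccr update recurs in all three hunks above. Purely as a sketch (the patch introduces no such helper), the pattern factors out to:

	/* CR0 receives the pre-instruction transactional state from MSR[TS]. */
	static inline void set_cr0_from_ts(struct kvm_vcpu *vcpu, u64 msr)
	{
		vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
			(((msr & MSR_TS_MASK) >> MSR_TS_S_LG) << 28);
	}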
diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index b2c7c6fca4f9..3cf5863bc06e 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -89,7 +89,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
if (instr & (1 << 21))
vcpu->arch.shregs.msr = (msr & ~MSR_TS_MASK) | MSR_TS_T;
/* Set CR0 to 0b0010 */
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0x20000000;
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) |
+ 0x20000000;
return 1;
}
@@ -105,5 +106,5 @@ void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu)
vcpu->arch.shregs.msr &= ~MSR_TS_MASK; /* go to N state */
vcpu->arch.regs.nip = vcpu->arch.tfhar;
copy_from_checkpoint(vcpu);
- vcpu->arch.cr = (vcpu->arch.cr & 0x0fffffff) | 0xa0000000;
+ vcpu->arch.regs.ccr = (vcpu->arch.regs.ccr & 0x0fffffff) | 0xa0000000;
}
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 614ebb4261f7..4efd65d9e828 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -167,7 +167,7 @@ void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
- svcpu->cr = vcpu->arch.cr;
+ svcpu->cr = vcpu->arch.regs.ccr;
svcpu->xer = vcpu->arch.regs.xer;
svcpu->ctr = vcpu->arch.regs.ctr;
svcpu->lr = vcpu->arch.regs.link;
@@ -249,7 +249,7 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
- vcpu->arch.cr = svcpu->cr;
+ vcpu->arch.regs.ccr = svcpu->cr;
vcpu->arch.regs.xer = svcpu->xer;
vcpu->arch.regs.ctr = svcpu->ctr;
vcpu->arch.regs.link = svcpu->lr;
@@ -1246,7 +1246,6 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = RESUME_GUEST;
break;
case BOOK3S_INTERRUPT_EXTERNAL:
- case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
case BOOK3S_INTERRUPT_EXTERNAL_HV:
case BOOK3S_INTERRUPT_H_VIRT:
vcpu->stat.ext_intr_exits++;
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index b8356cdc0c04..b0b2bfc2ff51 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -310,7 +310,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
*/
if (new.out_ee) {
kvmppc_book3s_queue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ BOOK3S_INTERRUPT_EXTERNAL);
if (!change_self)
kvmppc_fast_vcpu_kick(icp->vcpu);
}
@@ -593,8 +593,7 @@ static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
u32 xirr;
/* First, remove EE from the processor */
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/*
* ICP State: Accept_Interrupt
@@ -754,8 +753,7 @@ static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
* We can remove EE from the current processor, the update
* transaction will set it again if needed
*/
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
do {
old_state = new_state = READ_ONCE(icp->state);
@@ -1167,8 +1165,7 @@ int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
* Deassert the CPU interrupt request.
* icp_try_update will reassert it if necessary.
*/
- kvmppc_book3s_dequeue_irqprio(icp->vcpu,
- BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
+ kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);
/*
* Note that if we displace an interrupt from old_state.xisr,
@@ -1393,7 +1390,8 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
}
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+ cpu_has_feature(CPU_FTR_HVMODE)) {
/* Enable real mode support */
xics->real_mode = ENABLE_REALMODE;
xics->real_mode_dbg = DEBUG_REALMODE;
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 30c2eb766954..ad4a370703d3 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -62,6 +62,69 @@
#define XIVE_Q_GAP 2
/*
+ * Push a vcpu's context to the XIVE on guest entry.
+ * This assumes we are in virtual mode (MMU on)
+ */
+void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
+{
+ void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
+ u64 pq;
+
+ if (!tima)
+ return;
+ eieio();
+ __raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
+ __raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
+ vcpu->arch.xive_pushed = 1;
+ eieio();
+
+ /*
+ * We clear the irq_pending flag. There is a small chance of a
+ * race vs. the escalation interrupt happening on another
+ * processor setting it again, but the only consequence is to
+ * cause a spurious wakeup on the next H_CEDE, which is not an
+ * issue.
+ */
+ vcpu->arch.irq_pending = 0;
+
+ /*
+ * In single escalation mode, if the escalation interrupt is
+ * on, we mask it.
+ */
+ if (vcpu->arch.xive_esc_on) {
+ pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
+ XIVE_ESB_SET_PQ_01));
+ mb();
+
+ /*
+ * We have a possible subtle race here: The escalation
+ * interrupt might have fired and be on its way to the
+ * host queue while we mask it, and if we unmask it
+ * early enough (re-cede right away), there is a
+ * theoretical possibility that it fires again, thus
+ * landing in the target queue more than once which is
+ * a big no-no.
+ *
+ * Fortunately, solving this is rather easy. If the
+ * above load setting PQ to 01 returns a previous
+ * value where P is set, then we know the escalation
+ * interrupt is somewhere on its way to the host. In
+ * that case we simply don't clear the xive_esc_on
+ * flag below. It will be eventually cleared by the
+ * handler for the escalation interrupt.
+ *
+ * Then, when doing a cede, we check that flag again
+ * before re-enabling the escalation interrupt, and if
+ * set, we abort the cede.
+ */
+ if (!(pq & XIVE_ESB_VAL_P))
+ /* Now P is 0, we can clear the flag */
+ vcpu->arch.xive_esc_on = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);
+
+/*
* This is a simple trigger for a generic XIVE IRQ. This must
* only be called for interrupts that support a trigger page
*/
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 4171ede8722b..033363d6e764 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -280,14 +280,6 @@ X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
/* First collect pending bits from HW */
GLUE(X_PFX,ack_pending)(xc);
- /*
- * Cleanup the old-style bits if needed (they may have been
- * set by pull or an escalation interrupts).
- */
- if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
- clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
- &vcpu->arch.pending_exceptions);
-
pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
xc->pending, xc->hw_cppr, xc->cppr);
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 81bd8a07aa51..051af7d97327 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -182,7 +182,7 @@
*/
PPC_LL r4, PACACURRENT(r13)
PPC_LL r4, (THREAD + THREAD_KVM_VCPU)(r4)
- stw r10, VCPU_CR(r4)
+ PPC_STL r10, VCPU_CR(r4)
PPC_STL r11, VCPU_GPR(R4)(r4)
PPC_STL r5, VCPU_GPR(R5)(r4)
PPC_STL r6, VCPU_GPR(R6)(r4)
@@ -292,7 +292,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
PPC_STL r5, VCPU_GPR(R5)(r11)
- stw r13, VCPU_CR(r11)
+ PPC_STL r13, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
@@ -319,7 +319,7 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, GPR9(r8)
PPC_STL r5, VCPU_GPR(R5)(r11)
- stw r9, VCPU_CR(r11)
+ PPC_STL r9, VCPU_CR(r11)
mfspr r5, \srr0
PPC_STL r3, VCPU_GPR(R8)(r11)
PPC_LL r3, GPR10(r8)
@@ -643,7 +643,7 @@ lightweight_exit:
PPC_LL r3, VCPU_LR(r4)
PPC_LL r5, VCPU_XER(r4)
PPC_LL r6, VCPU_CTR(r4)
- lwz r7, VCPU_CR(r4)
+ PPC_LL r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
PPC_LD(r9, VCPU_SHARED_MSR, r11)
PPC_LL r0, VCPU_GPR(R0)(r4)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 75dce1ef3bc8..f91b1309a0a8 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -117,7 +117,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = EMULATE_FAIL;
vcpu->arch.regs.msr = vcpu->arch.shared->msr;
- vcpu->arch.regs.ccr = vcpu->arch.cr;
if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index eba5756d5b41..2869a299c4ed 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -594,7 +594,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = !!(hv_enabled && radix_enabled());
break;
case KVM_CAP_PPC_MMU_HASH_V3:
- r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
+ r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
+ cpu_has_feature(CPU_FTR_HVMODE));
+ break;
+ case KVM_CAP_PPC_NESTED_HV:
+ r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
+ !kvmppc_hv_ops->enable_nested(NULL));
break;
#endif
case KVM_CAP_SYNC_MMU:
@@ -2114,6 +2119,14 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
break;
}
+
+ case KVM_CAP_PPC_NESTED_HV:
+ r = -EINVAL;
+ if (!is_kvmppc_hv_enabled(kvm) ||
+ !kvm->arch.kvm_ops->enable_nested)
+ break;
+ r = kvm->arch.kvm_ops->enable_nested(kvm);
+ break;
#endif
default:
r = -EINVAL;
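For context, a minimal userspace sketch of probing and enabling the new capability; the fd plumbing is illustrative, the ioctls are the standard KVM ones:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Illustrative only: vm_fd is an open KVM VM file descriptor. */
	static int enable_nested_hv(int vm_fd)
	{
		struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_NESTED_HV };

		if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_NESTED_HV) <= 0)
			return -1;	/* not supported on this host */
		/* Ends up in kvm->arch.kvm_ops->enable_nested(), per the hunk above. */
		return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
	}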
diff --git a/arch/powerpc/kvm/tm.S b/arch/powerpc/kvm/tm.S
index 90e330f21356..0531a1492fdf 100644
--- a/arch/powerpc/kvm/tm.S
+++ b/arch/powerpc/kvm/tm.S
@@ -28,17 +28,25 @@
* Save transactional state and TM-related registers.
* Called with:
* - r3 pointing to the vcpu struct
- * - r4 points to the MSR with current TS bits:
+ * - r4 containing the MSR with current TS bits:
* (For HV KVM, it is VCPU_MSR ; For PR KVM, it is host MSR).
- * This can modify all checkpointed registers, but
- * restores r1, r2 before exit.
+ * - r5 containing a flag indicating that non-volatile registers
+ * must be preserved.
+ * If r5 == 0, this can modify all checkpointed registers, but
+ * restores r1, r2 before exit. If r5 != 0, this restores the
+ * MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_save_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+
+ mr r9, r3
+ cmpdi cr7, r5, 0
/* Turn on TM. */
mfmsr r8
+ mr r10, r8
li r0, 1
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
ori r8, r8, MSR_FP
@@ -51,6 +59,27 @@ _GLOBAL(__kvmppc_save_tm)
std r1, HSTATE_SCRATCH2(r13)
std r3, HSTATE_SCRATCH1(r13)
+ /* Save CR on the stack - even if r5 == 0 we need to get cr7 back. */
+ mfcr r6
+ SAVE_GPR(6, r1)
+
+ /* Save DSCR so we can restore it to avoid running with user value */
+ mfspr r7, SPRN_DSCR
+ SAVE_GPR(7, r1)
+
+ /*
+ * We are going to do treclaim., which will modify all checkpointed
+ * registers. Save the non-volatile registers on the stack if
+ * preservation of non-volatile state has been requested.
+ */
+ beq cr7, 3f
+ SAVE_NVGPRS(r1)
+
+ /* MSR[TS] will be 0 (non-transactional) once we do treclaim. */
+ li r0, 0
+ rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ SAVE_GPR(10, r1) /* final MSR value */
+3:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
/* Emulation of the treclaim instruction needs TEXASR before treclaim */
@@ -74,22 +103,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
std r9, PACATMSCRATCH(r13)
ld r9, HSTATE_SCRATCH1(r13)
- /* Get a few more GPRs free. */
- std r29, VCPU_GPRS_TM(29)(r9)
- std r30, VCPU_GPRS_TM(30)(r9)
- std r31, VCPU_GPRS_TM(31)(r9)
-
- /* Save away PPR and DSCR soon so don't run with user values. */
- mfspr r31, SPRN_PPR
+ /* Save away PPR soon so we don't run with user value. */
+ std r0, VCPU_GPRS_TM(0)(r9)
+ mfspr r0, SPRN_PPR
HMT_MEDIUM
- mfspr r30, SPRN_DSCR
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-#endif
- /* Save all but r9, r13 & r29-r31 */
- reg = 0
+ /* Reload stack pointer. */
+ std r1, VCPU_GPRS_TM(1)(r9)
+ ld r1, HSTATE_SCRATCH2(r13)
+
+ /* Set MSR RI now we have r1 and r13 back. */
+ std r2, VCPU_GPRS_TM(2)(r9)
+ li r2, MSR_RI
+ mtmsrd r2, 1
+
+ /* Reload TOC pointer. */
+ ld r2, PACATOC(r13)
+
+ /* Save all but r0-r2, r9 & r13 */
+ reg = 3
.rept 29
.if (reg != 9) && (reg != 13)
std reg, VCPU_GPRS_TM(reg)(r9)
@@ -103,33 +135,29 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
ld r4, PACATMSCRATCH(r13)
std r4, VCPU_GPRS_TM(9)(r9)
- /* Reload stack pointer and TOC. */
- ld r1, HSTATE_SCRATCH2(r13)
- ld r2, PACATOC(r13)
-
- /* Set MSR RI now we have r1 and r13 back. */
- li r5, MSR_RI
- mtmsrd r5, 1
+ /* Restore host DSCR and CR values, after saving guest values */
+ mfcr r6
+ mfspr r7, SPRN_DSCR
+ stw r6, VCPU_CR_TM(r9)
+ std r7, VCPU_DSCR_TM(r9)
+ REST_GPR(6, r1)
+ REST_GPR(7, r1)
+ mtcr r6
+ mtspr SPRN_DSCR, r7
- /* Save away checkpinted SPRs. */
- std r31, VCPU_PPR_TM(r9)
- std r30, VCPU_DSCR_TM(r9)
+ /* Save away checkpointed SPRs. */
+ std r0, VCPU_PPR_TM(r9)
mflr r5
- mfcr r6
mfctr r7
mfspr r8, SPRN_AMR
mfspr r10, SPRN_TAR
mfxer r11
std r5, VCPU_LR_TM(r9)
- stw r6, VCPU_CR_TM(r9)
std r7, VCPU_CTR_TM(r9)
std r8, VCPU_AMR_TM(r9)
std r10, VCPU_TAR_TM(r9)
std r11, VCPU_XER_TM(r9)
- /* Restore r12 as trap number. */
- lwz r12, VCPU_TRAP(r9)
-
/* Save FP/VSX. */
addi r3, r9, VCPU_FPRS_TM
bl store_fp_state
@@ -137,6 +165,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
bl store_vr_state
mfspr r6, SPRN_VRSAVE
stw r6, VCPU_VRSAVE_TM(r9)
+
+ /* Restore non-volatile registers if requested to */
+ beq cr7, 1f
+ REST_NVGPRS(r1)
+ REST_GPR(10, r1)
1:
/*
* We need to save these SPRs after the treclaim so that the software
@@ -146,12 +179,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
*/
mfspr r7, SPRN_TEXASR
std r7, VCPU_TEXASR(r9)
-11:
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
+ /* Restore MSR state if requested */
+ beq cr7, 2f
+ mtmsrd r10, 0
+2:
+ addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
blr
@@ -161,49 +198,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
* be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_save_tm_pr)
- mflr r5
- std r5, PPC_LR_STKOFF(r1)
- stdu r1, -SWITCH_FRAME_SIZE(r1)
- SAVE_NVGPRS(r1)
-
- /* save MSR since TM/math bits might be impacted
- * by __kvmppc_save_tm().
- */
- mfmsr r5
- SAVE_GPR(5, r1)
-
- /* also save DSCR/CR/TAR so that it can be recovered later */
- mfspr r6, SPRN_DSCR
- SAVE_GPR(6, r1)
-
- mfcr r7
- stw r7, _CCR(r1)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -PPC_MIN_STKFRM(r1)
mfspr r8, SPRN_TAR
- SAVE_GPR(8, r1)
+ std r8, PPC_MIN_STKFRM-8(r1)
+ li r5, 1 /* preserve non-volatile registers */
bl __kvmppc_save_tm
- REST_GPR(8, r1)
+ ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8
- ld r7, _CCR(r1)
- mtcr r7
-
- REST_GPR(6, r1)
- mtspr SPRN_DSCR, r6
-
- /* need preserve current MSR's MSR_TS bits */
- REST_GPR(5, r1)
- mfmsr r6
- rldicl r6, r6, 64 - MSR_TS_S_LG, 62
- rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtmsrd r5
-
- REST_NVGPRS(r1)
- addi r1, r1, SWITCH_FRAME_SIZE
- ld r5, PPC_LR_STKOFF(r1)
- mtlr r5
+ addi r1, r1, PPC_MIN_STKFRM
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
blr
EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
@@ -215,15 +225,21 @@ EXPORT_SYMBOL_GPL(_kvmppc_save_tm_pr);
* - r4 is the guest MSR with desired TS bits:
* For HV KVM, it is VCPU_MSR
* For PR KVM, it is provided by caller
- * This potentially modifies all checkpointed registers.
- * It restores r1, r2 from the PACA.
+ * - r5 containing a flag indicating that non-volatile registers
+ * must be preserved.
+ * If r5 == 0, this potentially modifies all checkpointed registers, but
+ * restores r1, r2 from the PACA before exit.
+ * If r5 != 0, this restores the MSR TM/FP/VEC/VSX bits to their state on entry.
*/
_GLOBAL(__kvmppc_restore_tm)
mflr r0
std r0, PPC_LR_STKOFF(r1)
+ cmpdi cr7, r5, 0
+
/* Turn on TM/FP/VSX/VMX so we can restore them. */
mfmsr r5
+ mr r10, r5
li r6, MSR_TM >> 32
sldi r6, r6, 32
or r5, r5, r6
@@ -244,8 +260,7 @@ _GLOBAL(__kvmppc_restore_tm)
mr r5, r4
rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
- beqlr /* TM not active in guest */
- std r1, HSTATE_SCRATCH2(r13)
+ beq 9f /* TM not active in guest */
/* Make sure the failure summary is set, otherwise we'll program check
* when we trechkpt. It's possible that this might have been not set
@@ -256,6 +271,26 @@ _GLOBAL(__kvmppc_restore_tm)
mtspr SPRN_TEXASR, r7
/*
+ * Make a stack frame and save non-volatile registers if requested.
+ */
+ stdu r1, -SWITCH_FRAME_SIZE(r1)
+ std r1, HSTATE_SCRATCH2(r13)
+
+ mfcr r6
+ mfspr r7, SPRN_DSCR
+ SAVE_GPR(2, r1)
+ SAVE_GPR(6, r1)
+ SAVE_GPR(7, r1)
+
+ beq cr7, 4f
+ SAVE_NVGPRS(r1)
+
+ /* MSR[TS] will be 1 (suspended) once we do trechkpt */
+ li r0, 1
+ rldimi r10, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+ SAVE_GPR(10, r1) /* final MSR value */
+4:
+ /*
* We need to load up the checkpointed state for the guest.
* We need to do this early as it will blow away any GPRs, VSRs and
* some SPRs.
@@ -291,8 +326,6 @@ _GLOBAL(__kvmppc_restore_tm)
ld r29, VCPU_DSCR_TM(r3)
ld r30, VCPU_PPR_TM(r3)
- std r2, PACATMSCRATCH(r13) /* Save TOC */
-
/* Clear the MSR RI since r1, r13 are all going to be foobar. */
li r5, 0
mtmsrd r5, 1
@@ -318,18 +351,31 @@ _GLOBAL(__kvmppc_restore_tm)
/* Now let's get back the state we need. */
HMT_MEDIUM
GET_PACA(r13)
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
- ld r29, HSTATE_DSCR(r13)
- mtspr SPRN_DSCR, r29
-#endif
ld r1, HSTATE_SCRATCH2(r13)
- ld r2, PACATMSCRATCH(r13)
+ REST_GPR(7, r1)
+ mtspr SPRN_DSCR, r7
/* Set the MSR RI since we have our registers back. */
li r5, MSR_RI
mtmsrd r5, 1
+
+ /* Restore TOC pointer and CR */
+ REST_GPR(2, r1)
+ REST_GPR(6, r1)
+ mtcr r6
+
+ /* Restore non-volatile registers if requested to. */
+ beq cr7, 5f
+ REST_GPR(10, r1)
+ REST_NVGPRS(r1)
+
+5: addi r1, r1, SWITCH_FRAME_SIZE
ld r0, PPC_LR_STKOFF(r1)
mtlr r0
+
+9: /* Restore MSR bits if requested */
+ beqlr cr7
+ mtmsrd r10, 0
blr
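The rldimi sequences that build the "final MSR value" (r0 = 0 on the save path, r0 = 1 on this restore path) amount to the following C sketch, assuming the usual MSR_TS_* definitions:

	static unsigned long msr_after_treclaim(unsigned long msr)
	{
		return msr & ~MSR_TS_MASK;		/* TS = 00, non-transactional */
	}

	static unsigned long msr_after_trechkpt(unsigned long msr)
	{
		return (msr & ~MSR_TS_MASK) | MSR_TS_S;	/* TS = 01, suspended */
	}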
/*
@@ -337,47 +383,23 @@ _GLOBAL(__kvmppc_restore_tm)
* can be invoked from C function by PR KVM only.
*/
_GLOBAL(_kvmppc_restore_tm_pr)
- mflr r5
- std r5, PPC_LR_STKOFF(r1)
- stdu r1, -SWITCH_FRAME_SIZE(r1)
- SAVE_NVGPRS(r1)
-
- /* save MSR to avoid TM/math bits change */
- mfmsr r5
- SAVE_GPR(5, r1)
-
- /* also save DSCR/CR/TAR so that it can be recovered later */
- mfspr r6, SPRN_DSCR
- SAVE_GPR(6, r1)
-
- mfcr r7
- stw r7, _CCR(r1)
+ mflr r0
+ std r0, PPC_LR_STKOFF(r1)
+ stdu r1, -PPC_MIN_STKFRM(r1)
+ /* save TAR so that it can be recovered later */
mfspr r8, SPRN_TAR
- SAVE_GPR(8, r1)
+ std r8, PPC_MIN_STKFRM-8(r1)
+ li r5, 1
bl __kvmppc_restore_tm
- REST_GPR(8, r1)
+ ld r8, PPC_MIN_STKFRM-8(r1)
mtspr SPRN_TAR, r8
- ld r7, _CCR(r1)
- mtcr r7
-
- REST_GPR(6, r1)
- mtspr SPRN_DSCR, r6
-
- /* need preserve current MSR's MSR_TS bits */
- REST_GPR(5, r1)
- mfmsr r6
- rldicl r6, r6, 64 - MSR_TS_S_LG, 62
- rldimi r5, r6, MSR_TS_S_LG, 63 - MSR_TS_T_LG
- mtmsrd r5
-
- REST_NVGPRS(r1)
- addi r1, r1, SWITCH_FRAME_SIZE
- ld r5, PPC_LR_STKOFF(r1)
- mtlr r5
+ addi r1, r1, PPC_MIN_STKFRM
+ ld r0, PPC_LR_STKOFF(r1)
+ mtlr r0
blr
EXPORT_SYMBOL_GPL(_kvmppc_restore_tm_pr);
diff --git a/arch/powerpc/kvm/trace_book3s.h b/arch/powerpc/kvm/trace_book3s.h
index f3b23759e017..372a82fa2de3 100644
--- a/arch/powerpc/kvm/trace_book3s.h
+++ b/arch/powerpc/kvm/trace_book3s.h
@@ -14,7 +14,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 670286808928..3bf9fc6fd36c 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -3,8 +3,6 @@
# Makefile for ppc-specific library files..
#
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
@@ -14,6 +12,8 @@ obj-y += string.o alloc.o code-patching.o feature-fixups.o
obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o strlen_32.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+
# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index 5ffee298745f..89502cbccb1b 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -98,8 +98,7 @@ static int map_patch_area(void *addr, unsigned long text_poke_addr)
else
pfn = __pa_symbol(addr) >> PAGE_SHIFT;
- err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT),
- pgprot_val(PAGE_KERNEL));
+ err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
if (err)
diff --git a/arch/powerpc/lib/error-inject.c b/arch/powerpc/lib/error-inject.c
new file mode 100644
index 000000000000..407b992fb02f
--- /dev/null
+++ b/arch/powerpc/lib/error-inject.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/error-injection.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+
+void override_function_with_return(struct pt_regs *regs)
+{
+ /*
+ * Emulate 'blr'. 'regs' represents the state on entry of a predefined
+ * function in the kernel/module, captured on a kprobe. We don't need
+ * to worry about 32-bit userspace on a 64-bit kernel.
+ */
+ regs->nip = regs->link;
+}
+NOKPROBE_SYMBOL(override_function_with_return);
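This hook is what the generic error-injection framework lands on. A hedged sketch of how a kernel function opts in (the function here is made up; the macro is the standard one from linux/error-injection.h):

	#include <linux/error-injection.h>

	/* Hypothetical function that fail_function/bpf may force to return
	 * an errno early, via override_function_with_return() above. */
	static int example_prepare(void)
	{
		return 0;
	}
	ALLOW_ERROR_INJECTION(example_prepare, ERRNO);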
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S
index ec531de99996..3c3be02f33b7 100644
--- a/arch/powerpc/lib/mem_64.S
+++ b/arch/powerpc/lib/mem_64.S
@@ -40,7 +40,7 @@ _GLOBAL(memset)
.Lms: PPC_MTOCRF(1,r0)
mr r6,r3
blt cr1,8f
- beq+ 3f /* if already 8-byte aligned */
+ beq 3f /* if already 8-byte aligned */
subf r5,r0,r5
bf 31,1f
stb r4,0(r6)
@@ -85,7 +85,7 @@ _GLOBAL(memset)
addi r6,r6,8
8: cmpwi r5,0
PPC_MTOCRF(1,r5)
- beqlr+
+ beqlr
bf 29,9f
stw r4,0(r6)
addi r6,r6,4
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index cf77d755246d..36484a2ef915 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -67,7 +67,7 @@ void __init MMU_init_hw(void)
/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
#ifdef CONFIG_PIN_TLB_DATA
unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
- unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_PRIVILEGED | _PAGE_DIRTY;
+ unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
#ifdef CONFIG_PIN_TLB_IMMR
int i = 29;
#else
@@ -91,11 +91,10 @@ static void __init mmu_mapin_immr(void)
{
unsigned long p = PHYS_IMMR_BASE;
unsigned long v = VIRT_IMMR_BASE;
- unsigned long f = pgprot_val(PAGE_KERNEL_NCG);
int offset;
for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
- map_kernel_page(v + offset, p + offset, f);
+ map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
/* Address of instructions to patch */
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index cdf6a9960046..ca96e7be4d0e 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -3,10 +3,10 @@
# Makefile for the linux ppc-specific parts of the memory manager.
#
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
+CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
+
obj-y := fault.o mem.o pgtable.o mmap.o \
init_$(BITS).o pgtable_$(BITS).o \
init-common.o mmu_context.o drmem.o
@@ -15,7 +15,7 @@ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
obj-$(CONFIG_PPC_BOOK3E) += tlb_low_$(BITS)e.o
hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC_BOOK3E_64) += pgtable-book3e.o
-obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb_low.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
+obj-$(CONFIG_PPC_BOOK3S_64) += pgtable-hash64.o hash_utils_64.o slb.o $(hash64-y) mmu_context_book3s64.o pgtable-book3s64.o
obj-$(CONFIG_PPC_RADIX_MMU) += pgtable-radix.o tlb-radix.o
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o mmu_context_hash32.o
obj-$(CONFIG_PPC_STD_MMU) += tlb_hash$(BITS).o
@@ -43,5 +43,12 @@ obj-$(CONFIG_HIGHMEM) += highmem.o
obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += mmu_context_iommu.o
obj-$(CONFIG_PPC_PTDUMP) += dump_linuxpagetables.o
+ifdef CONFIG_PPC_PTDUMP
+obj-$(CONFIG_4xx) += dump_linuxpagetables-generic.o
+obj-$(CONFIG_PPC_8xx) += dump_linuxpagetables-8xx.o
+obj-$(CONFIG_PPC_BOOK3E_MMU) += dump_linuxpagetables-generic.o
+obj-$(CONFIG_PPC_BOOK3S_32) += dump_linuxpagetables-generic.o
+obj-$(CONFIG_PPC_BOOK3S_64) += dump_linuxpagetables-book3s64.o
+endif
obj-$(CONFIG_PPC_HTDUMP) += dump_hashpagetable.o
obj-$(CONFIG_PPC_MEM_KEYS) += pkeys.o
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 382528475433..b6e7b5952ab5 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
do {
SetPageReserved(page);
map_kernel_page(vaddr, page_to_phys(page),
- pgprot_val(pgprot_noncached(PAGE_KERNEL)));
+ pgprot_noncached(PAGE_KERNEL));
page++;
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/dump_linuxpagetables-8xx.c b/arch/powerpc/mm/dump_linuxpagetables-8xx.c
new file mode 100644
index 000000000000..ab9e3f24db2f
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables-8xx.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * From split of dump_linuxpagetables.c
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ */
+#include <linux/kernel.h>
+#include <asm/pgtable.h>
+
+#include "dump_linuxpagetables.h"
+
+static const struct flag_info flag_array[] = {
+ {
+ .mask = _PAGE_SH,
+ .val = 0,
+ .set = "user",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_RO | _PAGE_NA,
+ .val = 0,
+ .set = "rw",
+ }, {
+ .mask = _PAGE_RO | _PAGE_NA,
+ .val = _PAGE_RO,
+ .set = "r ",
+ }, {
+ .mask = _PAGE_RO | _PAGE_NA,
+ .val = _PAGE_NA,
+ .set = " ",
+ }, {
+ .mask = _PAGE_EXEC,
+ .val = _PAGE_EXEC,
+ .set = " X ",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PRESENT,
+ .val = _PAGE_PRESENT,
+ .set = "present",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_GUARDED,
+ .val = _PAGE_GUARDED,
+ .set = "guarded",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_DIRTY,
+ .val = _PAGE_DIRTY,
+ .set = "dirty",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_ACCESSED,
+ .val = _PAGE_ACCESSED,
+ .set = "accessed",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_NO_CACHE,
+ .val = _PAGE_NO_CACHE,
+ .set = "no cache",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_SPECIAL,
+ .val = _PAGE_SPECIAL,
+ .set = "special",
+ }
+};
+
+struct pgtable_level pg_level[5] = {
+ {
+ }, { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pud */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pmd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pte */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ },
+};
diff --git a/arch/powerpc/mm/dump_linuxpagetables-book3s64.c b/arch/powerpc/mm/dump_linuxpagetables-book3s64.c
new file mode 100644
index 000000000000..ed6fcf78256e
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables-book3s64.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * From split of dump_linuxpagetables.c
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ */
+#include <linux/kernel.h>
+#include <asm/pgtable.h>
+
+#include "dump_linuxpagetables.h"
+
+static const struct flag_info flag_array[] = {
+ {
+ .mask = _PAGE_PRIVILEGED,
+ .val = 0,
+ .set = "user",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_READ,
+ .val = _PAGE_READ,
+ .set = "r",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_WRITE,
+ .val = _PAGE_WRITE,
+ .set = "w",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_EXEC,
+ .val = _PAGE_EXEC,
+ .set = " X ",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PTE,
+ .val = _PAGE_PTE,
+ .set = "pte",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PRESENT,
+ .val = _PAGE_PRESENT,
+ .set = "valid",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_PRESENT | _PAGE_INVALID,
+ .val = 0,
+ .set = " ",
+ .clear = "present",
+ }, {
+ .mask = H_PAGE_HASHPTE,
+ .val = H_PAGE_HASHPTE,
+ .set = "hpte",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_DIRTY,
+ .val = _PAGE_DIRTY,
+ .set = "dirty",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_ACCESSED,
+ .val = _PAGE_ACCESSED,
+ .set = "accessed",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_NON_IDEMPOTENT,
+ .val = _PAGE_NON_IDEMPOTENT,
+ .set = "non-idempotent",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_TOLERANT,
+ .val = _PAGE_TOLERANT,
+ .set = "tolerant",
+ .clear = " ",
+ }, {
+ .mask = H_PAGE_BUSY,
+ .val = H_PAGE_BUSY,
+ .set = "busy",
+ }, {
+#ifdef CONFIG_PPC_64K_PAGES
+ .mask = H_PAGE_COMBO,
+ .val = H_PAGE_COMBO,
+ .set = "combo",
+ }, {
+ .mask = H_PAGE_4K_PFN,
+ .val = H_PAGE_4K_PFN,
+ .set = "4K_pfn",
+ }, {
+#else /* CONFIG_PPC_64K_PAGES */
+ .mask = H_PAGE_F_GIX,
+ .val = H_PAGE_F_GIX,
+ .set = "f_gix",
+ .is_val = true,
+ .shift = H_PAGE_F_GIX_SHIFT,
+ }, {
+ .mask = H_PAGE_F_SECOND,
+ .val = H_PAGE_F_SECOND,
+ .set = "f_second",
+ }, {
+#endif /* CONFIG_PPC_64K_PAGES */
+ .mask = _PAGE_SPECIAL,
+ .val = _PAGE_SPECIAL,
+ .set = "special",
+ }
+};
+
+struct pgtable_level pg_level[5] = {
+ {
+ }, { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pud */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pmd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pte */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ },
+};
diff --git a/arch/powerpc/mm/dump_linuxpagetables-generic.c b/arch/powerpc/mm/dump_linuxpagetables-generic.c
new file mode 100644
index 000000000000..1e3829ec1348
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables-generic.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * From split of dump_linuxpagetables.c
+ * Copyright 2016, Rashmica Gupta, IBM Corp.
+ *
+ */
+#include <linux/kernel.h>
+#include <asm/pgtable.h>
+
+#include "dump_linuxpagetables.h"
+
+static const struct flag_info flag_array[] = {
+ {
+ .mask = _PAGE_USER,
+ .val = _PAGE_USER,
+ .set = "user",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_RW,
+ .val = _PAGE_RW,
+ .set = "rw",
+ .clear = "r ",
+ }, {
+#ifndef CONFIG_PPC_BOOK3S_32
+ .mask = _PAGE_EXEC,
+ .val = _PAGE_EXEC,
+ .set = " X ",
+ .clear = " ",
+ }, {
+#endif
+ .mask = _PAGE_PRESENT,
+ .val = _PAGE_PRESENT,
+ .set = "present",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_GUARDED,
+ .val = _PAGE_GUARDED,
+ .set = "guarded",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_DIRTY,
+ .val = _PAGE_DIRTY,
+ .set = "dirty",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_ACCESSED,
+ .val = _PAGE_ACCESSED,
+ .set = "accessed",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_WRITETHRU,
+ .val = _PAGE_WRITETHRU,
+ .set = "write through",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_NO_CACHE,
+ .val = _PAGE_NO_CACHE,
+ .set = "no cache",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_SPECIAL,
+ .val = _PAGE_SPECIAL,
+ .set = "special",
+ }
+};
+
+struct pgtable_level pg_level[5] = {
+ {
+ }, { /* pgd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pud */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pmd */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ }, { /* pte */
+ .flag = flag_array,
+ .num = ARRAY_SIZE(flag_array),
+ },
+};
diff --git a/arch/powerpc/mm/dump_linuxpagetables.c b/arch/powerpc/mm/dump_linuxpagetables.c
index 876e2a3c79f2..2b74f8adf4d0 100644
--- a/arch/powerpc/mm/dump_linuxpagetables.c
+++ b/arch/powerpc/mm/dump_linuxpagetables.c
@@ -27,6 +27,8 @@
#include <asm/page.h>
#include <asm/pgalloc.h>
+#include "dump_linuxpagetables.h"
+
#ifdef CONFIG_PPC32
#define KERN_VIRT_START 0
#endif
@@ -101,159 +103,6 @@ static struct addr_marker address_markers[] = {
{ -1, NULL },
};
-struct flag_info {
- u64 mask;
- u64 val;
- const char *set;
- const char *clear;
- bool is_val;
- int shift;
-};
-
-static const struct flag_info flag_array[] = {
- {
- .mask = _PAGE_USER | _PAGE_PRIVILEGED,
- .val = _PAGE_USER,
- .set = "user",
- .clear = " ",
- }, {
- .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA,
- .val = _PAGE_RW,
- .set = "rw",
- }, {
- .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA,
- .val = _PAGE_RO,
- .set = "ro",
- }, {
-#if _PAGE_NA != 0
- .mask = _PAGE_RW | _PAGE_RO | _PAGE_NA,
- .val = _PAGE_RO,
- .set = "na",
- }, {
-#endif
- .mask = _PAGE_EXEC,
- .val = _PAGE_EXEC,
- .set = " X ",
- .clear = " ",
- }, {
- .mask = _PAGE_PTE,
- .val = _PAGE_PTE,
- .set = "pte",
- .clear = " ",
- }, {
- .mask = _PAGE_PRESENT,
- .val = _PAGE_PRESENT,
- .set = "present",
- .clear = " ",
- }, {
-#ifdef CONFIG_PPC_BOOK3S_64
- .mask = H_PAGE_HASHPTE,
- .val = H_PAGE_HASHPTE,
-#else
- .mask = _PAGE_HASHPTE,
- .val = _PAGE_HASHPTE,
-#endif
- .set = "hpte",
- .clear = " ",
- }, {
-#ifndef CONFIG_PPC_BOOK3S_64
- .mask = _PAGE_GUARDED,
- .val = _PAGE_GUARDED,
- .set = "guarded",
- .clear = " ",
- }, {
-#endif
- .mask = _PAGE_DIRTY,
- .val = _PAGE_DIRTY,
- .set = "dirty",
- .clear = " ",
- }, {
- .mask = _PAGE_ACCESSED,
- .val = _PAGE_ACCESSED,
- .set = "accessed",
- .clear = " ",
- }, {
-#ifndef CONFIG_PPC_BOOK3S_64
- .mask = _PAGE_WRITETHRU,
- .val = _PAGE_WRITETHRU,
- .set = "write through",
- .clear = " ",
- }, {
-#endif
-#ifndef CONFIG_PPC_BOOK3S_64
- .mask = _PAGE_NO_CACHE,
- .val = _PAGE_NO_CACHE,
- .set = "no cache",
- .clear = " ",
- }, {
-#else
- .mask = _PAGE_NON_IDEMPOTENT,
- .val = _PAGE_NON_IDEMPOTENT,
- .set = "non-idempotent",
- .clear = " ",
- }, {
- .mask = _PAGE_TOLERANT,
- .val = _PAGE_TOLERANT,
- .set = "tolerant",
- .clear = " ",
- }, {
-#endif
-#ifdef CONFIG_PPC_BOOK3S_64
- .mask = H_PAGE_BUSY,
- .val = H_PAGE_BUSY,
- .set = "busy",
- }, {
-#ifdef CONFIG_PPC_64K_PAGES
- .mask = H_PAGE_COMBO,
- .val = H_PAGE_COMBO,
- .set = "combo",
- }, {
- .mask = H_PAGE_4K_PFN,
- .val = H_PAGE_4K_PFN,
- .set = "4K_pfn",
- }, {
-#else /* CONFIG_PPC_64K_PAGES */
- .mask = H_PAGE_F_GIX,
- .val = H_PAGE_F_GIX,
- .set = "f_gix",
- .is_val = true,
- .shift = H_PAGE_F_GIX_SHIFT,
- }, {
- .mask = H_PAGE_F_SECOND,
- .val = H_PAGE_F_SECOND,
- .set = "f_second",
- }, {
-#endif /* CONFIG_PPC_64K_PAGES */
-#endif
- .mask = _PAGE_SPECIAL,
- .val = _PAGE_SPECIAL,
- .set = "special",
- }
-};
-
-struct pgtable_level {
- const struct flag_info *flag;
- size_t num;
- u64 mask;
-};
-
-static struct pgtable_level pg_level[] = {
- {
- }, { /* pgd */
- .flag = flag_array,
- .num = ARRAY_SIZE(flag_array),
- }, { /* pud */
- .flag = flag_array,
- .num = ARRAY_SIZE(flag_array),
- }, { /* pmd */
- .flag = flag_array,
- .num = ARRAY_SIZE(flag_array),
- }, { /* pte */
- .flag = flag_array,
- .num = ARRAY_SIZE(flag_array),
- },
-};
-
static void dump_flag_info(struct pg_state *st, const struct flag_info
*flag, u64 pte, int num)
{
@@ -418,12 +267,13 @@ static void walk_pagetables(struct pg_state *st)
unsigned int i;
unsigned long addr;
+ addr = st->start_address;
+
/*
* Traverse the linux pagetable structure and dump pages that are in
* the hash pagetable.
*/
- for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
- addr = KERN_VIRT_START + i * PGDIR_SIZE;
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++, addr += PGDIR_SIZE) {
if (!pgd_none(*pgd) && !pgd_huge(*pgd))
/* pgd exists */
walk_pud(st, pgd, addr);
@@ -472,9 +322,14 @@ static int ptdump_show(struct seq_file *m, void *v)
{
struct pg_state st = {
.seq = m,
- .start_address = KERN_VIRT_START,
.marker = address_markers,
};
+
+ if (radix_enabled())
+ st.start_address = PAGE_OFFSET;
+ else
+ st.start_address = KERN_VIRT_START;
+
/* Traverse kernel page tables */
walk_pagetables(&st);
note_page(&st, 0, 0, 0);
diff --git a/arch/powerpc/mm/dump_linuxpagetables.h b/arch/powerpc/mm/dump_linuxpagetables.h
new file mode 100644
index 000000000000..5d513636de73
--- /dev/null
+++ b/arch/powerpc/mm/dump_linuxpagetables.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/types.h>
+
+struct flag_info {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+ bool is_val;
+ int shift;
+};
+
+struct pgtable_level {
+ const struct flag_info *flag;
+ size_t num;
+ u64 mask;
+};
+
+extern struct pgtable_level pg_level[5];
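For reference, the core dumper (unchanged by this split) consumes these entries roughly as follows; this is a sketch of the matching logic, not the verbatim dump_flag_info():

	#include <linux/seq_file.h>

	static void show_flag(struct seq_file *m, u64 pte, const struct flag_info *f)
	{
		if (f->is_val) {		/* print a shifted field value */
			seq_printf(m, " %s:%llx", f->set,
				   (unsigned long long)((pte & f->mask) >> f->shift));
			return;
		}
		if ((pte & f->mask) == f->val)	/* boolean flag is set */
			seq_printf(m, " %s", f->set);
		else if (f->clear)		/* otherwise its placeholder */
			seq_printf(m, " %s", f->clear);
	}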
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 729f02df8290..aaa28fd918fe 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -115,6 +115,8 @@ static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
tlbiel_hash_set_isa300(0, is, 0, 2, 1);
asm volatile("ptesync": : :"memory");
+
+ asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
void hash__tlbiel_all(unsigned int action)
@@ -140,8 +142,6 @@ void hash__tlbiel_all(unsigned int action)
tlbiel_all_isa206(POWER7_TLB_SETS, is);
else
WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
-
- asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f23a89d8e4ce..0cc7fbc3bd1c 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1001,9 +1001,9 @@ void __init hash__early_init_mmu(void)
* 4k use hugepd format, so for hash set then to
* zero
*/
- __pmd_val_bits = 0;
- __pud_val_bits = 0;
- __pgd_val_bits = 0;
+ __pmd_val_bits = HASH_PMD_VAL_BITS;
+ __pud_val_bits = HASH_PUD_VAL_BITS;
+ __pgd_val_bits = HASH_PGD_VAL_BITS;
__kernel_virt_start = H_KERN_VIRT_START;
__kernel_virt_size = H_KERN_VIRT_SIZE;
@@ -1125,7 +1125,7 @@ void demote_segment_4k(struct mm_struct *mm, unsigned long addr)
if ((get_paca_psize(addr) != MMU_PAGE_4K) && (current->mm == mm)) {
copy_mm_to_paca(mm);
- slb_flush_and_rebolt();
+ slb_flush_and_restore_bolted();
}
}
#endif /* CONFIG_PPC_64K_PAGES */
@@ -1197,7 +1197,7 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
if (user_region) {
if (psize != get_paca_psize(ea)) {
copy_mm_to_paca(mm);
- slb_flush_and_rebolt();
+ slb_flush_and_restore_bolted();
}
} else if (get_paca()->vmalloc_sllp !=
mmu_psize_defs[mmu_vmalloc_psize].sllp) {
@@ -1482,7 +1482,7 @@ static bool should_hash_preload(struct mm_struct *mm, unsigned long ea)
#endif
void hash_preload(struct mm_struct *mm, unsigned long ea,
- unsigned long access, unsigned long trap)
+ bool is_exec, unsigned long trap)
{
int hugepage_shift;
unsigned long vsid;
@@ -1490,6 +1490,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
pte_t *ptep;
unsigned long flags;
int rc, ssize, update_flags = 0;
+ unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
BUG_ON(REGION_ID(ea) != USER_REGION_ID);
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 01f213d2bcb9..dfbc3b32f09b 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -51,6 +51,12 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
new_pmd |= _PAGE_DIRTY;
} while (!pmd_xchg(pmdp, __pmd(old_pmd), __pmd(new_pmd)));
+ /*
+ * Make sure this is a THP or devmap entry
+ */
+ if (!(old_pmd & (H_PAGE_THP_HUGE | _PAGE_DEVMAP)))
+ return 0;
+
rflags = htab_convert_pte_flags(new_pmd);
#if 0
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index b320f5097a06..2e6a8f9345d3 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -62,6 +62,10 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
new_pte |= _PAGE_DIRTY;
} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));
+ /* Make sure this is a hugetlb entry */
+ if (old_pte & (H_PAGE_THP_HUGE | _PAGE_DEVMAP))
+ return 0;
+
rflags = htab_convert_pte_flags(new_pte);
if (unlikely(mmu_psize == MMU_PAGE_16G))
offset = PTRS_PER_PUD;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index e87f9ef9115b..a7226ed9cae6 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -19,6 +19,7 @@
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
@@ -95,7 +96,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
break;
else {
#ifdef CONFIG_PPC_BOOK3S_64
- *hpdp = __hugepd(__pa(new) |
+ *hpdp = __hugepd(__pa(new) | HUGEPD_VAL_BITS |
(shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
*hpdp = __hugepd(__pa(new) | _PMD_USER |
@@ -112,6 +113,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
for (i = i - 1 ; i >= 0; i--, hpdp--)
*hpdp = __hugepd(0);
kmem_cache_free(cachep, new);
+ } else {
+ kmemleak_ignore(new);
}
spin_unlock(ptl);
return 0;
@@ -837,8 +840,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
ret_pte = (pte_t *) pmdp;
goto out;
}
-
- if (pmd_huge(pmd)) {
+ /*
+ * The pmd_large() check below will handle the swap pmd pte;
+ * we need to do both checks because they are config
+ * dependent.
+ */
+ if (pmd_huge(pmd) || pmd_large(pmd)) {
ret_pte = (pte_t *) pmdp;
goto out;
} else if (is_hugepd(__hugepd(pmd_val(pmd))))
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 04ccb274a620..dd949d6649a2 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -309,11 +309,11 @@ void __init paging_init(void)
unsigned long end = __fix_to_virt(FIX_HOLE);
for (; v < end; v += PAGE_SIZE)
- map_kernel_page(v, 0, 0); /* XXX gross */
+ map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif
#ifdef CONFIG_HIGHMEM
- map_kernel_page(PKMAP_BASE, 0, 0); /* XXX gross */
+ map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
pkmap_page_table = virt_to_kpte(PKMAP_BASE);
kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
@@ -509,7 +509,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
* We don't need to worry about _PAGE_PRESENT here because we are
* called with either mm->page_table_lock held or ptl lock held
*/
- unsigned long access, trap;
+ unsigned long trap;
+ bool is_exec;
if (radix_enabled()) {
prefetch((void *)address);
@@ -531,16 +532,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
switch (trap) {
case 0x300:
- access = 0UL;
+ is_exec = false;
break;
case 0x400:
- access = _PAGE_EXEC;
+ is_exec = true;
break;
default:
return;
}
- hash_preload(vma->vm_mm, address, access, trap);
+ hash_preload(vma->vm_mm, address, is_exec, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
&& defined(CONFIG_HUGETLB_PAGE)
diff --git a/arch/powerpc/mm/mmu_context_book3s64.c b/arch/powerpc/mm/mmu_context_book3s64.c
index dbd8f762140b..510f103d7813 100644
--- a/arch/powerpc/mm/mmu_context_book3s64.c
+++ b/arch/powerpc/mm/mmu_context_book3s64.c
@@ -53,6 +53,8 @@ int hash__alloc_context_id(void)
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+void slb_setup_new_exec(void);
+
static int hash__init_new_context(struct mm_struct *mm)
{
int index;
@@ -84,6 +86,13 @@ static int hash__init_new_context(struct mm_struct *mm)
return index;
}
+void hash__setup_new_exec(void)
+{
+ slice_setup_new_exec();
+
+ slb_setup_new_exec();
+}
+
static int radix__init_new_context(struct mm_struct *mm)
{
unsigned long rts_field;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index e5d779eed181..8574fbbc45e0 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -22,6 +22,7 @@
#include <asm/mmu.h>
#ifdef CONFIG_PPC_MMU_NOHASH
+#include <asm/trace.h>
/*
* On 40x and 8xx, we directly inline tlbia and tlbivax
@@ -30,10 +31,12 @@
static inline void _tlbil_all(void)
{
asm volatile ("sync; tlbia; isync" : : : "memory");
+ trace_tlbia(MMU_NO_CONTEXT);
}
static inline void _tlbil_pid(unsigned int pid)
{
asm volatile ("sync; tlbia; isync" : : : "memory");
+ trace_tlbia(pid);
}
#define _tlbil_pid_noind(pid) _tlbil_pid(pid)
@@ -55,6 +58,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
unsigned int tsize, unsigned int ind)
{
asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
+ trace_tlbie(0, 0, address, pid, 0, 0, 0);
}
#elif defined(CONFIG_PPC_BOOK3E)
extern void _tlbil_va(unsigned long address, unsigned int pid,
@@ -82,7 +86,7 @@ static inline void _tlbivax_bcast(unsigned long address, unsigned int pid,
#else /* CONFIG_PPC_MMU_NOHASH */
extern void hash_preload(struct mm_struct *mm, unsigned long ea,
- unsigned long access, unsigned long trap);
+ bool is_exec, unsigned long trap);
extern void _tlbie(unsigned long address);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 055b211b7126..693ae1c1acba 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1521,6 +1521,10 @@ int start_topology_update(void)
}
}
+ pr_info("Starting topology update%s%s\n",
+ (prrn_enabled ? " prrn_enabled" : ""),
+ (vphn_enabled ? " vphn_enabled" : ""));
+
return rc;
}
@@ -1542,6 +1546,8 @@ int stop_topology_update(void)
rc = del_timer_sync(&topology_timer);
}
+ pr_info("Stopping topology update\n");
+
return rc;
}
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index a2298930f990..e0ccf36714b2 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -42,7 +42,7 @@ int __meminit vmemmap_create_mapping(unsigned long start,
* thus must have the low bits clear
*/
for (i = 0; i < page_size; i += PAGE_SIZE)
- BUG_ON(map_kernel_page(start + i, phys, flags));
+ BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));
return 0;
}
@@ -70,7 +70,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
pud_t *pudp;
@@ -89,8 +89,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
ptep = pte_alloc_kernel(pmdp, ea);
if (!ptep)
return -ENOMEM;
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
- __pgprot(flags)));
} else {
pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
@@ -113,9 +111,8 @@ int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
pmd_populate_kernel(&init_mm, pmdp, ptep);
}
ptep = pte_offset_kernel(pmdp, ea);
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
- __pgprot(flags)));
}
+ set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
smp_wmb();
return 0;
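These conversions lean on pgprot_t being a distinct type rather than a bare unsigned long, so raw flag values can no longer be passed where a protection value is expected. A sketch of the wrapper as it looks with strict type checking enabled (the non-strict build degenerates to plain casts):

typedef struct { unsigned long pgprot; } pgprot_t;

#define pgprot_val(x)	((x).pgprot)		/* unwrap to raw PTE bits */
#define __pgprot(x)	((pgprot_t) { (x) })	/* wrap raw PTE bits */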
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 01d7c0f7c4f0..9f93c9f985c5 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -69,9 +69,14 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
#ifdef CONFIG_DEBUG_VM
- WARN_ON(pte_present(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
+ /*
+ * Make sure the hardware valid bit is not set. We don't do a
+ * TLB flush for this update.
+ */
+
+ WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
assert_spin_locked(pmd_lockptr(mm, pmdp));
- WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
+ WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
#endif
trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
@@ -106,7 +111,7 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
{
unsigned long old_pmd;
- old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+ old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);
flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
/*
 * This ensures that generic code that relies on IRQ disabling
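The pte_hw_valid()/_PAGE_INVALID changes assume book3s64 separates "present to Linux" from "valid to the MMU": pmdp_invalidate() moves the entry from _PAGE_PRESENT to _PAGE_INVALID, so pmd_present() keeps returning true while the hardware can no longer use the entry. A sketch of that split, with hypothetical helper names:

static inline bool example_pte_present(pte_t pte)
{
	/* present to Linux: either bit set */
	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_INVALID);
}

static inline bool example_pte_hw_valid(pte_t pte)
{
	/* valid to the MMU: only the real present bit */
	return pte_val(pte) & _PAGE_PRESENT;
}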
diff --git a/arch/powerpc/mm/pgtable-hash64.c b/arch/powerpc/mm/pgtable-hash64.c
index 692bfc9e372c..c08d49046a96 100644
--- a/arch/powerpc/mm/pgtable-hash64.c
+++ b/arch/powerpc/mm/pgtable-hash64.c
@@ -142,7 +142,7 @@ void hash__vmemmap_remove_mapping(unsigned long start,
* map_kernel_page adds an entry to the ioremap page table
* and adds an entry to the HPT, possibly bolting it
*/
-int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
+int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
{
pgd_t *pgdp;
pud_t *pudp;
@@ -161,8 +161,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
ptep = pte_alloc_kernel(pmdp, ea);
if (!ptep)
return -ENOMEM;
- set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
- __pgprot(flags)));
+ set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, prot));
} else {
/*
* If the mm subsystem is not fully up, we cannot create a
@@ -170,7 +169,7 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flag
* entry in the hardware page table.
*
*/
- if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
+ if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, pgprot_val(prot),
mmu_io_psize, mmu_kernel_ssize)) {
printk(KERN_ERR "Failed to do bolted mapping IO "
"memory at %016lx !\n", pa);
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index c879979faa73..931156069a81 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -241,9 +241,8 @@ void radix__mark_initmem_nx(void)
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-static inline void __meminit print_mapping(unsigned long start,
- unsigned long end,
- unsigned long size)
+static inline void __meminit
+print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
{
char buf[10];
@@ -252,7 +251,17 @@ static inline void __meminit print_mapping(unsigned long start,
string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
- pr_info("Mapped 0x%016lx-0x%016lx with %s pages\n", start, end, buf);
+ pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
+ exec ? " (exec)" : "");
+}
+
+static unsigned long next_boundary(unsigned long addr, unsigned long end)
+{
+#ifdef CONFIG_STRICT_KERNEL_RWX
+ if (addr < __pa_symbol(__init_begin))
+ return __pa_symbol(__init_begin);
+#endif
+ return end;
}
static int __meminit create_physical_mapping(unsigned long start,
@@ -260,13 +269,8 @@ static int __meminit create_physical_mapping(unsigned long start,
int nid)
{
unsigned long vaddr, addr, mapping_size = 0;
+ bool prev_exec, exec = false;
pgprot_t prot;
- unsigned long max_mapping_size;
-#ifdef CONFIG_STRICT_KERNEL_RWX
- int split_text_mapping = 1;
-#else
- int split_text_mapping = 0;
-#endif
int psize;
start = _ALIGN_UP(start, PAGE_SIZE);
@@ -274,14 +278,12 @@ static int __meminit create_physical_mapping(unsigned long start,
unsigned long gap, previous_size;
int rc;
- gap = end - addr;
+ gap = next_boundary(addr, end) - addr;
previous_size = mapping_size;
- max_mapping_size = PUD_SIZE;
+ prev_exec = exec;
-retry:
if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
- mmu_psize_defs[MMU_PAGE_1G].shift &&
- PUD_SIZE <= max_mapping_size) {
+ mmu_psize_defs[MMU_PAGE_1G].shift) {
mapping_size = PUD_SIZE;
psize = MMU_PAGE_1G;
} else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
@@ -293,32 +295,21 @@ retry:
psize = mmu_virtual_psize;
}
- if (split_text_mapping && (mapping_size == PUD_SIZE) &&
- (addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext)) {
- max_mapping_size = PMD_SIZE;
- goto retry;
- }
-
- if (split_text_mapping && (mapping_size == PMD_SIZE) &&
- (addr <= __pa_symbol(__init_begin)) &&
- (addr + mapping_size) >= __pa_symbol(_stext)) {
- mapping_size = PAGE_SIZE;
- psize = mmu_virtual_psize;
- }
-
- if (mapping_size != previous_size) {
- print_mapping(start, addr, previous_size);
- start = addr;
- }
-
vaddr = (unsigned long)__va(addr);
if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
- overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size))
+ overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
prot = PAGE_KERNEL_X;
- else
+ exec = true;
+ } else {
prot = PAGE_KERNEL;
+ exec = false;
+ }
+
+ if (mapping_size != previous_size || exec != prev_exec) {
+ print_mapping(start, addr, previous_size, prev_exec);
+ start = addr;
+ }
rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
if (rc)
@@ -327,7 +318,7 @@ retry:
update_page_count(psize, 1);
}
- print_mapping(start, addr, mapping_size);
+ print_mapping(start, addr, mapping_size, exec);
return 0;
}
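The rewritten loop drops the retry logic in favour of next_boundary(): with STRICT_KERNEL_RWX the gap is clamped at __init_begin, so page-size selection steps down around the kernel text on its own. A simplified sketch of the control flow (the real loop also consults mmu_psize_defs and updates the page counts):

for (addr = start; addr < end; addr += mapping_size) {
	unsigned long gap = next_boundary(addr, end) - addr;

	if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE)
		mapping_size = PUD_SIZE;	/* 1GB mapping */
	else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE)
		mapping_size = PMD_SIZE;	/* 2MB mapping */
	else
		mapping_size = PAGE_SIZE;	/* base page size */

	/* ... pick prot, then __map_kernel_page(...) ... */
}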
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index d71c7777669c..010e1c616cb2 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -44,20 +44,13 @@ static inline int is_exec_fault(void)
static inline int pte_looks_normal(pte_t pte)
{
-#if defined(CONFIG_PPC_BOOK3S_64)
- if ((pte_val(pte) & (_PAGE_PRESENT | _PAGE_SPECIAL)) == _PAGE_PRESENT) {
+ if (pte_present(pte) && !pte_special(pte)) {
if (pte_ci(pte))
return 0;
if (pte_user(pte))
return 1;
}
return 0;
-#else
- return (pte_val(pte) &
- (_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER |
- _PAGE_PRIVILEGED)) ==
- (_PAGE_PRESENT | _PAGE_USER);
-#endif
}
static struct page *maybe_pte_to_page(pte_t pte)
@@ -73,7 +66,7 @@ static struct page *maybe_pte_to_page(pte_t pte)
return page;
}
-#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0
+#ifdef CONFIG_PPC_BOOK3S
/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
@@ -106,7 +99,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
return pte;
}
-#else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */
+#else /* CONFIG_PPC_BOOK3S */
/* Embedded type MMU with HW exec support. This is a bit more complicated
* as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC so
@@ -117,7 +110,7 @@ static pte_t set_pte_filter(pte_t pte)
struct page *pg;
/* No exec permission in the first place, move on */
- if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
+ if (!pte_exec(pte) || !pte_looks_normal(pte))
return pte;
/* If you set _PAGE_EXEC on weird pages you're on your own */
@@ -137,7 +130,7 @@ static pte_t set_pte_filter(pte_t pte)
}
/* Else, we filter out _PAGE_EXEC */
- return __pte(pte_val(pte) & ~_PAGE_EXEC);
+ return pte_exprotect(pte);
}
static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
@@ -150,7 +143,7 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
* if necessary. Also if _PAGE_EXEC is already set, same deal,
* we just bail out
*/
- if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
+ if (dirty || pte_exec(pte) || !is_exec_fault())
return pte;
#ifdef CONFIG_DEBUG_VM
@@ -176,10 +169,10 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
set_bit(PG_arch_1, &pg->flags);
bail:
- return __pte(pte_val(pte) | _PAGE_EXEC);
+ return pte_mkexec(pte);
}
-#endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */
+#endif /* CONFIG_PPC_BOOK3S */
/*
* set_pte stores a linux PTE into the linux page table.
@@ -188,14 +181,13 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
pte_t pte)
{
/*
- * When handling numa faults, we already have the pte marked
- * _PAGE_PRESENT, but we can be sure that it is not in hpte.
- * Hence we can use set_pte_at for them.
+ * Make sure the hardware valid bit is not set. We don't do a
+ * TLB flush for this update.
*/
- VM_WARN_ON(pte_present(*ptep) && !pte_protnone(*ptep));
+ VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));
/* Add the pte bit when trying to set a pte */
- pte = __pte(pte_val(pte) | _PAGE_PTE);
+ pte = pte_mkpte(pte);
/* Note: mm->context.id might not yet have been assigned as
* this context might not have been activated yet when this
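The open-coded _PAGE_EXEC tests above give way to accessors, letting one copy of the filter code serve every MMU family. For a family that keeps _PAGE_EXEC in the PTE, the accessors are trivial inlines along these lines (a sketch, not the exact per-platform definitions):

static inline int pte_exec(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_EXEC);		/* test exec */
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);	/* strip exec */
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);	/* grant exec */
}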
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 120a49bfb9c6..5877f5aa8f5d 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -76,56 +76,69 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED,
- __builtin_return_address(0));
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+
+ return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
- return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
- __builtin_return_address(0));
+ pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
+
+ return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
void __iomem *
+ioremap_wt(phys_addr_t addr, unsigned long size)
+{
+ pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);
+
+ return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void __iomem *
+ioremap_coherent(phys_addr_t addr, unsigned long size)
+{
+ pgprot_t prot = pgprot_cached(PAGE_KERNEL);
+
+ return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_coherent);
+
+void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
+ pte_t pte = __pte(flags);
+
/* writeable implies dirty for kernel addresses */
- if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
- flags |= _PAGE_DIRTY | _PAGE_HWWRITE;
+ if (pte_write(pte))
+ pte = pte_mkdirty(pte);
/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
- flags &= ~(_PAGE_USER | _PAGE_EXEC);
- flags |= _PAGE_PRIVILEGED;
+ pte = pte_exprotect(pte);
+ pte = pte_mkprivileged(pte);
- return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+ return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
- return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+ return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}
void __iomem *
-__ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
- void *caller)
+__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
unsigned long v, i;
phys_addr_t p;
int err;
- /* Make sure we have the base flags */
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= pgprot_val(PAGE_KERNEL);
-
- /* Non-cacheable page cannot be coherent */
- if (flags & _PAGE_NO_CACHE)
- flags &= ~_PAGE_COHERENT;
-
/*
* Choose an address to map it to.
* Once the vmalloc system is running, we use it.
@@ -183,7 +196,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
err = 0;
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
- err = map_kernel_page(v+i, p+i, flags);
+ err = map_kernel_page(v + i, p + i, prot);
if (err) {
if (slab_is_available())
vunmap((void *)v);
@@ -209,7 +222,7 @@ void iounmap(volatile void __iomem *addr)
}
EXPORT_SYMBOL(iounmap);
-int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
+int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
pmd_t *pd;
pte_t *pg;
@@ -224,10 +237,8 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
/* The PTE should never be already set nor present in the
* hash table
*/
- BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
- flags);
- set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
- __pgprot(flags)));
+ BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
+ set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
}
smp_wmb();
return err;
@@ -238,7 +249,7 @@ int map_kernel_page(unsigned long va, phys_addr_t pa, int flags)
*/
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
- unsigned long v, s, f;
+ unsigned long v, s;
phys_addr_t p;
int ktext;
@@ -248,11 +259,10 @@ static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
for (; s < top; s += PAGE_SIZE) {
ktext = ((char *)v >= _stext && (char *)v < etext) ||
((char *)v >= _sinittext && (char *)v < _einittext);
- f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
- map_kernel_page(v, p, f);
+ map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
- hash_preload(&init_mm, v, 0, 0x300);
+ hash_preload(&init_mm, v, false, 0x300);
#endif
v += PAGE_SIZE;
p += PAGE_SIZE;
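After the conversion, every ioremap variant is the same call with a different pgprot transform over PAGE_KERNEL. A hypothetical driver fragment showing two variants side by side (the function name, addresses and sizes are made up):

#include <linux/io.h>
#include <linux/sizes.h>

static int example_map(phys_addr_t regs_pa, phys_addr_t fb_pa)
{
	void __iomem *regs = ioremap(regs_pa, SZ_4K);	/* uncached, guarded */
	void __iomem *fb = ioremap_wc(fb_pa, SZ_1M);	/* write-combining */

	if (!regs || !fb) {
		if (regs)
			iounmap(regs);
		if (fb)
			iounmap(fb);
		return -ENOMEM;
	}
	/* ... use the mappings, then tear them down ... */
	iounmap(fb);
	iounmap(regs);
	return 0;
}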
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 53e9eeecd5d4..fb1375c07e8c 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -113,17 +113,12 @@ unsigned long ioremap_bot = IOREMAP_BASE;
* __ioremap_at - Low level function to establish the page tables
* for an IO mapping
*/
-void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
- unsigned long flags)
+void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size, pgprot_t prot)
{
unsigned long i;
- /* Make sure we have the base flags */
- if ((flags & _PAGE_PRESENT) == 0)
- flags |= pgprot_val(PAGE_KERNEL);
-
/* We don't support the 4K PFN hack with ioremap */
- if (flags & H_PAGE_4K_PFN)
+ if (pgprot_val(prot) & H_PAGE_4K_PFN)
return NULL;
WARN_ON(pa & ~PAGE_MASK);
@@ -131,7 +126,7 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
WARN_ON(size & ~PAGE_MASK);
for (i = 0; i < size; i += PAGE_SIZE)
- if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
+ if (map_kernel_page((unsigned long)ea + i, pa + i, prot))
return NULL;
return (void __iomem *)ea;
@@ -152,7 +147,7 @@ void __iounmap_at(void *ea, unsigned long size)
}
void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
- unsigned long flags, void *caller)
+ pgprot_t prot, void *caller)
{
phys_addr_t paligned;
void __iomem *ret;
@@ -182,11 +177,11 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
return NULL;
area->phys_addr = paligned;
- ret = __ioremap_at(paligned, area->addr, size, flags);
+ ret = __ioremap_at(paligned, area->addr, size, prot);
if (!ret)
vunmap(area->addr);
} else {
- ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
+ ret = __ioremap_at(paligned, (void *)ioremap_bot, size, prot);
if (ret)
ioremap_bot += size;
}
@@ -199,49 +194,59 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
- return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
+ return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}
void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
- unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
+ pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
void *caller = __builtin_return_address(0);
if (ppc_md.ioremap)
- return ppc_md.ioremap(addr, size, flags, caller);
- return __ioremap_caller(addr, size, flags, caller);
+ return ppc_md.ioremap(addr, size, prot, caller);
+ return __ioremap_caller(addr, size, prot, caller);
}
void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
- unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
+ pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);
+ void *caller = __builtin_return_address(0);
+
+ if (ppc_md.ioremap)
+ return ppc_md.ioremap(addr, size, prot, caller);
+ return __ioremap_caller(addr, size, prot, caller);
+}
+
+void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
+{
+ pgprot_t prot = pgprot_cached(PAGE_KERNEL);
void *caller = __builtin_return_address(0);
if (ppc_md.ioremap)
- return ppc_md.ioremap(addr, size, flags, caller);
- return __ioremap_caller(addr, size, flags, caller);
+ return ppc_md.ioremap(addr, size, prot, caller);
+ return __ioremap_caller(addr, size, prot, caller);
}
void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
unsigned long flags)
{
+ pte_t pte = __pte(flags);
void *caller = __builtin_return_address(0);
/* writeable implies dirty for kernel addresses */
- if (flags & _PAGE_WRITE)
- flags |= _PAGE_DIRTY;
+ if (pte_write(pte))
+ pte = pte_mkdirty(pte);
/* we don't want to let _PAGE_EXEC leak out */
- flags &= ~_PAGE_EXEC;
+ pte = pte_exprotect(pte);
/*
* Force kernel mapping.
*/
- flags &= ~_PAGE_USER;
- flags |= _PAGE_PRIVILEGED;
+ pte = pte_mkprivileged(pte);
if (ppc_md.ioremap)
- return ppc_md.ioremap(addr, size, flags, caller);
- return __ioremap_caller(addr, size, flags, caller);
+ return ppc_md.ioremap(addr, size, pte_pgprot(pte), caller);
+ return __ioremap_caller(addr, size, pte_pgprot(pte), caller);
}
@@ -306,7 +311,7 @@ struct page *pud_page(pud_t pud)
*/
struct page *pmd_page(pmd_t pmd)
{
- if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+ if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
return pte_page(pmd_pte(pmd));
return virt_to_page(pmd_page_vaddr(pmd));
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index bea6c544e38f..38a793bfca37 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -163,7 +163,7 @@ void __init setbat(int index, unsigned long virt, phys_addr_t phys,
* Preload a translation in the hash table
*/
void hash_preload(struct mm_struct *mm, unsigned long ea,
- unsigned long access, unsigned long trap)
+ bool is_exec, unsigned long trap)
{
pmd_t *pmd;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 9f574e59d178..c3fdf2969d9f 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -14,6 +14,7 @@
* 2 of the License, or (at your option) any later version.
*/
+#include <asm/asm-prototypes.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -30,11 +31,10 @@
enum slb_index {
LINEAR_INDEX = 0, /* Kernel linear map (0xc000000000000000) */
- VMALLOC_INDEX = 1, /* Kernel virtual map (0xd000000000000000) */
- KSTACK_INDEX = 2, /* Kernel stack map */
+ KSTACK_INDEX = 1, /* Kernel stack map */
};
-extern void slb_allocate(unsigned long ea);
+static long slb_allocate_user(struct mm_struct *mm, unsigned long ea);
#define slb_esid_mask(ssize) \
(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
@@ -45,13 +45,43 @@ static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}
-static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+static inline unsigned long __mk_vsid_data(unsigned long vsid, int ssize,
unsigned long flags)
{
- return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
+ return (vsid << slb_vsid_shift(ssize)) | flags |
((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}
+static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
+ unsigned long flags)
+{
+ return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
+}
+
+static void assert_slb_exists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+ unsigned long tmp;
+
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+ WARN_ON(tmp == 0);
+#endif
+}
+
+static void assert_slb_notexists(unsigned long ea)
+{
+#ifdef CONFIG_DEBUG_VM
+ unsigned long tmp;
+
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
+ WARN_ON(tmp != 0);
+#endif
+}
+
static inline void slb_shadow_update(unsigned long ea, int ssize,
unsigned long flags,
enum slb_index index)
@@ -84,6 +114,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
*/
slb_shadow_update(ea, ssize, flags, index);
+ assert_slb_notexists(ea);
asm volatile("slbmte %0,%1" :
: "r" (mk_vsid_data(ea, ssize, flags)),
"r" (mk_esid_data(ea, ssize, index))
@@ -105,17 +136,20 @@ void __slb_restore_bolted_realmode(void)
: "r" (be64_to_cpu(p->save_area[index].vsid)),
"r" (be64_to_cpu(p->save_area[index].esid)));
}
+
+ assert_slb_exists(local_paca->kstack);
}
/*
* Insert the bolted entries into an empty SLB.
- * This is not the same as rebolt because the bolted segments are not
- * changed, just loaded from the shadow area.
*/
void slb_restore_bolted_realmode(void)
{
__slb_restore_bolted_realmode();
get_paca()->slb_cache_ptr = 0;
+
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+ get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
/*
@@ -123,113 +157,262 @@ void slb_restore_bolted_realmode(void)
*/
void slb_flush_all_realmode(void)
{
- /*
- * This flushes all SLB entries including 0, so it must be realmode.
- */
asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}
-static void __slb_flush_and_rebolt(void)
+/*
+ * This flushes non-bolted entries, it can be run in virtual mode. Must
+ * be called with interrupts disabled.
+ */
+void slb_flush_and_restore_bolted(void)
{
- /* If you change this make sure you change SLB_NUM_BOLTED
- * and PR KVM appropriately too. */
- unsigned long linear_llp, vmalloc_llp, lflags, vflags;
- unsigned long ksp_esid_data, ksp_vsid_data;
+ struct slb_shadow *p = get_slb_shadow();
- linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
- lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
+ BUILD_BUG_ON(SLB_NUM_BOLTED != 2);
- ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
- if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
- ksp_esid_data &= ~SLB_ESID_V;
- ksp_vsid_data = 0;
- slb_shadow_clear(KSTACK_INDEX);
- } else {
- /* Update stack entry; others don't change */
- slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
- ksp_vsid_data =
- be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
- }
+ WARN_ON(!irqs_disabled());
+
+ /*
+ * We can't take a PMU exception in the following code, so hard
+ * disable interrupts.
+ */
+ hard_irq_disable();
- /* We need to do this all in asm, so we're sure we don't touch
- * the stack between the slbia and rebolting it. */
asm volatile("isync\n"
"slbia\n"
- /* Slot 1 - first VMALLOC segment */
- "slbmte %0,%1\n"
- /* Slot 2 - kernel stack */
- "slbmte %2,%3\n"
- "isync"
- :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
- "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
- "r"(ksp_vsid_data),
- "r"(ksp_esid_data)
+ "slbmte %0, %1\n"
+ "isync\n"
+ :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
+ "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
: "memory");
+ assert_slb_exists(get_paca()->kstack);
+
+ get_paca()->slb_cache_ptr = 0;
+
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+ get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
}
-void slb_flush_and_rebolt(void)
+void slb_save_contents(struct slb_entry *slb_ptr)
{
+ int i;
+ unsigned long e, v;
- WARN_ON(!irqs_disabled());
+ /* Save slb_cache_ptr value. */
+ get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;
+
+ if (!slb_ptr)
+ return;
+
+ for (i = 0; i < mmu_slb_size; i++) {
+ asm volatile("slbmfee %0,%1" : "=r" (e) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (v) : "r" (i));
+ slb_ptr->esid = e;
+ slb_ptr->vsid = v;
+ slb_ptr++;
+ }
+}
+
+void slb_dump_contents(struct slb_entry *slb_ptr)
+{
+ int i, n;
+ unsigned long e, v;
+ unsigned long llp;
+
+ if (!slb_ptr)
+ return;
+
+ pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
+ pr_err("Last SLB entry inserted at slot %d\n", get_paca()->stab_rr);
+
+ for (i = 0; i < mmu_slb_size; i++) {
+ e = slb_ptr->esid;
+ v = slb_ptr->vsid;
+ slb_ptr++;
+
+ if (!e && !v)
+ continue;
+
+ pr_err("%02d %016lx %016lx\n", i, e, v);
+
+ if (!(e & SLB_ESID_V)) {
+ pr_err("\n");
+ continue;
+ }
+ llp = v & SLB_VSID_LLP;
+ if (v & SLB_VSID_B_1T) {
+ pr_err(" 1T ESID=%9lx VSID=%13lx LLP:%3lx\n",
+ GET_ESID_1T(e),
+ (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
+ } else {
+ pr_err(" 256M ESID=%9lx VSID=%13lx LLP:%3lx\n",
+ GET_ESID(e),
+ (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
+ }
+ }
+ pr_err("----------------------------------\n");
+
+ /* Dump slb cache entries as well. */
+ pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
+ pr_err("Valid SLB cache entries:\n");
+ n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
+ for (i = 0; i < n; i++)
+ pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+ pr_err("Rest of SLB cache entries:\n");
+ for (i = n; i < SLB_CACHE_ENTRIES; i++)
+ pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
+}
+void slb_vmalloc_update(void)
+{
/*
- * We can't take a PMU exception in the following code, so hard
- * disable interrupts.
+ * vmalloc is not bolted, so just have to flush non-bolted.
*/
- hard_irq_disable();
+ slb_flush_and_restore_bolted();
+}
- __slb_flush_and_rebolt();
- get_paca()->slb_cache_ptr = 0;
+static bool preload_hit(struct thread_info *ti, unsigned long esid)
+{
+ unsigned char i;
+
+ for (i = 0; i < ti->slb_preload_nr; i++) {
+ unsigned char idx;
+
+ idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
+ if (esid == ti->slb_preload_esid[idx])
+ return true;
+ }
+ return false;
}
-void slb_vmalloc_update(void)
+static bool preload_add(struct thread_info *ti, unsigned long ea)
{
- unsigned long vflags;
+ unsigned char idx;
+ unsigned long esid;
+
+ if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
+ /* EAs are stored >> 28 so 256MB segments don't need clearing */
+ if (ea & ESID_MASK_1T)
+ ea &= ESID_MASK_1T;
+ }
+
+ esid = ea >> SID_SHIFT;
- vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
- slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
- slb_flush_and_rebolt();
+ if (preload_hit(ti, esid))
+ return false;
+
+ idx = (ti->slb_preload_tail + ti->slb_preload_nr) % SLB_PRELOAD_NR;
+ ti->slb_preload_esid[idx] = esid;
+ if (ti->slb_preload_nr == SLB_PRELOAD_NR)
+ ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
+ else
+ ti->slb_preload_nr++;
+
+ return true;
}
-/* Helper function to compare esids. There are four cases to handle.
- * 1. The system is not 1T segment size capable. Use the GET_ESID compare.
- * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
- * 3. The system is 1T capable, only one of the two addresses is > 1T. This is not a match.
- * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
- */
-static inline int esids_match(unsigned long addr1, unsigned long addr2)
+static void preload_age(struct thread_info *ti)
{
- int esid_1t_count;
+ if (!ti->slb_preload_nr)
+ return;
+ ti->slb_preload_nr--;
+ ti->slb_preload_tail = (ti->slb_preload_tail + 1) % SLB_PRELOAD_NR;
+}
- /* System is not 1T segment size capable. */
- if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
- return (GET_ESID(addr1) == GET_ESID(addr2));
+void slb_setup_new_exec(void)
+{
+ struct thread_info *ti = current_thread_info();
+ struct mm_struct *mm = current->mm;
+ unsigned long exec = 0x10000000;
- esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
- ((addr2 >> SID_SHIFT_1T) != 0));
+ WARN_ON(irqs_disabled());
- /* both addresses are < 1T */
- if (esid_1t_count == 0)
- return (GET_ESID(addr1) == GET_ESID(addr2));
+ /*
+ * The preload cache can only be used to determine whether an SLB
+ * entry exists as long as it has not overflowed.
+ */
+ if (ti->slb_preload_nr + 2 > SLB_PRELOAD_NR)
+ return;
- /* One address < 1T, the other > 1T. Not a match */
- if (esid_1t_count == 1)
- return 0;
+ hard_irq_disable();
- /* Both addresses are > 1T. */
- return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
+ /*
+ * We have no good place to clear the slb preload cache on exec,
+ * flush_thread() is about the earliest arch hook, but that happens
+ * after we switch to the mm and have already preloaded the SLBEs.
+ *
+ * For the most part it's probably okay to use entries from the
+ * previous exec; they will age out if unused. It may turn out to
+ * be an advantage to clear the cache before switching to it,
+ * however.
+ */
+
+ /*
+ * preload some userspace segments into the SLB.
+ * Almost all 32 and 64bit PowerPC executables are linked at
+ * 0x10000000 so it makes sense to preload this segment.
+ */
+ if (!is_kernel_addr(exec)) {
+ if (preload_add(ti, exec))
+ slb_allocate_user(mm, exec);
+ }
+
+ /* Libraries and mmaps. */
+ if (!is_kernel_addr(mm->mmap_base)) {
+ if (preload_add(ti, mm->mmap_base))
+ slb_allocate_user(mm, mm->mmap_base);
+ }
+
+ /* see switch_slb */
+ asm volatile("isync" : : : "memory");
+
+ local_irq_enable();
}
+void preload_new_slb_context(unsigned long start, unsigned long sp)
+{
+ struct thread_info *ti = current_thread_info();
+ struct mm_struct *mm = current->mm;
+ unsigned long heap = mm->start_brk;
+
+ WARN_ON(irqs_disabled());
+
+ /* see above */
+ if (ti->slb_preload_nr + 3 > SLB_PRELOAD_NR)
+ return;
+
+ hard_irq_disable();
+
+ /* Userspace entry address. */
+ if (!is_kernel_addr(start)) {
+ if (preload_add(ti, start))
+ slb_allocate_user(mm, start);
+ }
+
+ /* Top of stack, grows down. */
+ if (!is_kernel_addr(sp)) {
+ if (preload_add(ti, sp))
+ slb_allocate_user(mm, sp);
+ }
+
+ /* Bottom of heap, grows up. */
+ if (heap && !is_kernel_addr(heap)) {
+ if (preload_add(ti, heap))
+ slb_allocate_user(mm, heap);
+ }
+
+ /* see switch_slb */
+ asm volatile("isync" : : : "memory");
+
+ local_irq_enable();
+}
+
+
/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
- unsigned long offset;
- unsigned long slbie_data = 0;
- unsigned long pc = KSTK_EIP(tsk);
- unsigned long stack = KSTK_ESP(tsk);
- unsigned long exec_base;
+ struct thread_info *ti = task_thread_info(tsk);
+ unsigned char i;
/*
* We need interrupts hard-disabled here, not just soft-disabled,
@@ -238,91 +421,107 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
* which would update the slb_cache/slb_cache_ptr fields in the PACA.
*/
hard_irq_disable();
- offset = get_paca()->slb_cache_ptr;
- if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
- offset <= SLB_CACHE_ENTRIES) {
- int i;
- asm volatile("isync" : : : "memory");
- for (i = 0; i < offset; i++) {
- slbie_data = (unsigned long)get_paca()->slb_cache[i]
- << SID_SHIFT; /* EA */
- slbie_data |= user_segment_size(slbie_data)
- << SLBIE_SSIZE_SHIFT;
- slbie_data |= SLBIE_C; /* C set for user addresses */
- asm volatile("slbie %0" : : "r" (slbie_data));
- }
- asm volatile("isync" : : : "memory");
+ asm volatile("isync" : : : "memory");
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ /*
+ * SLBIA IH=3 invalidates all Class=1 SLBEs and their
+ * associated lookaside structures, which matches what
+ * switch_slb wants. So ARCH_300 does not use the slb
+ * cache.
+ */
+ asm volatile(PPC_SLBIA(3));
} else {
- __slb_flush_and_rebolt();
- }
+ unsigned long offset = get_paca()->slb_cache_ptr;
+
+ if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
+ offset <= SLB_CACHE_ENTRIES) {
+ unsigned long slbie_data = 0;
+
+ for (i = 0; i < offset; i++) {
+ unsigned long ea;
+
+ ea = (unsigned long)
+ get_paca()->slb_cache[i] << SID_SHIFT;
+ /*
+ * Could assert_slb_exists here, but hypervisor
+ * or machine check could have come in and
+ * removed the entry at this point.
+ */
+
+ slbie_data = ea;
+ slbie_data |= user_segment_size(slbie_data)
+ << SLBIE_SSIZE_SHIFT;
+ slbie_data |= SLBIE_C; /* user slbs have C=1 */
+ asm volatile("slbie %0" : : "r" (slbie_data));
+ }
+
+ /* Workaround POWER5 < DD2.1 issue */
+ if (!cpu_has_feature(CPU_FTR_ARCH_207S) && offset == 1)
+ asm volatile("slbie %0" : : "r" (slbie_data));
+
+ } else {
+ struct slb_shadow *p = get_slb_shadow();
+ unsigned long ksp_esid_data =
+ be64_to_cpu(p->save_area[KSTACK_INDEX].esid);
+ unsigned long ksp_vsid_data =
+ be64_to_cpu(p->save_area[KSTACK_INDEX].vsid);
+
+ asm volatile(PPC_SLBIA(1) "\n"
+ "slbmte %0,%1\n"
+ "isync"
+ :: "r"(ksp_vsid_data),
+ "r"(ksp_esid_data));
+
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+ }
- /* Workaround POWER5 < DD2.1 issue */
- if (offset == 1 || offset > SLB_CACHE_ENTRIES)
- asm volatile("slbie %0" : : "r" (slbie_data));
+ get_paca()->slb_cache_ptr = 0;
+ }
+ get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
- get_paca()->slb_cache_ptr = 0;
copy_mm_to_paca(mm);
/*
- * preload some userspace segments into the SLB.
- * Almost all 32 and 64bit PowerPC executables are linked at
- * 0x10000000 so it makes sense to preload this segment.
+ * We gradually age out SLBs after a number of context switches to
+ * reduce reload overhead of unused entries (like we do with FP/VEC
+ * reload). Each time we wrap 256 switches, take an entry out of the
+ * SLB preload cache.
*/
- exec_base = 0x10000000;
-
- if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
- is_kernel_addr(exec_base))
- return;
+ tsk->thread.load_slb++;
+ if (!tsk->thread.load_slb) {
+ unsigned long pc = KSTK_EIP(tsk);
- slb_allocate(pc);
+ preload_age(ti);
+ preload_add(ti, pc);
+ }
- if (!esids_match(pc, stack))
- slb_allocate(stack);
+ for (i = 0; i < ti->slb_preload_nr; i++) {
+ unsigned char idx;
+ unsigned long ea;
- if (!esids_match(pc, exec_base) &&
- !esids_match(stack, exec_base))
- slb_allocate(exec_base);
-}
+ idx = (ti->slb_preload_tail + i) % SLB_PRELOAD_NR;
+ ea = (unsigned long)ti->slb_preload_esid[idx] << SID_SHIFT;
-static inline void patch_slb_encoding(unsigned int *insn_addr,
- unsigned int immed)
-{
+ slb_allocate_user(mm, ea);
+ }
/*
- * This function patches either an li or a cmpldi instruction with
- * a new immediate value. This relies on the fact that both li
- * (which is actually addi) and cmpldi both take a 16-bit immediate
- * value, and it is situated in the same location in the instruction,
- * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
- * The signedness of the immediate operand differs between the two
- * instructions however this code is only ever patching a small value,
- * much less than 1 << 15, so we can get away with it.
- * To patch the value we read the existing instruction, clear the
- * immediate value, and or in our new value, then write the instruction
- * back.
+ * Synchronize slbmte preloads with possible subsequent user memory
+ * address accesses by the kernel (user mode won't happen until
+ * rfid, which is safe).
*/
- unsigned int insn = (*insn_addr & 0xffff0000) | immed;
- patch_instruction(insn_addr, insn);
+ asm volatile("isync" : : : "memory");
}
-extern u32 slb_miss_kernel_load_linear[];
-extern u32 slb_miss_kernel_load_io[];
-extern u32 slb_compare_rr_to_size[];
-extern u32 slb_miss_kernel_load_vmemmap[];
-
void slb_set_size(u16 size)
{
- if (mmu_slb_size == size)
- return;
-
mmu_slb_size = size;
- patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}
void slb_initialize(void)
{
unsigned long linear_llp, vmalloc_llp, io_llp;
- unsigned long lflags, vflags;
+ unsigned long lflags;
static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
unsigned long vmemmap_llp;
@@ -338,34 +537,24 @@ void slb_initialize(void)
#endif
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
- patch_slb_encoding(slb_miss_kernel_load_linear,
- SLB_VSID_KERNEL | linear_llp);
- patch_slb_encoding(slb_miss_kernel_load_io,
- SLB_VSID_KERNEL | io_llp);
- patch_slb_encoding(slb_compare_rr_to_size,
- mmu_slb_size);
-
pr_devel("SLB: linear LLP = %04lx\n", linear_llp);
pr_devel("SLB: io LLP = %04lx\n", io_llp);
-
#ifdef CONFIG_SPARSEMEM_VMEMMAP
- patch_slb_encoding(slb_miss_kernel_load_vmemmap,
- SLB_VSID_KERNEL | vmemmap_llp);
pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
}
- get_paca()->stab_rr = SLB_NUM_BOLTED;
+ get_paca()->stab_rr = SLB_NUM_BOLTED - 1;
+ get_paca()->slb_kern_bitmap = (1U << SLB_NUM_BOLTED) - 1;
+ get_paca()->slb_used_bitmap = get_paca()->slb_kern_bitmap;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even entry 0) & all the ERATS */
asm volatile("isync":::"memory");
asm volatile("slbmte %0,%0"::"r" (0) : "memory");
asm volatile("isync; slbia; isync":::"memory");
create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
- create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
/* For the boot cpu, we're running on the stack in init_thread_union,
* which is in the first segment of the linear mapping, and also
@@ -381,122 +570,259 @@ void slb_initialize(void)
asm volatile("isync":::"memory");
}
-static void insert_slb_entry(unsigned long vsid, unsigned long ea,
- int bpsize, int ssize)
+static void slb_cache_update(unsigned long esid_data)
{
- unsigned long flags, vsid_data, esid_data;
- enum slb_index index;
int slb_cache_index;
- /*
- * We are irq disabled, hence should be safe to access PACA.
- */
- VM_WARN_ON(!irqs_disabled());
-
- /*
- * We can't take a PMU exception in the following code, so hard
- * disable interrupts.
- */
- hard_irq_disable();
-
- index = get_paca()->stab_rr;
-
- /*
- * simple round-robin replacement of slb starting at SLB_NUM_BOLTED.
- */
- if (index < (mmu_slb_size - 1))
- index++;
- else
- index = SLB_NUM_BOLTED;
-
- get_paca()->stab_rr = index;
-
- flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
- vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
- ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
- esid_data = mk_esid_data(ea, ssize, index);
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing.
- * Also we only handle user segments here.
- */
- asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
- : "memory");
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return; /* ISAv3.0B and later does not use slb_cache */
/*
* Now update slb cache entries
*/
- slb_cache_index = get_paca()->slb_cache_ptr;
+ slb_cache_index = local_paca->slb_cache_ptr;
if (slb_cache_index < SLB_CACHE_ENTRIES) {
/*
* We have space in slb cache for optimized switch_slb().
* Top 36 bits from esid_data as per ISA
*/
- get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
- get_paca()->slb_cache_ptr++;
+ local_paca->slb_cache[slb_cache_index++] = esid_data >> 28;
+ local_paca->slb_cache_ptr++;
} else {
/*
* Our cache is full and the current cache content strictly
 * doesn't indicate the active SLB contents. Bump the ptr
* so that switch_slb() will ignore the cache.
*/
- get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
+ local_paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
}
}
-static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
+static enum slb_index alloc_slb_index(bool kernel)
{
- struct mm_struct *mm = current->mm;
- unsigned long vsid;
- int bpsize;
+ enum slb_index index;
/*
- * We are always above 1TB, hence use high user segment size.
+ * The allocation bitmaps can become out of sync with the SLB
+ * when the _switch code does slbie while bolting a new stack
+ * segment, which must not be anywhere else in the SLB. This leaves
+ * a kernel allocated entry that is unused in the SLB. With very
+ * large systems or small segment sizes, the bitmaps could slowly
+ * fill with these entries. They will eventually be cleared out
+ * by the round robin allocator in that case, so it's probably not
+ * worth accounting for.
*/
- vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
- bpsize = get_slice_psize(mm, ea);
- insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
+
+ /*
+ * SLBs beyond 32 entries are allocated with stab_rr only.
+ * POWER7/8/9 have 32 SLB entries; this could be expanded if a
+ * future CPU has more.
+ */
+ if (local_paca->slb_used_bitmap != U32_MAX) {
+ index = ffz(local_paca->slb_used_bitmap);
+ local_paca->slb_used_bitmap |= 1U << index;
+ if (kernel)
+ local_paca->slb_kern_bitmap |= 1U << index;
+ } else {
+ /* round-robin replacement of slb starting at SLB_NUM_BOLTED. */
+ index = local_paca->stab_rr;
+ if (index < (mmu_slb_size - 1))
+ index++;
+ else
+ index = SLB_NUM_BOLTED;
+ local_paca->stab_rr = index;
+ if (index < 32) {
+ if (kernel)
+ local_paca->slb_kern_bitmap |= 1U << index;
+ else
+ local_paca->slb_kern_bitmap &= ~(1U << index);
+ }
+ }
+ BUG_ON(index < SLB_NUM_BOLTED);
+
+ return index;
}
-void slb_miss_large_addr(struct pt_regs *regs)
+static long slb_insert_entry(unsigned long ea, unsigned long context,
+ unsigned long flags, int ssize, bool kernel)
{
- enum ctx_state prev_state = exception_enter();
- unsigned long ea = regs->dar;
- int context;
+ unsigned long vsid;
+ unsigned long vsid_data, esid_data;
+ enum slb_index index;
- if (REGION_ID(ea) != USER_REGION_ID)
- goto slb_bad_addr;
+ vsid = get_vsid(context, ea, ssize);
+ if (!vsid)
+ return -EFAULT;
/*
- * Are we beyound what the page table layout supports ?
+ * There must not be a kernel SLB fault in alloc_slb_index or before
+ * slbmte here or the allocation bitmaps could get out of whack with
+ * the SLB.
+ *
+ * User SLB faults or preloads take this path which might get inlined
+ * into the caller, so add compiler barriers here to ensure unsafe
+ * memory accesses do not come between.
*/
- if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
- goto slb_bad_addr;
+ barrier();
- /* Lower address should have been handled by asm code */
- if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
- goto slb_bad_addr;
+ index = alloc_slb_index(kernel);
+
+ vsid_data = __mk_vsid_data(vsid, ssize, flags);
+ esid_data = mk_esid_data(ea, ssize, index);
+
+ /*
+ * No need for an isync before or after this slbmte. The exception
+ * we enter with and the rfid we exit with are context synchronizing.
+ * User preloads should add isync afterwards in case the kernel
+ * accesses user memory before it returns to userspace with rfid.
+ */
+ assert_slb_notexists(ea);
+ asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
+
+ barrier();
+
+ if (!kernel)
+ slb_cache_update(esid_data);
+
+ return 0;
+}
+
+static long slb_allocate_kernel(unsigned long ea, unsigned long id)
+{
+ unsigned long context;
+ unsigned long flags;
+ int ssize;
+
+ if (id == KERNEL_REGION_ID) {
+
+ /* We only support up to MAX_PHYSMEM_BITS */
+ if ((ea & ~REGION_MASK) > (1UL << MAX_PHYSMEM_BITS))
+ return -EFAULT;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_linear_psize].sllp;
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ } else if (id == VMEMMAP_REGION_ID) {
+
+ if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
+ return -EFAULT;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
+ } else if (id == VMALLOC_REGION_ID) {
+
+ if ((ea & ~REGION_MASK) >= (1ULL << MAX_EA_BITS_PER_CONTEXT))
+ return -EFAULT;
+
+ if (ea < H_VMALLOC_END)
+ flags = get_paca()->vmalloc_sllp;
+ else
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
+ } else {
+ return -EFAULT;
+ }
+
+ ssize = MMU_SEGSIZE_1T;
+ if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
+ ssize = MMU_SEGSIZE_256M;
+
+ context = get_kernel_context(ea);
+ return slb_insert_entry(ea, context, flags, ssize, true);
+}
+
+static long slb_allocate_user(struct mm_struct *mm, unsigned long ea)
+{
+ unsigned long context;
+ unsigned long flags;
+ int bpsize;
+ int ssize;
/*
 * consider this a bad access if we take an SLB miss
 * on an address above the addr limit.
*/
- if (ea >= current->mm->context.slb_addr_limit)
- goto slb_bad_addr;
+ if (ea >= mm->context.slb_addr_limit)
+ return -EFAULT;
- context = get_ea_context(&current->mm->context, ea);
+ context = get_user_context(&mm->context, ea);
if (!context)
- goto slb_bad_addr;
+ return -EFAULT;
+
+ if (unlikely(ea >= H_PGTABLE_RANGE)) {
+ WARN_ON(1);
+ return -EFAULT;
+ }
- handle_multi_context_slb_miss(context, ea);
- exception_exit(prev_state);
- return;
+ ssize = user_segment_size(ea);
-slb_bad_addr:
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
- else
- bad_page_fault(regs, ea, SIGSEGV);
- exception_exit(prev_state);
+ bpsize = get_slice_psize(mm, ea);
+ flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
+
+ return slb_insert_entry(ea, context, flags, ssize, false);
+}
+
+long do_slb_fault(struct pt_regs *regs, unsigned long ea)
+{
+ unsigned long id = REGION_ID(ea);
+
+ /* IRQs are not reconciled here, so can't check irqs_disabled */
+ VM_WARN_ON(mfmsr() & MSR_EE);
+
+ if (unlikely(!(regs->msr & MSR_RI)))
+ return -EINVAL;
+
+ /*
+ * SLB kernel faults must be very careful not to touch anything
+ * that is not bolted. E.g., PACA and global variables are okay,
+ * mm->context stuff is not.
+ *
+ * SLB user faults can access all of kernel memory, but must be
+ * careful not to touch things like IRQ state because it is not
+ * "reconciled" here. The difficulty is that we must use
+ * fast_exception_return to return from kernel SLB faults without
+ * looking at possible non-bolted memory. We could test user vs
+ * kernel faults in the interrupt handler asm and do a full fault,
+ * reconcile, ret_from_except for user faults which would make them
+ * first class kernel code. But for performance it's probably nicer
+ * if they go via fast_exception_return too.
+ */
+ if (id >= KERNEL_REGION_ID) {
+ long err;
+#ifdef CONFIG_DEBUG_VM
+ /* Catch recursive kernel SLB faults. */
+ BUG_ON(local_paca->in_kernel_slb_handler);
+ local_paca->in_kernel_slb_handler = 1;
+#endif
+ err = slb_allocate_kernel(ea, id);
+#ifdef CONFIG_DEBUG_VM
+ local_paca->in_kernel_slb_handler = 0;
+#endif
+ return err;
+ } else {
+ struct mm_struct *mm = current->mm;
+ long err;
+
+ if (unlikely(!mm))
+ return -EFAULT;
+
+ err = slb_allocate_user(mm, ea);
+ if (!err)
+ preload_add(current_thread_info(), ea);
+
+ return err;
+ }
+}
+
+void do_bad_slb_fault(struct pt_regs *regs, unsigned long ea, long err)
+{
+ if (err == -EFAULT) {
+ if (user_mode(regs))
+ _exception(SIGSEGV, regs, SEGV_BNDERR, ea);
+ else
+ bad_page_fault(regs, ea, SIGSEGV);
+ } else if (err == -EINVAL) {
+ unrecoverable_exception(regs);
+ } else {
+ BUG();
+ }
}
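The preload paths above revolve around a small per-thread ring of ESIDs (the EA shifted right by SID_SHIFT, i.e. 28) indexed modulo SLB_PRELOAD_NR. A sketch of the shape those helpers assume; the struct and size here are illustrative stand-ins for the real thread_info fields:

#define EXAMPLE_SLB_PRELOAD_NR	16U	/* illustrative; a small power of two */

struct example_slb_preload {
	unsigned char	nr;				/* valid entries */
	unsigned char	tail;				/* oldest entry */
	u32		esid[EXAMPLE_SLB_PRELOAD_NR];	/* EA >> SID_SHIFT */
};

/* Ring walk, as in preload_hit(): */
static bool example_preload_hit(struct example_slb_preload *p, u32 esid)
{
	unsigned char i;

	for (i = 0; i < p->nr; i++) {
		unsigned char idx = (p->tail + i) % EXAMPLE_SLB_PRELOAD_NR;

		if (p->esid[idx] == esid)
			return true;
	}
	return false;
}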
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
deleted file mode 100644
index 4ac5057ad439..000000000000
--- a/arch/powerpc/mm/slb_low.S
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Low-level SLB routines
- *
- * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- *
- * Based on earlier C version:
- * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
- * Copyright (c) 2001 Dave Engebretsen
- * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/processor.h>
-#include <asm/ppc_asm.h>
-#include <asm/asm-offsets.h>
-#include <asm/cputable.h>
-#include <asm/page.h>
-#include <asm/mmu.h>
-#include <asm/pgtable.h>
-#include <asm/firmware.h>
-#include <asm/feature-fixups.h>
-
-/*
- * This macro generates asm code to compute the VSID scramble
- * function. Used in slb_allocate() and do_stab_bolted. The function
- * computed is: (protovsid*VSID_MULTIPLIER) % VSID_MODULUS
- *
- * rt = register containing the proto-VSID and into which the
- * VSID will be stored
- * rx = scratch register (clobbered)
- * rf = flags
- *
- * - rt and rx must be different registers
- * - The answer will end up in the low VSID_BITS bits of rt. The higher
- * bits may contain other garbage, so you may need to mask the
- * result.
- */
-#define ASM_VSID_SCRAMBLE(rt, rx, rf, size) \
- lis rx,VSID_MULTIPLIER_##size@h; \
- ori rx,rx,VSID_MULTIPLIER_##size@l; \
- mulld rt,rt,rx; /* rt = rt * MULTIPLIER */ \
-/* \
- * powermac get slb fault before feature fixup, so make 65 bit part \
- * the default part of feature fixup \
- */ \
-BEGIN_MMU_FTR_SECTION \
- srdi rx,rt,VSID_BITS_65_##size; \
- clrldi rt,rt,(64-VSID_BITS_65_##size); \
- add rt,rt,rx; \
- addi rx,rt,1; \
- srdi rx,rx,VSID_BITS_65_##size; \
- add rt,rt,rx; \
- rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_65_##size)); \
-MMU_FTR_SECTION_ELSE \
- srdi rx,rt,VSID_BITS_##size; \
- clrldi rt,rt,(64-VSID_BITS_##size); \
- add rt,rt,rx; /* add high and low bits */ \
- addi rx,rt,1; \
- srdi rx,rx,VSID_BITS_##size; /* extract 2^VSID_BITS bit */ \
- add rt,rt,rx; \
- rldimi rf,rt,SLB_VSID_SHIFT_##size,(64 - (SLB_VSID_SHIFT_##size + VSID_BITS_##size)); \
-ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_68_BIT_VA)
-
-
-/* void slb_allocate(unsigned long ea);
- *
- * Create an SLB entry for the given EA (user or kernel).
- * r3 = faulting address, r13 = PACA
- * r9, r10, r11 are clobbered by this function
- * r3 is preserved.
- * No other registers are examined or changed.
- */
-_GLOBAL(slb_allocate)
- /*
- * Check if the address falls within the range of the first context, or
- * if we may need to handle multi context. For the first context we
- * allocate the slb entry via the fast path below. For large address we
- * branch out to C-code and see if additional contexts have been
- * allocated.
- * The test here is:
- * (ea & ~REGION_MASK) >= (1ull << MAX_EA_BITS_PER_CONTEXT)
- */
- rldicr. r9,r3,4,(63 - MAX_EA_BITS_PER_CONTEXT - 4)
- bne- 8f
-
- srdi r9,r3,60 /* get region */
- srdi r10,r3,SID_SHIFT /* get esid */
- cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
-
- /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
- blt cr7,0f /* user or kernel? */
-
- /* Check if hitting the linear mapping or some other kernel space
- */
- bne cr7,1f
-
- /* Linear mapping encoding bits, the "li" instruction below will
- * be patched by the kernel at boot
- */
-.globl slb_miss_kernel_load_linear
-slb_miss_kernel_load_linear:
- li r11,0
- /*
- * context = (ea >> 60) - (0xc - 1)
- * r9 = region id.
- */
- subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET
-
-BEGIN_FTR_SECTION
- b .Lslb_finish_load
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
- b .Lslb_finish_load_1T
-
-1:
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
- cmpldi cr0,r9,0xf
- bne 1f
-/* Check virtual memmap region. To be patched at kernel boot */
-.globl slb_miss_kernel_load_vmemmap
-slb_miss_kernel_load_vmemmap:
- li r11,0
- b 6f
-1:
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
-
- /*
- * r10 contains the ESID, which is the original faulting EA shifted
- * right by 28 bits. We need to compare that with (H_VMALLOC_END >> 28)
- * which is 0xd00038000. That can't be used as an immediate, even if we
- * ignored the 0xd, so we have to load it into a register, and we only
- * have one register free. So we must load all of (H_VMALLOC_END >> 28)
- * into a register and compare ESID against that.
- */
- lis r11,(H_VMALLOC_END >> 32)@h // r11 = 0xffffffffd0000000
- ori r11,r11,(H_VMALLOC_END >> 32)@l // r11 = 0xffffffffd0003800
- // Rotate left 4, then mask with 0xffffffff0
- rldic r11,r11,4,28 // r11 = 0xd00038000
- cmpld r10,r11 // if r10 >= r11
- bge 5f // goto io_mapping
-
- /*
- * vmalloc mapping gets the encoding from the PACA as the mapping
- * can be demoted from 64K -> 4K dynamically on some machines.
- */
- lhz r11,PACAVMALLOCSLLP(r13)
- b 6f
-5:
- /* IO mapping */
-.globl slb_miss_kernel_load_io
-slb_miss_kernel_load_io:
- li r11,0
-6:
- /*
- * context = (ea >> 60) - (0xc - 1)
- * r9 = region id.
- */
- subi r9,r9,KERNEL_REGION_CONTEXT_OFFSET
-
-BEGIN_FTR_SECTION
- b .Lslb_finish_load
-END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
- b .Lslb_finish_load_1T
-
-0: /*
- * For userspace addresses, make sure this is region 0.
- */
- cmpdi r9, 0
- bne- 8f
- /*
- * user space make sure we are within the allowed limit
- */
- ld r11,PACA_SLB_ADDR_LIMIT(r13)
- cmpld r3,r11
- bge- 8f
-
- /* when using slices, we extract the psize off the slice bitmaps
- * and then we need to get the sllp encoding off the mmu_psize_defs
- * array.
- *
- * XXX This is a bit inefficient especially for the normal case,
- * so we should try to implement a fast path for the standard page
- * size using the old sllp value so we avoid the array. We cannot
- * really do dynamic patching unfortunately as processes might flip
- * between 4k and 64k standard page size
- */
-#ifdef CONFIG_PPC_MM_SLICES
- /* r10 have esid */
- cmpldi r10,16
- /* below SLICE_LOW_TOP */
- blt 5f
- /*
- * Handle hpsizes,
- * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
- */
- srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
- addi r9,r11,PACAHIGHSLICEPSIZE
- lbzx r9,r13,r9 /* r9 is hpsizes[r11] */
- /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
- rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
- b 6f
-
-5:
- /*
- * Handle lpsizes
- * r9 is get_paca()->context.low_slices_psize[index], r11 is mask_index
- */
- srdi r11,r10,1 /* index */
- addi r9,r11,PACALOWSLICESPSIZE
- lbzx r9,r13,r9 /* r9 is lpsizes[r11] */
- rldicl r11,r10,0,63 /* r11 = r10 & 0x1 */
-6:
- sldi r11,r11,2 /* index * 4 */
- /* Extract the psize and multiply to get an array offset */
- srd r9,r9,r11
- andi. r9,r9,0xf
- mulli r9,r9,MMUPSIZEDEFSIZE
-
- /* Now get to the array and obtain the sllp
- */
- ld r11,PACATOC(r13)
- ld r11,mmu_psize_defs@got(r11)
- add r11,r11,r9
- ld r11,MMUPSIZESLLP(r11)
- ori r11,r11,SLB_VSID_USER
-#else
- /* paca context sllp already contains the SLB_VSID_USER bits */
- lhz r11,PACACONTEXTSLLP(r13)
-#endif /* CONFIG_PPC_MM_SLICES */
-
- ld r9,PACACONTEXTID(r13)
-BEGIN_FTR_SECTION
- cmpldi r10,0x1000
- bge .Lslb_finish_load_1T
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- b .Lslb_finish_load
-
-8: /* invalid EA - return an error indication */
- crset 4*cr0+eq /* indicate failure */
- blr
-
-/*
- * Finish loading of an SLB entry and return
- *
- * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
- */
-.Lslb_finish_load:
- rldimi r10,r9,ESID_BITS,0
- ASM_VSID_SCRAMBLE(r10,r9,r11,256M)
- /* r3 = EA, r11 = VSID data */
- /*
- * Find a slot, round robin. Previously we tried to find a
- * free slot first but that took too long. Unfortunately we
- * dont have any LRU information to help us choose a slot.
- */
-
- mr r9,r3
-
- /* slb_finish_load_1T continues here. r9=EA with non-ESID bits clear */
-7: ld r10,PACASTABRR(r13)
- addi r10,r10,1
- /* This gets soft patched on boot. */
-.globl slb_compare_rr_to_size
-slb_compare_rr_to_size:
- cmpldi r10,0
-
- blt+ 4f
- li r10,SLB_NUM_BOLTED
-
-4:
- std r10,PACASTABRR(r13)
-
-3:
- rldimi r9,r10,0,36 /* r9 = EA[0:35] | entry */
- oris r10,r9,SLB_ESID_V@h /* r10 = r9 | SLB_ESID_V */
-
- /* r9 = ESID data, r11 = VSID data */
-
- /*
- * No need for an isync before or after this slbmte. The exception
- * we enter with and the rfid we exit with are context synchronizing.
- */
- slbmte r11,r10
-
- /* we're done for kernel addresses */
- crclr 4*cr0+eq /* set result to "success" */
- bgelr cr7
-
- /* Update the slb cache */
- lhz r9,PACASLBCACHEPTR(r13) /* offset = paca->slb_cache_ptr */
- cmpldi r9,SLB_CACHE_ENTRIES
- bge 1f
-
- /* still room in the slb cache */
- sldi r11,r9,2 /* r11 = offset * sizeof(u32) */
- srdi r10,r10,28 /* get the 36 bits of the ESID */
- add r11,r11,r13 /* r11 = (u32 *)paca + offset */
- stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
- addi r9,r9,1 /* offset++ */
- b 2f
-1: /* offset >= SLB_CACHE_ENTRIES */
- li r9,SLB_CACHE_ENTRIES+1
-2:
- sth r9,PACASLBCACHEPTR(r13) /* paca->slb_cache_ptr = offset */
- crclr 4*cr0+eq /* set result to "success" */
- blr
-
-/*
- * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
- *
- * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
- */
-.Lslb_finish_load_1T:
- srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
- rldimi r10,r9,ESID_BITS_1T,0
- ASM_VSID_SCRAMBLE(r10,r9,r11,1T)
-
- li r10,MMU_SEGSIZE_1T
- rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
-
- /* r3 = EA, r11 = VSID data */
- clrrdi r9,r3,SID_SHIFT_1T /* clear out non-ESID bits */
- b 7b
-
-
-_ASM_NOKPROBE_SYMBOL(slb_allocate)
-_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_linear)
-_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_io)
-_ASM_NOKPROBE_SYMBOL(slb_compare_rr_to_size)
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-_ASM_NOKPROBE_SYMBOL(slb_miss_kernel_load_vmemmap)
-#endif
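
The assembly deleted above allocated SLB slots round-robin from paca->stab_rr: the counter increments, and once it reaches the slot count (boot-patched into slb_compare_rr_to_size) it wraps back to SLB_NUM_BOLTED so the bolted entries are never overwritten. A minimal C sketch of that policy, with illustrative constants and hypothetical names:

    /*
     * Round-robin SLB slot choice as in the deleted slb_allocate path.
     * n_slots stands in for the boot-patched slb_compare_rr_to_size
     * comparison; slots below SLB_NUM_BOLTED are never handed out.
     */
    #define SLB_NUM_BOLTED 2              /* illustrative */

    static unsigned int stab_rr;          /* stands in for paca->stab_rr */

    static unsigned int slb_pick_slot(unsigned int n_slots)
    {
            unsigned int slot = stab_rr + 1;

            if (slot >= n_slots)          /* wrapped past the end... */
                    slot = SLB_NUM_BOLTED; /* ...skip the bolted entries */
            stab_rr = slot;
            return slot;
    }
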
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 205fe557ca10..06898c13901d 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
+#include <linux/sched/mm.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
@@ -61,6 +62,13 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
#endif
+static inline bool slice_addr_is_low(unsigned long addr)
+{
+ u64 tmp = (u64)addr;
+
+ return tmp < SLICE_LOW_TOP;
+}
+
static void slice_range_to_mask(unsigned long start, unsigned long len,
struct slice_mask *ret)
{
@@ -70,7 +78,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
if (SLICE_NUM_HIGH)
bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);
- if (start < SLICE_LOW_TOP) {
+ if (slice_addr_is_low(start)) {
unsigned long mend = min(end,
(unsigned long)(SLICE_LOW_TOP - 1));
@@ -78,7 +86,7 @@ static void slice_range_to_mask(unsigned long start, unsigned long len,
- (1u << GET_LOW_SLICE_INDEX(start));
}
- if ((start + len) > SLICE_LOW_TOP) {
+ if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
@@ -133,7 +141,7 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (high_limit <= SLICE_LOW_TOP)
+ if (slice_addr_is_low(high_limit - 1))
return;
for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
@@ -182,7 +190,7 @@ static bool slice_check_range_fits(struct mm_struct *mm,
unsigned long end = start + len - 1;
u64 low_slices = 0;
- if (start < SLICE_LOW_TOP) {
+ if (slice_addr_is_low(start)) {
unsigned long mend = min(end,
(unsigned long)(SLICE_LOW_TOP - 1));
@@ -192,7 +200,7 @@ static bool slice_check_range_fits(struct mm_struct *mm,
if ((low_slices & available->low_slices) != low_slices)
return false;
- if (SLICE_NUM_HIGH && ((start + len) > SLICE_LOW_TOP)) {
+ if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
@@ -219,7 +227,7 @@ static void slice_flush_segments(void *parm)
copy_mm_to_paca(current->active_mm);
local_irq_save(flags);
- slb_flush_and_rebolt();
+ slb_flush_and_restore_bolted();
local_irq_restore(flags);
#endif
}
@@ -303,7 +311,7 @@ static bool slice_scan_available(unsigned long addr,
int end, unsigned long *boundary_addr)
{
unsigned long slice;
- if (addr < SLICE_LOW_TOP) {
+ if (slice_addr_is_low(addr)) {
slice = GET_LOW_SLICE_INDEX(addr);
*boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
return !!(available->low_slices & (1u << slice));
@@ -706,7 +714,7 @@ unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
VM_BUG_ON(radix_enabled());
- if (addr < SLICE_LOW_TOP) {
+ if (slice_addr_is_low(addr)) {
psizes = mm->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr);
} else {
@@ -757,6 +765,20 @@ void slice_init_new_context_exec(struct mm_struct *mm)
bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}
+#ifdef CONFIG_PPC_BOOK3S_64
+void slice_setup_new_exec(void)
+{
+ struct mm_struct *mm = current->mm;
+
+ slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);
+
+ if (!is_32bit_task())
+ return;
+
+ mm->context.slb_addr_limit = DEFAULT_MAP_WINDOW;
+}
+#endif
+
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
unsigned long len, unsigned int psize)
{
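
slice_addr_is_low() centralises the low/high boundary test and, by going through a u64, keeps the comparison well-defined on 32-bit configurations where the 4GB boundary does not fit in unsigned long. The converted call sites also switch from testing `start + len` to testing the inclusive end address, which cannot wrap for a range that touches the top of the address space. A compilable sketch of both points, boundary value illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define SLICE_LOW_TOP 0x100000000ull   /* 4GB boundary, illustrative */

    static bool slice_addr_is_low(unsigned long addr)
    {
            return (uint64_t)addr < SLICE_LOW_TOP;
    }

    /* Classify a range by its inclusive end so start + len cannot wrap. */
    static bool range_has_high_part(unsigned long start, unsigned long len)
    {
            unsigned long end = start + len - 1;

            return !slice_addr_is_low(end);
    }
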
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index fef3e1eb3a19..6a23b9ebd2a1 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -366,6 +366,7 @@ static inline void _tlbiel_lpid_guest(unsigned long lpid, unsigned long ric)
__tlbiel_lpid_guest(lpid, set, RIC_FLUSH_TLB);
asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
@@ -833,6 +834,15 @@ EXPORT_SYMBOL_GPL(radix__flush_pwc_lpid);
/*
* Flush partition scoped translations from LPID (=LPIDR)
*/
+void radix__flush_tlb_lpid(unsigned int lpid)
+{
+ _tlbie_lpid(lpid, RIC_FLUSH_ALL);
+}
+EXPORT_SYMBOL_GPL(radix__flush_tlb_lpid);
+
+/*
+ * Flush partition scoped translations from LPID (=LPIDR)
+ */
void radix__local_flush_tlb_lpid(unsigned int lpid)
{
_tlbiel_lpid(lpid, RIC_FLUSH_ALL);
@@ -1007,7 +1017,6 @@ void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
goto local;
}
_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
- goto local;
} else {
local:
_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 15fe5f0c8665..ae5d568e267f 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -503,6 +503,9 @@ static void setup_page_sizes(void)
for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
struct mmu_psize_def *def = &mmu_psize_defs[psize];
+ if (!def->shift)
+ continue;
+
if (tlb1ps & (1U << (def->shift - 10))) {
def->flags |= MMU_PAGE_SIZE_DIRECT;
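
The added `!def->shift` check skips page sizes absent from mmu_psize_defs; without it, an unpopulated entry makes `def->shift - 10` underflow and the `1U <<` shift is undefined. A small sketch of the guard, types trimmed down and values illustrative:

    #include <stdint.h>
    #include <stdio.h>

    struct mmu_psize_def { unsigned int shift; };   /* trimmed down */

    static void probe_direct_sizes(const struct mmu_psize_def *defs,
                                   int n, uint32_t tlb1ps)
    {
            for (int i = 0; i < n; i++) {
                    if (!defs[i].shift)     /* unpopulated entry: skip */
                            continue;
                    /* shift - 10 would underflow for shift == 0 */
                    if (tlb1ps & (1U << (defs[i].shift - 10)))
                            printf("2^%u byte pages supported\n",
                                   defs[i].shift);
            }
    }
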
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile
index 7a7834c39f64..8d26d7416481 100644
--- a/arch/powerpc/oprofile/Makefile
+++ b/arch/powerpc/oprofile/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index ad054dd0d666..5df6290d1ccc 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -7,7 +7,7 @@
* 2 of the License, or (at your option) any later version.
**/
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/processor.h>
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
index 82986d2acd9b..ab26df5bacb9 100644
--- a/arch/powerpc/perf/Makefile
+++ b/arch/powerpc/perf/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PERF_EVENTS) += callchain.o perf_regs.o
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index 1fafc32b12a0..6954636b16d1 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -1392,7 +1392,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
if (ret)
goto err_free_cpuhp_mem;
- pr_info("%s performance monitor hardware support registered\n",
+ pr_debug("%s performance monitor hardware support registered\n",
pmu_ptr->pmu.name);
return 0;
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 7963658dbc22..6dbae9884ec4 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -238,6 +238,7 @@ static int power7_marked_instr_event(u64 event)
case 6:
if (psel == 0x64)
return pmc >= 3;
+ break;
case 8:
return unit == 0xd;
}
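
The added break fixes an unintended switch fallthrough: a case-6 event that failed the psel test previously fell into case 8 and was classified by the wrong rule. A generic sketch of the bug class, with hypothetical values:

    #include <stdbool.h>

    static bool classify(int unit, int psel, int pmc)
    {
            switch (unit) {
            case 6:
                    if (psel == 0x64)
                            return pmc >= 3;
                    break;  /* without this, control reaches case 8 */
            case 8:
                    return psel == 0xd;
            }
            return false;
    }
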
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index 60254a321a91..2a9d66254ffc 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -2,7 +2,6 @@
config ACADIA
bool "Acadia"
depends on 40x
- default n
select PPC40x_SIMPLE
select 405EZ
help
@@ -11,7 +10,6 @@ config ACADIA
config EP405
bool "EP405/EP405PC"
depends on 40x
- default n
select 405GP
select PCI
help
@@ -20,7 +18,6 @@ config EP405
config HOTFOOT
bool "Hotfoot"
depends on 40x
- default n
select PPC40x_SIMPLE
select PCI
help
@@ -29,7 +26,6 @@ config HOTFOOT
config KILAUEA
bool "Kilauea"
depends on 40x
- default n
select 405EX
select PPC40x_SIMPLE
select PPC4xx_PCI_EXPRESS
@@ -41,7 +37,6 @@ config KILAUEA
config MAKALU
bool "Makalu"
depends on 40x
- default n
select 405EX
select PCI
select PPC4xx_PCI_EXPRESS
@@ -62,7 +57,6 @@ config WALNUT
config XILINX_VIRTEX_GENERIC_BOARD
bool "Generic Xilinx Virtex board"
depends on 40x
- default n
select XILINX_VIRTEX_II_PRO
select XILINX_VIRTEX_4_FX
select XILINX_INTC
@@ -80,7 +74,6 @@ config XILINX_VIRTEX_GENERIC_BOARD
config OBS600
bool "OpenBlockS 600"
depends on 40x
- default n
select 405EX
select PPC40x_SIMPLE
help
@@ -90,7 +83,6 @@ config OBS600
config PPC40x_SIMPLE
bool "Simple PowerPC 40x board support"
depends on 40x
- default n
help
This option enables the simple PowerPC 40x platform support.
@@ -156,7 +148,6 @@ config IBM405_ERR51
config APM8018X
bool "APM8018X"
depends on 40x
- default n
select PPC40x_SIMPLE
help
This option enables support for the AppliedMicro APM8018X evaluation
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index a6011422b861..f024efd5a4c2 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -2,7 +2,6 @@
config PPC_47x
bool "Support for 47x variant"
depends on 44x
- default n
select MPIC
help
This option enables support for the 47x family of processors and is
@@ -11,7 +10,6 @@ config PPC_47x
config BAMBOO
bool "Bamboo"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440EP
select PCI
@@ -21,7 +19,6 @@ config BAMBOO
config BLUESTONE
bool "Bluestone"
depends on 44x
- default n
select PPC44x_SIMPLE
select APM821xx
select PCI_MSI
@@ -44,7 +41,6 @@ config EBONY
config SAM440EP
bool "Sam440ep"
depends on 44x
- default n
select 440EP
select PCI
help
@@ -53,7 +49,6 @@ config SAM440EP
config SEQUOIA
bool "Sequoia"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440EPX
help
@@ -62,7 +57,6 @@ config SEQUOIA
config TAISHAN
bool "Taishan"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440GX
select PCI
@@ -73,7 +67,6 @@ config TAISHAN
config KATMAI
bool "Katmai"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440SPe
select PCI
@@ -86,7 +79,6 @@ config KATMAI
config RAINIER
bool "Rainier"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440GRX
select PCI
@@ -96,7 +88,6 @@ config RAINIER
config WARP
bool "PIKA Warp"
depends on 44x
- default n
select 440EP
help
This option enables support for the PIKA Warp(tm) Appliance. The Warp
@@ -109,7 +100,6 @@ config WARP
config ARCHES
bool "Arches"
depends on 44x
- default n
select PPC44x_SIMPLE
select 460EX # Odd since it uses 460GT but the effects are the same
select PCI
@@ -120,7 +110,6 @@ config ARCHES
config CANYONLANDS
bool "Canyonlands"
depends on 44x
- default n
select 460EX
select PCI
select PPC4xx_PCI_EXPRESS
@@ -134,7 +123,6 @@ config CANYONLANDS
config GLACIER
bool "Glacier"
depends on 44x
- default n
select PPC44x_SIMPLE
select 460EX # Odd since it uses 460GT but the effects are the same
select PCI
@@ -147,7 +135,6 @@ config GLACIER
config REDWOOD
bool "Redwood"
depends on 44x
- default n
select PPC44x_SIMPLE
select 460SX
select PCI
@@ -160,7 +147,6 @@ config REDWOOD
config EIGER
bool "Eiger"
depends on 44x
- default n
select PPC44x_SIMPLE
select 460SX
select PCI
@@ -172,7 +158,6 @@ config EIGER
config YOSEMITE
bool "Yosemite"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440EP
select PCI
@@ -182,7 +167,6 @@ config YOSEMITE
config ISS4xx
bool "ISS 4xx Simulator"
depends on (44x || 40x)
- default n
select 405GP if 40x
select 440GP if 44x && !PPC_47x
select PPC_FPU
@@ -193,7 +177,6 @@ config ISS4xx
config CURRITUCK
bool "IBM Currituck (476fpe) Support"
depends on PPC_47x
- default n
select SWIOTLB
select 476FPE
select PPC4xx_PCI_EXPRESS
@@ -203,7 +186,6 @@ config CURRITUCK
config FSP2
bool "IBM FSP2 (476fpe) Support"
depends on PPC_47x
- default n
select 476FPE
select IBM_EMAC_EMAC4 if IBM_EMAC
select IBM_EMAC_RGMII if IBM_EMAC
@@ -215,7 +197,6 @@ config FSP2
config AKEBONO
bool "IBM Akebono (476gtr) Support"
depends on PPC_47x
- default n
select SWIOTLB
select 476FPE
select PPC4xx_PCI_EXPRESS
@@ -241,7 +222,6 @@ config AKEBONO
config ICON
bool "Icon"
depends on 44x
- default n
select PPC44x_SIMPLE
select 440SPe
select PCI
@@ -252,7 +232,6 @@ config ICON
config XILINX_VIRTEX440_GENERIC_BOARD
bool "Generic Xilinx Virtex 5 FXT board support"
depends on 44x
- default n
select XILINX_VIRTEX_5_FXT
select XILINX_INTC
help
@@ -280,7 +259,6 @@ config XILINX_ML510
config PPC44x_SIMPLE
bool "Simple PowerPC 44x board support"
depends on 44x
- default n
help
This option enables the simple PowerPC 44x platform support.
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
index 04f0c73a9b4f..7a507f775308 100644
--- a/arch/powerpc/platforms/44x/fsp2.c
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -210,15 +210,15 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
for_each_compatible_node(np, NULL, compat) {
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
- pr_err("device tree node %s is missing a interrupt",
- np->name);
+ pr_err("device tree node %pOFn is missing a interrupt",
+ np);
return;
}
rc = request_irq(irq, errirq_handler, 0, np->name, np);
if (rc) {
- pr_err("fsp_of_probe: request_irq failed: np=%s rc=%d",
- np->full_name, rc);
+ pr_err("fsp_of_probe: request_irq failed: np=%pOF rc=%d",
+ np, rc);
return;
}
}
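
Several patches in this series make the same substitution: printing a device node through the %pOFn (node name) and %pOF (full path) printk specifiers instead of dereferencing the name/full_name fields directly, which were on their way out. A minimal sketch of the two specifiers:

    #include <linux/of.h>
    #include <linux/printk.h>

    static void report_node_failure(struct device_node *np, int rc)
    {
            pr_err("node %pOFn: probe failed\n", np);         /* name only */
            pr_err("node %pOF: request_irq rc=%d\n", np, rc); /* full path */
    }
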
diff --git a/arch/powerpc/platforms/4xx/ocm.c b/arch/powerpc/platforms/4xx/ocm.c
index 69d9f60d9fe5..f5bbd4563342 100644
--- a/arch/powerpc/platforms/4xx/ocm.c
+++ b/arch/powerpc/platforms/4xx/ocm.c
@@ -113,7 +113,6 @@ static void __init ocm_init_node(int count, struct device_node *node)
int len;
struct resource rsrc;
- int ioflags;
ocm = ocm_get_node(count);
@@ -179,9 +178,8 @@ static void __init ocm_init_node(int count, struct device_node *node)
/* ioremap the non-cached region */
if (ocm->nc.memtotal) {
- ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC;
ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
- ioflags);
+ _PAGE_EXEC | PAGE_KERNEL_NCG);
if (!ocm->nc.virt) {
printk(KERN_ERR
@@ -195,9 +193,8 @@ static void __init ocm_init_node(int count, struct device_node *node)
/* ioremap the cached region */
if (ocm->c.memtotal) {
- ioflags = _PAGE_EXEC;
ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
- ioflags);
+ _PAGE_EXEC | PAGE_KERNEL);
if (!ocm->c.virt) {
printk(KERN_ERR
diff --git a/arch/powerpc/platforms/4xx/soc.c b/arch/powerpc/platforms/4xx/soc.c
index 5e36508b2a70..1844bf502fcf 100644
--- a/arch/powerpc/platforms/4xx/soc.c
+++ b/arch/powerpc/platforms/4xx/soc.c
@@ -200,7 +200,7 @@ void ppc4xx_reset_system(char *cmd)
u32 reset_type = DBCR0_RST_SYSTEM;
const u32 *prop;
- np = of_find_node_by_type(NULL, "cpu");
+ np = of_get_cpu_node(0, NULL);
if (np) {
prop = of_get_property(np, "reset-type", NULL);
diff --git a/arch/powerpc/platforms/82xx/Kconfig b/arch/powerpc/platforms/82xx/Kconfig
index 6e04099361b9..1947a88bc69f 100644
--- a/arch/powerpc/platforms/82xx/Kconfig
+++ b/arch/powerpc/platforms/82xx/Kconfig
@@ -51,7 +51,6 @@ endif
config PQ2ADS
bool
- default n
config 8260
bool
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 7e966f4cf19a..fff72425727a 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -216,8 +216,8 @@ static int smp_85xx_start_cpu(int cpu)
/* Map the spin table */
if (ioremappable)
- spin_table = ioremap_prot(*cpu_rel_addr,
- sizeof(struct epapr_spin_table), _PAGE_COHERENT);
+ spin_table = ioremap_coherent(*cpu_rel_addr,
+ sizeof(struct epapr_spin_table));
else
spin_table = phys_to_virt(*cpu_rel_addr);
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c
index 027c42d8966c..f1c805c8adbc 100644
--- a/arch/powerpc/platforms/8xx/m8xx_setup.c
+++ b/arch/powerpc/platforms/8xx/m8xx_setup.c
@@ -66,7 +66,7 @@ static int __init get_freq(char *name, unsigned long *val)
int found = 0;
/* The cpu node should have timebase and clock frequency properties */
- cpu = of_find_node_by_type(NULL, "cpu");
+ cpu = of_get_cpu_node(0, NULL);
if (cpu) {
fp = of_get_property(cpu, name, NULL);
@@ -147,8 +147,9 @@ void __init mpc8xx_calibrate_decr(void)
* we have to enable the timebase). The decrementer interrupt
* is wired into the vector table, nothing to do here for that.
*/
- cpu = of_find_node_by_type(NULL, "cpu");
+ cpu = of_get_cpu_node(0, NULL);
virq= irq_of_parse_and_map(cpu, 0);
+ of_node_put(cpu);
irq = virq_to_hw(virq);
sys_tmr2 = immr_map(im_sit);
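
of_get_cpu_node(0, NULL) looks up the node for CPU 0 and returns it with its reference count raised, so the caller owes an of_node_put() — which the hunk above also adds, fixing a leak in the old code. A sketch of the pairing:

    #include <linux/of.h>
    #include <linux/of_irq.h>

    static unsigned int map_cpu0_timebase_irq(void)
    {
            struct device_node *cpu;
            unsigned int virq = 0;

            cpu = of_get_cpu_node(0, NULL);  /* takes a reference */
            if (cpu) {
                    virq = irq_of_parse_and_map(cpu, 0);
                    of_node_put(cpu);        /* balance the reference */
            }
            return virq;
    }
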
diff --git a/arch/powerpc/platforms/8xx/machine_check.c b/arch/powerpc/platforms/8xx/machine_check.c
index 402016705a39..9944fc303df0 100644
--- a/arch/powerpc/platforms/8xx/machine_check.c
+++ b/arch/powerpc/platforms/8xx/machine_check.c
@@ -18,9 +18,9 @@ int machine_check_8xx(struct pt_regs *regs)
pr_err("Machine check in kernel mode.\n");
pr_err("Caused by (from SRR1=%lx): ", reason);
if (reason & 0x40000000)
- pr_err("Fetch error at address %lx\n", regs->nip);
+ pr_cont("Fetch error at address %lx\n", regs->nip);
else
- pr_err("Data access error at address %lx\n", regs->dar);
+ pr_cont("Data access error at address %lx\n", regs->dar);
#ifdef CONFIG_PCI
/* the qspan pci read routines can cause machine checks -- Cort
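
The first half of this message is printed by pr_err() without a trailing newline; finishing it with another pr_err() starts a fresh log record and splits the line, while pr_cont() appends to the record in flight. A sketch of the corrected pattern:

    #include <linux/printk.h>

    static void report_mce(unsigned long reason, unsigned long nip,
                           unsigned long dar)
    {
            pr_err("Caused by (from SRR1=%lx): ", reason); /* no newline */
            if (reason & 0x40000000)
                    pr_cont("Fetch error at address %lx\n", nip);
            else
                    pr_cont("Data access error at address %lx\n", dar);
    }
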
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 14ef17e10ec9..260a56b7602d 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -23,7 +23,6 @@ source "arch/powerpc/platforms/amigaone/Kconfig"
config KVM_GUEST
bool "KVM Guest support"
- default n
select EPAPR_PARAVIRT
---help---
This option enables various optimizations for running under the KVM
@@ -34,7 +33,6 @@ config KVM_GUEST
config EPAPR_PARAVIRT
bool "ePAPR para-virtualization support"
- default n
help
Enables ePAPR para-virtualization support for guests.
@@ -74,7 +72,6 @@ config PPC_DT_CPU_FTRS
config UDBG_RTAS_CONSOLE
bool "RTAS based debug console"
depends on PPC_RTAS
- default n
config PPC_SMP_MUXED_IPI
bool
@@ -86,16 +83,13 @@ config PPC_SMP_MUXED_IPI
config IPIC
bool
- default n
config MPIC
bool
- default n
config MPIC_TIMER
bool "MPIC Global Timer"
depends on MPIC && FSL_SOC
- default n
help
The MPIC global timer is a hardware timer inside the
Freescale PIC complying with OpenPIC standard. When the
@@ -107,7 +101,6 @@ config MPIC_TIMER
config FSL_MPIC_TIMER_WAKEUP
tristate "Freescale MPIC global timer wakeup driver"
depends on FSL_SOC && MPIC_TIMER && PM
- default n
help
The driver provides a way to wake up the system by MPIC
timer.
@@ -115,43 +108,35 @@ config FSL_MPIC_TIMER_WAKEUP
config PPC_EPAPR_HV_PIC
bool
- default n
select EPAPR_PARAVIRT
config MPIC_WEIRD
bool
- default n
config MPIC_MSGR
bool "MPIC message register support"
depends on MPIC
- default n
help
Enables support for the MPIC message registers. These
registers are used for inter-processor communication.
config PPC_I8259
bool
- default n
config U3_DART
bool
depends on PPC64
- default n
config PPC_RTAS
bool
- default n
config RTAS_ERROR_LOGGING
bool
depends on PPC_RTAS
- default n
config PPC_RTAS_DAEMON
bool
depends on PPC_RTAS
- default n
config RTAS_PROC
bool "Proc interface to RTAS"
@@ -164,11 +149,9 @@ config RTAS_FLASH
config MMIO_NVRAM
bool
- default n
config MPIC_U3_HT_IRQS
bool
- default n
config MPIC_BROKEN_REGREAD
bool
@@ -187,15 +170,12 @@ config EEH
config PPC_MPC106
bool
- default n
config PPC_970_NAP
bool
- default n
config PPC_P7_NAP
bool
- default n
config PPC_INDIRECT_PIO
bool
@@ -295,7 +275,6 @@ config CPM2
config FSL_ULI1575
bool
- default n
select GENERIC_ISA_DMA
help
Supports for the ULI1575 PCIe south bridge that exists on some
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 6c6a7c72cae4..f4e2c5729374 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config PPC64
bool "64-bit kernel"
- default n
select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
@@ -72,6 +71,7 @@ config PPC_BOOK3S_64
select PPC_HAVE_PMU_SUPPORT
select SYS_SUPPORTS_HUGETLBFS
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select ARCH_SUPPORTS_NUMA_BALANCING
select IRQ_WORK
@@ -368,7 +368,6 @@ config PPC_MM_SLICES
bool
default y if PPC_BOOK3S_64
default y if PPC_8xx && HUGETLB_PAGE
- default n
config PPC_HAVE_PMU_SUPPORT
bool
@@ -382,7 +381,6 @@ config PPC_PERF_CTRS
config FORCE_SMP
# Allow platforms to force SMP=y by selecting this
bool
- default n
select SMP
config SMP
@@ -423,7 +421,6 @@ config CHECK_CACHE_COHERENCY
config PPC_DOORBELL
bool
- default n
endmenu
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index e46bb7ea710f..143d4417f6cc 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -1,7 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
obj-$(CONFIG_FSL_ULI1575) += fsl_uli1575.o
obj-$(CONFIG_PPC_PMAC) += powermac/
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 9f5958f16923..4b2f114f3116 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_CELL
bool
- default n
config PPC_CELL_COMMON
bool
@@ -22,7 +21,6 @@ config PPC_CELL_NATIVE
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC #test only
select IBM_EMAC_TAH if IBM_EMAC #test only
- default n
config PPC_IBM_CELL_BLADE
bool "IBM Cell Blade"
@@ -54,7 +52,6 @@ config SPU_FS
config SPU_BASE
bool
- default n
select PPC_COPRO_BASE
config CBE_RAS
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 5c409c98cca8..f7e36373f6e0 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -180,35 +180,22 @@ out:
static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
{
- struct of_phandle_args oirq;
- int ret;
int i;
for (i=0; i < 3; i++) {
- ret = of_irq_parse_one(np, i, &oirq);
- if (ret) {
- pr_debug("spu_new: failed to get irq %d\n", i);
- goto err;
- }
- ret = -EINVAL;
- pr_debug(" irq %d no 0x%x on %pOF\n", i, oirq.args[0],
- oirq.np);
- spu->irqs[i] = irq_create_of_mapping(&oirq);
- if (!spu->irqs[i]) {
- pr_debug("spu_new: failed to map it !\n");
+ spu->irqs[i] = irq_of_parse_and_map(np, i);
+ if (!spu->irqs[i])
goto err;
- }
}
return 0;
err:
- pr_debug("failed to map irq %x for spu %s\n", *oirq.args,
- spu->name);
+ pr_debug("failed to map irq %x for spu %s\n", i, spu->name);
for (; i >= 0; i--) {
if (spu->irqs[i])
irq_dispose_mapping(spu->irqs[i]);
}
- return ret;
+ return -EINVAL;
}
static int spu_map_resource(struct spu *spu, int nr,
@@ -295,8 +282,8 @@ static int __init of_enumerate_spus(int (*fn)(void *data))
for_each_node_by_type(node, "spe") {
ret = fn(node);
if (ret) {
- printk(KERN_WARNING "%s: Error initializing %s\n",
- __func__, node->name);
+ printk(KERN_WARNING "%s: Error initializing %pOFn\n",
+ __func__, node);
of_node_put(node);
break;
}
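
irq_of_parse_and_map() folds the old parse-then-map pair into one call, and the error path disposes of whatever mappings were already created. The unwind pattern in isolation, as a sketch:

    #include <linux/errno.h>
    #include <linux/of_irq.h>

    static int map_spu_irqs(struct device_node *np, unsigned int irqs[3])
    {
            int i;

            for (i = 0; i < 3; i++) {
                    irqs[i] = irq_of_parse_and_map(np, i);
                    if (!irqs[i])
                            goto err;
            }
            return 0;
    err:
            for (; i >= 0; i--)      /* irqs[i] is 0, the rest are live */
                    if (irqs[i])
                            irq_dispose_mapping(irqs[i]);
            return -EINVAL;
    }
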
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 403523c061ba..ecf703ee3a76 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -112,7 +112,7 @@ static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
}
error = of_address_to_resource(np, 0, &res);
if (error) {
- pr_err("no valid reg found for %s\n", np->name);
+ pr_err("no valid reg found for %pOFn\n", np);
goto out_put;
}
diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig
index 376d0be36b66..2601fac50354 100644
--- a/arch/powerpc/platforms/maple/Kconfig
+++ b/arch/powerpc/platforms/maple/Kconfig
@@ -13,7 +13,6 @@ config PPC_MAPLE
select PPC_RTAS
select MMIO_NVRAM
select ATA_NONSTANDARD if ATA
- default n
help
This option enables support for the Maple 970FX Evaluation Board.
For more information, refer to <http://www.970eval.com>
diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
index d458a791d35b..98e3bc22bebc 100644
--- a/arch/powerpc/platforms/pasemi/Kconfig
+++ b/arch/powerpc/platforms/pasemi/Kconfig
@@ -2,7 +2,6 @@
config PPC_PASEMI
depends on PPC64 && PPC_BOOK3S && CPU_BIG_ENDIAN
bool "PA Semi SoC-based platforms"
- default n
select MPIC
select PCI
select PPC_UDBG_16550
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index c80f72c370ae..53384eb42a76 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -576,7 +576,7 @@ int pasemi_dma_init(void)
res.start = 0xfd800000;
res.end = res.start + 0x1000;
}
- dma_status = __ioremap(res.start, resource_size(&res), 0);
+ dma_status = ioremap_cache(res.start, resource_size(&res));
pci_dev_put(iob_pdev);
for (i = 0; i < MAX_TXCH; i++)
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile
index f2839eed0f89..923bfb340433 100644
--- a/arch/powerpc/platforms/powermac/Makefile
+++ b/arch/powerpc/platforms/powermac/Makefile
@@ -1,9 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS_bootx_init.o += -fPIC
+CFLAGS_bootx_init.o += $(call cc-option, -fno-stack-protector)
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
-CFLAGS_REMOVE_bootx_init.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_bootx_init.o = $(CC_FLAGS_FTRACE)
endif
obj-y += pic.o setup.o time.o feature.o pci.o \
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 4eb8cb38fc69..ed2f54b3f173 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1049,7 +1049,6 @@ core99_reset_cpu(struct device_node *node, long param, long value)
unsigned long flags;
struct macio_chip *macio;
struct device_node *np;
- struct device_node *cpus;
const int dflt_reset_lines[] = { KL_GPIO_RESET_CPU0,
KL_GPIO_RESET_CPU1,
KL_GPIO_RESET_CPU2,
@@ -1059,10 +1058,7 @@ core99_reset_cpu(struct device_node *node, long param, long value)
if (macio->type != macio_keylargo)
return -ENODEV;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL)
- return -ENODEV;
- for (np = cpus->child; np != NULL; np = np->sibling) {
+ for_each_of_cpu_node(np) {
const u32 *num = of_get_property(np, "reg", NULL);
const u32 *rst = of_get_property(np, "soft-reset", NULL);
if (num == NULL || rst == NULL)
@@ -1072,7 +1068,6 @@ core99_reset_cpu(struct device_node *node, long param, long value)
break;
}
}
- of_node_put(cpus);
if (np == NULL || reset_io == 0)
reset_io = dflt_reset_lines[param];
@@ -1504,16 +1499,12 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
unsigned long flags;
struct macio_chip *macio;
struct device_node *np;
- struct device_node *cpus;
macio = &macio_chips[0];
if (macio->type != macio_keylargo2 && macio->type != macio_shasta)
return -ENODEV;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL)
- return -ENODEV;
- for (np = cpus->child; np != NULL; np = np->sibling) {
+ for_each_of_cpu_node(np) {
const u32 *num = of_get_property(np, "reg", NULL);
const u32 *rst = of_get_property(np, "soft-reset", NULL);
if (num == NULL || rst == NULL)
@@ -1523,7 +1514,6 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
break;
}
}
- of_node_put(cpus);
if (np == NULL || reset_io == 0)
return -ENODEV;
@@ -2515,31 +2505,26 @@ found:
* supposed to be set when not supported, but I'm not very confident
* that all Apple OF revs did it properly, I do it the paranoid way.
*/
- while (uninorth_base && uninorth_rev > 3) {
- struct device_node *cpus = of_find_node_by_path("/cpus");
+ if (uninorth_base && uninorth_rev > 3) {
struct device_node *np;
- if (!cpus || !cpus->child) {
- printk(KERN_WARNING "Can't find CPU(s) in device tree !\n");
- of_node_put(cpus);
- break;
- }
- np = cpus->child;
- /* Nap mode not supported on SMP */
- if (np->sibling) {
- of_node_put(cpus);
- break;
- }
- /* Nap mode not supported if flush-on-lock property is present */
- if (of_get_property(np, "flush-on-lock", NULL)) {
- of_node_put(cpus);
- break;
+ for_each_of_cpu_node(np) {
+ int cpu_count = 1;
+
+ /* Nap mode not supported on SMP */
+ if (of_get_property(np, "flush-on-lock", NULL) ||
+ (cpu_count > 1)) {
+ powersave_nap = 0;
+ of_node_put(np);
+ break;
+ }
+
+ cpu_count++;
+ powersave_nap = 1;
}
- of_node_put(cpus);
- powersave_nap = 1;
- printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n");
- break;
}
+ if (powersave_nap)
+ printk(KERN_DEBUG "Processor NAP mode on idle enabled.\n");
/* On CPUs that support it (750FX), lowspeed by default during
* NAP mode
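
for_each_of_cpu_node() walks every CPU node wherever it sits in the tree and handles reference counting between iterations; the caller's only remaining duty is an of_node_put(), or an explicit ownership transfer, when leaving the loop early, as the converted code above does. A sketch:

    #include <linux/of.h>

    /* Returns the first CPU node carrying @prop, with a reference held;
     * the caller must of_node_put() it when done. */
    static struct device_node *first_cpu_with_prop(const char *prop)
    {
            struct device_node *np;

            for_each_of_cpu_node(np) {
                    if (of_get_property(np, prop, NULL))
                            return np;  /* reference transfers to caller */
            }
            return NULL;
    }
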
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 3a529fcdae97..2f00e3daafb0 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -243,10 +243,9 @@ static void __init l2cr_init(void)
{
/* Checks "l2cr-value" property in the registry */
if (cpu_has_feature(CPU_FTR_L2CR)) {
- struct device_node *np = of_find_node_by_name(NULL, "cpus");
- if (!np)
- np = of_find_node_by_type(NULL, "cpu");
- if (np) {
+ struct device_node *np;
+
+ for_each_of_cpu_node(np) {
const unsigned int *l2cr =
of_get_property(np, "l2cr-value", NULL);
if (l2cr) {
@@ -256,6 +255,7 @@ static void __init l2cr_init(void)
_set_L2CR(ppc_override_l2cr_value);
}
of_node_put(np);
+ break;
}
}
@@ -279,8 +279,8 @@ static void __init pmac_setup_arch(void)
/* Set loops_per_jiffy to a half-way reasonable value,
for use until calibrate_delay gets called. */
loops_per_jiffy = 50000000 / HZ;
- cpu = of_find_node_by_type(NULL, "cpu");
- if (cpu != NULL) {
+
+ for_each_of_cpu_node(cpu) {
fp = of_get_property(cpu, "clock-frequency", NULL);
if (fp != NULL) {
if (pvr >= 0x30 && pvr < 0x80)
@@ -292,8 +292,9 @@ static void __init pmac_setup_arch(void)
else
/* 601, 603, etc. */
loops_per_jiffy = *fp / (2 * HZ);
+ of_node_put(cpu);
+ break;
}
- of_node_put(cpu);
}
/* See if newworld or oldworld */
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index f92c1918fb56..f157e3d071f2 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -45,13 +45,6 @@
#endif
/*
- * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
- * times wrap in 2040. If we need to handle later times, the read_time functions
- * need to be changed to interpret wrapped times as post-2040.
- */
-#define RTC_OFFSET 2082844800
-
-/*
* Calibrate the decrementer frequency with the VIA timer 1.
*/
#define VIA_TIMER_FREQ_6 4700000 /* time 1 frequency * 6 */
@@ -90,98 +83,6 @@ long __init pmac_time_init(void)
return delta;
}
-#ifdef CONFIG_ADB_CUDA
-static time64_t cuda_get_time(void)
-{
- struct adb_request req;
- time64_t now;
-
- if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
- return 0;
- while (!req.complete)
- cuda_poll();
- if (req.reply_len != 7)
- printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
- req.reply_len);
- now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
- (req.reply[5] << 8) + req.reply[6]);
- /* it's either after year 2040, or the RTC has gone backwards */
- WARN_ON(now < RTC_OFFSET);
-
- return now - RTC_OFFSET;
-}
-
-#define cuda_get_rtc_time(tm) rtc_time64_to_tm(cuda_get_time(), (tm))
-
-static int cuda_set_rtc_time(struct rtc_time *tm)
-{
- u32 nowtime;
- struct adb_request req;
-
- nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
- if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
- nowtime >> 24, nowtime >> 16, nowtime >> 8,
- nowtime) < 0)
- return -ENXIO;
- while (!req.complete)
- cuda_poll();
- if ((req.reply_len != 3) && (req.reply_len != 7))
- printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n",
- req.reply_len);
- return 0;
-}
-
-#else
-#define cuda_get_time() 0
-#define cuda_get_rtc_time(tm)
-#define cuda_set_rtc_time(tm) 0
-#endif
-
-#ifdef CONFIG_ADB_PMU
-static time64_t pmu_get_time(void)
-{
- struct adb_request req;
- time64_t now;
-
- if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
- return 0;
- pmu_wait_complete(&req);
- if (req.reply_len != 4)
- printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
- req.reply_len);
- now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
- (req.reply[2] << 8) + req.reply[3]);
-
- /* it's either after year 2040, or the RTC has gone backwards */
- WARN_ON(now < RTC_OFFSET);
-
- return now - RTC_OFFSET;
-}
-
-#define pmu_get_rtc_time(tm) rtc_time64_to_tm(pmu_get_time(), (tm))
-
-static int pmu_set_rtc_time(struct rtc_time *tm)
-{
- u32 nowtime;
- struct adb_request req;
-
- nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
- if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
- nowtime >> 16, nowtime >> 8, nowtime) < 0)
- return -ENXIO;
- pmu_wait_complete(&req);
- if (req.reply_len != 0)
- printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n",
- req.reply_len);
- return 0;
-}
-
-#else
-#define pmu_get_time() 0
-#define pmu_get_rtc_time(tm)
-#define pmu_set_rtc_time(tm) 0
-#endif
-
#ifdef CONFIG_PMAC_SMU
static time64_t smu_get_time(void)
{
@@ -191,11 +92,6 @@ static time64_t smu_get_time(void)
return 0;
return rtc_tm_to_time64(&tm);
}
-
-#else
-#define smu_get_time() 0
-#define smu_get_rtc_time(tm, spin)
-#define smu_set_rtc_time(tm, spin) 0
#endif
/* Can't be __init, it's called when suspending and resuming */
@@ -203,12 +99,18 @@ time64_t pmac_get_boot_time(void)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
return cuda_get_time();
+#endif
+#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
return pmu_get_time();
+#endif
+#ifdef CONFIG_PMAC_SMU
case SYS_CTRLER_SMU:
return smu_get_time();
+#endif
default:
return 0;
}
@@ -218,15 +120,21 @@ void pmac_get_rtc_time(struct rtc_time *tm)
{
/* Get the time from the RTC, used only at boot time */
switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
- cuda_get_rtc_time(tm);
+ rtc_time64_to_tm(cuda_get_time(), tm);
break;
+#endif
+#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
- pmu_get_rtc_time(tm);
+ rtc_time64_to_tm(pmu_get_time(), tm);
break;
+#endif
+#ifdef CONFIG_PMAC_SMU
case SYS_CTRLER_SMU:
smu_get_rtc_time(tm, 1);
break;
+#endif
default:
;
}
@@ -235,12 +143,18 @@ void pmac_get_rtc_time(struct rtc_time *tm)
int pmac_set_rtc_time(struct rtc_time *tm)
{
switch (sys_ctrler) {
+#ifdef CONFIG_ADB_CUDA
case SYS_CTRLER_CUDA:
return cuda_set_rtc_time(tm);
+#endif
+#ifdef CONFIG_ADB_PMU
case SYS_CTRLER_PMU:
return pmu_set_rtc_time(tm);
+#endif
+#ifdef CONFIG_PMAC_SMU
case SYS_CTRLER_SMU:
return smu_set_rtc_time(tm, 1);
+#endif
default:
return -ENODEV;
}
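
The Cuda and PMU RTC helpers deleted here (together with RTC_OFFSET) moved out next to their drivers; the arithmetic they performed is just the fixed 2082844800-second offset between the Mac epoch (1904-01-01) and the Unix epoch (1970-01-01), per the removed comment. A sketch of the conversion:

    #include <stdint.h>

    #define RTC_OFFSET 2082844800u  /* seconds from 1904 to 1970 */

    static int64_t mac_rtc_to_unix(uint32_t mac_seconds)
    {
            /* values below RTC_OFFSET are post-2040 wraps or bogus */
            return (int64_t)mac_seconds - RTC_OFFSET;
    }

    static uint32_t unix_to_mac_rtc(int64_t unix_seconds)
    {
            return (uint32_t)(unix_seconds + RTC_OFFSET);
    }
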
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index f8dc98d3dc01..99083fe992d5 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -15,11 +15,6 @@ config PPC_POWERNV
select PPC_SCOM
select ARCH_RANDOM
select CPU_FREQ
- select CPU_FREQ_GOV_PERFORMANCE
- select CPU_FREQ_GOV_POWERSAVE
- select CPU_FREQ_GOV_USERSPACE
- select CPU_FREQ_GOV_ONDEMAND
- select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
select MMU_NOTIFIER
select FORCE_SMP
@@ -35,7 +30,6 @@ config OPAL_PRD
config PPC_MEMTRACE
bool "Enable removal of RAM from kernel mappings for tracing"
depends on PPC_POWERNV && MEMORY_HOTREMOVE
- default n
help
Enabling this option allows for the removal of memory (RAM)
from the kernel mappings to be used for hardware tracing.
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 3c1beae29f2d..abc0be7507c8 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -223,14 +223,6 @@ int pnv_eeh_post_init(void)
eeh_probe_devices();
eeh_addr_cache_build();
- if (eeh_has_flag(EEH_POSTPONED_PROBE)) {
- eeh_clear_flag(EEH_POSTPONED_PROBE);
- if (eeh_enabled())
- pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
- else
- pr_info("EEH: No capable adapters found\n");
- }
-
/* Register OPAL event notifier */
eeh_event_irq = opal_event_request(ilog2(OPAL_EVENT_PCI_ERROR));
if (eeh_event_irq < 0) {
@@ -391,12 +383,6 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
return NULL;
- /* Skip if we haven't probed yet */
- if (phb->ioda.pe_rmap[config_addr] == IODA_INVALID_PE) {
- eeh_add_flag(EEH_POSTPONED_PROBE);
- return NULL;
- }
-
/* Initialize eeh device */
edev->class_code = pdn->class_code;
edev->mode &= 0xFFFFFF00;
@@ -604,7 +590,7 @@ static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
EEH_STATE_MMIO_ENABLED |
EEH_STATE_DMA_ENABLED);
} else if (!(pe->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
pnv_eeh_get_phb_diag(pe);
if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
@@ -706,7 +692,7 @@ static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
if (phb->freeze_pe)
phb->freeze_pe(phb, pe->addr);
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
pnv_eeh_get_phb_diag(pe);
if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
@@ -1054,7 +1040,7 @@ static int pnv_eeh_reset_vf_pe(struct eeh_pe *pe, int option)
int ret;
/* The VF PE should have only one child device */
- edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, list);
+ edev = list_first_entry_or_null(&pe->edevs, struct eeh_dev, entry);
pdn = eeh_dev_to_pdn(edev);
if (!pdn)
return -ENXIO;
@@ -1148,43 +1134,6 @@ static int pnv_eeh_reset(struct eeh_pe *pe, int option)
}
/**
- * pnv_eeh_wait_state - Wait for PE state
- * @pe: EEH PE
- * @max_wait: maximal period in millisecond
- *
- * Wait for the state of associated PE. It might take some time
- * to retrieve the PE's state.
- */
-static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
-{
- int ret;
- int mwait;
-
- while (1) {
- ret = pnv_eeh_get_state(pe, &mwait);
-
- /*
- * If the PE's state is temporarily unavailable,
- * we have to wait for the specified time. Otherwise,
- * the PE's state will be returned immediately.
- */
- if (ret != EEH_STATE_UNAVAILABLE)
- return ret;
-
- if (max_wait <= 0) {
- pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
- __func__, pe->addr, max_wait);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- max_wait -= mwait;
- msleep(mwait);
- }
-
- return EEH_STATE_NOT_SUPPORT;
-}
-
-/**
* pnv_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
@@ -1611,7 +1560,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
ret == EEH_NEXT_ERR_FENCED_PHB) &&
!((*pe)->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(*pe);
pnv_eeh_get_phb_diag(*pe);
if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
@@ -1640,7 +1589,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
}
/* We possibly migrate to another PE */
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(*pe);
}
/*
@@ -1702,7 +1651,6 @@ static struct eeh_ops pnv_eeh_ops = {
.get_pe_addr = pnv_eeh_get_pe_addr,
.get_state = pnv_eeh_get_state,
.reset = pnv_eeh_reset,
- .wait_state = pnv_eeh_wait_state,
.get_log = pnv_eeh_get_log,
.configure_bridge = pnv_eeh_configure_bridge,
.err_inject = pnv_eeh_err_inject,
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 51dc398ae3f7..a29fdf8a2e56 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -90,17 +90,15 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
change_memblock_state);
- lock_device_hotplug();
- remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
- unlock_device_hotplug();
return true;
}
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
- u64 start_pfn, end_pfn, nr_pages;
+ u64 start_pfn, end_pfn, nr_pages, pfn;
u64 base_pfn;
+ u64 bytes = memory_block_size_bytes();
if (!node_spanned_pages(nid))
return 0;
@@ -113,8 +111,21 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
end_pfn = round_down(end_pfn - nr_pages, nr_pages);
for (base_pfn = end_pfn; base_pfn > start_pfn; base_pfn -= nr_pages) {
- if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true)
+ if (memtrace_offline_pages(nid, base_pfn, nr_pages) == true) {
+ /*
+ * Remove memory in memory block size chunks so that
+ * iomem resources are always split to the same size and
+ * we never try to remove memory that spans two iomem
+ * resources.
+ */
+ lock_device_hotplug();
+ end_pfn = base_pfn + nr_pages;
+ for (pfn = base_pfn; pfn < end_pfn; pfn += bytes >> PAGE_SHIFT) {
+ remove_memory(nid, pfn << PAGE_SHIFT, bytes);
+ }
+ unlock_device_hotplug();
return base_pfn << PAGE_SHIFT;
+ }
}
return 0;
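
remove_memory() takes physical addresses while the loop above steps in page frame numbers, so the memory-block size is shifted down by PAGE_SHIFT to get the pfn stride. The unit bookkeeping in isolation, with illustrative values:

    #include <stdint.h>

    #define PAGE_SHIFT 16   /* 64K pages, illustrative */

    static uint64_t block_stride_pfns(uint64_t block_bytes)
    {
            /* e.g. a 256MB memory block -> 4096 pfns per iteration */
            return block_bytes >> PAGE_SHIFT;
    }

    static uint64_t pfn_to_phys(uint64_t pfn)
    {
            return pfn << PAGE_SHIFT;
    }
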
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 8006c54a91e3..6f60e0931922 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -17,7 +17,7 @@
#include <linux/pci.h>
#include <linux/memblock.h>
#include <linux/iommu.h>
-#include <linux/debugfs.h>
+#include <linux/sizes.h>
#include <asm/debugfs.h>
#include <asm/tlb.h>
@@ -42,14 +42,6 @@
static DEFINE_SPINLOCK(npu_context_lock);
/*
- * When an address shootdown range exceeds this threshold we invalidate the
- * entire TLB on the GPU for the given PID rather than each specific address in
- * the range.
- */
-static uint64_t atsd_threshold = 2 * 1024 * 1024;
-static struct dentry *atsd_threshold_dentry;
-
-/*
* Other types of TCE cache invalidation are not functional in the
* hardware.
*/
@@ -454,79 +446,73 @@ static void put_mmio_atsd_reg(struct npu *npu, int reg)
}
/* MMIO ATSD register offsets */
-#define XTS_ATSD_AVA 1
-#define XTS_ATSD_STAT 2
-
-static void mmio_launch_invalidate(struct mmio_atsd_reg *mmio_atsd_reg,
- unsigned long launch, unsigned long va)
-{
- struct npu *npu = mmio_atsd_reg->npu;
- int reg = mmio_atsd_reg->reg;
-
- __raw_writeq_be(va, npu->mmio_atsd_regs[reg] + XTS_ATSD_AVA);
- eieio();
- __raw_writeq_be(launch, npu->mmio_atsd_regs[reg]);
-}
+#define XTS_ATSD_LAUNCH 0
+#define XTS_ATSD_AVA 1
+#define XTS_ATSD_STAT 2
-static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
- unsigned long pid, bool flush)
+static unsigned long get_atsd_launch_val(unsigned long pid, unsigned long psize)
{
- int i;
- unsigned long launch;
-
- for (i = 0; i <= max_npu2_index; i++) {
- if (mmio_atsd_reg[i].reg < 0)
- continue;
+ unsigned long launch = 0;
- /* IS set to invalidate matching PID */
- launch = PPC_BIT(12);
-
- /* PRS set to process-scoped */
- launch |= PPC_BIT(13);
+ if (psize == MMU_PAGE_COUNT) {
+ /* IS set to invalidate entire matching PID */
+ launch |= PPC_BIT(12);
+ } else {
+ /* AP set to invalidate region of psize */
+ launch |= (u64)mmu_get_ap(psize) << PPC_BITLSHIFT(17);
+ }
- /* AP */
- launch |= (u64)
- mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+ /* PRS set to process-scoped */
+ launch |= PPC_BIT(13);
- /* PID */
- launch |= pid << PPC_BITLSHIFT(38);
+ /* PID */
+ launch |= pid << PPC_BITLSHIFT(38);
- /* No flush */
- launch |= !flush << PPC_BITLSHIFT(39);
+ /* Leave "No flush" (bit 39) 0 so every ATSD performs a flush */
- /* Invalidating the entire process doesn't use a va */
- mmio_launch_invalidate(&mmio_atsd_reg[i], launch, 0);
- }
+ return launch;
}
-static void mmio_invalidate_va(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
- unsigned long va, unsigned long pid, bool flush)
+static void mmio_atsd_regs_write(struct mmio_atsd_reg
+ mmio_atsd_reg[NV_MAX_NPUS], unsigned long offset,
+ unsigned long val)
{
- int i;
- unsigned long launch;
+ struct npu *npu;
+ int i, reg;
for (i = 0; i <= max_npu2_index; i++) {
- if (mmio_atsd_reg[i].reg < 0)
+ reg = mmio_atsd_reg[i].reg;
+ if (reg < 0)
continue;
- /* IS set to invalidate target VA */
- launch = 0;
+ npu = mmio_atsd_reg[i].npu;
+ __raw_writeq_be(val, npu->mmio_atsd_regs[reg] + offset);
+ }
+}
+
+static void mmio_invalidate_pid(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS],
+ unsigned long pid)
+{
+ unsigned long launch = get_atsd_launch_val(pid, MMU_PAGE_COUNT);
- /* PRS set to process scoped */
- launch |= PPC_BIT(13);
+ /* Invalidating the entire process doesn't use a va */
+ mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
+}
- /* AP */
- launch |= (u64)
- mmu_get_ap(mmu_virtual_psize) << PPC_BITLSHIFT(17);
+static void mmio_invalidate_range(struct mmio_atsd_reg
+ mmio_atsd_reg[NV_MAX_NPUS], unsigned long pid,
+ unsigned long start, unsigned long psize)
+{
+ unsigned long launch = get_atsd_launch_val(pid, psize);
- /* PID */
- launch |= pid << PPC_BITLSHIFT(38);
+ /* Write all VAs first */
+ mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_AVA, start);
- /* No flush */
- launch |= !flush << PPC_BITLSHIFT(39);
+ /* Issue one barrier for all address writes */
+ eieio();
- mmio_launch_invalidate(&mmio_atsd_reg[i], launch, va);
- }
+ /* Launch */
+ mmio_atsd_regs_write(mmio_atsd_reg, XTS_ATSD_LAUNCH, launch);
}
#define mn_to_npu_context(x) container_of(x, struct npu_context, mn)
@@ -612,14 +598,36 @@ static void release_atsd_reg(struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS])
}
/*
- * Invalidate either a single address or an entire PID depending on
- * the value of va.
+ * Invalidate a virtual address range
*/
-static void mmio_invalidate(struct npu_context *npu_context, int va,
- unsigned long address, bool flush)
+static void mmio_invalidate(struct npu_context *npu_context,
+ unsigned long start, unsigned long size)
{
struct mmio_atsd_reg mmio_atsd_reg[NV_MAX_NPUS];
unsigned long pid = npu_context->mm->context.id;
+ unsigned long atsd_start = 0;
+ unsigned long end = start + size - 1;
+ int atsd_psize = MMU_PAGE_COUNT;
+
+ /*
+ * Convert the input range into one of the supported sizes. If the range
+ * doesn't fit, use the next larger supported size. Invalidation latency
+ * is high, so over-invalidation is preferred to issuing multiple
+ * invalidates.
+ *
+ * A 4K page size isn't supported by NPU/GPU ATS, so that case is
+ * ignored.
+ */
+ if (size == SZ_64K) {
+ atsd_start = start;
+ atsd_psize = MMU_PAGE_64K;
+ } else if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M)) {
+ atsd_start = ALIGN_DOWN(start, SZ_2M);
+ atsd_psize = MMU_PAGE_2M;
+ } else if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G)) {
+ atsd_start = ALIGN_DOWN(start, SZ_1G);
+ atsd_psize = MMU_PAGE_1G;
+ }
if (npu_context->nmmu_flush)
/*
@@ -634,23 +642,25 @@ static void mmio_invalidate(struct npu_context *npu_context, int va,
* an invalidate.
*/
acquire_atsd_reg(npu_context, mmio_atsd_reg);
- if (va)
- mmio_invalidate_va(mmio_atsd_reg, address, pid, flush);
+
+ if (atsd_psize == MMU_PAGE_COUNT)
+ mmio_invalidate_pid(mmio_atsd_reg, pid);
else
- mmio_invalidate_pid(mmio_atsd_reg, pid, flush);
+ mmio_invalidate_range(mmio_atsd_reg, pid, atsd_start,
+ atsd_psize);
mmio_invalidate_wait(mmio_atsd_reg);
- if (flush) {
- /*
- * The GPU requires two flush ATSDs to ensure all entries have
- * been flushed. We use PID 0 as it will never be used for a
- * process on the GPU.
- */
- mmio_invalidate_pid(mmio_atsd_reg, 0, true);
- mmio_invalidate_wait(mmio_atsd_reg);
- mmio_invalidate_pid(mmio_atsd_reg, 0, true);
- mmio_invalidate_wait(mmio_atsd_reg);
- }
+
+ /*
+ * The GPU requires two flush ATSDs to ensure all entries have been
+ * flushed. We use PID 0 as it will never be used for a process on the
+ * GPU.
+ */
+ mmio_invalidate_pid(mmio_atsd_reg, 0);
+ mmio_invalidate_wait(mmio_atsd_reg);
+ mmio_invalidate_pid(mmio_atsd_reg, 0);
+ mmio_invalidate_wait(mmio_atsd_reg);
+
release_atsd_reg(mmio_atsd_reg);
}
@@ -667,7 +677,7 @@ static void pnv_npu2_mn_release(struct mmu_notifier *mn,
* There should be no more translation requests for this PID, but we
* need to ensure any entries for it are removed from the TLB.
*/
- mmio_invalidate(npu_context, 0, 0, true);
+ mmio_invalidate(npu_context, 0, ~0UL);
}
static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
@@ -676,8 +686,7 @@ static void pnv_npu2_mn_change_pte(struct mmu_notifier *mn,
pte_t pte)
{
struct npu_context *npu_context = mn_to_npu_context(mn);
-
- mmio_invalidate(npu_context, 1, address, true);
+ mmio_invalidate(npu_context, address, PAGE_SIZE);
}
static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
@@ -685,21 +694,7 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
unsigned long start, unsigned long end)
{
struct npu_context *npu_context = mn_to_npu_context(mn);
- unsigned long address;
-
- if (end - start > atsd_threshold) {
- /*
- * Just invalidate the entire PID if the address range is too
- * large.
- */
- mmio_invalidate(npu_context, 0, 0, true);
- } else {
- for (address = start; address < end; address += PAGE_SIZE)
- mmio_invalidate(npu_context, 1, address, false);
-
- /* Do the flush only on the final addess == end */
- mmio_invalidate(npu_context, 1, address, true);
- }
+ mmio_invalidate(npu_context, start, end - start);
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -962,11 +957,6 @@ int pnv_npu2_init(struct pnv_phb *phb)
static int npu_index;
uint64_t rc = 0;
- if (!atsd_threshold_dentry) {
- atsd_threshold_dentry = debugfs_create_x64("atsd_threshold",
- 0600, powerpc_debugfs_root, &atsd_threshold);
- }
-
phb->npu.nmmu_flush =
of_property_read_bool(phb->hose->dn, "ibm,nmmu-flush");
for_each_child_of_node(phb->hose->dn, dn) {
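
The rewritten invalidation path rounds every shootdown to one of the sizes the NPU ATSD supports (64K, 2M, 1G) and falls back to a whole-PID flush when the range fits none of them; over-invalidating once is cheaper than launching many ATSDs. A sketch of the selection logic, mirroring mmio_invalidate() above:

    #include <stdint.h>

    #define SZ_64K (1ull << 16)
    #define SZ_2M  (1ull << 21)
    #define SZ_1G  (1ull << 30)
    #define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))

    enum atsd_size { ATSD_64K, ATSD_2M, ATSD_1G, ATSD_WHOLE_PID };

    static enum atsd_size pick_atsd_size(uint64_t start, uint64_t size)
    {
            uint64_t end = start + size - 1;

            if (size == SZ_64K)
                    return ATSD_64K;
            if (ALIGN_DOWN(start, SZ_2M) == ALIGN_DOWN(end, SZ_2M))
                    return ATSD_2M;  /* range fits in one 2M region */
            if (ALIGN_DOWN(start, SZ_1G) == ALIGN_DOWN(end, SZ_1G))
                    return ATSD_1G;
            return ATSD_WHOLE_PID;   /* too large: flush the whole PID */
    }
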
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index badb29bde93f..d90ee4fc2c6a 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -199,7 +199,7 @@ void __init opal_powercap_init(void)
}
j = 0;
- pcaps[i].pg.name = node->name;
+ pcaps[i].pg.name = kasprintf(GFP_KERNEL, "%pOFn", node);
if (has_min) {
powercap_add_attr(min, "powercap-min",
&pcaps[i].pattrs[j]);
@@ -237,6 +237,7 @@ out_pcaps_pattrs:
while (--i >= 0) {
kfree(pcaps[i].pattrs);
kfree(pcaps[i].pg.attrs);
+ kfree(pcaps[i].pg.name);
}
kobject_put(powercap_kobj);
out_pcaps:
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index f7d04b6a2d7a..179609220e6f 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -214,9 +214,9 @@ void __init opal_sensor_groups_init(void)
}
if (!of_property_read_u32(node, "ibm,chip-id", &chipid))
- sprintf(sgs[i].name, "%s%d", node->name, chipid);
+ sprintf(sgs[i].name, "%pOFn%d", node, chipid);
else
- sprintf(sgs[i].name, "%s", node->name);
+ sprintf(sgs[i].name, "%pOFn", node);
sgs[i].sg.name = sgs[i].name;
if (add_attr_group(ops, len, &sgs[i], sgid)) {
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index 9aa87df114fd..916a4b7b1bb5 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -194,7 +194,7 @@ void __init opal_sys_param_init(void)
count = of_property_count_strings(sysparam, "param-name");
if (count < 0) {
pr_err("SYSPARAM: No string found of property param-name in "
- "the node %s\n", sysparam->name);
+ "the node %pOFn\n", sysparam);
goto out_param_buf;
}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 38fe4087484a..a4641515956f 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -535,7 +535,7 @@ static int opal_recover_mce(struct pt_regs *regs,
return recovered;
}
-void pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
+void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
{
panic_flush_kmsg_start();
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index adddde023622..14befee4b3f1 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -219,17 +219,41 @@ static void pnv_prepare_going_down(void)
static void __noreturn pnv_restart(char *cmd)
{
- long rc = OPAL_BUSY;
+ long rc;
pnv_prepare_going_down();
- while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
- rc = opal_cec_reboot();
- if (rc == OPAL_BUSY_EVENT)
- opal_poll_events(NULL);
+ do {
+ if (!cmd)
+ rc = opal_cec_reboot();
+ else if (strcmp(cmd, "full") == 0)
+ rc = opal_cec_reboot2(OPAL_REBOOT_FULL_IPL, NULL);
else
+ rc = OPAL_UNSUPPORTED;
+
+ if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
+ /* OPAL is busy; wait for some time and retry */
+ opal_poll_events(NULL);
mdelay(10);
- }
+
+ } else if (cmd && rc) {
+ /* Unknown error while issuing reboot */
+ if (rc == OPAL_UNSUPPORTED)
+ pr_err("Unsupported '%s' reboot.\n", cmd);
+ else
+ pr_err("Unable to issue '%s' reboot. Err=%ld\n",
+ cmd, rc);
+ pr_info("Forcing a cec-reboot\n");
+ cmd = NULL;
+ rc = OPAL_BUSY;
+
+ } else if (rc != OPAL_SUCCESS) {
+ /* Unknown error while issuing cec-reboot */
+ pr_err("Unable to reboot. Err=%ld\n", rc);
+ }
+
+ } while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT);
+
for (;;)
opal_poll_events(NULL);
}
@@ -437,6 +461,16 @@ static unsigned long pnv_get_proc_freq(unsigned int cpu)
return ret_freq;
}
+static long pnv_machine_check_early(struct pt_regs *regs)
+{
+ long handled = 0;
+
+ if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
+ handled = cur_cpu_spec->machine_check_early(regs);
+
+ return handled;
+}
+
define_machine(powernv) {
.name = "PowerNV",
.probe = pnv_probe,
@@ -448,6 +482,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = NULL,
.calibrate_decr = generic_calibrate_decr,
+ .machine_check_early = pnv_machine_check_early,
#ifdef CONFIG_KEXEC_CORE
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/ps3/Kconfig b/arch/powerpc/platforms/ps3/Kconfig
index 6f7525555b19..24864b8aaf5d 100644
--- a/arch/powerpc/platforms/ps3/Kconfig
+++ b/arch/powerpc/platforms/ps3/Kconfig
@@ -49,7 +49,6 @@ config PS3_HTAB_SIZE
config PS3_DYNAMIC_DMA
depends on PPC_PS3
bool "PS3 Platform dynamic DMA page table management"
- default n
help
This option will enable kernel support to take advantage of the
per device dynamic DMA page table management provided by the Cell
@@ -89,7 +88,6 @@ config PS3_SYS_MANAGER
config PS3_REPOSITORY_WRITE
bool "PS3 Repository write support" if PS3_ADVANCED
depends on PPC_PS3
- default n
help
Enables support for writing to the PS3 System Repository.
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index cdbfc5cfd6f3..f5387ad82279 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -664,7 +664,7 @@ static int update_flash_db(void)
db_set_64(db, &os_area_db_id_rtc_diff, saved_params.rtc_diff);
count = os_area_flash_write(db, sizeof(struct os_area_db), pos);
- if (count < sizeof(struct os_area_db)) {
+ if (count < 0 || count < sizeof(struct os_area_db)) {
pr_debug("%s: os_area_flash_write failed %zd\n", __func__,
count);
error = count < 0 ? count : -EIO;
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index b54850845466..7746c2a3c509 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -215,8 +215,7 @@ static int __init setup_areas(struct spu *spu)
goto fail_ioremap;
}
- spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys,
- LS_SIZE, pgprot_val(pgprot_noncached_wc(__pgprot(0))));
+ spu->local_store = (__force void *)ioremap_wc(spu->local_store_phys, LS_SIZE);
if (!spu->local_store) {
pr_debug("%s:%d: ioremap local_store failed\n",
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 0c698fd6d491..2e4bd32154b5 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -28,7 +28,6 @@ config PPC_PSERIES
config PPC_SPLPAR
depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
- default n
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
@@ -99,7 +98,6 @@ config PPC_SMLPAR
bool "Support for shared-memory logical partitions"
depends on PPC_PSERIES
select LPARCFG
- default n
help
Select this option to enable shared memory partition support.
With this option a system running in an LPAR can be given more
@@ -140,3 +138,10 @@ config IBMEBUS
bool "Support for GX bus based adapters"
help
Bus device driver for GX bus based adapters.
+
+config PAPR_SCM
+ depends on PPC_PSERIES && MEMORY_HOTPLUG
+ select LIBNVDIMM
+ tristate "Support for the PAPR Storage Class Memory interface"
+ help
+ Enable access to hypervisor provided storage class memory.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 7e89d5c47068..a43ec843c8e2 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -13,7 +13,7 @@ obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o
-obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
+obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o pmem.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o
@@ -24,6 +24,7 @@ obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_IBMEBUS) += ibmebus.o
+obj-$(CONFIG_PAPR_SCM) += papr_scm.o
ifdef CONFIG_PPC_PSERIES
obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a0b20c03f078..7625546caefd 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -32,8 +32,6 @@ static struct workqueue_struct *pseries_hp_wq;
struct pseries_hp_work {
struct work_struct work;
struct pseries_hp_errorlog *errlog;
- struct completion *hp_completion;
- int *rc;
};
struct cc_workarea {
@@ -329,7 +327,7 @@ int dlpar_release_drc(u32 drc_index)
return 0;
}
-static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
{
int rc;
@@ -357,6 +355,10 @@ static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
case PSERIES_HP_ELOG_RESOURCE_CPU:
rc = dlpar_cpu(hp_elog);
break;
+ case PSERIES_HP_ELOG_RESOURCE_PMEM:
+ rc = dlpar_hp_pmem(hp_elog);
+ break;
+
default:
pr_warn_ratelimited("Invalid resource (%d) specified\n",
hp_elog->resource);
@@ -371,20 +373,13 @@ static void pseries_hp_work_fn(struct work_struct *work)
struct pseries_hp_work *hp_work =
container_of(work, struct pseries_hp_work, work);
- if (hp_work->rc)
- *(hp_work->rc) = handle_dlpar_errorlog(hp_work->errlog);
- else
- handle_dlpar_errorlog(hp_work->errlog);
-
- if (hp_work->hp_completion)
- complete(hp_work->hp_completion);
+ handle_dlpar_errorlog(hp_work->errlog);
kfree(hp_work->errlog);
kfree((void *)work);
}
-void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
- struct completion *hotplug_done, int *rc)
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog)
{
struct pseries_hp_work *work;
struct pseries_hp_errorlog *hp_errlog_copy;
@@ -397,13 +392,9 @@ void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
if (work) {
INIT_WORK((struct work_struct *)work, pseries_hp_work_fn);
work->errlog = hp_errlog_copy;
- work->hp_completion = hotplug_done;
- work->rc = rc;
queue_work(pseries_hp_wq, (struct work_struct *)work);
} else {
- *rc = -ENOMEM;
kfree(hp_errlog_copy);
- complete(hotplug_done);
}
}
@@ -521,18 +512,15 @@ static int dlpar_parse_id_type(char **cmd, struct pseries_hp_errorlog *hp_elog)
static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
const char *buf, size_t count)
{
- struct pseries_hp_errorlog *hp_elog;
- struct completion hotplug_done;
+ struct pseries_hp_errorlog hp_elog;
char *argbuf;
char *args;
int rc;
args = argbuf = kstrdup(buf, GFP_KERNEL);
- hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
- if (!hp_elog || !argbuf) {
+ if (!argbuf) {
pr_info("Could not allocate resources for DLPAR operation\n");
kfree(argbuf);
- kfree(hp_elog);
return -ENOMEM;
}
@@ -540,25 +528,22 @@ static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
* Parse out the request from the user, this will be in the form:
* <resource> <action> <id_type> <id>
*/
- rc = dlpar_parse_resource(&args, hp_elog);
+ rc = dlpar_parse_resource(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
- rc = dlpar_parse_action(&args, hp_elog);
+ rc = dlpar_parse_action(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
- rc = dlpar_parse_id_type(&args, hp_elog);
+ rc = dlpar_parse_id_type(&args, &hp_elog);
if (rc)
goto dlpar_store_out;
- init_completion(&hotplug_done);
- queue_hotplug_event(hp_elog, &hotplug_done, &rc);
- wait_for_completion(&hotplug_done);
+ rc = handle_dlpar_errorlog(&hp_elog);
dlpar_store_out:
kfree(argbuf);
- kfree(hp_elog);
if (rc)
pr_err("Could not handle DLPAR request \"%s\"\n", buf);
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 18014cdeb590..ef6595153642 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -149,7 +149,7 @@ static int dtl_start(struct dtl *dtl)
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
- ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES;
+ ((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
@@ -184,7 +184,7 @@ static void dtl_stop(struct dtl *dtl)
static u64 dtl_current_index(struct dtl *dtl)
{
- return lppaca_of(dtl->cpu).dtl_idx;
+ return be64_to_cpu(lppaca_of(dtl->cpu).dtl_idx);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
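Both dtl.c fixes convert values that cross the CPU/hypervisor boundary: the dispatch trace log fields are big-endian per PAPR, regardless of kernel endianness. A sketch of the pattern, with a hypothetical firmware-shared structure:

#include <asm/byteorder.h>

struct fw_shared {
	__be32 buf_size;	/* written by the kernel */
	__be64 index;		/* advanced by the hypervisor */
};

static void fw_shared_example(struct fw_shared *s)
{
	s->buf_size = cpu_to_be32(DISPATCH_LOG_BYTES);		/* CPU -> BE */
	pr_debug("index=%llu\n", be64_to_cpu(s->index));	/* BE -> CPU */
}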
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 823cb27efa8b..c9e5ca4afb26 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -438,7 +438,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
/**
* pseries_eeh_get_state - Retrieve PE state
* @pe: EEH PE
- * @state: return value
+ * @delay: suggested time to wait if state is unavailable
*
* Retrieve the state of the specified PE. On RTAS compliant
* pseries platform, there already has one dedicated RTAS function
@@ -448,7 +448,7 @@ static int pseries_eeh_get_pe_addr(struct eeh_pe *pe)
* RTAS calls for the purpose, we need to try the new one and back
* to the old one if the new one couldn't work properly.
*/
-static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
+static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
int config_addr;
int ret;
@@ -499,7 +499,8 @@ static int pseries_eeh_get_state(struct eeh_pe *pe, int *state)
break;
case 5:
if (rets[2]) {
- if (state) *state = rets[2];
+ if (delay)
+ *delay = rets[2];
result = EEH_STATE_UNAVAILABLE;
} else {
result = EEH_STATE_NOT_SUPPORT;
@@ -554,64 +555,6 @@ static int pseries_eeh_reset(struct eeh_pe *pe, int option)
}
/**
- * pseries_eeh_wait_state - Wait for PE state
- * @pe: EEH PE
- * @max_wait: maximal period in millisecond
- *
- * Wait for the state of associated PE. It might take some time
- * to retrieve the PE's state.
- */
-static int pseries_eeh_wait_state(struct eeh_pe *pe, int max_wait)
-{
- int ret;
- int mwait;
-
- /*
- * According to PAPR, the state of PE might be temporarily
- * unavailable. Under the circumstance, we have to wait
- * for indicated time determined by firmware. The maximal
- * wait time is 5 minutes, which is acquired from the original
- * EEH implementation. Also, the original implementation
- * also defined the minimal wait time as 1 second.
- */
-#define EEH_STATE_MIN_WAIT_TIME (1000)
-#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
-
- while (1) {
- ret = pseries_eeh_get_state(pe, &mwait);
-
- /*
- * If the PE's state is temporarily unavailable,
- * we have to wait for the specified time. Otherwise,
- * the PE's state will be returned immediately.
- */
- if (ret != EEH_STATE_UNAVAILABLE)
- return ret;
-
- if (max_wait <= 0) {
- pr_warn("%s: Timeout when getting PE's state (%d)\n",
- __func__, max_wait);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- if (mwait <= 0) {
- pr_warn("%s: Firmware returned bad wait value %d\n",
- __func__, mwait);
- mwait = EEH_STATE_MIN_WAIT_TIME;
- } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
- pr_warn("%s: Firmware returned too long wait value %d\n",
- __func__, mwait);
- mwait = EEH_STATE_MAX_WAIT_TIME;
- }
-
- max_wait -= mwait;
- msleep(mwait);
- }
-
- return EEH_STATE_NOT_SUPPORT;
-}
-
-/**
* pseries_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
@@ -849,7 +792,6 @@ static struct eeh_ops pseries_eeh_ops = {
.get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
.reset = pseries_eeh_reset,
- .wait_state = pseries_eeh_wait_state,
.get_log = pseries_eeh_get_log,
.configure_bridge = pseries_eeh_configure_bridge,
.err_inject = NULL,
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index 6eeb0d4bab61..446ef104fb3a 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -16,7 +16,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <asm/prom.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include "pseries.h"
@@ -24,34 +25,19 @@ void request_event_sources_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
- int i, index, count = 0;
- struct of_phandle_args oirq;
- unsigned int virqs[16];
+ int i, virq, rc;
- /* First try to do a proper OF tree parsing */
- for (index = 0; of_irq_parse_one(np, index, &oirq) == 0;
- index++) {
- if (count > 15)
- break;
- virqs[count] = irq_create_of_mapping(&oirq);
- if (!virqs[count]) {
- pr_err("event-sources: Unable to allocate "
- "interrupt number for %pOF\n",
- np);
- WARN_ON(1);
- } else {
- count++;
- }
- }
+ for (i = 0; i < 16; i++) {
+ virq = of_irq_get(np, i);
+ if (virq < 0)
+ return;
+ if (WARN(!virq, "event-sources: Unable to allocate "
+ "interrupt number for %pOF\n", np))
+ continue;
- /* Now request them */
- for (i = 0; i < count; i++) {
- if (request_irq(virqs[i], handler, 0, name, NULL)) {
- pr_err("event-sources: Unable to request interrupt "
- "%d for %pOF\n", virqs[i], np);
- WARN_ON(1);
+ rc = request_irq(virq, handler, 0, name, NULL);
+ if (WARN(rc, "event-sources: Unable to request interrupt %d for %pOF\n",
+ virq, np))
return;
- }
}
}
-
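The rewritten loop leans on the of_irq_get() contract: a negative return is an error (including -EPROBE_DEFER), zero means the interrupt could not be mapped, and a positive value is a virq ready for request_irq(). A sketch of that contract in isolation (handler and name are placeholders):

#include <linux/interrupt.h>
#include <linux/of_irq.h>

int virq = of_irq_get(np, 0);

if (virq < 0)
	return virq;	/* propagate the error or probe deferral */
if (!virq)
	return -EINVAL;	/* mapping failed */
return request_irq(virq, handler, 0, "example", NULL);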
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c
index a3bbeb43689e..608ecad0178f 100644
--- a/arch/powerpc/platforms/pseries/firmware.c
+++ b/arch/powerpc/platforms/pseries/firmware.c
@@ -65,6 +65,8 @@ hypertas_fw_features_table[] = {
{FW_FEATURE_SET_MODE, "hcall-set-mode"},
{FW_FEATURE_BEST_ENERGY, "hcall-best-energy-1*"},
{FW_FEATURE_HPT_RESIZE, "hcall-hpt-resize"},
+ {FW_FEATURE_BLOCK_REMOVE, "hcall-block-remove"},
+ {FW_FEATURE_PAPR_SCM, "hcall-scm"},
};
/* Build up the firmware features bitmask using the contents of
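The two new table entries map ibm,hypertas-functions strings onto firmware feature bits, which later code can test cheaply at runtime. A sketch of the gating pattern the rest of this series builds on (the fallback name is hypothetical):

if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
	do_block_remove(number, batch, param);	/* new H_BLOCK_REMOVE path */
else
	do_bulk_remove(number, batch, param);	/* hypothetical H_BULK_REMOVE fallback */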
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 6ef77caf7bcf..2f8e62163602 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -287,7 +287,7 @@ static int pseries_add_processor(struct device_node *np)
if (cpumask_empty(tmp)) {
printk(KERN_ERR "Unable to find space in cpu_present_mask for"
- " processor %s with %d thread(s)\n", np->name,
+ " processor %pOFn with %d thread(s)\n", np,
nthreads);
goto out_unlock;
}
@@ -481,8 +481,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
if (rc) {
saved_rc = rc;
- pr_warn("Failed to attach node %s, rc: %d, drc index: %x\n",
- dn->name, rc, drc_index);
+ pr_warn("Failed to attach node %pOFn, rc: %d, drc index: %x\n",
+ dn, rc, drc_index);
rc = dlpar_release_drc(drc_index);
if (!rc)
@@ -494,8 +494,8 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
rc = dlpar_online_cpu(dn);
if (rc) {
saved_rc = rc;
- pr_warn("Failed to online cpu %s, rc: %d, drc index: %x\n",
- dn->name, rc, drc_index);
+ pr_warn("Failed to online cpu %pOFn, rc: %d, drc index: %x\n",
+ dn, rc, drc_index);
rc = dlpar_detach_node(dn);
if (!rc)
@@ -504,7 +504,7 @@ static ssize_t dlpar_cpu_add(u32 drc_index)
return saved_rc;
}
- pr_debug("Successfully added CPU %s, drc index: %x\n", dn->name,
+ pr_debug("Successfully added CPU %pOFn, drc index: %x\n", dn,
drc_index);
return rc;
}
@@ -570,19 +570,19 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
{
int rc;
- pr_debug("Attempting to remove CPU %s, drc index: %x\n",
- dn->name, drc_index);
+ pr_debug("Attempting to remove CPU %pOFn, drc index: %x\n",
+ dn, drc_index);
rc = dlpar_offline_cpu(dn);
if (rc) {
- pr_warn("Failed to offline CPU %s, rc: %d\n", dn->name, rc);
+ pr_warn("Failed to offline CPU %pOFn, rc: %d\n", dn, rc);
return -EINVAL;
}
rc = dlpar_release_drc(drc_index);
if (rc) {
- pr_warn("Failed to release drc (%x) for CPU %s, rc: %d\n",
- drc_index, dn->name, rc);
+ pr_warn("Failed to release drc (%x) for CPU %pOFn, rc: %d\n",
+ drc_index, dn, rc);
dlpar_online_cpu(dn);
return rc;
}
@@ -591,7 +591,7 @@ static ssize_t dlpar_cpu_remove(struct device_node *dn, u32 drc_index)
if (rc) {
int saved_rc = rc;
- pr_warn("Failed to detach CPU %s, rc: %d", dn->name, rc);
+ pr_warn("Failed to detach CPU %pOFn, rc: %d", dn, rc);
rc = dlpar_acquire_drc(drc_index);
if (!rc)
@@ -662,8 +662,8 @@ static int find_dlpar_cpus_to_remove(u32 *cpu_drcs, int cpus_to_remove)
rc = of_property_read_u32(dn, "ibm,my-drc-index",
&cpu_drcs[cpus_found - 1]);
if (rc) {
- pr_warn("Error occurred getting drc-index for %s\n",
- dn->name);
+ pr_warn("Error occurred getting drc-index for %pOFn\n",
+ dn);
of_node_put(dn);
return -1;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c1578f54c626..2b796da822c2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -101,11 +101,12 @@ static struct property *dlpar_clone_property(struct property *prop,
return new_prop;
}
-static u32 find_aa_index(struct device_node *dr_node,
- struct property *ala_prop, const u32 *lmb_assoc)
+static bool find_aa_index(struct device_node *dr_node,
+ struct property *ala_prop,
+ const u32 *lmb_assoc, u32 *aa_index)
{
- u32 *assoc_arrays;
- u32 aa_index;
+ u32 *assoc_arrays, new_prop_size;
+ struct property *new_prop;
int aa_arrays, aa_array_entries, aa_array_sz;
int i, index;
@@ -121,54 +122,48 @@ static u32 find_aa_index(struct device_node *dr_node,
aa_array_entries = be32_to_cpu(assoc_arrays[1]);
aa_array_sz = aa_array_entries * sizeof(u32);
- aa_index = -1;
for (i = 0; i < aa_arrays; i++) {
index = (i * aa_array_entries) + 2;
if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
continue;
- aa_index = i;
- break;
+ *aa_index = i;
+ return true;
}
- if (aa_index == -1) {
- struct property *new_prop;
- u32 new_prop_size;
-
- new_prop_size = ala_prop->length + aa_array_sz;
- new_prop = dlpar_clone_property(ala_prop, new_prop_size);
- if (!new_prop)
- return -1;
-
- assoc_arrays = new_prop->value;
+ new_prop_size = ala_prop->length + aa_array_sz;
+ new_prop = dlpar_clone_property(ala_prop, new_prop_size);
+ if (!new_prop)
+ return false;
- /* increment the number of entries in the lookup array */
- assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
+ assoc_arrays = new_prop->value;
- /* copy the new associativity into the lookup array */
- index = aa_arrays * aa_array_entries + 2;
- memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
+ /* increment the number of entries in the lookup array */
+ assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
- of_update_property(dr_node, new_prop);
+ /* copy the new associativity into the lookup array */
+ index = aa_arrays * aa_array_entries + 2;
+ memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
- /*
- * The associativity lookup array index for this lmb is
- * number of entries - 1 since we added its associativity
- * to the end of the lookup array.
- */
- aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
- }
+ of_update_property(dr_node, new_prop);
- return aa_index;
+ /*
+ * The associativity lookup array index for this lmb is
+ * number of entries - 1 since we added its associativity
+ * to the end of the lookup array.
+ */
+ *aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
+ return true;
}
-static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb)
+static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
struct device_node *parent, *lmb_node, *dr_node;
struct property *ala_prop;
const u32 *lmb_assoc;
u32 aa_index;
+ bool found;
parent = of_find_node_by_path("/");
if (!parent)
@@ -200,46 +195,17 @@ static u32 lookup_lmb_associativity_index(struct drmem_lmb *lmb)
return -ENODEV;
}
- aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
+ found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
dlpar_free_cc_nodes(lmb_node);
- return aa_index;
-}
-static int dlpar_add_device_tree_lmb(struct drmem_lmb *lmb)
-{
- int rc, aa_index;
-
- lmb->flags |= DRCONF_MEM_ASSIGNED;
-
- aa_index = lookup_lmb_associativity_index(lmb);
- if (aa_index < 0) {
- pr_err("Couldn't find associativity index for drc index %x\n",
- lmb->drc_index);
- return aa_index;
+ if (!found) {
+ pr_err("Could not find LMB associativity\n");
+ return -1;
}
lmb->aa_index = aa_index;
-
- rtas_hp_event = true;
- rc = drmem_update_dt();
- rtas_hp_event = false;
-
- return rc;
-}
-
-static int dlpar_remove_device_tree_lmb(struct drmem_lmb *lmb)
-{
- int rc;
-
- lmb->flags &= ~DRCONF_MEM_ASSIGNED;
- lmb->aa_index = 0xffffffff;
-
- rtas_hp_event = true;
- rc = drmem_update_dt();
- rtas_hp_event = false;
-
- return rc;
+ return 0;
}
static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
@@ -428,7 +394,9 @@ static int dlpar_remove_lmb(struct drmem_lmb *lmb)
/* Update memory regions for memory remove */
memblock_remove(lmb->base_addr, block_sz);
- dlpar_remove_device_tree_lmb(lmb);
+ invalidate_lmb_associativity_index(lmb);
+ lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+
return 0;
}
@@ -688,10 +656,8 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
if (lmb->flags & DRCONF_MEM_ASSIGNED)
return -EINVAL;
- rc = dlpar_add_device_tree_lmb(lmb);
+ rc = update_lmb_associativity_index(lmb);
if (rc) {
- pr_err("Couldn't update device tree for drc index %x\n",
- lmb->drc_index);
dlpar_release_drc(lmb->drc_index);
return rc;
}
@@ -704,14 +670,14 @@ static int dlpar_add_lmb(struct drmem_lmb *lmb)
/* Add the memory */
rc = add_memory(nid, lmb->base_addr, block_sz);
if (rc) {
- dlpar_remove_device_tree_lmb(lmb);
+ invalidate_lmb_associativity_index(lmb);
return rc;
}
rc = dlpar_online_lmb(lmb);
if (rc) {
remove_memory(nid, lmb->base_addr, block_sz);
- dlpar_remove_device_tree_lmb(lmb);
+ invalidate_lmb_associativity_index(lmb);
} else {
lmb->flags |= DRCONF_MEM_ASSIGNED;
}
@@ -958,6 +924,12 @@ int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
break;
}
+ if (!rc) {
+ rtas_hp_event = true;
+ rc = drmem_update_dt();
+ rtas_hp_event = false;
+ }
+
unlock_device_hotplug();
return rc;
}
diff --git a/arch/powerpc/platforms/pseries/ibmebus.c b/arch/powerpc/platforms/pseries/ibmebus.c
index c7c1140c13b6..5b4a56131904 100644
--- a/arch/powerpc/platforms/pseries/ibmebus.c
+++ b/arch/powerpc/platforms/pseries/ibmebus.c
@@ -404,7 +404,7 @@ static ssize_t name_show(struct device *dev,
struct platform_device *ofdev;
ofdev = to_platform_device(dev);
- return sprintf(buf, "%s\n", ofdev->dev.of_node->name);
+ return sprintf(buf, "%pOFn\n", ofdev->dev.of_node);
}
static DEVICE_ATTR_RO(name);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index d3992ced0782..32d4452973e7 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -48,6 +48,7 @@
#include <asm/kexec.h>
#include <asm/fadump.h>
#include <asm/asm-prototypes.h>
+#include <asm/debugfs.h>
#include "pseries.h"
@@ -417,6 +418,79 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
BUG_ON(lpar_rc != H_SUCCESS);
}
+
+/*
+ * As defined in PAPR section 14.5.4.1.8, the control mask doesn't include
+ * the reference and change bits returned for the processed PTE.
+ */
+#define HBLKR_AVPN 0x0100000000000000UL
+#define HBLKR_CTRL_MASK 0xf800000000000000UL
+#define HBLKR_CTRL_SUCCESS 0x8000000000000000UL
+#define HBLKR_CTRL_ERRNOTFOUND 0x8800000000000000UL
+#define HBLKR_CTRL_ERRBUSY 0xa000000000000000UL
+
+/**
+ * H_BLOCK_REMOVE caller.
+ * @idx should point to the latest @param entry set with a PTEX.
+ * If a PTE cannot be processed because another CPU has already locked its
+ * group, those entries are put back in @param starting at index 1.
+ * If entries have to be retried and @retry_busy is set to true, they are
+ * retried until they succeed. If @retry_busy is set to false, the return
+ * value is the number of entries yet to be processed.
+ */
+static unsigned long call_block_remove(unsigned long idx, unsigned long *param,
+ bool retry_busy)
+{
+ unsigned long i, rc, new_idx;
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+ if (idx < 2) {
+ pr_warn("Unexpected empty call to H_BLOCK_REMOVE");
+ return 0;
+ }
+again:
+ new_idx = 0;
+ if (idx > PLPAR_HCALL9_BUFSIZE) {
+ pr_err("Too many PTEs (%lu) for H_BLOCK_REMOVE", idx);
+ idx = PLPAR_HCALL9_BUFSIZE;
+ } else if (idx < PLPAR_HCALL9_BUFSIZE)
+ param[idx] = HBR_END;
+
+ rc = plpar_hcall9(H_BLOCK_REMOVE, retbuf,
+ param[0], /* AVA */
+ param[1], param[2], param[3], param[4], /* TS0-7 */
+ param[5], param[6], param[7], param[8]);
+ if (rc == H_SUCCESS)
+ return 0;
+
+ BUG_ON(rc != H_PARTIAL);
+
+ /* Check that the unprocessed entries were 'not found' or 'busy' */
+ for (i = 0; i < idx-1; i++) {
+ unsigned long ctrl = retbuf[i] & HBLKR_CTRL_MASK;
+
+ if (ctrl == HBLKR_CTRL_ERRBUSY) {
+ param[++new_idx] = param[i+1];
+ continue;
+ }
+
+ BUG_ON(ctrl != HBLKR_CTRL_SUCCESS
+ && ctrl != HBLKR_CTRL_ERRNOTFOUND);
+ }
+
+ /*
+ * If some entries were found busy, retry them if requested,
+ * or if all the entries have to be retried.
+ */
+ if (new_idx && (retry_busy || new_idx == (PLPAR_HCALL9_BUFSIZE-1))) {
+ idx = new_idx + 1;
+ goto again;
+ }
+
+ return new_idx;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
* Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need
@@ -424,17 +498,57 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
*/
#define PPC64_HUGE_HPTE_BATCH 12
-static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
- unsigned long *vpn, int count,
- int psize, int ssize)
+static void hugepage_block_invalidate(unsigned long *slot, unsigned long *vpn,
+ int count, int psize, int ssize)
{
unsigned long param[PLPAR_HCALL9_BUFSIZE];
- int i = 0, pix = 0, rc;
- unsigned long flags = 0;
- int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+ unsigned long shift, current_vpgb, vpgb;
+ int i, pix = 0;
- if (lock_tlbie)
- spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
+ shift = mmu_psize_defs[psize].shift;
+
+ for (i = 0; i < count; i++) {
+ /*
+ * Shift 3 more bits to the right to get an
+ * 8-page aligned virtual address.
+ */
+ vpgb = (vpn[i] >> (shift - VPN_SHIFT + 3));
+ if (!pix || vpgb != current_vpgb) {
+ /*
+ * Need to start a new 8-page block; flush
+ * the current one if needed.
+ */
+ if (pix)
+ (void)call_block_remove(pix, param, true);
+ current_vpgb = vpgb;
+ param[0] = hpte_encode_avpn(vpn[i], psize, ssize);
+ pix = 1;
+ }
+
+ param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot[i];
+ if (pix == PLPAR_HCALL9_BUFSIZE) {
+ pix = call_block_remove(pix, param, false);
+ /*
+ * pix = 0 means that all the entries were
+ * removed, so we can start a new block.
+ * Otherwise, there are entries to retry, and
+ * pix points to the latest one, so we should
+ * increment it and try to continue the same
+ * block.
+ */
+ if (pix)
+ pix++;
+ }
+ }
+ if (pix)
+ (void)call_block_remove(pix, param, true);
+}
+
+static void hugepage_bulk_invalidate(unsigned long *slot, unsigned long *vpn,
+ int count, int psize, int ssize)
+{
+ unsigned long param[PLPAR_HCALL9_BUFSIZE];
+ int i = 0, pix = 0, rc;
for (i = 0; i < count; i++) {
@@ -462,6 +576,23 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
param[6], param[7]);
BUG_ON(rc != H_SUCCESS);
}
+}
+
+static inline void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
+ unsigned long *vpn,
+ int count, int psize,
+ int ssize)
+{
+ unsigned long flags = 0;
+ int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+ if (lock_tlbie)
+ spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
+
+ if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
+ hugepage_block_invalidate(slot, vpn, count, psize, ssize);
+ else
+ hugepage_bulk_invalidate(slot, vpn, count, psize, ssize);
if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
@@ -546,6 +677,86 @@ static int pSeries_lpar_hpte_removebolted(unsigned long ea,
return 0;
}
+
+static inline unsigned long compute_slot(real_pte_t pte,
+ unsigned long vpn,
+ unsigned long index,
+ unsigned long shift,
+ int ssize)
+{
+ unsigned long slot, hash, hidx;
+
+ hash = hpt_hash(vpn, shift, ssize);
+ hidx = __rpte_to_hidx(pte, index);
+ if (hidx & _PTEIDX_SECONDARY)
+ hash = ~hash;
+ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+ slot += hidx & _PTEIDX_GROUP_IX;
+ return slot;
+}
+
+/**
+ * The hcall H_BLOCK_REMOVE implies that the virtual pages to be processed are
+ * "all within the same naturally aligned 8 page virtual address block".
+ */
+static void do_block_remove(unsigned long number, struct ppc64_tlb_batch *batch,
+ unsigned long *param)
+{
+ unsigned long vpn;
+ unsigned long i, pix = 0;
+ unsigned long index, shift, slot, current_vpgb, vpgb;
+ real_pte_t pte;
+ int psize, ssize;
+
+ psize = batch->psize;
+ ssize = batch->ssize;
+
+ for (i = 0; i < number; i++) {
+ vpn = batch->vpn[i];
+ pte = batch->pte[i];
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ /*
+ * Shift 3 more bits to the right to get an
+ * 8-page aligned virtual address.
+ */
+ vpgb = (vpn >> (shift - VPN_SHIFT + 3));
+ if (!pix || vpgb != current_vpgb) {
+ /*
+ * Need to start a new 8-page block; flush
+ * the current one if needed.
+ */
+ if (pix)
+ (void)call_block_remove(pix, param,
+ true);
+ current_vpgb = vpgb;
+ param[0] = hpte_encode_avpn(vpn, psize,
+ ssize);
+ pix = 1;
+ }
+
+ slot = compute_slot(pte, vpn, index, shift, ssize);
+ param[pix++] = HBR_REQUEST | HBLKR_AVPN | slot;
+
+ if (pix == PLPAR_HCALL9_BUFSIZE) {
+ pix = call_block_remove(pix, param, false);
+ /*
+ * pix = 0 means that all the entries were
+ * removed, so we can start a new block.
+ * Otherwise, there are entries to retry, and
+ * pix points to the latest one, so we should
+ * increment it and try to continue the same
+ * block.
+ */
+ if (pix)
+ pix++;
+ }
+ } pte_iterate_hashed_end();
+ }
+
+ if (pix)
+ (void)call_block_remove(pix, param, true);
+}
+
/*
* Take a spinlock around flushes to avoid bouncing the hypervisor tlbie
* lock.
@@ -558,13 +769,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[PLPAR_HCALL9_BUFSIZE];
- unsigned long hash, index, shift, hidx, slot;
+ unsigned long index, shift, slot;
real_pte_t pte;
int psize, ssize;
if (lock_tlbie)
spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags);
+ if (firmware_has_feature(FW_FEATURE_BLOCK_REMOVE)) {
+ do_block_remove(number, batch, param);
+ goto out;
+ }
+
psize = batch->psize;
ssize = batch->ssize;
pix = 0;
@@ -572,12 +788,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
vpn = batch->vpn[i];
pte = batch->pte[i];
pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
- hash = hpt_hash(vpn, shift, ssize);
- hidx = __rpte_to_hidx(pte, index);
- if (hidx & _PTEIDX_SECONDARY)
- hash = ~hash;
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- slot += hidx & _PTEIDX_GROUP_IX;
+ slot = compute_slot(pte, vpn, index, shift, ssize);
if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) {
/*
* lpar doesn't use the passed actual page size
@@ -608,6 +819,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
BUG_ON(rc != H_SUCCESS);
}
+out:
if (lock_tlbie)
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
@@ -1028,3 +1240,56 @@ static int __init reserve_vrma_context_id(void)
return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
+
+#ifdef CONFIG_DEBUG_FS
+/* debugfs file interface for vpa data */
+static ssize_t vpa_file_read(struct file *filp, char __user *buf, size_t len,
+ loff_t *pos)
+{
+ int cpu = (long)filp->private_data;
+ struct lppaca *lppaca = &lppaca_of(cpu);
+
+ return simple_read_from_buffer(buf, len, pos, lppaca,
+ sizeof(struct lppaca));
+}
+
+static const struct file_operations vpa_fops = {
+ .open = simple_open,
+ .read = vpa_file_read,
+ .llseek = default_llseek,
+};
+
+static int __init vpa_debugfs_init(void)
+{
+ char name[16];
+ long i;
+ static struct dentry *vpa_dir;
+
+ if (!firmware_has_feature(FW_FEATURE_SPLPAR))
+ return 0;
+
+ vpa_dir = debugfs_create_dir("vpa", powerpc_debugfs_root);
+ if (!vpa_dir) {
+ pr_warn("%s: can't create vpa root dir\n", __func__);
+ return -ENOMEM;
+ }
+
+ /* set up the per-cpu vpa files */
+ for_each_possible_cpu(i) {
+ struct dentry *d;
+
+ sprintf(name, "cpu-%ld", i);
+
+ d = debugfs_create_file(name, 0400, vpa_dir, (void *)i,
+ &vpa_fops);
+ if (!d) {
+ pr_warn("%s: can't create per-cpu vpa file\n",
+ __func__);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+machine_arch_initcall(pseries, vpa_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
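A worked example of the 8-page grouping used by do_block_remove() and hugepage_block_invalidate() above: assuming a 64K base page (shift == 16) and VPN_SHIFT == 12 (per the hash-MMU headers), the block number is

	vpgb = vpn >> (shift - VPN_SHIFT + 3);	/* vpn >> 7 in this case */

so VPNs that agree in all but their low 7 bits land in the same naturally aligned 8-page virtual address block and can be batched into a single H_BLOCK_REMOVE call.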
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index 7c872dc01bdb..8bd590af488a 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -585,8 +585,7 @@ static ssize_t update_mpp(u64 *entitlement, u8 *weight)
static ssize_t lparcfg_write(struct file *file, const char __user * buf,
size_t count, loff_t * off)
{
- int kbuf_sz = 64;
- char kbuf[kbuf_sz];
+ char kbuf[64];
char *tmp;
u64 new_entitled, *new_entitled_ptr = &new_entitled;
u8 new_weight, *new_weight_ptr = &new_weight;
@@ -595,7 +594,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
if (!firmware_has_feature(FW_FEATURE_SPLPAR))
return -EINVAL;
- if (count > kbuf_sz)
+ if (count > sizeof(kbuf))
return -EINVAL;
if (copy_from_user(kbuf, buf, count))
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index f0e30dc94988..88925f8ca8a0 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -242,7 +242,7 @@ static int add_dt_node(__be32 parent_phandle, __be32 drc_index)
static void prrn_update_node(__be32 phandle)
{
- struct pseries_hp_errorlog *hp_elog;
+ struct pseries_hp_errorlog hp_elog;
struct device_node *dn;
/*
@@ -255,18 +255,12 @@ static void prrn_update_node(__be32 phandle)
return;
}
- hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
- if(!hp_elog)
- return;
-
- hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
- hp_elog->action = PSERIES_HP_ELOG_ACTION_READD;
- hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
- hp_elog->_drc_u.drc_index = phandle;
-
- queue_hotplug_event(hp_elog, NULL, NULL);
+ hp_elog.resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+ hp_elog.action = PSERIES_HP_ELOG_ACTION_READD;
+ hp_elog.id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+ hp_elog._drc_u.drc_index = phandle;
- kfree(hp_elog);
+ handle_dlpar_errorlog(&hp_elog);
}
int pseries_devicetree_update(s32 scope)
@@ -366,6 +360,8 @@ static ssize_t migration_store(struct class *class,
if (rc)
return rc;
+ stop_topology_update();
+
do {
rc = rtas_ibm_suspend_me(streamid);
if (rc == -EAGAIN)
@@ -376,6 +372,9 @@ static ssize_t migration_store(struct class *class,
return rc;
post_mobility_fixup();
+
+ start_topology_update();
+
return count;
}
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index b7496948129e..8011b4129e3a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -203,7 +203,8 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
/* Get the top level device in the PE */
edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
- edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
+ edev = list_first_entry(&edev->pe->edevs, struct eeh_dev,
+ entry);
dn = pci_device_to_OF_node(edev->pdev);
if (!dn)
return NULL;
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
new file mode 100644
index 000000000000..ee9372b65ca5
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/papr_scm.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "papr-scm: " fmt
+
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/ndctl.h>
+#include <linux/sched.h>
+#include <linux/libnvdimm.h>
+#include <linux/platform_device.h>
+
+#include <asm/plpar_wrappers.h>
+
+#define BIND_ANY_ADDR (~0ul)
+
+#define PAPR_SCM_DIMM_CMD_MASK \
+ ((1ul << ND_CMD_GET_CONFIG_SIZE) | \
+ (1ul << ND_CMD_GET_CONFIG_DATA) | \
+ (1ul << ND_CMD_SET_CONFIG_DATA))
+
+struct papr_scm_priv {
+ struct platform_device *pdev;
+ struct device_node *dn;
+ uint32_t drc_index;
+ uint64_t blocks;
+ uint64_t block_size;
+ int metadata_size;
+
+ uint64_t bound_addr;
+
+ struct nvdimm_bus_descriptor bus_desc;
+ struct nvdimm_bus *bus;
+ struct nvdimm *nvdimm;
+ struct resource res;
+ struct nd_region *region;
+ struct nd_interleave_set nd_set;
+};
+
+static int drc_pmem_bind(struct papr_scm_priv *p)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+ uint64_t rc, token;
+
+ /*
+ * When the hypervisor cannot map all the requested memory in a single
+ * hcall, it returns H_BUSY and we call again with the token until
+ * we get H_SUCCESS. Aborting the retry loop before getting H_SUCCESS
+ * leaves the system in an undefined state, so we keep waiting.
+ */
+ token = 0;
+
+ do {
+ rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0,
+ p->blocks, BIND_ANY_ADDR, token);
+ token = be64_to_cpu(ret[0]);
+ cond_resched();
+ } while (rc == H_BUSY);
+
+ if (rc) {
+ dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
+ return -ENXIO;
+ }
+
+ p->bound_addr = be64_to_cpu(ret[1]);
+
+ dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res);
+
+ return 0;
+}
+
+static int drc_pmem_unbind(struct papr_scm_priv *p)
+{
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+ uint64_t rc, token;
+
+ token = 0;
+
+ /* NB: unbind has the same retry requirements mentioned above */
+ do {
+ rc = plpar_hcall(H_SCM_UNBIND_MEM, ret, p->drc_index,
+ p->bound_addr, p->blocks, token);
+ token = be64_to_cpu(ret[0]);
+ cond_resched();
+ } while (rc == H_BUSY);
+
+ if (rc)
+ dev_err(&p->pdev->dev, "unbind error: %lld\n", rc);
+
+ return !!rc;
+}
+
+static int papr_scm_meta_get(struct papr_scm_priv *p,
+ struct nd_cmd_get_config_data_hdr *hdr)
+{
+ unsigned long data[PLPAR_HCALL_BUFSIZE];
+ int64_t ret;
+
+ if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
+ return -EINVAL;
+
+ ret = plpar_hcall(H_SCM_READ_METADATA, data, p->drc_index,
+ hdr->in_offset, 1);
+
+ if (ret == H_PARAMETER) /* bad DRC index */
+ return -ENODEV;
+ if (ret)
+ return -EINVAL; /* other invalid parameter */
+
+ hdr->out_buf[0] = data[0] & 0xff;
+
+ return 0;
+}
+
+static int papr_scm_meta_set(struct papr_scm_priv *p,
+ struct nd_cmd_set_config_hdr *hdr)
+{
+ int64_t ret;
+
+ if (hdr->in_offset >= p->metadata_size || hdr->in_length != 1)
+ return -EINVAL;
+
+ ret = plpar_hcall_norets(H_SCM_WRITE_METADATA,
+ p->drc_index, hdr->in_offset, hdr->in_buf[0], 1);
+
+ if (ret == H_PARAMETER) /* bad DRC index */
+ return -ENODEV;
+ if (ret)
+ return -EINVAL; /* other invalid parameter */
+
+ return 0;
+}
+
+int papr_scm_ndctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
+{
+ struct nd_cmd_get_config_size *get_size_hdr;
+ struct papr_scm_priv *p;
+
+ /* Only dimm-specific calls are supported at the moment */
+ if (!nvdimm)
+ return -EINVAL;
+
+ p = nvdimm_provider_data(nvdimm);
+
+ switch (cmd) {
+ case ND_CMD_GET_CONFIG_SIZE:
+ get_size_hdr = buf;
+
+ get_size_hdr->status = 0;
+ get_size_hdr->max_xfer = 1;
+ get_size_hdr->config_size = p->metadata_size;
+ *cmd_rc = 0;
+ break;
+
+ case ND_CMD_GET_CONFIG_DATA:
+ *cmd_rc = papr_scm_meta_get(p, buf);
+ break;
+
+ case ND_CMD_SET_CONFIG_DATA:
+ *cmd_rc = papr_scm_meta_set(p, buf);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&p->pdev->dev, "returned with cmd_rc = %d\n", *cmd_rc);
+
+ return 0;
+}
+
+static const struct attribute_group *region_attr_groups[] = {
+ &nd_region_attribute_group,
+ &nd_device_attribute_group,
+ &nd_mapping_attribute_group,
+ &nd_numa_attribute_group,
+ NULL,
+};
+
+static const struct attribute_group *bus_attr_groups[] = {
+ &nvdimm_bus_attribute_group,
+ NULL,
+};
+
+static const struct attribute_group *papr_scm_dimm_groups[] = {
+ &nvdimm_attribute_group,
+ &nd_device_attribute_group,
+ NULL,
+};
+
+static int papr_scm_nvdimm_init(struct papr_scm_priv *p)
+{
+ struct device *dev = &p->pdev->dev;
+ struct nd_mapping_desc mapping;
+ struct nd_region_desc ndr_desc;
+ unsigned long dimm_flags;
+
+ p->bus_desc.ndctl = papr_scm_ndctl;
+ p->bus_desc.module = THIS_MODULE;
+ p->bus_desc.of_node = p->pdev->dev.of_node;
+ p->bus_desc.attr_groups = bus_attr_groups;
+ p->bus_desc.provider_name = kstrdup(p->pdev->name, GFP_KERNEL);
+
+ if (!p->bus_desc.provider_name)
+ return -ENOMEM;
+
+ p->bus = nvdimm_bus_register(NULL, &p->bus_desc);
+ if (!p->bus) {
+ dev_err(dev, "Error creating nvdimm bus %pOF\n", p->dn);
+ return -ENXIO;
+ }
+
+ dimm_flags = 0;
+ set_bit(NDD_ALIASING, &dimm_flags);
+
+ p->nvdimm = nvdimm_create(p->bus, p, papr_scm_dimm_groups,
+ dimm_flags, PAPR_SCM_DIMM_CMD_MASK, 0, NULL);
+ if (!p->nvdimm) {
+ dev_err(dev, "Error creating DIMM object for %pOF\n", p->dn);
+ goto err;
+ }
+
+ /* now add the region */
+
+ memset(&mapping, 0, sizeof(mapping));
+ mapping.nvdimm = p->nvdimm;
+ mapping.start = 0;
+ mapping.size = p->blocks * p->block_size; // XXX: potential overflow?
+
+ memset(&ndr_desc, 0, sizeof(ndr_desc));
+ ndr_desc.attr_groups = region_attr_groups;
+ ndr_desc.numa_node = dev_to_node(&p->pdev->dev);
+ ndr_desc.res = &p->res;
+ ndr_desc.of_node = p->dn;
+ ndr_desc.provider_data = p;
+ ndr_desc.mapping = &mapping;
+ ndr_desc.num_mappings = 1;
+ ndr_desc.nd_set = &p->nd_set;
+ set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
+
+ p->region = nvdimm_pmem_region_create(p->bus, &ndr_desc);
+ if (!p->region) {
+ dev_err(dev, "Error registering region %pR from %pOF\n",
+ ndr_desc.res, p->dn);
+ goto err;
+ }
+
+ return 0;
+
+err: nvdimm_bus_unregister(p->bus);
+ kfree(p->bus_desc.provider_name);
+ return -ENXIO;
+}
+
+static int papr_scm_probe(struct platform_device *pdev)
+{
+ uint32_t drc_index, metadata_size, unit_cap[2];
+ struct device_node *dn = pdev->dev.of_node;
+ struct papr_scm_priv *p;
+ int rc;
+
+ /* check we have all the required DT properties */
+ if (of_property_read_u32(dn, "ibm,my-drc-index", &drc_index)) {
+ dev_err(&pdev->dev, "%pOF: missing drc-index!\n", dn);
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32_array(dn, "ibm,unit-capacity", unit_cap, 2)) {
+ dev_err(&pdev->dev, "%pOF: missing unit-capacity!\n", dn);
+ return -ENODEV;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ /* optional DT properties */
+ of_property_read_u32(dn, "ibm,metadata-size", &metadata_size);
+
+ p->dn = dn;
+ p->drc_index = drc_index;
+ p->block_size = unit_cap[0];
+ p->blocks = unit_cap[1];
+
+ /* might be zero */
+ p->metadata_size = metadata_size;
+ p->pdev = pdev;
+
+ /* request the hypervisor to bind this region to somewhere in memory */
+ rc = drc_pmem_bind(p);
+ if (rc)
+ goto err;
+
+ /* setup the resource for the newly bound range */
+ p->res.start = p->bound_addr;
+ p->res.end = p->bound_addr + p->blocks * p->block_size - 1;
+ p->res.name = pdev->name;
+ p->res.flags = IORESOURCE_MEM;
+
+ rc = papr_scm_nvdimm_init(p);
+ if (rc)
+ goto err2;
+
+ platform_set_drvdata(pdev, p);
+
+ return 0;
+
+err2: drc_pmem_unbind(p);
+err: kfree(p);
+ return rc;
+}
+
+static int papr_scm_remove(struct platform_device *pdev)
+{
+ struct papr_scm_priv *p = platform_get_drvdata(pdev);
+
+ nvdimm_bus_unregister(p->bus);
+ drc_pmem_unbind(p);
+ kfree(p);
+
+ return 0;
+}
+
+static const struct of_device_id papr_scm_match[] = {
+ { .compatible = "ibm,pmemory" },
+ { },
+};
+
+static struct platform_driver papr_scm_driver = {
+ .probe = papr_scm_probe,
+ .remove = papr_scm_remove,
+ .driver = {
+ .name = "papr_scm",
+ .owner = THIS_MODULE,
+ .of_match_table = papr_scm_match,
+ },
+};
+
+module_platform_driver(papr_scm_driver);
+MODULE_DEVICE_TABLE(of, papr_scm_match);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("IBM Corporation");
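One detail worth spelling out in papr_scm_probe() above: struct resource bounds are inclusive, so a region of blocks * block_size bytes starting at bound_addr must end at start + size - 1. A sketch of the arithmetic:

u64 size = p->blocks * p->block_size;

p->res.start = p->bound_addr;
p->res.end   = p->bound_addr + size - 1;	/* inclusive end */
/* resource_size(&p->res) == size */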
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index eab96637d6cf..41d8a4d1d02e 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -239,6 +239,7 @@ void __init pSeries_final_fixup(void)
{
pSeries_request_regions();
+ eeh_probe_devices();
eeh_addr_cache_build();
#ifdef CONFIG_PCI_IOV
diff --git a/arch/powerpc/platforms/pseries/pmem.c b/arch/powerpc/platforms/pseries/pmem.c
new file mode 100644
index 000000000000..a27f40eb57b1
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/pmem.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Handles hot and cold plug of persistent memory regions on pseries.
+ */
+
+#define pr_fmt(fmt) "pseries-pmem: " fmt
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h> /* for idle_task_exit */
+#include <linux/sched/hotplug.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <asm/prom.h>
+#include <asm/rtas.h>
+#include <asm/firmware.h>
+#include <asm/machdep.h>
+#include <asm/vdso_datapage.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/topology.h>
+
+#include "pseries.h"
+#include "offline_states.h"
+
+static struct device_node *pmem_node;
+
+static ssize_t pmem_drc_add_node(u32 drc_index)
+{
+ struct device_node *dn;
+ int rc;
+
+ pr_debug("Attempting to add pmem node, drc index: %x\n", drc_index);
+
+ rc = dlpar_acquire_drc(drc_index);
+ if (rc) {
+ pr_err("Failed to acquire DRC, rc: %d, drc index: %x\n",
+ rc, drc_index);
+ return -EINVAL;
+ }
+
+ dn = dlpar_configure_connector(cpu_to_be32(drc_index), pmem_node);
+ if (!dn) {
+ pr_err("configure-connector failed for drc %x\n", drc_index);
+ dlpar_release_drc(drc_index);
+ return -EINVAL;
+ }
+
+ /* NB: the OF reconfig notifier creates a platform device from the node */
+ rc = dlpar_attach_node(dn, pmem_node);
+ if (rc) {
+ pr_err("Failed to attach node %s, rc: %d, drc index: %x\n",
+ dn->name, rc, drc_index);
+
+ if (dlpar_release_drc(drc_index))
+ dlpar_free_cc_nodes(dn);
+
+ return rc;
+ }
+
+ pr_info("Successfully added %pOF, drc index: %x\n", dn, drc_index);
+
+ return 0;
+}
+
+static ssize_t pmem_drc_remove_node(u32 drc_index)
+{
+ struct device_node *dn;
+ uint32_t index;
+ int rc;
+
+ for_each_child_of_node(pmem_node, dn) {
+ if (of_property_read_u32(dn, "ibm,my-drc-index", &index))
+ continue;
+ if (index == drc_index)
+ break;
+ }
+
+ if (!dn) {
+ pr_err("Attempting to remove unused DRC index %x\n", drc_index);
+ return -ENODEV;
+ }
+
+ pr_debug("Attempting to remove %pOF, drc index: %x\n", dn, drc_index);
+
+ /* NB: tears down the ibm,pmemory device as a side-effect */
+ rc = dlpar_detach_node(dn);
+ if (rc)
+ return rc;
+
+ rc = dlpar_release_drc(drc_index);
+ if (rc) {
+ pr_err("Failed to release drc (%x) for CPU %s, rc: %d\n",
+ drc_index, dn->name, rc);
+ dlpar_attach_node(dn, pmem_node);
+ return rc;
+ }
+
+ pr_info("Successfully removed PMEM with drc index: %x\n", drc_index);
+
+ return 0;
+}
+
+int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
+{
+ u32 count, drc_index;
+ int rc;
+
+ /* slim chance, but we might get a hotplug event while booting */
+ if (!pmem_node)
+ pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory");
+ if (!pmem_node) {
+ pr_err("Hotplug event for a pmem device, but none exists\n");
+ return -ENODEV;
+ }
+
+ if (hp_elog->id_type != PSERIES_HP_ELOG_ID_DRC_INDEX) {
+ pr_err("Unsupported hotplug event type %d\n",
+ hp_elog->id_type);
+ return -EINVAL;
+ }
+
+ count = hp_elog->_drc_u.drc_count;
+ drc_index = hp_elog->_drc_u.drc_index;
+
+ lock_device_hotplug();
+
+ if (hp_elog->action == PSERIES_HP_ELOG_ACTION_ADD) {
+ rc = pmem_drc_add_node(drc_index);
+ } else if (hp_elog->action == PSERIES_HP_ELOG_ACTION_REMOVE) {
+ rc = pmem_drc_remove_node(drc_index);
+ } else {
+ pr_err("Unsupported hotplug action (%d)\n", hp_elog->action);
+ rc = -EINVAL;
+ }
+
+ unlock_device_hotplug();
+ return rc;
+}
+
+const struct of_device_id drc_pmem_match[] = {
+ { .type = "ibm,persistent-memory", },
+ {}
+};
+
+static int pseries_pmem_init(void)
+{
+ pmem_node = of_find_node_by_type(NULL, "ibm,persistent-memory");
+ if (!pmem_node)
+ return 0;
+
+ /*
+ * The generic OF bus probe/populate handles creating platform devices
+ * from the child (ibm,pmemory) nodes. The generic code registers an of
+ * reconfig notifier to handle the hot-add/remove cases too.
+ */
+ of_platform_bus_probe(pmem_node, drc_pmem_match, NULL);
+
+ return 0;
+}
+machine_arch_initcall(pseries, pseries_pmem_init);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 60db2ee511fb..7dee8c5d3363 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -24,6 +24,7 @@ struct pt_regs;
extern int pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
+extern long pseries_machine_check_realmode(struct pt_regs *regs);
#ifdef CONFIG_SMP
extern void smp_init_pseries(void);
@@ -59,15 +60,21 @@ extern int dlpar_detach_node(struct device_node *);
extern int dlpar_acquire_drc(u32 drc_index);
extern int dlpar_release_drc(u32 drc_index);
-void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog,
- struct completion *hotplug_done, int *rc);
+void queue_hotplug_event(struct pseries_hp_errorlog *hp_errlog);
+int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_errlog);
+
#ifdef CONFIG_MEMORY_HOTPLUG
int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
+int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog);
#else
static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
return -EOPNOTSUPP;
}
+static inline int dlpar_hp_pmem(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
#endif
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 851ce326874a..d97d52772789 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -27,6 +27,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
+#include <asm/mce.h>
#include "pseries.h"
@@ -50,6 +51,101 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id);
static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
+/* RTAS pseries MCE errorlog section. */
+struct pseries_mc_errorlog {
+ __be32 fru_id;
+ __be32 proc_id;
+ u8 error_type;
+ /*
+ * sub_err_type (1 byte). Bit fields depend on error_type.
+ *
+ * MSB0
+ * |
+ * V
+ * 01234567
+ * XXXXXXXX
+ *
+ * For error_type == MC_ERROR_TYPE_UE
+ * XXXXXXXX
+ * X 1: Permanent or Transient UE.
+ * X 1: Effective address provided.
+ * X 1: Logical address provided.
+ * XX 2: Reserved.
+ * XXX 3: Type of UE error.
+ *
+ * For error_type != MC_ERROR_TYPE_UE
+ * XXXXXXXX
+ * X 1: Effective address provided.
+ * XXXXX 5: Reserved.
+ * XX 2: Type of SLB/ERAT/TLB error.
+ */
+ u8 sub_err_type;
+ u8 reserved_1[6];
+ __be64 effective_address;
+ __be64 logical_address;
+} __packed;
+
+/* RTAS pseries MCE error types */
+#define MC_ERROR_TYPE_UE 0x00
+#define MC_ERROR_TYPE_SLB 0x01
+#define MC_ERROR_TYPE_ERAT 0x02
+#define MC_ERROR_TYPE_TLB 0x04
+#define MC_ERROR_TYPE_D_CACHE 0x05
+#define MC_ERROR_TYPE_I_CACHE 0x07
+
+/* RTAS pseries MCE error sub types */
+#define MC_ERROR_UE_INDETERMINATE 0
+#define MC_ERROR_UE_IFETCH 1
+#define MC_ERROR_UE_PAGE_TABLE_WALK_IFETCH 2
+#define MC_ERROR_UE_LOAD_STORE 3
+#define MC_ERROR_UE_PAGE_TABLE_WALK_LOAD_STORE 4
+
+#define MC_ERROR_SLB_PARITY 0
+#define MC_ERROR_SLB_MULTIHIT 1
+#define MC_ERROR_SLB_INDETERMINATE 2
+
+#define MC_ERROR_ERAT_PARITY 1
+#define MC_ERROR_ERAT_MULTIHIT 2
+#define MC_ERROR_ERAT_INDETERMINATE 3
+
+#define MC_ERROR_TLB_PARITY 1
+#define MC_ERROR_TLB_MULTIHIT 2
+#define MC_ERROR_TLB_INDETERMINATE 3
+
+static inline u8 rtas_mc_error_sub_type(const struct pseries_mc_errorlog *mlog)
+{
+ switch (mlog->error_type) {
+ case MC_ERROR_TYPE_UE:
+ return (mlog->sub_err_type & 0x07);
+ case MC_ERROR_TYPE_SLB:
+ case MC_ERROR_TYPE_ERAT:
+ case MC_ERROR_TYPE_TLB:
+ return (mlog->sub_err_type & 0x03);
+ default:
+ return 0;
+ }
+}
+
+static
+inline u64 rtas_mc_get_effective_addr(const struct pseries_mc_errorlog *mlog)
+{
+ __be64 addr = 0;
+
+ switch (mlog->error_type) {
+ case MC_ERROR_TYPE_UE:
+ if (mlog->sub_err_type & 0x40)
+ addr = mlog->effective_address;
+ break;
+ case MC_ERROR_TYPE_SLB:
+ case MC_ERROR_TYPE_ERAT:
+ case MC_ERROR_TYPE_TLB:
+ if (mlog->sub_err_type & 0x80)
+ addr = mlog->effective_address;
+ break;
+ default:
+ break;
+ }
+ return be64_to_cpu(addr);
+}
/*
* Enable the hotplug interrupt late because processing them may touch other
@@ -237,8 +333,9 @@ static irqreturn_t ras_hotplug_interrupt(int irq, void *dev_id)
* hotplug events on the ras_log_buf to be handled by rtas_errd.
*/
if (hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_MEM ||
- hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU)
- queue_hotplug_event(hp_elog, NULL, NULL);
+ hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_CPU ||
+ hp_elog->resource == PSERIES_HP_ELOG_RESOURCE_PMEM)
+ queue_hotplug_event(hp_elog);
else
log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
@@ -427,6 +524,188 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
return 0; /* need to perform reset */
}
+#define VAL_TO_STRING(ar, val) \
+ (((val) < ARRAY_SIZE(ar)) ? ar[(val)] : "Unknown")
+
+static void pseries_print_mce_info(struct pt_regs *regs,
+ struct rtas_error_log *errp)
+{
+ const char *level, *sevstr;
+ struct pseries_errorlog *pseries_log;
+ struct pseries_mc_errorlog *mce_log;
+ u8 error_type, err_sub_type;
+ u64 addr;
+ u8 initiator = rtas_error_initiator(errp);
+ int disposition = rtas_error_disposition(errp);
+
+ static const char * const initiators[] = {
+ "Unknown",
+ "CPU",
+ "PCI",
+ "ISA",
+ "Memory",
+ "Power Mgmt",
+ };
+ static const char * const mc_err_types[] = {
+ "UE",
+ "SLB",
+ "ERAT",
+ "TLB",
+ "D-Cache",
+ "Unknown",
+ "I-Cache",
+ };
+ static const char * const mc_ue_types[] = {
+ "Indeterminate",
+ "Instruction fetch",
+ "Page table walk ifetch",
+ "Load/Store",
+ "Page table walk Load/Store",
+ };
+
+ /* SLB sub errors valid values are 0x0, 0x1, 0x2 */
+ static const char * const mc_slb_types[] = {
+ "Parity",
+ "Multihit",
+ "Indeterminate",
+ };
+
+ /* TLB and ERAT sub errors valid values are 0x1, 0x2, 0x3 */
+ static const char * const mc_soft_types[] = {
+ "Unknown",
+ "Parity",
+ "Multihit",
+ "Indeterminate",
+ };
+
+ if (!rtas_error_extended(errp)) {
+ pr_err("Machine check interrupt: Missing extended error log\n");
+ return;
+ }
+
+ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+ if (pseries_log == NULL)
+ return;
+
+ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+
+ error_type = mce_log->error_type;
+ err_sub_type = rtas_mc_error_sub_type(mce_log);
+
+ switch (rtas_error_severity(errp)) {
+ case RTAS_SEVERITY_NO_ERROR:
+ level = KERN_INFO;
+ sevstr = "Harmless";
+ break;
+ case RTAS_SEVERITY_WARNING:
+ level = KERN_WARNING;
+ sevstr = "";
+ break;
+ case RTAS_SEVERITY_ERROR:
+ case RTAS_SEVERITY_ERROR_SYNC:
+ level = KERN_ERR;
+ sevstr = "Severe";
+ break;
+ case RTAS_SEVERITY_FATAL:
+ default:
+ level = KERN_ERR;
+ sevstr = "Fatal";
+ break;
+ }
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Display faulty slb contents for SLB errors. */
+ if (error_type == MC_ERROR_TYPE_SLB)
+ slb_dump_contents(local_paca->mce_faulty_slbs);
+#endif
+
+ printk("%s%s Machine check interrupt [%s]\n", level, sevstr,
+ disposition == RTAS_DISP_FULLY_RECOVERED ?
+ "Recovered" : "Not recovered");
+ if (user_mode(regs)) {
+ printk("%s NIP: [%016lx] PID: %d Comm: %s\n", level,
+ regs->nip, current->pid, current->comm);
+ } else {
+ printk("%s NIP [%016lx]: %pS\n", level, regs->nip,
+ (void *)regs->nip);
+ }
+ printk("%s Initiator: %s\n", level,
+ VAL_TO_STRING(initiators, initiator));
+
+ switch (error_type) {
+ case MC_ERROR_TYPE_UE:
+ printk("%s Error type: %s [%s]\n", level,
+ VAL_TO_STRING(mc_err_types, error_type),
+ VAL_TO_STRING(mc_ue_types, err_sub_type));
+ break;
+ case MC_ERROR_TYPE_SLB:
+ printk("%s Error type: %s [%s]\n", level,
+ VAL_TO_STRING(mc_err_types, error_type),
+ VAL_TO_STRING(mc_slb_types, err_sub_type));
+ break;
+ case MC_ERROR_TYPE_ERAT:
+ case MC_ERROR_TYPE_TLB:
+ printk("%s Error type: %s [%s]\n", level,
+ VAL_TO_STRING(mc_err_types, error_type),
+ VAL_TO_STRING(mc_soft_types, err_sub_type));
+ break;
+ default:
+ printk("%s Error type: %s\n", level,
+ VAL_TO_STRING(mc_err_types, error_type));
+ break;
+ }
+
+ addr = rtas_mc_get_effective_addr(mce_log);
+ if (addr)
+ printk("%s Effective address: %016llx\n", level, addr);
+}
+
+static int mce_handle_error(struct rtas_error_log *errp)
+{
+ struct pseries_errorlog *pseries_log;
+ struct pseries_mc_errorlog *mce_log;
+ int disposition = rtas_error_disposition(errp);
+ u8 error_type;
+
+ if (!rtas_error_extended(errp))
+ goto out;
+
+ pseries_log = get_pseries_errorlog(errp, PSERIES_ELOG_SECT_ID_MCE);
+ if (pseries_log == NULL)
+ goto out;
+
+ mce_log = (struct pseries_mc_errorlog *)pseries_log->data;
+ error_type = mce_log->error_type;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ if (disposition == RTAS_DISP_NOT_RECOVERED) {
+ switch (error_type) {
+ case MC_ERROR_TYPE_SLB:
+ case MC_ERROR_TYPE_ERAT:
+ /*
+ * Store the old SLB contents in the paca before flushing,
+ * and print them once we are back in virtual mode.
+ * There is a chance we may hit the MCE again if there
+ * is a parity error on the SLB entry we are trying to read
+ * for saving. Hence limit SLB saving to a single level
+ * of recursion.
+ */
+ if (local_paca->in_mce == 1)
+ slb_save_contents(local_paca->mce_faulty_slbs);
+ flush_and_reload_slb();
+ disposition = RTAS_DISP_FULLY_RECOVERED;
+ rtas_set_disposition_recovered(errp);
+ break;
+ default:
+ break;
+ }
+ }
+#endif
+
+out:
+ return disposition;
+}
+
/*
* Process MCE rtas errlog event.
*/
@@ -452,8 +731,11 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
int recovered = 0;
int disposition = rtas_error_disposition(err);
+ pseries_print_mce_info(regs, err);
+
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
+ pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
recovered = 0;
} else if (disposition == RTAS_DISP_FULLY_RECOVERED) {
@@ -503,11 +785,31 @@ int pSeries_machine_check_exception(struct pt_regs *regs)
struct rtas_error_log *errp;
if (fwnmi_active) {
- errp = fwnmi_get_errinfo(regs);
fwnmi_release_errinfo();
+ errp = fwnmi_get_errlog();
if (errp && recover_mce(regs, errp))
return 1;
}
return 0;
}
+
+long pseries_machine_check_realmode(struct pt_regs *regs)
+{
+ struct rtas_error_log *errp;
+ int disposition;
+
+ if (fwnmi_active) {
+ errp = fwnmi_get_errinfo(regs);
+ /*
+ * Calling fwnmi_release_errinfo() in real mode causes the
+ * kernel to panic. Hence we call it as soon as we go into
+ * virtual mode.
+ */
+ disposition = mce_handle_error(errp);
+ if (disposition == RTAS_DISP_FULLY_RECOVERED)
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ba1791fd3234..0f553dcfa548 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -107,6 +107,10 @@ static void __init fwnmi_init(void)
u8 *mce_data_buf;
unsigned int i;
int nr_cpus = num_possible_cpus();
+#ifdef CONFIG_PPC_BOOK3S_64
+ struct slb_entry *slb_ptr;
+ size_t size;
+#endif
int ibm_nmi_register = rtas_token("ibm,nmi-register");
if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE)
@@ -132,6 +136,15 @@ static void __init fwnmi_init(void)
paca_ptrs[i]->mce_data_buf = mce_data_buf +
(RTAS_ERROR_LOG_MAX * i);
}
+
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* Allocate per cpu slb area to save old slb contents during MCE */
+ size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
+ slb_ptr = __va(memblock_alloc_base(size, sizeof(struct slb_entry),
+ ppc64_rma_size));
+ for_each_possible_cpu(i)
+ paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i);
+#endif
}
static void pseries_8259_cascade(struct irq_desc *desc)
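The per-CPU SLB save area allocated in the hunk above is sized as sizeof(struct slb_entry) * mmu_slb_size * nr_cpus and placed below ppc64_rma_size so it is usable from the real-mode MCE path. A rough worked example (assuming the usual two-dword esid/vsid layout of struct slb_entry; real values are machine-dependent):

	sizeof(struct slb_entry) = 16 bytes
	mmu_slb_size             = 32 entries
	nr_cpus                  = 64
	size                     = 16 * 32 * 64 = 32 KiB (512 bytes per CPU)

Each paca's mce_faulty_slbs then points at its own mmu_slb_size-entry slice of this buffer.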
@@ -1017,6 +1030,7 @@ define_machine(pseries) {
.calibrate_decr = generic_calibrate_decr,
.progress = rtas_progress,
.system_reset_exception = pSeries_system_reset_exception,
+ .machine_check_early = pseries_machine_check_realmode,
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC_CORE
.machine_kexec = pSeries_machine_kexec,
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index 49e04ec19238..88f1ad1d6309 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1349,7 +1349,6 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
struct device_node *parent_node;
const __be32 *prop;
enum vio_dev_family family;
- const char *of_node_name = of_node->name ? of_node->name : "<unknown>";
/*
* Determine if this node is under the /vdevice node or under the
@@ -1362,24 +1361,24 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
else if (!strcmp(parent_node->type, "vdevice"))
family = VDEVICE;
else {
- pr_warn("%s: parent(%pOF) of %s not recognized.\n",
+ pr_warn("%s: parent(%pOF) of %pOFn not recognized.\n",
__func__,
parent_node,
- of_node_name);
+ of_node);
of_node_put(parent_node);
return NULL;
}
of_node_put(parent_node);
} else {
- pr_warn("%s: could not determine the parent of node %s.\n",
- __func__, of_node_name);
+ pr_warn("%s: could not determine the parent of node %pOFn.\n",
+ __func__, of_node);
return NULL;
}
if (family == PFO) {
if (of_get_property(of_node, "interrupt-controller", NULL)) {
- pr_debug("%s: Skipping the interrupt controller %s.\n",
- __func__, of_node_name);
+ pr_debug("%s: Skipping the interrupt controller %pOFn.\n",
+ __func__, of_node);
return NULL;
}
}
@@ -1399,15 +1398,15 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (of_node->type != NULL)
viodev->type = of_node->type;
else {
- pr_warn("%s: node %s is missing the 'device_type' "
- "property.\n", __func__, of_node_name);
+ pr_warn("%s: node %pOFn is missing the 'device_type' "
+ "property.\n", __func__, of_node);
goto out;
}
prop = of_get_property(of_node, "reg", NULL);
if (prop == NULL) {
- pr_warn("%s: node %s missing 'reg'\n",
- __func__, of_node_name);
+ pr_warn("%s: node %pOFn missing 'reg'\n",
+ __func__, of_node);
goto out;
}
unit_address = of_read_number(prop, 1);
@@ -1422,8 +1421,8 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
if (prop != NULL)
viodev->resource_id = of_read_number(prop, 1);
- dev_set_name(&viodev->dev, "%s", of_node_name);
- viodev->type = of_node_name;
+ dev_set_name(&viodev->dev, "%pOFn", of_node);
+ viodev->type = dev_name(&viodev->dev);
viodev->irq = 0;
}
@@ -1694,7 +1693,7 @@ struct vio_dev *vio_find_node(struct device_node *vnode)
snprintf(kobj_name, sizeof(kobj_name), "%x",
(uint32_t)of_read_number(prop, 1));
} else if (!strcmp(dev_type, "ibm,platform-facilities"))
- snprintf(kobj_name, sizeof(kobj_name), "%s", vnode->name);
+ snprintf(kobj_name, sizeof(kobj_name), "%pOFn", vnode);
else
return NULL;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index bcef2ac56479..e0dbec780fe9 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -6,19 +6,16 @@
config PPC4xx_PCI_EXPRESS
bool
depends on PCI && 4xx
- default n
config PPC4xx_HSTA_MSI
bool
depends on PCI_MSI
depends on PCI && 4xx
- default n
config PPC4xx_MSI
bool
depends on PCI_MSI
depends on PCI && 4xx
- default n
config PPC_MSI_BITMAP
bool
@@ -37,11 +34,9 @@ config PPC_SCOM
config SCOM_DEBUGFS
bool "Expose SCOM controllers via debugfs"
depends on PPC_SCOM && DEBUG_FS
- default n
config GE_FPGA
bool
- default n
config FSL_CORENET_RCPM
bool
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index f730539074c4..2caa4defdfb6 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
@@ -56,8 +55,6 @@ obj-$(CONFIG_PPC_SCOM) += scom.o
obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
-
obj-$(CONFIG_PPC_XICS) += xics/
obj-$(CONFIG_PPC_XIVE) += xive/
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 00ccf3e4fcb4..15cbdd4fde06 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -107,11 +107,11 @@ int __init instantiate_cache_sram(struct platform_device *dev,
goto out_free;
}
- cache_sram->base_virt = ioremap_prot(cache_sram->base_phys,
- cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL);
+ cache_sram->base_virt = ioremap_coherent(cache_sram->base_phys,
+ cache_sram->size);
if (!cache_sram->base_virt) {
- dev_err(&dev->dev, "%pOF: ioremap_prot failed\n",
- dev->dev.of_node);
+ dev_err(&dev->dev, "%pOF: ioremap_coherent failed\n",
+ dev->dev.of_node);
ret = -ENOMEM;
goto out_release;
}
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 535cf1f6941c..6300123ce965 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -846,7 +846,7 @@ void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq)
u32 ipic_get_mcp_status(void)
{
- return ipic_read(primary_ipic->regs, IPIC_SERSR);
+ return primary_ipic ? ipic_read(primary_ipic->regs, IPIC_SERSR) : 0;
}
void ipic_clear_mcp_status(u32 mask)
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile
index 5d438d92472b..ba1e3117b1c0 100644
--- a/arch/powerpc/sysdev/xics/Makefile
+++ b/arch/powerpc/sysdev/xics/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-y += xics-common.o
obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o
diff --git a/arch/powerpc/sysdev/xive/Kconfig b/arch/powerpc/sysdev/xive/Kconfig
index 70ee976e1de0..785c292d104b 100644
--- a/arch/powerpc/sysdev/xive/Kconfig
+++ b/arch/powerpc/sysdev/xive/Kconfig
@@ -1,17 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_XIVE
bool
- default n
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
config PPC_XIVE_NATIVE
bool
- default n
select PPC_XIVE
depends on PPC_POWERNV
config PPC_XIVE_SPAPR
bool
- default n
select PPC_XIVE
diff --git a/arch/powerpc/sysdev/xive/Makefile b/arch/powerpc/sysdev/xive/Makefile
index 536d6e5706e3..dea2abc23f4d 100644
--- a/arch/powerpc/sysdev/xive/Makefile
+++ b/arch/powerpc/sysdev/xive/Makefile
@@ -1,4 +1,3 @@
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-y += common.o
obj-$(CONFIG_PPC_XIVE_NATIVE) += native.o
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 959a2a62f233..9824074ec1b5 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -1010,12 +1010,13 @@ static void xive_ipi_eoi(struct irq_data *d)
{
struct xive_cpu *xc = __this_cpu_read(xive_cpu);
- DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
- d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
-
/* Handle possible race with unplug and drop stale IPIs */
if (!xc)
return;
+
+ DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
+ d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);
+
xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
xive_do_queue_eoi(xc);
}
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 5b20a678d755..1ca127d052a6 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -238,20 +238,11 @@ static bool xive_native_match(struct device_node *node)
#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
- struct device_node *np;
- unsigned int chip_id;
s64 irq;
- /* Find the chip ID */
- np = of_get_cpu_node(cpu, NULL);
- if (np) {
- if (of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0)
- chip_id = 0;
- }
-
/* Allocate an IPI and populate info about it */
for (;;) {
- irq = opal_xive_allocate_irq(chip_id);
+ irq = opal_xive_allocate_irq(xc->chip_id);
if (irq == OPAL_BUSY) {
msleep(OPAL_BUSY_DELAY_MS);
continue;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 1bc3abb237cd..69e7fb47bcaa 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for xmon
-subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+# Disable clang warning for using setjmp without setjmp.h header
+subdir-ccflags-y := $(call cc-disable-warning, builtin-requires-header)
GCOV_PROFILE := n
UBSAN_SANITIZE := n
# Disable ftrace for the entire directory
ORIG_CFLAGS := $(KBUILD_CFLAGS)
-KBUILD_CFLAGS = $(subst -mno-sched-epilog,,$(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)))
+KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS))
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 4264aedc7775..36b8dc47a3c3 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2378,25 +2378,33 @@ static void dump_one_paca(int cpu)
DUMP(p, cpu_start, "%#-*x");
DUMP(p, kexec_state, "%#-*x");
#ifdef CONFIG_PPC_BOOK3S_64
- for (i = 0; i < SLB_NUM_BOLTED; i++) {
- u64 esid, vsid;
+ if (!early_radix_enabled()) {
+ for (i = 0; i < SLB_NUM_BOLTED; i++) {
+ u64 esid, vsid;
- if (!p->slb_shadow_ptr)
- continue;
+ if (!p->slb_shadow_ptr)
+ continue;
+
+ esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid);
+ vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid);
- esid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].esid);
- vsid = be64_to_cpu(p->slb_shadow_ptr->save_area[i].vsid);
+ if (esid || vsid) {
+ printf(" %-*s[%d] = 0x%016llx 0x%016llx\n",
+ 22, "slb_shadow", i, esid, vsid);
+ }
+ }
+ DUMP(p, vmalloc_sllp, "%#-*x");
+ DUMP(p, stab_rr, "%#-*x");
+ DUMP(p, slb_used_bitmap, "%#-*x");
+ DUMP(p, slb_kern_bitmap, "%#-*x");
- if (esid || vsid) {
- printf(" %-*s[%d] = 0x%016llx 0x%016llx\n",
- 22, "slb_shadow", i, esid, vsid);
+ if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
+ DUMP(p, slb_cache_ptr, "%#-*x");
+ for (i = 0; i < SLB_CACHE_ENTRIES; i++)
+ printf(" %-*s[%d] = 0x%016x\n",
+ 22, "slb_cache", i, p->slb_cache[i]);
}
}
- DUMP(p, vmalloc_sllp, "%#-*x");
- DUMP(p, slb_cache_ptr, "%#-*x");
- for (i = 0; i < SLB_CACHE_ENTRIES; i++)
- printf(" %-*s[%d] = 0x%016x\n",
- 22, "slb_cache", i, p->slb_cache[i]);
DUMP(p, rfi_flush_fallback_area, "%-*px");
#endif
@@ -2412,7 +2420,9 @@ static void dump_one_paca(int cpu)
DUMP(p, __current, "%-*px");
DUMP(p, kstack, "%#-*llx");
printf(" %-*s = 0x%016llx\n", 25, "kstack_base", p->kstack & ~(THREAD_SIZE - 1));
- DUMP(p, stab_rr, "%#-*llx");
+#ifdef CONFIG_STACKPROTECTOR
+ DUMP(p, canary, "%#-*lx");
+#endif
DUMP(p, saved_r1, "%#-*llx");
DUMP(p, trap_save, "%#-*x");
DUMP(p, irq_soft_mask, "%#-*x");
@@ -2444,11 +2454,15 @@ static void dump_one_paca(int cpu)
DUMP(p, accounting.utime, "%#-*lx");
DUMP(p, accounting.stime, "%#-*lx");
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
DUMP(p, accounting.utime_scaled, "%#-*lx");
+#endif
DUMP(p, accounting.starttime, "%#-*lx");
DUMP(p, accounting.starttime_user, "%#-*lx");
+#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
DUMP(p, accounting.startspurr, "%#-*lx");
DUMP(p, accounting.utime_sspurr, "%#-*lx");
+#endif
DUMP(p, accounting.steal_time, "%#-*lx");
#undef DUMP
@@ -2988,15 +3002,17 @@ static void show_task(struct task_struct *tsk)
#ifdef CONFIG_PPC_BOOK3S_64
void format_pte(void *ptep, unsigned long pte)
{
+ pte_t entry = __pte(pte);
+
printf("ptep @ 0x%016lx = 0x%016lx\n", (unsigned long)ptep, pte);
printf("Maps physical address = 0x%016lx\n", pte & PTE_RPN_MASK);
printf("Flags = %s%s%s%s%s\n",
- (pte & _PAGE_ACCESSED) ? "Accessed " : "",
- (pte & _PAGE_DIRTY) ? "Dirty " : "",
- (pte & _PAGE_READ) ? "Read " : "",
- (pte & _PAGE_WRITE) ? "Write " : "",
- (pte & _PAGE_EXEC) ? "Exec " : "");
+ pte_young(entry) ? "Accessed " : "",
+ pte_dirty(entry) ? "Dirty " : "",
+ pte_read(entry) ? "Read " : "",
+ pte_write(entry) ? "Write " : "",
+ pte_exec(entry) ? "Exec " : "");
}
static void show_pte(unsigned long addr)
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index a344980287a5..fe451348ae57 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -31,6 +31,7 @@ config RISCV
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_DMA_CONTIGUOUS
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select HAVE_GENERIC_DMA_COHERENT
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
@@ -108,10 +109,12 @@ config ARCH_RV32I
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
select GENERIC_LIB_UCMPDI2
+ select GENERIC_LIB_UMODDI3
config ARCH_RV64I
bool "RV64I"
select 64BIT
+ select ARCH_SUPPORTS_INT128 if GCC_VERSION >= 50000
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FTRACE_MCOUNT_RECORD
@@ -208,14 +211,61 @@ config RISCV_BASE_PMU
endmenu
+config FPU
+ bool "FPU support"
+ default y
+ help
+ Say N here if you want to disable all floating-point related
+ procedures in the kernel.
+
+ If you don't know what to do here, say Y.
+
endmenu
-menu "Kernel type"
+menu "Kernel features"
source "kernel/Kconfig.hz"
endmenu
+menu "Boot options"
+
+config CMDLINE_BOOL
+ bool "Built-in kernel command line"
+ help
+ For most platforms, it is the firmware or second-stage bootloader
+ that specifies the kernel command line options by default.
+ However, it might be necessary or advantageous to either override
+ the default kernel command line or add a few extra options to it.
+ For such cases, this option allows hardcoding command line options
+ directly into the kernel.
+
+ For that, choose 'Y' here and fill in the extra boot parameters
+ in CONFIG_CMDLINE.
+
+ The built-in options will be concatenated to the default command
+ line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
+ command line will be ignored and replaced by the built-in string.
+
+config CMDLINE
+ string "Built-in kernel command string"
+ depends on CMDLINE_BOOL
+ default ""
+ help
+ Supply command-line options at build time by entering them here.
+
+config CMDLINE_FORCE
+ bool "Built-in command line overrides bootloader arguments"
+ depends on CMDLINE_BOOL
+ help
+ Set this option to 'Y' to have the kernel ignore the bootloader
+ or firmware command line. Instead, the built-in command line
+ will be used exclusively.
+
+ If you don't know what to do here, say N.
+
+endmenu
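For illustration, a .config fragment exercising these options might look like this (the command-line string is a made-up example, not part of this patch):

	CONFIG_CMDLINE_BOOL=y
	CONFIG_CMDLINE="earlycon console=ttyS0"
	# CONFIG_CMDLINE_FORCE is not set

With CMDLINE_FORCE unset, the built-in string is appended to whatever the bootloader passes; enabling it would make the built-in string replace the bootloader arguments entirely.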
+
menu "Bus support"
config PCI
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
index 3224ff6ecf6e..c5a72f17c469 100644
--- a/arch/riscv/Kconfig.debug
+++ b/arch/riscv/Kconfig.debug
@@ -1,37 +1,2 @@
-
-config CMDLINE_BOOL
- bool "Built-in kernel command line"
- help
- For most platforms, it is firmware or second stage bootloader
- that by default specifies the kernel command line options.
- However, it might be necessary or advantageous to either override
- the default kernel command line or add a few extra options to it.
- For such cases, this option allows hardcoding command line options
- directly into the kernel.
-
- For that, choose 'Y' here and fill in the extra boot parameters
- in CONFIG_CMDLINE.
-
- The built-in options will be concatenated to the default command
- line if CMDLINE_FORCE is set to 'N'. Otherwise, the default
- command line will be ignored and replaced by the built-in string.
-
-config CMDLINE
- string "Built-in kernel command string"
- depends on CMDLINE_BOOL
- default ""
- help
- Supply command-line options at build time by entering them here.
-
-config CMDLINE_FORCE
- bool "Built-in command line overrides bootloader arguments"
- depends on CMDLINE_BOOL
- help
- Set this option to 'Y' to have the kernel ignore the bootloader
- or firmware command line. Instead, the built-in command line
- will be used exclusively.
-
- If you don't know what to do here, say N.
-
config EARLY_PRINTK
def_bool y
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 61ec42405ec9..d10146197533 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -25,10 +25,7 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_CFLAGS += -mabi=lp64
KBUILD_AFLAGS += -mabi=lp64
-
- KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0500, -DCONFIG_ARCH_SUPPORTS_INT128)
- KBUILD_MARCH = rv64im
KBUILD_LDFLAGS += -melf64lriscv
else
BITS := 32
@@ -36,22 +33,20 @@ else
KBUILD_CFLAGS += -mabi=ilp32
KBUILD_AFLAGS += -mabi=ilp32
- KBUILD_MARCH = rv32im
KBUILD_LDFLAGS += -melf32lriscv
endif
KBUILD_CFLAGS += -Wall
-ifeq ($(CONFIG_RISCV_ISA_A),y)
- KBUILD_ARCH_A = a
-endif
-ifeq ($(CONFIG_RISCV_ISA_C),y)
- KBUILD_ARCH_C = c
-endif
-
-KBUILD_AFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)fd$(KBUILD_ARCH_C)
+# ISA string setting
+riscv-march-$(CONFIG_ARCH_RV32I) := rv32im
+riscv-march-$(CONFIG_ARCH_RV64I) := rv64im
+riscv-march-$(CONFIG_RISCV_ISA_A) := $(riscv-march-y)a
+riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
+riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
+KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
+KBUILD_AFLAGS += -march=$(riscv-march-y)
-KBUILD_CFLAGS += -march=$(KBUILD_MARCH)$(KBUILD_ARCH_A)$(KBUILD_ARCH_C)
KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
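To make the incremental riscv-march-y construction concrete: with CONFIG_ARCH_RV64I=y, CONFIG_RISCV_ISA_A=y, CONFIG_FPU=y and CONFIG_RISCV_ISA_C=y, the string grows as rv64im -> rv64ima -> rv64imafd -> rv64imafdc, giving:

	KBUILD_AFLAGS march = rv64imafdc  (fpu.S needs the F/D mnemonics)
	KBUILD_CFLAGS march = rv64imac    ($(subst fd,,...) strips F/D so the
	                                   compiler never emits FP instructions
	                                   in ordinary kernel code)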
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index efdbe311e936..6a646d9ea780 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -13,7 +13,6 @@ generic-y += errno.h
generic-y += exec.h
generic-y += fb.h
generic-y += fcntl.h
-generic-y += futex.h
generic-y += hardirq.h
generic-y += hash.h
generic-y += hw_irq.h
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
new file mode 100644
index 000000000000..3b19eba1bc8e
--- /dev/null
+++ b/arch/riscv/include/asm/futex.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#ifndef CONFIG_RISCV_ISA_A
+/*
+ * Use the generic interrupt disabling versions if the A extension
+ * is not supported.
+ */
+#ifdef CONFIG_SMP
+#error "Can't support generic futex calls without A extension on SMP"
+#endif
+#include <asm-generic/futex.h>
+
+#else /* CONFIG_RISCV_ISA_A */
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <asm/asm.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ uintptr_t tmp; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1: " insn " \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .balign 4 \n" \
+ "3: li %[r],%[e] \n" \
+ " jump 2b,%[t] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .balign " RISCV_SZPTR " \n" \
+ " " RISCV_PTR " 1b, 3b \n" \
+ " .previous \n" \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr), [t] "=&r" (tmp) \
+ : [op] "Jr" (oparg), [e] "i" (-EFAULT) \
+ : "memory"); \
+ __disable_user_access(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+ uintptr_t tmp;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __enable_user_access();
+ __asm__ __volatile__ (
+ "1: lr.w.aqrl %[v],%[u] \n"
+ " bne %[v],%z[ov],3f \n"
+ "2: sc.w.aqrl %[t],%z[nv],%[u] \n"
+ " bnez %[t],1b \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ " .balign 4 \n"
+ "4: li %[r],%[e] \n"
+ " jump 3b,%[t] \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .balign " RISCV_SZPTR " \n"
+ " " RISCV_PTR " 1b, 4b \n"
+ " " RISCV_PTR " 2b, 4b \n"
+ " .previous \n"
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+ : "memory");
+ __disable_user_access();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* CONFIG_RISCV_ISA_A */
+#endif /* _ASM_FUTEX_H */
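Abstracting away the user-access window and the exception-table fixups, the LR/SC sequence in futex_atomic_cmpxchg_inatomic() behaves like the following C sketch (illustrative only; the real operation must be a single atomic sequence, which is why it is written in assembly, and sc_failed() is a hypothetical stand-in for the store-conditional):

	u32 val;
	do {
		val = *uaddr;		/* 1: lr.w.aqrl, load-reserved */
		if (val != oldval)
			break;		/* bne: mismatch, bail out */
	} while (sc_failed(uaddr, newval)); /* 2: sc.w.aqrl, retried on failure */
	*uval = val;			/* report the value actually observed */

A fault on either marked access is steered through the fixup entry and returns -EFAULT.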
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3fe4af8147d2..50de774d827a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -88,7 +88,7 @@ static inline void wait_for_interrupt(void)
}
struct device_node;
-extern int riscv_of_processor_hart(struct device_node *node);
+int riscv_of_processor_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index 36016845461d..41aa73b476f4 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -14,16 +14,24 @@
#ifndef _ASM_RISCV_SMP_H
#define _ASM_RISCV_SMP_H
-/* This both needs asm-offsets.h and is used when generating it. */
-#ifndef GENERATING_ASM_OFFSETS
-#include <asm/asm-offsets.h>
-#endif
-
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
+#include <linux/thread_info.h>
+
+#define INVALID_HARTID ULONG_MAX
+/*
+ * Mapping between the Linux logical CPU index and the hartid.
+ */
+extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
+#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
+
+struct seq_file;
#ifdef CONFIG_SMP
+/* print IPI stats */
+void show_ipi_stats(struct seq_file *p, int prec);
+
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);
@@ -33,14 +41,31 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
/* Hook for the generic smp_call_function_single() routine. */
void arch_send_call_function_single_ipi(int cpu);
+int riscv_hartid_to_cpuid(int hartid);
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+
/*
- * This is particularly ugly: it appears we can't actually get the definition
- * of task_struct here, but we need access to the CPU this task is running on.
- * Instead of using C we're using asm-offsets.h to get the current processor
- * ID.
+ * Obtains the logical CPU ID of the currently executing task. This relies
+ * on THREAD_INFO_IN_TASK, but we define that unconditionally.
*/
-#define raw_smp_processor_id() (*((int*)((char*)get_current() + TASK_TI_CPU)))
+#define raw_smp_processor_id() (current_thread_info()->cpu)
-#endif /* CONFIG_SMP */
+#else
+
+static inline void show_ipi_stats(struct seq_file *p, int prec)
+{
+}
+static inline int riscv_hartid_to_cpuid(int hartid)
+{
+ return 0;
+}
+
+static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
+ struct cpumask *out)
+{
+ cpumask_set_cpu(cpuid_to_hartid_map(0), out);
+}
+
+#endif /* CONFIG_SMP */
#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05bff75b..733559083f24 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -18,6 +18,7 @@
#include <asm/ptrace.h>
#include <asm/csr.h>
+#ifdef CONFIG_FPU
extern void __fstate_save(struct task_struct *save_to);
extern void __fstate_restore(struct task_struct *restore_from);
@@ -55,6 +56,14 @@ static inline void __switch_to_aux(struct task_struct *prev,
fstate_restore(next, task_pt_regs(next));
}
+extern bool has_fpu;
+#else
+#define has_fpu false
+#define fstate_save(task, regs) do { } while (0)
+#define fstate_restore(task, regs) do { } while (0)
+#define __switch_to_aux(__prev, __next) do { } while (0)
+#endif
+
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
@@ -62,7 +71,8 @@ extern struct task_struct *__switch_to(struct task_struct *,
do { \
struct task_struct *__prev = (prev); \
struct task_struct *__next = (next); \
- __switch_to_aux(__prev, __next); \
+ if (has_fpu) \
+ __switch_to_aux(__prev, __next); \
((last) = __switch_to(__prev, __next)); \
} while (0)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 85c2d8bae957..54fee0cadb1e 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -16,6 +16,7 @@
#define _ASM_RISCV_TLBFLUSH_H
#include <linux/mm_types.h>
+#include <asm/smp.h>
/*
* Flush entire local TLB. 'sfence.vma' implicitly fences with the instruction
@@ -49,13 +50,22 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
#include <asm/sbi.h>
+static inline void remote_sfence_vma(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(hmask.bits, start, size);
+}
+
#define flush_tlb_all() sbi_remote_sfence_vma(NULL, 0, -1)
#define flush_tlb_page(vma, addr) flush_tlb_range(vma, addr, 0)
#define flush_tlb_range(vma, start, end) \
- sbi_remote_sfence_vma(mm_cpumask((vma)->vm_mm)->bits, \
- start, (end) - (start))
+ remote_sfence_vma(mm_cpumask((vma)->vm_mm), start, (end) - (start))
#define flush_tlb_mm(mm) \
- sbi_remote_sfence_vma(mm_cpumask(mm)->bits, 0, -1)
+ remote_sfence_vma(mm_cpumask(mm), 0, -1)
#endif /* CONFIG_SMP */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
index 0caea01d5cca..eff7aa9aa163 100644
--- a/arch/riscv/include/asm/unistd.h
+++ b/arch/riscv/include/asm/unistd.h
@@ -16,6 +16,7 @@
* be included multiple times. See uapi/asm/syscalls.h for more info.
*/
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
#include <uapi/asm/syscalls.h>
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 1e0dfc36aab9..644a00ce6e2e 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -19,7 +19,10 @@ typedef unsigned long elf_greg_t;
typedef struct user_regs_struct elf_gregset_t;
#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
+/* We don't support f without d, or q. */
+typedef __u64 elf_fpreg_t;
typedef union __riscv_fp_state elf_fpregset_t;
+#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
#if __riscv_xlen == 64
#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index e1274fc03af4..f13f7f276639 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -31,6 +31,7 @@ obj-y += vdso/
CFLAGS_setup.o := -mcmodel=medany
+obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
index 0bc86e5f8f3f..cb35ffd8ec6b 100644
--- a/arch/riscv/kernel/cacheinfo.c
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -22,13 +22,6 @@ static void ci_leaf_init(struct cacheinfo *this_leaf,
{
this_leaf->level = level;
this_leaf->type = type;
- /* not a sector cache */
- this_leaf->physical_line_partition = 1;
- /* TODO: Add to DTS */
- this_leaf->attributes =
- CACHE_WRITE_BACK
- | CACHE_READ_ALLOCATE
- | CACHE_WRITE_ALLOCATE;
}
static int __init_cache_level(unsigned int cpu)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index ca6c81e54e37..3a5a2ee31547 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -14,9 +14,13 @@
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/of.h>
+#include <asm/smp.h>
-/* Return -1 if not a valid hart */
-int riscv_of_processor_hart(struct device_node *node)
+/*
+ * Returns the hart ID of the given device tree node, or -1 if the device tree
+ * node isn't a RISC-V hart.
+ */
+int riscv_of_processor_hartid(struct device_node *node)
{
const char *isa, *status;
u32 hart;
@@ -58,6 +62,64 @@ int riscv_of_processor_hart(struct device_node *node)
#ifdef CONFIG_PROC_FS
+static void print_isa(struct seq_file *f, const char *orig_isa)
+{
+ static const char *ext = "mafdc";
+ const char *isa = orig_isa;
+ const char *e;
+
+ /*
+ * Linux doesn't support rv32e or rv128i, and we only support booting
+ * kernels on harts with the same ISA that the kernel is compiled for.
+ */
+#if defined(CONFIG_32BIT)
+ if (strncmp(isa, "rv32i", 5) != 0)
+ return;
+#elif defined(CONFIG_64BIT)
+ if (strncmp(isa, "rv64i", 5) != 0)
+ return;
+#endif
+
+ /* Print the base ISA, as we already know it's legal. */
+ seq_puts(f, "isa\t\t: ");
+ seq_write(f, isa, 5);
+ isa += 5;
+
+ /*
+ * Check the rest of the ISA string for valid extensions, printing those
+ * we find. RISC-V ISA strings define an order, so we only print the
+ * extension bits when they're in order.
+ */
+ for (e = ext; *e != '\0'; ++e) {
+ if (isa[0] == e[0]) {
+ seq_write(f, isa, 1);
+ isa++;
+ }
+ }
+ seq_puts(f, "\n");
+
+ /*
+ * If we were given an unsupported ISA in the device tree then print
+ * a bit of info describing what went wrong.
+ */
+ if (isa[0] != '\0')
+ pr_info("unsupported ISA \"%s\" in device tree", orig_isa);
+}
+
+static void print_mmu(struct seq_file *f, const char *mmu_type)
+{
+#if defined(CONFIG_32BIT)
+ if (strcmp(mmu_type, "riscv,sv32") != 0)
+ return;
+#elif defined(CONFIG_64BIT)
+ if (strcmp(mmu_type, "riscv,sv39") != 0 &&
+ strcmp(mmu_type, "riscv,sv48") != 0)
+ return;
+#endif
+
+ seq_printf(f, "mmu\t\t: %s\n", mmu_type+6);
+}
+
static void *c_start(struct seq_file *m, loff_t *pos)
{
*pos = cpumask_next(*pos - 1, cpu_online_mask);
@@ -78,21 +140,20 @@ static void c_stop(struct seq_file *m, void *v)
static int c_show(struct seq_file *m, void *v)
{
- unsigned long hart_id = (unsigned long)v - 1;
- struct device_node *node = of_get_cpu_node(hart_id, NULL);
+ unsigned long cpu_id = (unsigned long)v - 1;
+ struct device_node *node = of_get_cpu_node(cpuid_to_hartid_map(cpu_id),
+ NULL);
const char *compat, *isa, *mmu;
- seq_printf(m, "hart\t: %lu\n", hart_id);
- if (!of_property_read_string(node, "riscv,isa", &isa)
- && isa[0] == 'r'
- && isa[1] == 'v')
- seq_printf(m, "isa\t: %s\n", isa);
- if (!of_property_read_string(node, "mmu-type", &mmu)
- && !strncmp(mmu, "riscv,", 6))
- seq_printf(m, "mmu\t: %s\n", mmu+6);
+ seq_printf(m, "processor\t: %lu\n", cpu_id);
+ seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+ if (!of_property_read_string(node, "riscv,isa", &isa))
+ print_isa(m, isa);
+ if (!of_property_read_string(node, "mmu-type", &mmu))
+ print_mmu(m, mmu);
if (!of_property_read_string(node, "compatible", &compat)
&& strcmp(compat, "riscv"))
- seq_printf(m, "uarch\t: %s\n", compat);
+ seq_printf(m, "uarch\t\t: %s\n", compat);
seq_puts(m, "\n");
return 0;
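With these changes a /proc/cpuinfo entry takes roughly the following shape (values are illustrative; uarch is only shown when the 'compatible' property is more specific than "riscv"):

	processor	: 0
	hart		: 1
	isa		: rv64imafdc
	mmu		: sv39
	uarch		: sifive,rocket0

Note that 'processor' is the logical CPU index while 'hart' is the physical hart ID; the two no longer have to match.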
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 17011a870044..5493f3228704 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -22,6 +22,9 @@
#include <asm/hwcap.h>
unsigned long elf_hwcap __read_mostly;
+#ifdef CONFIG_FPU
+bool has_fpu __read_mostly;
+#endif
void riscv_fill_hwcap(void)
{
@@ -57,5 +60,17 @@ void riscv_fill_hwcap(void)
for (i = 0; i < strlen(isa); ++i)
elf_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+ /*
+ * We don't support systems with F but without D, so mask F out
+ * here.
+ */
+ if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
+ pr_info("This kernel does not support systems with F but not D\n");
+ elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
+ }
+
pr_info("elf_hwcap is 0x%lx", elf_hwcap);
+
+#ifdef CONFIG_FPU
+ if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
+ has_fpu = true;
+#endif
}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index fa2c08e3c05e..13d4826ab2a1 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -168,7 +168,6 @@ ENTRY(handle_exception)
/* Handle interrupts */
move a0, sp /* pt_regs */
- move a1, s4 /* scause */
tail do_IRQ
1:
/* Exceptions run with interrupts enabled */
@@ -357,93 +356,6 @@ ENTRY(__switch_to)
ret
ENDPROC(__switch_to)
-ENTRY(__fstate_save)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- csrs sstatus, t1
- frcsr t0
- fsd f0, TASK_THREAD_F0_F0(a0)
- fsd f1, TASK_THREAD_F1_F0(a0)
- fsd f2, TASK_THREAD_F2_F0(a0)
- fsd f3, TASK_THREAD_F3_F0(a0)
- fsd f4, TASK_THREAD_F4_F0(a0)
- fsd f5, TASK_THREAD_F5_F0(a0)
- fsd f6, TASK_THREAD_F6_F0(a0)
- fsd f7, TASK_THREAD_F7_F0(a0)
- fsd f8, TASK_THREAD_F8_F0(a0)
- fsd f9, TASK_THREAD_F9_F0(a0)
- fsd f10, TASK_THREAD_F10_F0(a0)
- fsd f11, TASK_THREAD_F11_F0(a0)
- fsd f12, TASK_THREAD_F12_F0(a0)
- fsd f13, TASK_THREAD_F13_F0(a0)
- fsd f14, TASK_THREAD_F14_F0(a0)
- fsd f15, TASK_THREAD_F15_F0(a0)
- fsd f16, TASK_THREAD_F16_F0(a0)
- fsd f17, TASK_THREAD_F17_F0(a0)
- fsd f18, TASK_THREAD_F18_F0(a0)
- fsd f19, TASK_THREAD_F19_F0(a0)
- fsd f20, TASK_THREAD_F20_F0(a0)
- fsd f21, TASK_THREAD_F21_F0(a0)
- fsd f22, TASK_THREAD_F22_F0(a0)
- fsd f23, TASK_THREAD_F23_F0(a0)
- fsd f24, TASK_THREAD_F24_F0(a0)
- fsd f25, TASK_THREAD_F25_F0(a0)
- fsd f26, TASK_THREAD_F26_F0(a0)
- fsd f27, TASK_THREAD_F27_F0(a0)
- fsd f28, TASK_THREAD_F28_F0(a0)
- fsd f29, TASK_THREAD_F29_F0(a0)
- fsd f30, TASK_THREAD_F30_F0(a0)
- fsd f31, TASK_THREAD_F31_F0(a0)
- sw t0, TASK_THREAD_FCSR_F0(a0)
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_save)
-
-ENTRY(__fstate_restore)
- li a2, TASK_THREAD_F0
- add a0, a0, a2
- li t1, SR_FS
- lw t0, TASK_THREAD_FCSR_F0(a0)
- csrs sstatus, t1
- fld f0, TASK_THREAD_F0_F0(a0)
- fld f1, TASK_THREAD_F1_F0(a0)
- fld f2, TASK_THREAD_F2_F0(a0)
- fld f3, TASK_THREAD_F3_F0(a0)
- fld f4, TASK_THREAD_F4_F0(a0)
- fld f5, TASK_THREAD_F5_F0(a0)
- fld f6, TASK_THREAD_F6_F0(a0)
- fld f7, TASK_THREAD_F7_F0(a0)
- fld f8, TASK_THREAD_F8_F0(a0)
- fld f9, TASK_THREAD_F9_F0(a0)
- fld f10, TASK_THREAD_F10_F0(a0)
- fld f11, TASK_THREAD_F11_F0(a0)
- fld f12, TASK_THREAD_F12_F0(a0)
- fld f13, TASK_THREAD_F13_F0(a0)
- fld f14, TASK_THREAD_F14_F0(a0)
- fld f15, TASK_THREAD_F15_F0(a0)
- fld f16, TASK_THREAD_F16_F0(a0)
- fld f17, TASK_THREAD_F17_F0(a0)
- fld f18, TASK_THREAD_F18_F0(a0)
- fld f19, TASK_THREAD_F19_F0(a0)
- fld f20, TASK_THREAD_F20_F0(a0)
- fld f21, TASK_THREAD_F21_F0(a0)
- fld f22, TASK_THREAD_F22_F0(a0)
- fld f23, TASK_THREAD_F23_F0(a0)
- fld f24, TASK_THREAD_F24_F0(a0)
- fld f25, TASK_THREAD_F25_F0(a0)
- fld f26, TASK_THREAD_F26_F0(a0)
- fld f27, TASK_THREAD_F27_F0(a0)
- fld f28, TASK_THREAD_F28_F0(a0)
- fld f29, TASK_THREAD_F29_F0(a0)
- fld f30, TASK_THREAD_F30_F0(a0)
- fld f31, TASK_THREAD_F31_F0(a0)
- fscsr t0
- csrc sstatus, t1
- ret
-ENDPROC(__fstate_restore)
-
-
.section ".rodata"
/* Exception vector table */
ENTRY(excp_vect_table)
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
new file mode 100644
index 000000000000..1defb0618aff
--- /dev/null
+++ b/arch/riscv/kernel/fpu.S
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/asm-offsets.h>
+
+ENTRY(__fstate_save)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ csrs sstatus, t1
+ frcsr t0
+ fsd f0, TASK_THREAD_F0_F0(a0)
+ fsd f1, TASK_THREAD_F1_F0(a0)
+ fsd f2, TASK_THREAD_F2_F0(a0)
+ fsd f3, TASK_THREAD_F3_F0(a0)
+ fsd f4, TASK_THREAD_F4_F0(a0)
+ fsd f5, TASK_THREAD_F5_F0(a0)
+ fsd f6, TASK_THREAD_F6_F0(a0)
+ fsd f7, TASK_THREAD_F7_F0(a0)
+ fsd f8, TASK_THREAD_F8_F0(a0)
+ fsd f9, TASK_THREAD_F9_F0(a0)
+ fsd f10, TASK_THREAD_F10_F0(a0)
+ fsd f11, TASK_THREAD_F11_F0(a0)
+ fsd f12, TASK_THREAD_F12_F0(a0)
+ fsd f13, TASK_THREAD_F13_F0(a0)
+ fsd f14, TASK_THREAD_F14_F0(a0)
+ fsd f15, TASK_THREAD_F15_F0(a0)
+ fsd f16, TASK_THREAD_F16_F0(a0)
+ fsd f17, TASK_THREAD_F17_F0(a0)
+ fsd f18, TASK_THREAD_F18_F0(a0)
+ fsd f19, TASK_THREAD_F19_F0(a0)
+ fsd f20, TASK_THREAD_F20_F0(a0)
+ fsd f21, TASK_THREAD_F21_F0(a0)
+ fsd f22, TASK_THREAD_F22_F0(a0)
+ fsd f23, TASK_THREAD_F23_F0(a0)
+ fsd f24, TASK_THREAD_F24_F0(a0)
+ fsd f25, TASK_THREAD_F25_F0(a0)
+ fsd f26, TASK_THREAD_F26_F0(a0)
+ fsd f27, TASK_THREAD_F27_F0(a0)
+ fsd f28, TASK_THREAD_F28_F0(a0)
+ fsd f29, TASK_THREAD_F29_F0(a0)
+ fsd f30, TASK_THREAD_F30_F0(a0)
+ fsd f31, TASK_THREAD_F31_F0(a0)
+ sw t0, TASK_THREAD_FCSR_F0(a0)
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ lw t0, TASK_THREAD_FCSR_F0(a0)
+ csrs sstatus, t1
+ fld f0, TASK_THREAD_F0_F0(a0)
+ fld f1, TASK_THREAD_F1_F0(a0)
+ fld f2, TASK_THREAD_F2_F0(a0)
+ fld f3, TASK_THREAD_F3_F0(a0)
+ fld f4, TASK_THREAD_F4_F0(a0)
+ fld f5, TASK_THREAD_F5_F0(a0)
+ fld f6, TASK_THREAD_F6_F0(a0)
+ fld f7, TASK_THREAD_F7_F0(a0)
+ fld f8, TASK_THREAD_F8_F0(a0)
+ fld f9, TASK_THREAD_F9_F0(a0)
+ fld f10, TASK_THREAD_F10_F0(a0)
+ fld f11, TASK_THREAD_F11_F0(a0)
+ fld f12, TASK_THREAD_F12_F0(a0)
+ fld f13, TASK_THREAD_F13_F0(a0)
+ fld f14, TASK_THREAD_F14_F0(a0)
+ fld f15, TASK_THREAD_F15_F0(a0)
+ fld f16, TASK_THREAD_F16_F0(a0)
+ fld f17, TASK_THREAD_F17_F0(a0)
+ fld f18, TASK_THREAD_F18_F0(a0)
+ fld f19, TASK_THREAD_F19_F0(a0)
+ fld f20, TASK_THREAD_F20_F0(a0)
+ fld f21, TASK_THREAD_F21_F0(a0)
+ fld f22, TASK_THREAD_F22_F0(a0)
+ fld f23, TASK_THREAD_F23_F0(a0)
+ fld f24, TASK_THREAD_F24_F0(a0)
+ fld f25, TASK_THREAD_F25_F0(a0)
+ fld f26, TASK_THREAD_F26_F0(a0)
+ fld f27, TASK_THREAD_F27_F0(a0)
+ fld f28, TASK_THREAD_F28_F0(a0)
+ fld f29, TASK_THREAD_F29_F0(a0)
+ fld f30, TASK_THREAD_F30_F0(a0)
+ fld f31, TASK_THREAD_F31_F0(a0)
+ fscsr t0
+ csrc sstatus, t1
+ ret
+ENDPROC(__fstate_restore)
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index c4d2c63f9a29..711190d473d4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -47,6 +47,8 @@ ENTRY(_start)
/* Save hart ID and DTB physical address */
mv s0, a0
mv s1, a1
+ la a2, boot_cpu_hartid
+ REG_S a0, (a2)
/* Initialize page tables and relocate to virtual addresses */
la sp, init_thread_union + THREAD_SIZE
@@ -55,7 +57,7 @@ ENTRY(_start)
/* Restore C environment */
la tp, init_task
- sw s0, TASK_TI_CPU(tp)
+ sw zero, TASK_TI_CPU(tp)
la sp, init_thread_union
li a0, ASM_THREAD_SIZE
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 0cfac48a1272..48e6b7db83a1 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -8,6 +8,8 @@
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/seq_file.h>
+#include <asm/smp.h>
/*
* Possible interrupt causes:
@@ -24,12 +26,18 @@
*/
#define INTERRUPT_CAUSE_FLAG (1UL << (__riscv_xlen - 1))
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs, unsigned long cause)
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ show_ipi_stats(p, prec);
+ return 0;
+}
+
+asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
irq_enter();
- switch (cause & ~INTERRUPT_CAUSE_FLAG) {
+ switch (regs->scause & ~INTERRUPT_CAUSE_FLAG) {
case INTERRUPT_CAUSE_TIMER:
riscv_timer_interrupt();
break;
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 5721624886a1..8a5593ff9ff3 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -75,7 +75,6 @@ ENTRY(return_to_handler)
RESTORE_RET_ABI_STATE
jalr a1
ENDPROC(return_to_handler)
-EXPORT_SYMBOL(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index d7c6ca7c95ae..bef19993ea92 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -76,7 +76,9 @@ void show_regs(struct pt_regs *regs)
void start_thread(struct pt_regs *regs, unsigned long pc,
unsigned long sp)
{
- regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
+ regs->sstatus = SR_SPIE;
+ if (has_fpu)
+ regs->sstatus |= SR_FS_INITIAL;
regs->sepc = pc;
regs->sp = sp;
set_fs(USER_DS);
@@ -84,12 +86,14 @@ void start_thread(struct pt_regs *regs, unsigned long pc,
void flush_thread(void)
{
+#ifdef CONFIG_FPU
/*
* Reset FPU context
* frm: round to nearest, ties to even (IEEE default)
* fflags: accrued exceptions cleared
*/
memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+#endif
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9f82a7e34c64..60f1e02eed36 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -28,6 +28,9 @@
enum riscv_regset {
REGSET_X,
+#ifdef CONFIG_FPU
+ REGSET_F,
+#endif
};
static int riscv_gpr_get(struct task_struct *target,
@@ -54,6 +57,45 @@ static int riscv_gpr_set(struct task_struct *target,
return ret;
}
+#ifdef CONFIG_FPU
+static int riscv_fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ int ret;
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr));
+ if (!ret) {
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr) +
+ sizeof(fstate->fcsr));
+ }
+
+ return ret;
+}
+
+static int riscv_fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr));
+ if (!ret) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr) +
+ sizeof(fstate->fcsr));
+ }
+
+ return ret;
+}
+#endif
static const struct user_regset riscv_user_regset[] = {
[REGSET_X] = {
@@ -64,6 +106,16 @@ static const struct user_regset riscv_user_regset[] = {
.get = &riscv_gpr_get,
.set = &riscv_gpr_set,
},
+#ifdef CONFIG_FPU
+ [REGSET_F] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .get = &riscv_fpr_get,
+ .set = &riscv_fpr_set,
+ },
+#endif
};
static const struct user_regset_view riscv_user_native_view = {
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index b2d26d9d8489..2c290e6aaa6e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -81,6 +81,16 @@ EXPORT_SYMBOL(empty_zero_page);
/* The lucky hart to first increment this variable will boot the other cores */
atomic_t hart_lottery;
+unsigned long boot_cpu_hartid;
+
+unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+ [0 ... NR_CPUS-1] = INVALID_HARTID
+};
+
+void __init smp_setup_processor_id(void)
+{
+ cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
#ifdef CONFIG_BLK_DEV_INITRD
static void __init setup_initrd(void)
@@ -227,7 +237,10 @@ void __init setup_arch(char **cmdline_p)
setup_bootmem();
paging_init();
unflatten_device_tree();
+
+#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
+#endif
#ifdef CONFIG_SMP
setup_smp();
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 718d0c984ef0..f9b5e7e352ef 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -37,45 +37,69 @@ struct rt_sigframe {
struct ucontext uc;
};
-static long restore_d_state(struct pt_regs *regs,
- struct __riscv_d_ext_state __user *state)
+#ifdef CONFIG_FPU
+static long restore_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state *sc_fpregs)
{
long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
- if (likely(!err))
- fstate_restore(current, regs);
+ if (unlikely(err))
+ return err;
+
+ fstate_restore(current, regs);
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ u32 value;
+
+ err = __get_user(value, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ if (value != 0)
+ return -EINVAL;
+ }
+
return err;
}
-static long save_d_state(struct pt_regs *regs,
- struct __riscv_d_ext_state __user *state)
+static long save_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state *sc_fpregs)
{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
fstate_save(current, regs);
- return __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ err = __put_user(0, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
}
+#else
+#define save_fp_state(task, regs) (0)
+#define restore_fp_state(task, regs) (0)
+#endif
static long restore_sigcontext(struct pt_regs *regs,
struct sigcontext __user *sc)
{
long err;
- size_t i;
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
- if (unlikely(err))
- return err;
/* Restore the floating-point state. */
- err = restore_d_state(regs, &sc->sc_fpregs.d);
- if (unlikely(err))
- return err;
- /* We support no other extension state at this time. */
- for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++) {
- u32 value;
- err = __get_user(value, &sc->sc_fpregs.q.reserved[i]);
- if (unlikely(err))
- break;
- if (value != 0)
- return -EINVAL;
- }
+ if (has_fpu)
+ err |= restore_fp_state(regs, &sc->sc_fpregs);
return err;
}
@@ -124,14 +148,11 @@ static long setup_sigcontext(struct rt_sigframe __user *frame,
{
struct sigcontext __user *sc = &frame->uc.uc_mcontext;
long err;
- size_t i;
/* sc_regs is structured the same as the start of pt_regs */
err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
/* Save the floating-point state. */
- err |= save_d_state(regs, &sc->sc_fpregs.d);
- /* We support no other extension state at this time. */
- for (i = 0; i < ARRAY_SIZE(sc->sc_fpregs.q.reserved); i++)
- err |= __put_user(0, &sc->sc_fpregs.q.reserved[i]);
+ if (has_fpu)
+ err |= save_fp_state(regs, &sc->sc_fpregs);
return err;
}
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 906fe21ea21b..57b1383e5ef7 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -22,23 +22,44 @@
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
-/* A collection of single bit ipi messages. */
-static struct {
- unsigned long bits ____cacheline_aligned;
-} ipi_data[NR_CPUS] __cacheline_aligned;
-
enum ipi_message_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_MAX
};
+/* A collection of single bit ipi messages. */
+static struct {
+ unsigned long stats[IPI_MAX] ____cacheline_aligned;
+ unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+ int i = -1;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpuid_to_hartid_map(i) == hartid)
+ return i;
+ pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+ BUG();
+ return i;
+}
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+ int cpu;
+
+ for_each_cpu(cpu, in)
+ cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
/* Unsupported */
int setup_profiling_timer(unsigned int multiplier)
{
@@ -48,6 +69,7 @@ int setup_profiling_timer(unsigned int multiplier)
void riscv_software_interrupt(void)
{
unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+ unsigned long *stats = ipi_data[smp_processor_id()].stats;
/* Clear pending IPI */
csr_clear(sip, SIE_SSIE);
@@ -62,11 +84,15 @@ void riscv_software_interrupt(void)
if (ops == 0)
return;
- if (ops & (1 << IPI_RESCHEDULE))
+ if (ops & (1 << IPI_RESCHEDULE)) {
+ stats[IPI_RESCHEDULE]++;
scheduler_ipi();
+ }
- if (ops & (1 << IPI_CALL_FUNC))
+ if (ops & (1 << IPI_CALL_FUNC)) {
+ stats[IPI_CALL_FUNC]++;
generic_smp_call_function_interrupt();
+ }
BUG_ON((ops >> IPI_MAX) != 0);
@@ -78,14 +104,36 @@ void riscv_software_interrupt(void)
static void
send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
{
- int i;
+ int cpuid, hartid;
+ struct cpumask hartid_mask;
+ cpumask_clear(&hartid_mask);
mb();
- for_each_cpu(i, to_whom)
- set_bit(operation, &ipi_data[i].bits);
-
+ for_each_cpu(cpuid, to_whom) {
+ set_bit(operation, &ipi_data[cpuid].bits);
+ hartid = cpuid_to_hartid_map(cpuid);
+ cpumask_set_cpu(hartid, &hartid_mask);
+ }
mb();
- sbi_send_ipi(cpumask_bits(to_whom));
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static const char * const ipi_names[] = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < IPI_MAX; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+ seq_printf(p, " %s\n", ipi_names[i]);
+ }
}
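Through arch_show_interrupts() these counters surface in /proc/interrupts, roughly as below on a two-CPU system (counts are made up for illustration):

	IPI0:       120        98  Rescheduling interrupts
	IPI1:        45        52  Function call interrupts

with one column per online CPU, matching the seq_printf() format in show_ipi_stats() above.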
void arch_send_call_function_ipi_mask(struct cpumask *mask)
@@ -127,7 +175,7 @@ void smp_send_reschedule(int cpu)
void flush_icache_mm(struct mm_struct *mm, bool local)
{
unsigned int cpu;
- cpumask_t others, *mask;
+ cpumask_t others, hmask, *mask;
preempt_disable();
@@ -145,9 +193,11 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
local |= cpumask_empty(&others);
- if (mm != current->active_mm || !local)
- sbi_remote_fence_i(others.bits);
- else {
+ if (mm != current->active_mm || !local) {
+ cpumask_clear(&hmask);
+ riscv_cpuid_to_hartid_mask(&others, &hmask);
+ sbi_remote_fence_i(hmask.bits);
+ } else {
/*
* It's assumed that at least one strongly ordered operation is
* performed on this hart between setting a hart's cpumask bit
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 56abab6a9812..18cda0e8cf94 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -30,6 +30,7 @@
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
@@ -50,25 +51,33 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
void __init setup_smp(void)
{
struct device_node *dn = NULL;
- int hart, im_okay_therefore_i_am = 0;
+ int hart;
+ bool found_boot_cpu = false;
+ int cpuid = 1;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
- hart = riscv_of_processor_hart(dn);
- if (hart >= 0) {
- set_cpu_possible(hart, true);
- set_cpu_present(hart, true);
- if (hart == smp_processor_id()) {
- BUG_ON(im_okay_therefore_i_am);
- im_okay_therefore_i_am = 1;
- }
+ hart = riscv_of_processor_hartid(dn);
+ if (hart < 0)
+ continue;
+
+ if (hart == cpuid_to_hartid_map(0)) {
+ BUG_ON(found_boot_cpu);
+ found_boot_cpu = 1;
+ continue;
}
+
+ cpuid_to_hartid_map(cpuid) = hart;
+ set_cpu_possible(cpuid, true);
+ set_cpu_present(cpuid, true);
+ cpuid++;
}
- BUG_ON(!im_okay_therefore_i_am);
+ BUG_ON(!found_boot_cpu);
}
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
+ int hartid = cpuid_to_hartid_map(cpu);
tidle->thread_info.cpu = cpu;
/*
@@ -79,8 +88,9 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
* the spinning harts that they can continue the boot process.
*/
smp_mb();
- __cpu_up_stack_pointer[cpu] = task_stack_page(tidle) + THREAD_SIZE;
- __cpu_up_task_pointer[cpu] = tidle;
+ WRITE_ONCE(__cpu_up_stack_pointer[hartid],
+ task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
while (!cpu_online(cpu))
cpu_relax();
@@ -100,14 +110,22 @@ asmlinkage void __init smp_callin(void)
struct mm_struct *mm = &init_mm;
/* All kernel threads share the same mm context. */
- atomic_inc(&mm->mm_count);
+ mmgrab(mm);
current->active_mm = mm;
trap_init();
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
+ /*
+ * Remote TLB flushes are ignored while the CPU is offline, so emit
+ * a local TLB flush right now just in case.
+ */
local_flush_tlb_all();
- local_irq_enable();
+ /*
+ * Disable preemption before enabling interrupts, so we don't try to
+ * schedule a CPU that hasn't actually started yet.
+ */
preempt_disable();
+ local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
index 445ec84f9a47..5739bd05d289 100644
--- a/arch/riscv/lib/Makefile
+++ b/arch/riscv/lib/Makefile
@@ -2,6 +2,7 @@ lib-y += delay.o
lib-y += memcpy.o
lib-y += memset.o
lib-y += uaccess.o
-lib-y += tishift.o
+
+lib-$(CONFIG_64BIT) += tishift.o
lib-$(CONFIG_32BIT) += udivdi3.o
diff --git a/arch/riscv/mm/ioremap.c b/arch/riscv/mm/ioremap.c
index 70ef2724cdf6..bd2f2db557cc 100644
--- a/arch/riscv/mm/ioremap.c
+++ b/arch/riscv/mm/ioremap.c
@@ -42,7 +42,7 @@ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size,
/* Page-align mappings */
offset = addr & (~PAGE_MASK);
- addr &= PAGE_MASK;
+ addr -= offset;
size = PAGE_ALIGN(size + offset);
area = get_vm_area_caller(size, VM_IOREMAP, caller);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 039a3417dfc4..8b25e1f45b27 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -783,6 +783,17 @@ config VFIO_CCW
To compile this driver as a module, choose M here: the
module will be called vfio_ccw.
+config VFIO_AP
+ def_tristate n
+ prompt "VFIO support for AP devices"
+ depends on S390_AP_IOMMU && VFIO_MDEV_DEVICE && KVM
+ help
+ This driver grants access to Adjunct Processor (AP) devices
+ via the VFIO mediated device interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vfio_ap.
+
endmenu
menu "Dump support"
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 941d8cc6c9f5..259d1698ac50 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -668,7 +668,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index eb6f75f24208..37fd60c20e22 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -610,7 +610,6 @@ CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index c54cb26eb7f5..812d9498d97b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -44,7 +44,7 @@ struct s390_aes_ctx {
int key_len;
unsigned long fc;
union {
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
struct crypto_cipher *cip;
} fallback;
};
@@ -54,7 +54,7 @@ struct s390_xts_ctx {
u8 pcc_key[32];
int key_len;
unsigned long fc;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct gcm_sg_walk {
@@ -184,14 +184,15 @@ static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- crypto_skcipher_clear_flags(sctx->fallback.blk, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
+ crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(sctx->fallback.blk, key, len);
+ ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_skcipher_get_flags(sctx->fallback.blk) &
+ tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -204,9 +205,9 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
unsigned int ret;
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -223,9 +224,9 @@ static int fallback_blk_enc(struct blkcipher_desc *desc,
unsigned int ret;
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);
- skcipher_request_set_tfm(req, sctx->fallback.blk);
+ skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -306,8 +307,7 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- sctx->fallback.blk = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(sctx->fallback.blk)) {
@@ -323,7 +323,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
{
struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(sctx->fallback.blk);
+ crypto_free_sync_skcipher(sctx->fallback.blk);
}
static struct crypto_alg ecb_aes_alg = {
@@ -453,14 +453,15 @@ static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
unsigned int ret;
- crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
+ crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(xts_ctx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->crt_flags |= crypto_skcipher_get_flags(xts_ctx->fallback) &
+ tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -472,10 +473,10 @@ static int xts_fallback_decrypt(struct blkcipher_desc *desc,
{
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -491,10 +492,10 @@ static int xts_fallback_encrypt(struct blkcipher_desc *desc,
{
struct crypto_blkcipher *tfm = desc->tfm;
struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
unsigned int ret;
- skcipher_request_set_tfm(req, xts_ctx->fallback);
+ skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
@@ -611,8 +612,7 @@ static int xts_fallback_init(struct crypto_tfm *tfm)
const char *name = tfm->__crt_alg->cra_name;
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(xts_ctx->fallback)) {
@@ -627,7 +627,7 @@ static void xts_fallback_exit(struct crypto_tfm *tfm)
{
struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(xts_ctx->fallback);
+ crypto_free_sync_skcipher(xts_ctx->fallback);
}
static struct crypto_alg xts_aes_alg = {
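/*
 * The aes_s390 conversion above follows the generic sync-skcipher fallback
 * pattern: a crypto_sync_skcipher is guaranteed synchronous, so its request
 * may live on the stack. A condensed sketch of that pattern (kernel context
 * assumed; example_fallback_encrypt is a hypothetical helper, not part of
 * this patch). Allocation no longer needs the CRYPTO_ALG_ASYNC mask bit:
 *
 *	fallback = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
 */
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>

static int example_fallback_encrypt(struct crypto_sync_skcipher *fallback,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int nbytes, void *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, fallback);
	int ret;

	skcipher_request_set_sync_tfm(req, fallback);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, iv);

	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe keys/IV from the stack */
	return ret;
}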
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 20add000dd6d..7cb6a52f727d 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -221,7 +221,6 @@ CONFIG_CRYPTO_SALSA20=m
CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_SM4=m
-CONFIG_CRYPTO_SPECK=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
CONFIG_CRYPTO_DEFLATE=m
diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h
index 97db2fba546a..63b46e30b2c3 100644
--- a/arch/s390/include/asm/compat.h
+++ b/arch/s390/include/asm/compat.h
@@ -9,6 +9,8 @@
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
+#include <asm-generic/compat.h>
+
#define __TYPE_IS_PTR(t) (!__builtin_types_compatible_p( \
typeof(0?(__force t)0:0ULL), u64))
@@ -51,34 +53,18 @@
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "s390\0\0\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
typedef struct {
u32 mask;
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 29c940bf8506..d5d24889c3bc 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -44,6 +44,7 @@
#define KVM_REQ_ICPT_OPEREXC KVM_ARCH_REQ(2)
#define KVM_REQ_START_MIGRATION KVM_ARCH_REQ(3)
#define KVM_REQ_STOP_MIGRATION KVM_ARCH_REQ(4)
+#define KVM_REQ_VSIE_RESTART KVM_ARCH_REQ(5)
#define SIGP_CTRL_C 0x80
#define SIGP_CTRL_SCN_MASK 0x3f
@@ -186,6 +187,7 @@ struct kvm_s390_sie_block {
#define ECA_AIV 0x00200000
#define ECA_VX 0x00020000
#define ECA_PROTEXCI 0x00002000
+#define ECA_APIE 0x00000008
#define ECA_SII 0x00000001
__u32 eca; /* 0x004c */
#define ICPT_INST 0x04
@@ -237,7 +239,11 @@ struct kvm_s390_sie_block {
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
- __u8 reservedb0[20]; /* 0x00b0 */
+ __u8 reservedb0[8]; /* 0x00b0 */
+#define HPID_KVM 0x4
+#define HPID_VSIE 0x5
+ __u8 hpid; /* 0x00b8 */
+ __u8 reservedb9[11]; /* 0x00b9 */
__u16 extcpuaddr; /* 0x00c4 */
__u16 eic; /* 0x00c6 */
__u32 reservedc8; /* 0x00c8 */
@@ -255,6 +261,8 @@ struct kvm_s390_sie_block {
__u8 reservede4[4]; /* 0x00e4 */
__u64 tecmc; /* 0x00e8 */
__u8 reservedf0[12]; /* 0x00f0 */
+#define CRYCB_FORMAT_MASK 0x00000003
+#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
__u32 crycbd; /* 0x00fc */
@@ -715,6 +723,7 @@ struct kvm_s390_crypto {
__u32 crycbd;
__u8 aes_kw;
__u8 dea_kw;
+ __u8 apie;
};
#define APCB0_MASK_SIZE 1
@@ -855,6 +864,10 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work);
+void kvm_arch_crypto_clear_masks(struct kvm *kvm);
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm);
+
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index fd79c0d35dc4..a1fbf15d53aa 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -15,6 +15,7 @@
#define __IGNORE_pkey_alloc
#define __IGNORE_pkey_free
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_SYS_ALARM
#define __ARCH_WANT_SYS_GETHOSTNAME
@@ -25,7 +26,6 @@
#define __ARCH_WANT_SYS_IPC
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLD_GETRLIMIT
#define __ARCH_WANT_SYS_OLD_MMAP
@@ -34,6 +34,7 @@
#define __ARCH_WANT_SYS_SIGPROCMASK
# ifdef CONFIG_COMPAT
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME32
# endif
#define __ARCH_WANT_SYS_FORK
#define __ARCH_WANT_SYS_VFORK
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index 9a50f02b9894..16511d97e8dc 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -160,6 +160,8 @@ struct kvm_s390_vm_cpu_subfunc {
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW 1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW 2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW 3
+#define KVM_S390_VM_CRYPTO_ENABLE_APIE 4
+#define KVM_S390_VM_CRYPTO_DISABLE_APIE 5
/* kvm attributes for migration mode */
#define KVM_S390_VM_MIGRATION_STOP 0
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ac5da6b0b862..fe24150ff666 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -40,6 +40,7 @@
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
+#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -844,20 +845,24 @@ void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
kvm_s390_vcpu_block_all(kvm);
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_s390_vcpu_crypto_setup(vcpu);
+ /* recreate the shadow crycb by leaving the VSIE handler */
+ kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
+ }
kvm_s390_vcpu_unblock_all(kvm);
}
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
- if (!test_kvm_facility(kvm, 76))
- return -EINVAL;
-
mutex_lock(&kvm->lock);
switch (attr->attr) {
case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->aes_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
@@ -865,6 +870,10 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
get_random_bytes(
kvm->arch.crypto.crycb->dea_wrapping_key_mask,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
@@ -872,17 +881,39 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.aes_kw = 0;
memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
break;
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
+ if (!test_kvm_facility(kvm, 76)) {
+ mutex_unlock(&kvm->lock);
+ return -EINVAL;
+ }
kvm->arch.crypto.dea_kw = 0;
memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 1;
+ break;
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ if (!ap_instructions_available()) {
+ mutex_unlock(&kvm->lock);
+ return -EOPNOTSUPP;
+ }
+ kvm->arch.crypto.apie = 0;
+ break;
default:
mutex_unlock(&kvm->lock);
return -ENXIO;
@@ -1491,6 +1522,10 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
ret = 0;
break;
+ case KVM_S390_VM_CRYPTO_ENABLE_APIE:
+ case KVM_S390_VM_CRYPTO_DISABLE_APIE:
+ ret = ap_instructions_available() ? 0 : -ENXIO;
+ break;
default:
ret = -ENXIO;
break;
@@ -1992,55 +2027,101 @@ long kvm_arch_vm_ioctl(struct file *filp,
return r;
}
-static int kvm_s390_query_ap_config(u8 *config)
-{
- u32 fcn_code = 0x04000000UL;
- u32 cc = 0;
-
- memset(config, 0, 128);
- asm volatile(
- "lgr 0,%1\n"
- "lgr 2,%2\n"
- ".long 0xb2af0000\n" /* PQAP(QCI) */
- "0: ipm %0\n"
- "srl %0,28\n"
- "1:\n"
- EX_TABLE(0b, 1b)
- : "+r" (cc)
- : "r" (fcn_code), "r" (config)
- : "cc", "0", "2", "memory"
- );
-
- return cc;
-}
-
static int kvm_s390_apxa_installed(void)
{
- u8 config[128];
- int cc;
+ struct ap_config_info info;
- if (test_facility(12)) {
- cc = kvm_s390_query_ap_config(config);
-
- if (cc)
- pr_err("PQAP(QCI) failed with cc=%d", cc);
- else
- return config[0] & 0x40;
+ if (ap_instructions_available()) {
+ if (ap_qci(&info) == 0)
+ return info.apxa;
}
return 0;
}
+/*
+ * The format of the crypto control block (CRYCB) is specified in the 3 low
+ * order bits of the CRYCB designation (CRYCBD) field as follows:
+ * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
+ * AP extended addressing (APXA) facility is installed.
+ * Format 1: The APXA facility is not installed but the MSAX3 facility is.
+ * Format 2: Both the APXA and MSAX3 facilities are installed.
+ */
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
+ /* Clear the CRYCB format bits - i.e., set format 0 by default */
+ kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
+
+ /* Check whether MSAX3 is installed */
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
if (kvm_s390_apxa_installed())
kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
else
kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
+void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
+ unsigned long *aqm, unsigned long *adm)
+{
+ struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
+
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
+ case CRYCB_FORMAT2: /* APCB1 use 256 bits */
+ memcpy(crycb->apcb1.apm, apm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
+ apm[0], apm[1], apm[2], apm[3]);
+ memcpy(crycb->apcb1.aqm, aqm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
+ aqm[0], aqm[1], aqm[2], aqm[3]);
+ memcpy(crycb->apcb1.adm, adm, 32);
+ VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
+ adm[0], adm[1], adm[2], adm[3]);
+ break;
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0: /* Fall through; both use APCB0 */
+ memcpy(crycb->apcb0.apm, apm, 8);
+ memcpy(crycb->apcb0.aqm, aqm, 2);
+ memcpy(crycb->apcb0.adm, adm, 2);
+ VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
+ apm[0], *((unsigned short *)aqm),
+ *((unsigned short *)adm));
+ break;
+ default: /* Cannot happen */
+ break;
+ }
+
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
+
+void kvm_arch_crypto_clear_masks(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ kvm_s390_vcpu_block_all(kvm);
+
+ memset(&kvm->arch.crypto.crycb->apcb0, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb0));
+ memset(&kvm->arch.crypto.crycb->apcb1, 0,
+ sizeof(kvm->arch.crypto.crycb->apcb1));
+
+ VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
+ /* recreate the shadow crycb for each vcpu */
+ kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
+ kvm_s390_vcpu_unblock_all(kvm);
+ mutex_unlock(&kvm->lock);
+}
+EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
+
static u64 kvm_s390_get_initial_cpuid(void)
{
struct cpuid cpuid;
@@ -2052,12 +2133,12 @@ static u64 kvm_s390_get_initial_cpuid(void)
static void kvm_s390_crypto_init(struct kvm *kvm)
{
- if (!test_kvm_facility(kvm, 76))
- return;
-
kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
kvm_s390_set_crycb_format(kvm);
+ if (!test_kvm_facility(kvm, 76))
+ return;
+
/* Enable AES/DEA protected key functions by default */
kvm->arch.crypto.aes_kw = 1;
kvm->arch.crypto.dea_kw = 1;
@@ -2583,17 +2664,25 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
- if (!test_kvm_facility(vcpu->kvm, 76))
+ /*
+ * If the AP instructions are not being interpreted and the MSAX3
+ * facility is not configured for the guest, there is nothing to set up.
+ */
+ if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
return;
+ vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
+ vcpu->arch.sie_block->eca &= ~ECA_APIE;
+
+ if (vcpu->kvm->arch.crypto.apie)
+ vcpu->arch.sie_block->eca |= ECA_APIE;
+ /* Set up protected key support */
if (vcpu->kvm->arch.crypto.aes_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_AES;
if (vcpu->kvm->arch.crypto.dea_kw)
vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
-
- vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
@@ -2685,6 +2774,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
+ vcpu->arch.sie_block->hpid = HPID_KVM;
+
kvm_s390_vcpu_crypto_setup(vcpu);
return rc;
@@ -2768,18 +2859,25 @@ static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
exit_sie(vcpu);
}
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
+{
+ return atomic_read(&vcpu->arch.sie_block->prog20) &
+ (PROG_BLOCK_SIE | PROG_REQUEST);
+}
+
static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}
/*
- * Kick a guest cpu out of SIE and wait until SIE is not running.
+ * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
* If the CPU is not running (e.g. waiting as idle) the function will
* return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
+ kvm_s390_vsie_kick(vcpu);
while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
cpu_relax();
}
@@ -3196,6 +3294,8 @@ retry:
/* nothing to do, just clear the request */
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ /* we left the vsie handler, nothing to do, just clear the request */
+ kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
return 0;
}
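/*
 * The CRYCB format selection added to kvm_s390_set_crycb_format() reduces
 * to a two-bit decision table: format 0 unless MSAX3 (facility 76) is
 * available, then format 1, or format 2 when APXA is also installed.
 * Stand-alone restatement (macros copied from the hunk; a sketch, not the
 * kernel function):
 */
#include <stdio.h>

#define CRYCB_FORMAT_MASK 0x00000003u
#define CRYCB_FORMAT0 0x00000000u
#define CRYCB_FORMAT1 0x00000001u
#define CRYCB_FORMAT2 0x00000003u

static unsigned int pick_format(int msax3, int apxa)
{
	if (!msax3)
		return CRYCB_FORMAT0;
	return apxa ? CRYCB_FORMAT2 : CRYCB_FORMAT1;
}

int main(void)
{
	for (int msax3 = 0; msax3 <= 1; msax3++)
		for (int apxa = 0; apxa <= 1; apxa++)
			printf("msax3=%d apxa=%d -> format bits %u\n",
			       msax3, apxa,
			       pick_format(msax3, apxa) & CRYCB_FORMAT_MASK);
	return 0;
}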
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 981e3ba97461..1f6e36cdce0d 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -290,6 +290,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
+bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index a2b28cd1e3fe..a153257bf7d9 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -135,14 +135,148 @@ static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
atomic_set(&scb_s->cpuflags, newflags);
return 0;
}
+/* Copy to APCB FORMAT1 from APCB FORMAT0 */
+static int setup_apcb10(struct kvm_vcpu *vcpu, struct kvm_s390_apcb1 *apcb_s,
+ unsigned long apcb_o, struct kvm_s390_apcb1 *apcb_h)
+{
+ struct kvm_s390_apcb0 tmp;
-/*
+ if (read_guest_real(vcpu, apcb_o, &tmp, sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ apcb_s->apm[0] = apcb_h->apm[0] & tmp.apm[0];
+ apcb_s->aqm[0] = apcb_h->aqm[0] & tmp.aqm[0] & 0xffff000000000000UL;
+ apcb_s->adm[0] = apcb_h->adm[0] & tmp.adm[0] & 0xffff000000000000UL;
+
+ return 0;
+
+}
+
+/**
+ * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original apcb in the guest2
+ * @apcb_h: pointer to start of apcb in the guest1
+ *
+ * Returns 0 on success and -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o, unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb0)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));
+
+ return 0;
+}
+
+/**
+ * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
+ * @vcpu: pointer to the virtual CPU
+ * @apcb_s: pointer to start of apcb in the shadow crycb
+ * @apcb_o: pointer to start of original guest apcb
+ * @apcb_h: pointer to start of apcb in the host
+ *
+ * Returns 0 on success and -EFAULT on error reading the guest apcb
+ */
+static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
+ unsigned long apcb_o,
+ unsigned long *apcb_h)
+{
+ if (read_guest_real(vcpu, apcb_o, apcb_s,
+ sizeof(struct kvm_s390_apcb1)))
+ return -EFAULT;
+
+ bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));
+
+ return 0;
+}
+
+/**
+ * setup_apcb - Create a shadow copy of the apcb.
+ * @vcpu: pointer to the virtual CPU
+ * @crycb_s: pointer to shadow crycb
+ * @crycb_o: pointer to original guest crycb
+ * @crycb_h: pointer to the host crycb
+ * @fmt_o: format of the original guest crycb.
+ * @fmt_h: format of the host crycb.
+ *
+ * Checks the compatibility between the guest and host crycb and calls the
+ * appropriate copy function.
+ *
+ * Returns 0 on success, or an error number if the guest and host crycbs are incompatible.
+ */
+static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
+ const u32 crycb_o,
+ struct kvm_s390_crypto_cb *crycb_h,
+ int fmt_o, int fmt_h)
+{
+ struct kvm_s390_crypto_cb *crycb;
+
+ crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;
+
+ switch (fmt_o) {
+ case CRYCB_FORMAT2:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
+ return -EACCES;
+ if (fmt_h != CRYCB_FORMAT2)
+ return -EINVAL;
+ return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
+ (unsigned long) &crycb->apcb1,
+ (unsigned long *)&crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ break;
+ case CRYCB_FORMAT0:
+ if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
+ return -EACCES;
+
+ switch (fmt_h) {
+ case CRYCB_FORMAT2:
+ return setup_apcb10(vcpu, &crycb_s->apcb1,
+ (unsigned long) &crycb->apcb0,
+ &crycb_h->apcb1);
+ case CRYCB_FORMAT1:
+ case CRYCB_FORMAT0:
+ return setup_apcb00(vcpu,
+ (unsigned long *) &crycb_s->apcb0,
+ (unsigned long) &crycb->apcb0,
+ (unsigned long *) &crycb_h->apcb0);
+ }
+ }
+ return -EINVAL;
+}
+
+/**
+ * shadow_crycb - Create a shadow copy of the crycb block
+ * @vcpu: a pointer to the virtual CPU
+ * @vsie_page: a pointer to internal data used for the vSIE
+ *
* Create a shadow copy of the crycb block and setup key wrapping, if
* requested for guest 3 and enabled for guest 2.
*
- * We only accept format-1 (no AP in g2), but convert it into format-2
+ * We accept format-1 or format-2, but we convert format-1 into format-2
+ * in the shadow CRYCB.
+ * Using format-2 enables the firmware to choose the right format when
+ * scheduling the SIE.
* There is nothing to do for format-0.
*
+ * This function centralizes the issuing of set_validity_icpt() for all
+ * the subfunctions working on the crycb.
+ *
* Returns: - 0 if shadowed or nothing to do
* - > 0 if control has to be given to guest 2
*/
@@ -154,23 +288,40 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
unsigned long *b1, *b2;
u8 ecb3_flags;
+ int apie_h;
+ int key_msk = test_kvm_facility(vcpu->kvm, 76);
+ int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
+ int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
+ int ret = 0;
scb_s->crycbd = 0;
- if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
- return 0;
- /* format-1 is supported with message-security-assist extension 3 */
- if (!test_kvm_facility(vcpu->kvm, 76))
+
+ apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
+ if (!apie_h && !key_msk)
return 0;
+
+ if (!crycb_addr)
+ return set_validity_icpt(scb_s, 0x0039U);
+
+ if (fmt_o == CRYCB_FORMAT1)
+ if ((crycb_addr & PAGE_MASK) !=
+ ((crycb_addr + 128) & PAGE_MASK))
+ return set_validity_icpt(scb_s, 0x003CU);
+
+ if (apie_h && (scb_o->eca & ECA_APIE)) {
+ ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
+ vcpu->kvm->arch.crypto.crycb,
+ fmt_o, fmt_h);
+ if (ret)
+ goto end;
+ scb_s->eca |= scb_o->eca & ECA_APIE;
+ }
+
/* we may only allow it if enabled for guest 2 */
ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
(ECB3_AES | ECB3_DEA);
if (!ecb3_flags)
- return 0;
-
- if ((crycb_addr & PAGE_MASK) != ((crycb_addr + 128) & PAGE_MASK))
- return set_validity_icpt(scb_s, 0x003CU);
- else if (!crycb_addr)
- return set_validity_icpt(scb_s, 0x0039U);
+ goto end;
/* copy only the wrapping keys */
if (read_guest_real(vcpu, crycb_addr + 72,
@@ -178,8 +329,6 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
return set_validity_icpt(scb_s, 0x0035U);
scb_s->ecb3 |= ecb3_flags;
- scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT1 |
- CRYCB_FORMAT2;
/* xor both blocks in one run */
b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
@@ -187,6 +336,16 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
/* as 56%8 == 0, bitmap_xor won't overwrite any data */
bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);
+end:
+ switch (ret) {
+ case -EINVAL:
+ return set_validity_icpt(scb_s, 0x0020U);
+ case -EFAULT:
+ return set_validity_icpt(scb_s, 0x0035U);
+ case -EACCES:
+ return set_validity_icpt(scb_s, 0x003CU);
+ }
+ scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
return 0;
}
@@ -383,6 +542,8 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (test_kvm_facility(vcpu->kvm, 156))
scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;
+ scb_s->hpid = HPID_VSIE;
+
prepare_ibc(vcpu, vsie_page);
rc = shadow_crycb(vcpu, vsie_page);
out:
@@ -830,7 +991,7 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
int guest_bp_isolation;
- int rc;
+ int rc = 0;
handle_last_fault(vcpu, vsie_page);
@@ -858,7 +1019,18 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
guest_enter_irqoff();
local_irq_enable();
- rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ /*
+ * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
+ * and VCPU requests also hinder the vSIE from running and lead
+ * to an immediate exit. kvm_s390_vsie_kick() has to be used to
+ * also kick the vSIE.
+ */
+ vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
+ barrier();
+ if (!kvm_s390_vcpu_sie_inhibited(vcpu))
+ rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
+ barrier();
+ vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;
local_irq_disable();
guest_exit_irqoff();
@@ -1005,7 +1177,8 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc == -EAGAIN)
rc = 0;
if (rc || scb_s->icptcode || signal_pending(current) ||
- kvm_s390_vcpu_has_irq(vcpu, 0))
+ kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
break;
}
@@ -1122,7 +1295,8 @@ int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
if (unlikely(scb_addr & 0x1ffUL))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0))
+ if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
+ kvm_s390_vcpu_sie_inhibited(vcpu))
return 0;
vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
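/*
 * All the setup_apcb*() variants above enforce one invariant: the shadow
 * APCB grants an AP adapter/queue/domain to the nested guest only if both
 * the guest-2 CRYCB requests it and the host's CRYCB for guest 2 already
 * grants it - a bitwise AND of the two masks. Stand-alone illustration
 * over a single 64-bit mask word (values hypothetical):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t apm_guest = 0x00000000000000f0ULL; /* requested for g3 */
	uint64_t apm_host  = 0x00000000000000c3ULL; /* held by g2 itself */
	uint64_t apm_shadow = apm_guest & apm_host; /* what g3 may use */

	printf("shadow apm: %#018llx\n", (unsigned long long)apm_shadow);
	return 0;
}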
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 911c7ded35f1..1e668b95e0c6 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -907,10 +907,16 @@ static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
pmd_t *pmdp;
BUG_ON(gmap_is_shadow(gmap));
- spin_lock(&gmap->guest_table_lock);
pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
+ if (!pmdp)
+ return NULL;
- if (!pmdp || pmd_none(*pmdp)) {
+ /* without huge pages, there is no need to take the table lock */
+ if (!gmap->mm->context.allow_gmap_hpage_1m)
+ return pmd_none(*pmdp) ? NULL : pmdp;
+
+ spin_lock(&gmap->guest_table_lock);
+ if (pmd_none(*pmdp)) {
spin_unlock(&gmap->guest_table_lock);
return NULL;
}
diff --git a/arch/s390/tools/gen_facilities.c b/arch/s390/tools/gen_facilities.c
index 0c85aedcf9b3..fd788e0f2e5b 100644
--- a/arch/s390/tools/gen_facilities.c
+++ b/arch/s390/tools/gen_facilities.c
@@ -106,6 +106,8 @@ static struct facility_def facility_defs[] = {
.name = "FACILITIES_KVM_CPUMODEL",
.bits = (int[]){
+ 12, /* AP Query Configuration Information */
+ 15, /* AP Facilities Test */
156, /* etoken facility */
-1 /* END */
}
diff --git a/arch/sh/boards/of-generic.c b/arch/sh/boards/of-generic.c
index 26789ad28193..cde370cad4ae 100644
--- a/arch/sh/boards/of-generic.c
+++ b/arch/sh/boards/of-generic.c
@@ -64,7 +64,7 @@ static void sh_of_smp_probe(void)
init_cpu_possible(cpumask_of(0));
- for_each_node_by_type(np, "cpu") {
+ for_each_of_cpu_node(np) {
const __be32 *cell = of_get_property(np, "reg", NULL);
u64 id = -1;
if (cell) id = of_read_number(cell, of_n_addr_cells(np));
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index b36200af9ce7..a99234b61051 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -5,6 +5,7 @@
# include <asm/unistd_64.h>
# endif
+# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
# define __ARCH_WANT_STAT64
@@ -19,7 +20,6 @@
# define __ARCH_WANT_SYS_SOCKETCALL
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETPGRP
-# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
# define __ARCH_WANT_SYS_OLD_UNAME
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index f71ef3729888..316faa0130ba 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,12 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val;
}
-#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg(ptr,x) \
+({ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
void __xchg_called_with_bad_pointer(void);
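/*
 * The sparc64 xchg() rewrite above is the classic GCC statement-expression
 * idiom: the macro yields a properly typed value through a temporary, so a
 * caller that discards the result no longer trips "value computed is not
 * used" warnings from the bare cast. Minimal user-space illustration (the
 * __xchg stand-in here is hypothetical and not atomic):
 */
#include <stdio.h>

static unsigned long __xchg(unsigned long x, void *ptr, int size)
{
	unsigned long old = *(unsigned long *)ptr;

	*(unsigned long *)ptr = x;
	(void)size;
	return old;
}

#define xchg(ptr, x)						\
({	__typeof__(*(ptr)) __ret;				\
	__ret = (__typeof__(*(ptr)))				\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;							\
})

int main(void)
{
	unsigned long v = 1;

	xchg(&v, 2UL);	/* result deliberately ignored: no warning */
	printf("v = %lu\n", v);
	return 0;
}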
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index 4eb51d2dae98..30b1763580b1 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -6,38 +6,23 @@
*/
#include <linux/types.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "sparc\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef s16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_key_t;
-typedef s32 compat_timer_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
typedef u64 compat_u64;
-typedef u32 compat_uptr_t;
-
struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
@@ -47,11 +32,11 @@ struct compat_stat {
__compat_gid_t st_gid;
compat_dev_t st_rdev;
compat_off_t st_size;
- compat_time_t st_atime;
+ old_time32_t st_atime;
compat_ulong_t st_atime_nsec;
- compat_time_t st_mtime;
+ old_time32_t st_mtime;
compat_ulong_t st_mtime_nsec;
- compat_time_t st_ctime;
+ old_time32_t st_ctime;
compat_ulong_t st_ctime_nsec;
compat_off_t st_blksize;
compat_off_t st_blocks;
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index d955c8df62d6..1902db27ff4b 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -24,9 +24,6 @@
#include <linux/atomic.h>
#include <linux/irqdomain.h>
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
-
#define of_compat_cmp(s1, s2, l) strncmp((s1), (s2), (l))
#define of_prop_cmp(s1, s2) strcasecmp((s1), (s2))
#define of_node_cmp(s1, s2) strcmp((s1), (s2))
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index 4ff29b1406a9..b1d4e2e3210f 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \
} while(0)
void synchronize_user_stack(void);
-void fault_in_user_windows(void);
+struct pt_regs;
+void fault_in_user_windows(struct pt_regs *);
#endif /* __SPARC64_SWITCH_TO_64_H */
diff --git a/arch/sparc/include/asm/unistd.h b/arch/sparc/include/asm/unistd.h
index b2a6a955113e..00f87dbd0b17 100644
--- a/arch/sparc/include/asm/unistd.h
+++ b/arch/sparc/include/asm/unistd.h
@@ -21,6 +21,7 @@
#else
#define __NR_time 231 /* Linux sparc32 */
#endif
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_ALARM
@@ -33,7 +34,6 @@
#define __ARCH_WANT_SYS_SOCKETCALL
#define __ARCH_WANT_SYS_FADVISE64
#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_NICE
#define __ARCH_WANT_SYS_OLDUMOUNT
#define __ARCH_WANT_SYS_SIGPENDING
@@ -42,6 +42,7 @@
#define __ARCH_WANT_SYS_IPC
#else
#define __ARCH_WANT_COMPAT_SYS_TIME
+#define __ARCH_WANT_SYS_UTIME32
#define __ARCH_WANT_COMPAT_SYS_SENDFILE
#endif
diff --git a/arch/sparc/include/asm/vdso.h b/arch/sparc/include/asm/vdso.h
index 56836eb01787..59e79d35cd73 100644
--- a/arch/sparc/include/asm/vdso.h
+++ b/arch/sparc/include/asm/vdso.h
@@ -9,8 +9,6 @@ struct vdso_image {
void *data;
unsigned long size; /* Always a multiple of PAGE_SIZE */
- unsigned long tick_patch, tick_patch_len;
-
long sym_vvar_start; /* Negative offset to the vvar area */
};
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 6c086086ca8f..59eaf6227af1 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -36,6 +36,7 @@
#include <linux/sysrq.h>
#include <linux/nmi.h>
#include <linux/context_tracking.h>
+#include <linux/signal.h>
#include <linux/uaccess.h>
#include <asm/page.h>
@@ -521,7 +522,12 @@ static void stack_unaligned(unsigned long sp)
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current);
}
-void fault_in_user_windows(void)
+static const char uwfault32[] = KERN_INFO \
+ "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
+static const char uwfault64[] = KERN_INFO \
+ "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
+
+void fault_in_user_windows(struct pt_regs *regs)
{
struct thread_info *t = current_thread_info();
unsigned long window;
@@ -534,9 +540,9 @@ void fault_in_user_windows(void)
do {
struct reg_window *rwin = &t->reg_window[window];
int winsize = sizeof(struct reg_window);
- unsigned long sp;
+ unsigned long sp, orig_sp;
- sp = t->rwbuf_stkptrs[window];
+ orig_sp = sp = t->rwbuf_stkptrs[window];
if (test_thread_64bit_stack(sp))
sp += STACK_BIAS;
@@ -547,8 +553,16 @@ void fault_in_user_windows(void)
stack_unaligned(sp);
if (unlikely(copy_to_user((char __user *)sp,
- rwin, winsize)))
+ rwin, winsize))) {
+ if (show_unhandled_signals)
+ printk_ratelimited(is_compat_task() ?
+ uwfault32 : uwfault64,
+ current->comm, current->pid,
+ sp, orig_sp,
+ regs->tpc,
+ regs->u_regs[UREG_I7]);
goto barf;
+ }
} while (window--);
}
set_thread_wsaved(0);
@@ -556,8 +570,7 @@ void fault_in_user_windows(void)
barf:
set_thread_wsaved(window + 1);
- user_exit();
- do_exit(SIGILL);
+ force_sig(SIGSEGV, current);
}
asmlinkage long sparc_do_fork(unsigned long clone_flags,
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 4073e2b87dd0..29aa34f11720 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -39,6 +39,7 @@ __handle_preemption:
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_user_windows:
+ add %sp, PTREGS_OFF, %o0
call fault_in_user_windows
661: wrpr %g0, RTRAP_PSTATE, %pstate
/* If userspace is using ADI, it could potentially pass
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index 44d379db3f64..4c5b3fcbed94 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig, current);
return -EINVAL;
}
@@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
get_sigframe(ksig, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size)) {
- do_exit(SIGILL);
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig, current);
return -EINVAL;
}
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index 48366e5eb5b2..e9de1803a22e 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
get_sigframe(ksig, regs, sf_size);
if (invalid_frame_pointer (sf)) {
- do_exit(SIGILL); /* won't return, actually */
+ if (show_unhandled_signals)
+ pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
+ current->comm, current->pid, (unsigned long)sf,
+ regs->tpc, regs->u_regs[UREG_I7]);
+ force_sigsegv(ksig->sig, current);
return -EINVAL;
}
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index f396048a0d68..39822f611c01 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1383,6 +1383,7 @@ int __node_distance(int from, int to)
}
return numa_latency[from][to];
}
+EXPORT_SYMBOL(__node_distance);
static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 7b539ceebe13..55662c3b4513 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -90,16 +90,15 @@ notrace static __always_inline u64 vread_tick(void)
{
u64 ret;
- __asm__ __volatile__("1:\n\t"
- "rd %%tick, %0\n\t"
- ".pushsection .tick_patch, \"a\"\n\t"
- ".word 1b - ., 1f - .\n\t"
- ".popsection\n\t"
- ".pushsection .tick_patch_replacement, \"ax\"\n\t"
- "1:\n\t"
- "rd %%asr24, %0\n\t"
- ".popsection\n"
- : "=r" (ret));
+ __asm__ __volatile__("rd %%tick, %0" : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+ u64 ret;
+
+ __asm__ __volatile__("rd %%asr24, %0" : "=r" (ret));
return ret;
}
#else
@@ -107,16 +106,18 @@ notrace static __always_inline u64 vread_tick(void)
{
register unsigned long long ret asm("o4");
- __asm__ __volatile__("1:\n\t"
- "rd %%tick, %L0\n\t"
- "srlx %L0, 32, %H0\n\t"
- ".pushsection .tick_patch, \"a\"\n\t"
- ".word 1b - ., 1f - .\n\t"
- ".popsection\n\t"
- ".pushsection .tick_patch_replacement, \"ax\"\n\t"
- "1:\n\t"
- "rd %%asr24, %L0\n\t"
- ".popsection\n"
+ __asm__ __volatile__("rd %%tick, %L0\n\t"
+ "srlx %L0, 32, %H0"
+ : "=r" (ret));
+ return ret;
+}
+
+notrace static __always_inline u64 vread_tick_stick(void)
+{
+ register unsigned long long ret asm("o4");
+
+ __asm__ __volatile__("rd %%asr24, %L0\n\t"
+ "srlx %L0, 32, %H0"
: "=r" (ret));
return ret;
}
@@ -132,6 +133,16 @@ notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
return v * vvar->clock.mult;
}
+notrace static __always_inline u64 vgetsns_stick(struct vvar_data *vvar)
+{
+ u64 v;
+ u64 cycles;
+
+ cycles = vread_tick_stick();
+ v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
+ return v * vvar->clock.mult;
+}
+
notrace static __always_inline int do_realtime(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -152,6 +163,26 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
return 0;
}
+notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->wall_time_sec;
+ ns = vvar->wall_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -172,6 +203,26 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
return 0;
}
+notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
+ struct timespec *ts)
+{
+ unsigned long seq;
+ u64 ns;
+
+ do {
+ seq = vvar_read_begin(vvar);
+ ts->tv_sec = vvar->monotonic_time_sec;
+ ns = vvar->monotonic_time_snsec;
+ ns += vgetsns_stick(vvar);
+ ns >>= vvar->clock.shift;
+ } while (unlikely(vvar_read_retry(vvar, seq)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
notrace static int do_realtime_coarse(struct vvar_data *vvar,
struct timespec *ts)
{
@@ -228,6 +279,31 @@ clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
notrace int
+__vdso_clock_gettime_stick(clockid_t clock, struct timespec *ts)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_realtime_stick(vvd, ts);
+ case CLOCK_MONOTONIC:
+ if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
+ break;
+ return do_monotonic_stick(vvd, ts);
+ case CLOCK_REALTIME_COARSE:
+ return do_realtime_coarse(vvd, ts);
+ case CLOCK_MONOTONIC_COARSE:
+ return do_monotonic_coarse(vvd, ts);
+ }
+ /*
+ * Unknown clock ID? Fall back to the syscall.
+ */
+ return vdso_fallback_gettime(clock, ts);
+}
+
+notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
struct vvar_data *vvd = get_vvar_data();
@@ -262,3 +338,36 @@ __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
int
gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
+
+notrace int
+__vdso_gettimeofday_stick(struct timeval *tv, struct timezone *tz)
+{
+ struct vvar_data *vvd = get_vvar_data();
+
+ if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
+ if (likely(tv != NULL)) {
+ union tstv_t {
+ struct timespec ts;
+ struct timeval tv;
+ } *tstv = (union tstv_t *) tv;
+ do_realtime_stick(vvd, &tstv->ts);
+ /*
+ * Assign before dividing to ensure that the division is
+ * done in the type of tv_usec, not tv_nsec.
+ *
+ * There cannot be > 1 billion nsec in a second:
+ * do_realtime() has already distributed such overflow
+ * into tv_sec. So we can assign it to an int safely.
+ */
+ tstv->tv.tv_usec = tstv->ts.tv_nsec;
+ tstv->tv.tv_usec /= 1000;
+ }
+ if (unlikely(tz != NULL)) {
+ /* Avoid memcpy. Some old compilers fail to inline it */
+ tz->tz_minuteswest = vvd->tz_minuteswest;
+ tz->tz_dsttime = vvd->tz_dsttime;
+ }
+ return 0;
+ }
+ return vdso_fallback_gettimeofday(tv, tz);
+}
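/*
 * The *_stick variants added above duplicate the generic vDSO read loop
 * with %asr24 (the system tick register) as the cycle source instead of
 * %tick. The arithmetic in both paths is the standard clocksource
 * conversion; a stand-alone sketch with made-up constants:
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t cycle_last = 1000, cycles = 1250;	/* hypothetical */
	uint64_t mask = ~0ULL, mult = 175, shift = 6;	/* hypothetical */
	uint64_t base_snsec = 42ULL << shift;		/* shifted-ns base */
	uint64_t ns;

	ns = base_snsec + ((cycles - cycle_last) & mask) * mult;
	printf("ns since base = %llu\n", (unsigned long long)(ns >> shift));
	return 0;
}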
diff --git a/arch/sparc/vdso/vdso-layout.lds.S b/arch/sparc/vdso/vdso-layout.lds.S
index ed36d49e1617..d31e57e8a3bb 100644
--- a/arch/sparc/vdso/vdso-layout.lds.S
+++ b/arch/sparc/vdso/vdso-layout.lds.S
@@ -73,9 +73,6 @@ SECTIONS
.text : { *(.text*) } :text =0x90909090,
- .tick_patch : { *(.tick_patch) } :text
- .tick_patch_insns : { *(.tick_patch_insns) } :text
-
/DISCARD/ : {
*(.discard)
*(.discard.*)
diff --git a/arch/sparc/vdso/vdso.lds.S b/arch/sparc/vdso/vdso.lds.S
index f3caa29a331c..629ab6900df7 100644
--- a/arch/sparc/vdso/vdso.lds.S
+++ b/arch/sparc/vdso/vdso.lds.S
@@ -18,8 +18,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vdso2c.h b/arch/sparc/vdso/vdso2c.h
index 4df005cf98c0..60d69acc748f 100644
--- a/arch/sparc/vdso/vdso2c.h
+++ b/arch/sparc/vdso/vdso2c.h
@@ -17,11 +17,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
unsigned long mapping_size;
int i;
unsigned long j;
- ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
- *patch_sec = NULL;
+ ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
ELF(Dyn) *dyn = 0, *dyn_end = 0;
- const char *secstrings;
INT_BITS syms[NSYMS] = {};
ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
@@ -64,18 +62,11 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
}
/* Walk the section table */
- secstrings_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
- GET_BE(&hdr->e_shentsize)*GET_BE(&hdr->e_shstrndx);
- secstrings = raw_addr + GET_BE(&secstrings_hdr->sh_offset);
for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
GET_BE(&hdr->e_shentsize) * i;
if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
symtab_hdr = sh;
-
- if (!strcmp(secstrings + GET_BE(&sh->sh_name),
- ".tick_patch"))
- patch_sec = sh;
}
if (!symtab_hdr)
@@ -142,12 +133,6 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
fprintf(outfile, "\t.data = raw_data,\n");
fprintf(outfile, "\t.size = %lu,\n", mapping_size);
- if (patch_sec) {
- fprintf(outfile, "\t.tick_patch = %lu,\n",
- (unsigned long)GET_BE(&patch_sec->sh_offset));
- fprintf(outfile, "\t.tick_patch_len = %lu,\n",
- (unsigned long)GET_BE(&patch_sec->sh_size));
- }
for (i = 0; i < NSYMS; i++) {
if (required_syms[i].export && syms[i])
fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
diff --git a/arch/sparc/vdso/vdso32/vdso32.lds.S b/arch/sparc/vdso/vdso32/vdso32.lds.S
index 53575ee154c4..218930fdff03 100644
--- a/arch/sparc/vdso/vdso32/vdso32.lds.S
+++ b/arch/sparc/vdso/vdso32/vdso32.lds.S
@@ -17,8 +17,10 @@ VERSION {
global:
clock_gettime;
__vdso_clock_gettime;
+ __vdso_clock_gettime_stick;
gettimeofday;
__vdso_gettimeofday;
+ __vdso_gettimeofday_stick;
local: *;
};
}
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c
index 8874a27d8adc..154fe8adc090 100644
--- a/arch/sparc/vdso/vma.c
+++ b/arch/sparc/vdso/vma.c
@@ -42,24 +42,201 @@ static struct vm_special_mapping vdso_mapping32 = {
struct vvar_data *vvar_data;
-struct tick_patch_entry {
- s32 orig, repl;
+struct vdso_elfinfo32 {
+ Elf32_Ehdr *hdr;
+ Elf32_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
};
-static void stick_patch(const struct vdso_image *image)
+struct vdso_elfinfo64 {
+ Elf64_Ehdr *hdr;
+ Elf64_Sym *dynsym;
+ unsigned long dynsymsize;
+ const char *dynstr;
+ unsigned long text;
+};
+
+struct vdso_elfinfo {
+ union {
+ struct vdso_elfinfo32 elf32;
+ struct vdso_elfinfo64 elf64;
+ } u;
+};
+
+static void *one_section64(struct vdso_elfinfo64 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf64_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections64(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+
+ e->hdr = image->data;
+ e->dynsym = one_section64(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section64(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO64: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf64_Sym *find_sym64(const struct vdso_elfinfo64 *e, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf64_Sym)); i++) {
+ Elf64_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
+
+static int patchsym64(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo64 *e = &_e->u.elf64;
+ Elf64_Sym *osym = find_sym64(e, orig);
+ Elf64_Sym *nsym = find_sym64(e, new);
+
+ if (!nsym || !osym) {
+ pr_err("VDSO64: Missing symbols.\n");
+ return -ENODEV;
+ }
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static void *one_section32(struct vdso_elfinfo32 *e, const char *name,
+ unsigned long *size)
+{
+ const char *snames;
+ Elf32_Shdr *shdrs;
+ unsigned int i;
+
+ shdrs = (void *)e->hdr + e->hdr->e_shoff;
+ snames = (void *)e->hdr + shdrs[e->hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < e->hdr->e_shnum; i++) {
+ if (!strcmp(snames+shdrs[i].sh_name, name)) {
+ if (size)
+ *size = shdrs[i].sh_size;
+ return (void *)e->hdr + shdrs[i].sh_offset;
+ }
+ }
+ return NULL;
+}
+
+static int find_sections32(const struct vdso_image *image, struct vdso_elfinfo *_e)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+
+ e->hdr = image->data;
+ e->dynsym = one_section32(e, ".dynsym", &e->dynsymsize);
+ e->dynstr = one_section32(e, ".dynstr", NULL);
+
+ if (!e->dynsym || !e->dynstr) {
+ pr_err("VDSO32: Missing symbol sections.\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static Elf32_Sym *find_sym32(const struct vdso_elfinfo32 *e, const char *name)
{
- struct tick_patch_entry *p, *p_end;
+ unsigned int i;
+
+ for (i = 0; i < (e->dynsymsize / sizeof(Elf32_Sym)); i++) {
+ Elf32_Sym *s = &e->dynsym[i];
+ if (s->st_name == 0)
+ continue;
+ if (!strcmp(e->dynstr + s->st_name, name))
+ return s;
+ }
+ return NULL;
+}
- p = image->data + image->tick_patch;
- p_end = (void *)p + image->tick_patch_len;
- while (p < p_end) {
- u32 *instr = (void *)&p->orig + p->orig;
- u32 *repl = (void *)&p->repl + p->repl;
+static int patchsym32(struct vdso_elfinfo *_e, const char *orig,
+ const char *new)
+{
+ struct vdso_elfinfo32 *e = &_e->u.elf32;
+ Elf32_Sym *osym = find_sym32(e, orig);
+ Elf32_Sym *nsym = find_sym32(e, new);
- *instr = *repl;
- flushi(instr);
- p++;
+ if (!nsym || !osym) {
+ pr_err("VDSO32: Missing symbols.\n");
+ return -ENODEV;
}
+ osym->st_value = nsym->st_value;
+ osym->st_size = nsym->st_size;
+ osym->st_info = nsym->st_info;
+ osym->st_other = nsym->st_other;
+ osym->st_shndx = nsym->st_shndx;
+
+ return 0;
+}
+
+static int find_sections(const struct vdso_image *image, struct vdso_elfinfo *e,
+ bool elf64)
+{
+ if (elf64)
+ return find_sections64(image, e);
+ else
+ return find_sections32(image, e);
+}
+
+static int patch_one_symbol(struct vdso_elfinfo *e, const char *orig,
+ const char *new_target, bool elf64)
+{
+ if (elf64)
+ return patchsym64(e, orig, new_target);
+ else
+ return patchsym32(e, orig, new_target);
+}
+
+static int stick_patch(const struct vdso_image *image, struct vdso_elfinfo *e, bool elf64)
+{
+ int err;
+
+ err = find_sections(image, e, elf64);
+ if (err)
+ return err;
+
+ err = patch_one_symbol(e,
+ "__vdso_gettimeofday",
+ "__vdso_gettimeofday_stick", elf64);
+ if (err)
+ return err;
+
+ return patch_one_symbol(e,
+ "__vdso_clock_gettime",
+ "__vdso_clock_gettime_stick", elf64);
}
/*
@@ -67,13 +244,19 @@ static void stick_patch(const struct vdso_image *image)
* kernel image.
*/
int __init init_vdso_image(const struct vdso_image *image,
- struct vm_special_mapping *vdso_mapping)
+ struct vm_special_mapping *vdso_mapping, bool elf64)
{
- int i;
+ int cnpages = (image->size) / PAGE_SIZE;
struct page *dp, **dpp = NULL;
- int dnpages = 0;
struct page *cp, **cpp = NULL;
- int cnpages = (image->size) / PAGE_SIZE;
+ struct vdso_elfinfo ei;
+ int i, dnpages = 0;
+
+ if (tlb_type != spitfire) {
+ int err = stick_patch(image, &ei, elf64);
+ if (err)
+ return err;
+ }
/*
* First, the vdso text. This is initialized data, an integral number of
@@ -88,9 +271,6 @@ int __init init_vdso_image(const struct vdso_image *image,
if (!cpp)
goto oom;
- if (tlb_type != spitfire)
- stick_patch(image);
-
for (i = 0; i < cnpages; i++) {
cp = alloc_page(GFP_KERNEL);
if (!cp)
@@ -153,13 +333,13 @@ static int __init init_vdso(void)
{
int err = 0;
#ifdef CONFIG_SPARC64
- err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
+ err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64, true);
if (err)
return err;
#endif
#ifdef CONFIG_COMPAT
- err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
+ err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32, false);
#endif
return err;
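/*
 * stick_patch() no longer rewrites "rd %tick" instructions in place; it
 * edits the vDSO image's .dynsym so the exported names resolve to the
 * *_stick entry points. The core move is a plain symbol-record copy, shown
 * here on two hypothetical Elf64_Sym records:
 */
#include <stdio.h>
#include <elf.h>

int main(void)
{
	Elf64_Sym stick   = { .st_value = 0x940, .st_size = 0x120 };
	Elf64_Sym generic = { .st_value = 0x400, .st_size = 0x80 };

	/* point the generic name at the stick implementation */
	generic.st_value = stick.st_value;
	generic.st_size  = stick.st_size;
	generic.st_info  = stick.st_info;
	generic.st_other = stick.st_other;
	generic.st_shndx = stick.st_shndx;

	printf("generic now resolves to %#lx\n",
	       (unsigned long)generic.st_value);
	return 0;
}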
diff --git a/arch/unicore32/include/uapi/asm/unistd.h b/arch/unicore32/include/uapi/asm/unistd.h
index 65856eaab163..1e8fe5941b8a 100644
--- a/arch/unicore32/include/uapi/asm/unistd.h
+++ b/arch/unicore32/include/uapi/asm/unistd.h
@@ -15,4 +15,5 @@
/* Use the standard ABI for syscalls. */
#include <asm-generic/unistd.h>
+#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index a450ad573dcb..a4b0007a54e1 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -60,9 +60,6 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb/
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb/
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb/
obj-$(CONFIG_CRYPTO_MORUS1280_AVX2) += morus1280-avx2.o
endif
@@ -106,7 +103,7 @@ ifeq ($(avx2_supported),yes)
morus1280-avx2-y := morus1280-avx2-asm.o morus1280-avx2-glue.o
endif
-aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
+aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o
aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index acbe7e8336d8..661f7daf43da 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -102,9 +102,6 @@ asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
-int crypto_fpu_init(void);
-void crypto_fpu_exit(void);
-
#define AVX_GEN2_OPTSIZE 640
#define AVX_GEN4_OPTSIZE 4096
@@ -817,7 +814,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
/* Linearize assoc, if not already linear */
if (req->src->length >= assoclen && req->src->length &&
(!PageHighMem(sg_page(req->src)) ||
- req->src->offset + req->src->length < PAGE_SIZE)) {
+ req->src->offset + req->src->length <= PAGE_SIZE)) {
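+		/*
+		 * Note the <=: assoc data ending exactly on the page
+		 * boundary still lies within the single mapped page, so
+		 * the linear fast path is safe; the old < check needlessly
+		 * forced the copy path for that case.
+		 */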
scatterwalk_start(&assoc_sg_walk, req->src);
assoc = scatterwalk_map(&assoc_sg_walk);
} else {
@@ -1253,22 +1250,6 @@ static struct skcipher_alg aesni_skciphers[] = {
static
struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
-static struct {
- const char *algname;
- const char *drvname;
- const char *basename;
- struct simd_skcipher_alg *simd;
-} aesni_simd_skciphers2[] = {
-#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
- IS_BUILTIN(CONFIG_CRYPTO_PCBC)
- {
- .algname = "pcbc(aes)",
- .drvname = "pcbc-aes-aesni",
- .basename = "fpu(pcbc(__aes-aesni))",
- },
-#endif
-};
-
#ifdef CONFIG_X86_64
static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
unsigned int key_len)
@@ -1422,10 +1403,6 @@ static void aesni_free_simds(void)
for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
aesni_simd_skciphers[i]; i++)
simd_skcipher_free(aesni_simd_skciphers[i]);
-
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
- if (aesni_simd_skciphers2[i].simd)
- simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}
static int __init aesni_init(void)
@@ -1469,13 +1446,9 @@ static int __init aesni_init(void)
#endif
#endif
- err = crypto_fpu_init();
- if (err)
- return err;
-
err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
if (err)
- goto fpu_exit;
+ return err;
err = crypto_register_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
@@ -1499,18 +1472,6 @@ static int __init aesni_init(void)
aesni_simd_skciphers[i] = simd;
}
- for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++) {
- algname = aesni_simd_skciphers2[i].algname;
- drvname = aesni_simd_skciphers2[i].drvname;
- basename = aesni_simd_skciphers2[i].basename;
- simd = simd_skcipher_create_compat(algname, drvname, basename);
- err = PTR_ERR(simd);
- if (IS_ERR(simd))
- continue;
-
- aesni_simd_skciphers2[i].simd = simd;
- }
-
return 0;
unregister_simds:
@@ -1521,8 +1482,6 @@ unregister_skciphers:
ARRAY_SIZE(aesni_skciphers));
unregister_algs:
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-fpu_exit:
- crypto_fpu_exit();
return err;
}
@@ -1533,8 +1492,6 @@ static void __exit aesni_exit(void)
crypto_unregister_skciphers(aesni_skciphers,
ARRAY_SIZE(aesni_skciphers));
crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
-
- crypto_fpu_exit();
}
late_initcall(aesni_init);
diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c
deleted file mode 100644
index 406680476c52..000000000000
--- a/arch/x86/crypto/fpu.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * FPU: Wrapper for blkcipher touching fpu
- *
- * Copyright (c) Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/internal/skcipher.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <asm/fpu/api.h>
-
-struct crypto_fpu_ctx {
- struct crypto_skcipher *child;
-};
-
-static int crypto_fpu_setkey(struct crypto_skcipher *parent, const u8 *key,
- unsigned int keylen)
-{
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
- int err;
-
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int crypto_fpu_encrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
- int err;
-
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, 0, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
-
- kernel_fpu_begin();
- err = crypto_skcipher_encrypt(subreq);
- kernel_fpu_end();
-
- skcipher_request_zero(subreq);
- return err;
-}
-
-static int crypto_fpu_decrypt(struct skcipher_request *req)
-{
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
- int err;
-
- skcipher_request_set_tfm(subreq, child);
- skcipher_request_set_callback(subreq, 0, NULL, NULL);
- skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
- req->iv);
-
- kernel_fpu_begin();
- err = crypto_skcipher_decrypt(subreq);
- kernel_fpu_end();
-
- skcipher_request_zero(subreq);
- return err;
-}
-
-static int crypto_fpu_init_tfm(struct crypto_skcipher *tfm)
-{
- struct skcipher_instance *inst = skcipher_alg_instance(tfm);
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher_spawn *spawn;
- struct crypto_skcipher *cipher;
-
- spawn = skcipher_instance_ctx(inst);
- cipher = crypto_spawn_skcipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
-
- ctx->child = cipher;
-
- return 0;
-}
-
-static void crypto_fpu_exit_tfm(struct crypto_skcipher *tfm)
-{
- struct crypto_fpu_ctx *ctx = crypto_skcipher_ctx(tfm);
-
- crypto_free_skcipher(ctx->child);
-}
-
-static void crypto_fpu_free(struct skcipher_instance *inst)
-{
- crypto_drop_skcipher(skcipher_instance_ctx(inst));
- kfree(inst);
-}
-
-static int crypto_fpu_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_skcipher_spawn *spawn;
- struct skcipher_instance *inst;
- struct crypto_attr_type *algt;
- struct skcipher_alg *alg;
- const char *cipher_name;
- int err;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- if ((algt->type ^ (CRYPTO_ALG_INTERNAL | CRYPTO_ALG_TYPE_SKCIPHER)) &
- algt->mask)
- return -EINVAL;
-
- if (!(algt->mask & CRYPTO_ALG_INTERNAL))
- return -EINVAL;
-
- cipher_name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(cipher_name))
- return PTR_ERR(cipher_name);
-
- inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
-
- spawn = skcipher_instance_ctx(inst);
-
- crypto_set_skcipher_spawn(spawn, skcipher_crypto_instance(inst));
- err = crypto_grab_skcipher(spawn, cipher_name, CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
-
- alg = crypto_skcipher_spawn_alg(spawn);
-
- err = crypto_inst_setname(skcipher_crypto_instance(inst), "fpu",
- &alg->base);
- if (err)
- goto out_drop_skcipher;
-
- inst->alg.base.cra_flags = CRYPTO_ALG_INTERNAL;
- inst->alg.base.cra_priority = alg->base.cra_priority;
- inst->alg.base.cra_blocksize = alg->base.cra_blocksize;
- inst->alg.base.cra_alignmask = alg->base.cra_alignmask;
-
- inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
- inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
- inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);
-
- inst->alg.base.cra_ctxsize = sizeof(struct crypto_fpu_ctx);
-
- inst->alg.init = crypto_fpu_init_tfm;
- inst->alg.exit = crypto_fpu_exit_tfm;
-
- inst->alg.setkey = crypto_fpu_setkey;
- inst->alg.encrypt = crypto_fpu_encrypt;
- inst->alg.decrypt = crypto_fpu_decrypt;
-
- inst->free = crypto_fpu_free;
-
- err = skcipher_register_instance(tmpl, inst);
- if (err)
- goto out_drop_skcipher;
-
-out:
- return err;
-
-out_drop_skcipher:
- crypto_drop_skcipher(spawn);
-out_free_inst:
- kfree(inst);
- goto out;
-}
-
-static struct crypto_template crypto_fpu_tmpl = {
- .name = "fpu",
- .create = crypto_fpu_create,
- .module = THIS_MODULE,
-};
-
-int __init crypto_fpu_init(void)
-{
- return crypto_register_template(&crypto_fpu_tmpl);
-}
-
-void crypto_fpu_exit(void)
-{
- crypto_unregister_template(&crypto_fpu_tmpl);
-}
-
-MODULE_ALIAS_CRYPTO("fpu");
diff --git a/arch/x86/crypto/sha1-mb/Makefile b/arch/x86/crypto/sha1-mb/Makefile
deleted file mode 100644
index 815ded3ba90e..000000000000
--- a/arch/x86/crypto/sha1-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA1_MB) += sha1-mb.o
- sha1-mb-y := sha1_mb.o sha1_mb_mgr_flush_avx2.o \
- sha1_mb_mgr_init_avx2.o sha1_mb_mgr_submit_avx2.o sha1_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb.c b/arch/x86/crypto/sha1-mb/sha1_mb.c
deleted file mode 100644
index b93805664c1d..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb.c
+++ /dev/null
@@ -1,1011 +0,0 @@
-/*
- * Multi buffer SHA1 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha1_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha1_mb_alg_state;
-
-struct sha1_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha1_job_mgr_init)(struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_submit)
- (struct sha1_mb_mgr *state, struct job_sha1 *job);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_flush)
- (struct sha1_mb_mgr *state);
-static asmlinkage struct job_sha1* (*sha1_job_mgr_get_comp_job)
- (struct sha1_mb_mgr *state);
-
-static inline uint32_t sha1_pad(uint8_t padblock[SHA1_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA1_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA1_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA1_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA1_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA1_PADLENGTHFIELD_SIZE;
-
-#if SHA1_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA1_LOG2_BLOCK_SIZE;
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA1_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA1_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA1_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha1_hash_ctx *)sha1_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha1_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha1_hash_ctx
- *sha1_ctx_mgr_get_comp_ctx(struct sha1_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha1_hash_ctx *ctx;
-
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr);
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha1_ctx_mgr_init(struct sha1_ctx_mgr *mgr)
-{
- sha1_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr,
- struct sha1_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA1_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA1_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /*
- * The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha1_hash_ctx *)
- sha1_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha1_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
-{
- struct sha1_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha1_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha1_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * sha1_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha1_mb_init(struct ahash_request *areq)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA1_H0;
- sctx->job.result_digest[1] = SHA1_H1;
- sctx->job.result_digest[2] = SHA1_H2;
- sctx->job.result_digest[3] = SHA1_H3;
- sctx->job.result_digest[4] = SHA1_H4;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 5; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha1_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha1_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha1_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha1_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha1_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha1_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- sha1_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha1_mb_alg_state.alg_cstate);
-
- struct sha1_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha1_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha1_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha1_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha1_hash_ctx));
-
- return 0;
-}
-
-static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha1_mb_areq_alg = {
- .init = sha1_mb_init,
- .update = sha1_mb_update,
- .final = sha1_mb_final,
- .finup = sha1_mb_finup,
- .export = sha1_mb_export,
- .import = sha1_mb_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "__sha1-mb",
- .cra_driver_name = "__intel_sha1-mb",
- .cra_priority = 100,
- /*
- * use ASYNC flag as some buffers in multi-buffer
- * algo may not have completed before hashing thread
- * sleep
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha1_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha1_mb_areq_init_tfm,
- .cra_exit = sha1_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_hash_ctx),
- }
- }
-};
-
-static int sha1_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha1_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha1_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha1_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha1_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha1_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha1_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha1_mb_async_alg = {
- .init = sha1_mb_async_init,
- .update = sha1_mb_async_update,
- .final = sha1_mb_async_final,
- .finup = sha1_mb_async_finup,
- .digest = sha1_mb_async_digest,
- .export = sha1_mb_async_export,
- .import = sha1_mb_async_import,
- .halg = {
- .digestsize = SHA1_DIGEST_SIZE,
- .statesize = sizeof(struct sha1_hash_ctx),
- .base = {
- .cra_name = "sha1",
- .cra_driver_name = "sha1_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA1_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT(sha1_mb_async_alg.halg.base.cra_list),
- .cra_init = sha1_mb_async_init_tfm,
- .cra_exit = sha1_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha1_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha1_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha1_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha1_hash_ctx *)
- sha1_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha1_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the hash context and then flush time */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha1_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha1_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
-
- sha1_job_mgr_init = sha1_mb_mgr_init_avx2;
- sha1_job_mgr_submit = sha1_mb_mgr_submit_avx2;
- sha1_job_mgr_flush = sha1_mb_mgr_flush_avx2;
- sha1_job_mgr_get_comp_job = sha1_mb_mgr_get_comp_job_avx2;
-
- if (!sha1_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha1_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha1_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr)
- goto err2;
- sha1_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha1_mb_alg_state.flusher = &sha1_mb_flusher;
-
- err = crypto_register_ahash(&sha1_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha1_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha1_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
- return -ENODEV;
-}
-
-static void __exit sha1_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha1_mb_async_alg);
- crypto_unregister_ahash(&sha1_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha1_mb_alg_state.alg_cstate);
-}
-
-module_init(sha1_mb_mod_init);
-module_exit(sha1_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h b/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
deleted file mode 100644
index 9454bd16f9f8..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha1_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-
-/* Hash Constants and Typedefs */
-#define SHA1_DIGEST_LENGTH 5
-#define SHA1_LOG2_BLOCK_SIZE 6
-
-#define SHA1_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha1_ctx_mgr {
- struct sha1_mb_mgr mgr;
-};
-
-/* typedef struct sha1_ctx_mgr sha1_ctx_mgr; */
-
-struct sha1_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha1 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA1_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h b/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
deleted file mode 100644
index 08ad1a9acfd7..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-
-#include <linux/types.h>
-
-#define NUM_SHA1_DIGEST_WORDS 5
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha1 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA1_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA1 out-of-order scheduler */
-
-/* typedef uint32_t sha1_digest_array[5][8]; */
-
-struct sha1_args_x8 {
- uint32_t digest[5][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha1_lane_data {
- struct job_sha1 *job_in_lane;
-};
-
-struct sha1_mb_mgr {
- struct sha1_args_x8 args;
-
- uint32_t lens[8];
-
- /* each byte is index (0...7) of unused lanes */
- uint64_t unused_lanes;
- /* byte 4 is set to FF as a flag */
- struct sha1_lane_data ldata[8];
-};
-
-
-#define SHA1_MB_MGR_NUM_LANES_AVX2 8
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_submit_avx2(struct sha1_mb_mgr *state,
- struct job_sha1 *job);
-struct job_sha1 *sha1_mb_mgr_flush_avx2(struct sha1_mb_mgr *state);
-struct job_sha1 *sha1_mb_mgr_get_comp_job_avx2(struct sha1_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
deleted file mode 100644
index 86688c6e7a25..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,287 +0,0 @@
-/*
- * Header file for multi buffer SHA1 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count in in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. It's arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _SHA1_MB_MGR_DATASTRUCT_ASM_
-#define _SHA1_MB_MGR_DATASTRUCT_ASM_
-
-## START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-## FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-## END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - %%tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-
-#endif
-
-########################################################################
-#### Define constants
-########################################################################
-
-########################################################################
-#### Define SHA1 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
-_LANE_DATA_size = _FIELD_OFFSET
-_LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA1_ARGS_X8
-### name size align
-FIELD _digest, 4*5*8, 16 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
-_SHA1_ARGS_X4_size = _FIELD_OFFSET
-_SHA1_ARGS_X4_align = _STRUCT_ALIGN
-_SHA1_ARGS_X8_size = _FIELD_OFFSET
-_SHA1_ARGS_X8_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA1_ARGS_X4_size, _SHA1_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
-_MB_MGR_size = _FIELD_OFFSET
-_MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA1 structure
-########################################################################
-
-START_FIELDS # JOB_SHA1
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 4, 4 # length in bytes
-FIELD _result_digest, 5*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
-_JOB_SHA1_size = _FIELD_OFFSET
-_JOB_SHA1_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7cfba738f104..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Flush routine for SHA1 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 arg1
-
-#define extra_blocks arg2
-#define p arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha1_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha1_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
- # state and idx are intact
-
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state , idx, 4) , %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- mov tmp2_w, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha1_mb_mgr_flush_avx2)
-
-
-#################################################################
-
-.align 16
-ENTRY(sha1_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), tmp2_w
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl tmp2_w, _result_digest+1*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha1_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
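
Two tricks in the flush routine above are worth spelling out. First, flush
bails out early only when every lane is free (bit 32+3 of unused_lanes set);
otherwise it clones a live lane's data pointer into each idle lane and parks
that lane's length at 0xFFFFFFFF, so sha1_x8_avx2 can safely run all eight
lanes while the idle ones never win the minimum scan. Second, because each
_lens entry packs (blocks << 4) | lane, a single unsigned minimum over the
eight entries yields both the shortest length and the lane that holds it; the
vpminud/vpalignr ladder computes that minimum in log2(8) vector steps. A
scalar model of the reduction (hypothetical helper, illustration only):

    #include <stdint.h>

    /* Same result the vpminud/vpalignr ladder leaves in the low dword of
     * xmm2: the minimum packed (blocks << 4) | lane value across all lanes.
     */
    static uint32_t min_len_lane(const uint32_t lens[8])
    {
            uint32_t m = lens[0];

            for (int i = 1; i < 8; i++)
                    if (lens[i] < m)
                            m = lens[i];
            return m;   /* lane = m & 0xF (and $0xF); blocks = m >> 4 (shr $4) */
    }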
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
deleted file mode 100644
index d2add0d35f43..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Initialization code for multi buffer SHA1 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha1_mb_mgr.h"
-
-void sha1_mb_mgr_init_avx2(struct sha1_mb_mgr *state)
-{
- unsigned int j;
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
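
The magic constant 0xF76543210 above is a stack of free-lane numbers packed
four bits apiece, capped by the 0xF sentinel: submit pops a lane off the
bottom nibble, job completion pushes it back, and once all eight lanes are
popped only the sentinel remains. Hence bit 32+3 set means "all lanes free"
and a value of exactly 0xF means "all lanes busy". A hypothetical C rendering
of the two operations the assembler performs inline:

    #include <stdint.h>

    static unsigned int pop_lane(uint64_t *unused_lanes)
    {
            unsigned int lane = *unused_lanes & 0xF;     /* and $0xF, lane */
            *unused_lanes >>= 4;                         /* shr $4, unused_lanes */
            return lane;
    }

    static void push_lane(uint64_t *unused_lanes, unsigned int lane)
    {
            *unused_lanes = (*unused_lanes << 4) | lane; /* shl $4; or idx */
    }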
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
deleted file mode 100644
index 7a93b1c0d69a..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA1 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-
-.extern sha1_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha1_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha1_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA1 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha1_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- mov _result_digest+1*16(job), DWORD_tmp
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state , lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state , lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state , lane, 4)
- movl DWORD_tmp, _args_digest+4*32(state , lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min value in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha1_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
- movl _args_digest+4*32(state, idx, 4), DWORD_tmp
-
- vmovdqu %xmm0, _result_digest(job_rax)
- movl DWORD_tmp, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha1_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
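
The submit path above is the mirror image of flush: it pops a free lane,
parks the job there, and returns NULL until every lane is occupied, at which
point it falls through to the same min-length scan and sha1_x8_avx2 call. A
hypothetical C condensation of the bookkeeping, reusing the struct and
pop_lane sketches shown earlier (the transposed digest copy-in is elided):

    static int submit_parks_job(struct sha1_mb_mgr *state,
                                struct job_sha1 *job)
    {
            unsigned int lane = pop_lane(&state->unused_lanes);

            job->status = STS_BEING_PROCESSED;  /* per the datastruct header */
            state->ldata[lane].job_in_lane = job;
            state->lens[lane] = (job->len << 4) | lane; /* shl $4; or lane, len */
            state->args.data_ptr[lane] = job->buffer;

            /* cmp $0xF, unused_lanes; jne return_null: only a full batch
             * (sentinel alone left) proceeds to hash.
             */
            return state->unused_lanes == 0xF;
    }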
diff --git a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S b/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
deleted file mode 100644
index 20f77aa633de..000000000000
--- a/arch/x86/crypto/sha1-mb/sha1_x8_avx2.S
+++ /dev/null
@@ -1,492 +0,0 @@
-/*
- * Multi-buffer SHA1 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * James Guilford <james.guilford@intel.com>
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2014 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha1_mb_mgr_datastruct.S"
-
-## code to compute 8-lane SHA1 using AVX2 (256-bit ymm registers)
-## outer calling routine takes care of save and restore of XMM registers
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-##
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers ymm0-15
-
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
-##
-## Magic functions defined in FIPS 180-1
-##
-# macro MAGIC_F0 F,B,C,D,T ## F = (D ^ (B & (C ^ D)))
-.macro MAGIC_F0 regF regB regC regD regT
- vpxor \regD, \regC, \regF
- vpand \regB, \regF, \regF
- vpxor \regD, \regF, \regF
-.endm
-
-# macro MAGIC_F1 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F1 regF regB regC regD regT
- vpxor \regC, \regD, \regF
- vpxor \regB, \regF, \regF
-.endm
-
-# macro MAGIC_F2 F,B,C,D,T ## F = ((B & C) | (B & D) | (C & D))
-.macro MAGIC_F2 regF regB regC regD regT
- vpor \regC, \regB, \regF
- vpand \regC, \regB, \regT
- vpand \regD, \regF, \regF
- vpor \regT, \regF, \regF
-.endm
-
-# macro MAGIC_F3 F,B,C,D,T ## F = (B ^ C ^ D)
-.macro MAGIC_F3 regF regB regC regD regT
- MAGIC_F1 \regF,\regB,\regC,\regD,\regT
-.endm
-
-# PROLD reg, imm, tmp
-.macro PROLD reg imm tmp
- vpsrld $(32-\imm), \reg, \tmp
- vpslld $\imm, \reg, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro PROLD_nd reg imm tmp src
- vpsrld $(32-\imm), \src, \tmp
- vpslld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-.macro SHA1_STEP_00_15 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- vpaddd \memW*32(%rsp), \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF, \regB, \regC, \regD, \regT
- PROLD \regB, 30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-.macro SHA1_STEP_16_79 regA regB regC regD regE regT regF memW immCNT MAGIC
- vpaddd \immCNT, \regE, \regE
- offset = ((\memW - 14) & 15) * 32
- vmovdqu offset(%rsp), W14
- vpxor W14, W16, W16
- offset = ((\memW - 8) & 15) * 32
- vpxor offset(%rsp), W16, W16
- offset = ((\memW - 3) & 15) * 32
- vpxor offset(%rsp), W16, W16
- vpsrld $(32-1), W16, \regF
- vpslld $1, W16, W16
- vpor W16, \regF, \regF
-
- ROTATE_W
-
- offset = ((\memW - 0) & 15) * 32
- vmovdqu \regF, offset(%rsp)
- vpaddd \regF, \regE, \regE
- PROLD_nd \regT, 5, \regF, \regA
- vpaddd \regT, \regE, \regE
- \MAGIC \regF,\regB,\regC,\regD,\regT ## FUN = MAGIC_Fi(B,C,D)
- PROLD \regB,30, \regT
- vpaddd \regF, \regE, \regE
-.endm
-
-########################################################################
-########################################################################
-########################################################################
-
-## FRAMESZ plus pushes must be an odd multiple of 8
-YMM_SAVE = (15-15)*32
-FRAMESZ = 32*16 + YMM_SAVE
-_YMM = FRAMESZ - YMM_SAVE
-
-#define VMOVPS vmovups
-
-IDX = %rax
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = %rcx
-arg1 = %rdi
-arg2 = %rsi
-RSP_SAVE = %rdx
-
-# ymm0 A
-# ymm1 B
-# ymm2 C
-# ymm3 D
-# ymm4 E
-# ymm5 F AA
-# ymm6 T0 BB
-# ymm7 T1 CC
-# ymm8 T2 DD
-# ymm9 T3 EE
-# ymm10 T4 TMP
-# ymm11 T5 FUN
-# ymm12 T6 K
-# ymm13 T7 W14
-# ymm14 T8 W15
-# ymm15 T9 W16
-
-
-A = %ymm0
-B = %ymm1
-C = %ymm2
-D = %ymm3
-E = %ymm4
-F = %ymm5
-T0 = %ymm6
-T1 = %ymm7
-T2 = %ymm8
-T3 = %ymm9
-T4 = %ymm10
-T5 = %ymm11
-T6 = %ymm12
-T7 = %ymm13
-T8 = %ymm14
-T9 = %ymm15
-
-AA = %ymm5
-BB = %ymm6
-CC = %ymm7
-DD = %ymm8
-EE = %ymm9
-TMP = %ymm10
-FUN = %ymm11
-K = %ymm12
-W14 = %ymm13
-W15 = %ymm14
-W16 = %ymm15
-
-.macro ROTATE_ARGS
- TMP_ = E
- E = D
- D = C
- C = B
- B = A
- A = TMP_
-.endm
-
-.macro ROTATE_W
-TMP_ = W16
-W16 = W15
-W15 = W14
-W14 = TMP_
-.endm
-
-# 8 streams x 5 32bit words per digest x 4 bytes per word
-#define DIGEST_SIZE (8*5*4)
-
-.align 32
-
-# void sha1_x8_avx2(SHA1_ARGS_X8 *args, UINT32 size_in_blocks)
-# arg 1 : pointer to SHA1_ARGS_X8 structure: transposed digests plus
-#         array[8] of pointers to input data
-# arg 2 : size (in blocks) ;; assumed to be >= 1
-#
-ENTRY(sha1_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- #save rsp
- mov %rsp, RSP_SAVE
- sub $FRAMESZ, %rsp
-
- #align rsp to 32 Bytes
- and $~0x1F, %rsp
-
- ## Initialize digests
- vmovdqu 0*32(arg1), A
- vmovdqu 1*32(arg1), B
- vmovdqu 2*32(arg1), C
- vmovdqu 3*32(arg1), D
- vmovdqu 4*32(arg1), E
-
- ## transpose input onto stack
- mov _data_ptr+0*8(arg1),inp0
- mov _data_ptr+1*8(arg1),inp1
- mov _data_ptr+2*8(arg1),inp2
- mov _data_ptr+3*8(arg1),inp3
- mov _data_ptr+4*8(arg1),inp4
- mov _data_ptr+5*8(arg1),inp5
- mov _data_ptr+6*8(arg1),inp6
- mov _data_ptr+7*8(arg1),inp7
-
- xor IDX, IDX
-lloop:
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), F
- I=0
-.rep 2
- VMOVPS (inp0, IDX), T0
- VMOVPS (inp1, IDX), T1
- VMOVPS (inp2, IDX), T2
- VMOVPS (inp3, IDX), T3
- VMOVPS (inp4, IDX), T4
- VMOVPS (inp5, IDX), T5
- VMOVPS (inp6, IDX), T6
- VMOVPS (inp7, IDX), T7
-
- TRANSPOSE8 T0, T1, T2, T3, T4, T5, T6, T7, T8, T9
- vpshufb F, T0, T0
- vmovdqu T0, (I*8)*32(%rsp)
- vpshufb F, T1, T1
- vmovdqu T1, (I*8+1)*32(%rsp)
- vpshufb F, T2, T2
- vmovdqu T2, (I*8+2)*32(%rsp)
- vpshufb F, T3, T3
- vmovdqu T3, (I*8+3)*32(%rsp)
- vpshufb F, T4, T4
- vmovdqu T4, (I*8+4)*32(%rsp)
- vpshufb F, T5, T5
- vmovdqu T5, (I*8+5)*32(%rsp)
- vpshufb F, T6, T6
- vmovdqu T6, (I*8+6)*32(%rsp)
- vpshufb F, T7, T7
- vmovdqu T7, (I*8+7)*32(%rsp)
- add $32, IDX
- I = (I+1)
-.endr
- # save old digests
- vmovdqu A,AA
- vmovdqu B,BB
- vmovdqu C,CC
- vmovdqu D,DD
- vmovdqu E,EE
-
-##
-## perform 0-79 steps
-##
- vmovdqu K00_19(%rip), K
-## do rounds 0...15
- I = 0
-.rep 16
- SHA1_STEP_00_15 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 16...19
- vmovdqu ((16 - 16) & 15) * 32 (%rsp), W16
- vmovdqu ((16 - 15) & 15) * 32 (%rsp), W15
-.rep 4
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F0
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 20...39
- vmovdqu K20_39(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F1
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 40...59
- vmovdqu K40_59(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F2
- ROTATE_ARGS
- I = (I+1)
-.endr
-
-## do rounds 60...79
- vmovdqu K60_79(%rip), K
-.rep 20
- SHA1_STEP_16_79 A,B,C,D,E, TMP,FUN, I, K, MAGIC_F3
- ROTATE_ARGS
- I = (I+1)
-.endr
-
- vpaddd AA,A,A
- vpaddd BB,B,B
- vpaddd CC,C,C
- vpaddd DD,D,D
- vpaddd EE,E,E
-
- sub $1, arg2
- jne lloop
-
- # write out digests
- vmovdqu A, 0*32(arg1)
- vmovdqu B, 1*32(arg1)
- vmovdqu C, 2*32(arg1)
- vmovdqu D, 3*32(arg1)
- vmovdqu E, 4*32(arg1)
-
- # update input pointers
- add IDX, inp0
- add IDX, inp1
- add IDX, inp2
- add IDX, inp3
- add IDX, inp4
- add IDX, inp5
- add IDX, inp6
- add IDX, inp7
- mov inp0, _data_ptr (arg1)
- mov inp1, _data_ptr + 1*8(arg1)
- mov inp2, _data_ptr + 2*8(arg1)
- mov inp3, _data_ptr + 3*8(arg1)
- mov inp4, _data_ptr + 4*8(arg1)
- mov inp5, _data_ptr + 5*8(arg1)
- mov inp6, _data_ptr + 6*8(arg1)
- mov inp7, _data_ptr + 7*8(arg1)
-
- ################
- ## Postamble
-
- mov RSP_SAVE, %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha1_x8_avx2)
-
-
-.section .rodata.cst32.K00_19, "aM", @progbits, 32
-.align 32
-K00_19:
-.octa 0x5A8279995A8279995A8279995A827999
-.octa 0x5A8279995A8279995A8279995A827999
-
-.section .rodata.cst32.K20_39, "aM", @progbits, 32
-.align 32
-K20_39:
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-.octa 0x6ED9EBA16ED9EBA16ED9EBA16ED9EBA1
-
-.section .rodata.cst32.K40_59, "aM", @progbits, 32
-.align 32
-K40_59:
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-.octa 0x8F1BBCDC8F1BBCDC8F1BBCDC8F1BBCDC
-
-.section .rodata.cst32.K60_79, "aM", @progbits, 32
-.align 32
-K60_79:
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-.octa 0xCA62C1D6CA62C1D6CA62C1D6CA62C1D6
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
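
The four MAGIC_F macros above are the FIPS 180-1 round functions applied to
eight lanes at once, PROLD is a two-shift rotate-left, and SHA1_STEP_16_79
keeps a 16-entry message-schedule ring on the stack to compute
W[t] = rotl1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]). A scalar reference for
checking the vector code against the standard (hypothetical helper names,
illustration only):

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, unsigned int r)  /* PROLD, 1 <= r <= 31 */
    {
            return (x << r) | (x >> (32 - r));
    }

    static uint32_t magic_f0(uint32_t b, uint32_t c, uint32_t d)
    {
            return d ^ (b & (c ^ d));       /* rounds  0..19, K00_19 = 0x5A827999 */
    }

    static uint32_t magic_f1(uint32_t b, uint32_t c, uint32_t d)
    {
            return b ^ c ^ d;               /* rounds 20..39, K20_39 = 0x6ED9EBA1 */
    }

    static uint32_t magic_f2(uint32_t b, uint32_t c, uint32_t d)
    {
            return (b & c) | (d & (b | c)); /* rounds 40..59, K40_59 = 0x8F1BBCDC */
    }

    /* MAGIC_F3 (rounds 60..79, K60_79 = 0xCA62C1D6) reuses magic_f1. */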
diff --git a/arch/x86/crypto/sha256-mb/Makefile b/arch/x86/crypto/sha256-mb/Makefile
deleted file mode 100644
index 53ad6e7db747..000000000000
--- a/arch/x86/crypto/sha256-mb/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-OBJECT_FILES_NON_STANDARD := y
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA256_MB) += sha256-mb.o
- sha256-mb-y := sha256_mb.o sha256_mb_mgr_flush_avx2.o \
- sha256_mb_mgr_init_avx2.o sha256_mb_mgr_submit_avx2.o sha256_x8_avx2.o
-endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb.c b/arch/x86/crypto/sha256-mb/sha256_mb.c
deleted file mode 100644
index 97c5fc43e115..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb.c
+++ /dev/null
@@ -1,1013 +0,0 @@
-/*
- * Multi buffer SHA256 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha256_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha256_mb_alg_state;
-
-struct sha256_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha256_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha256_job_mgr_init)(struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_submit)
- (struct sha256_mb_mgr *state, struct job_sha256 *job);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_flush)
- (struct sha256_mb_mgr *state);
-static asmlinkage struct job_sha256* (*sha256_job_mgr_get_comp_job)
- (struct sha256_mb_mgr *state);
-
-inline uint32_t sha256_pad(uint8_t padblock[SHA256_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA256_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA256_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA256_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA256_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA256_PADLENGTHFIELD_SIZE;
-
-#if SHA256_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA256_LOG2_BLOCK_SIZE;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_resubmit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA256_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA256_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA256_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha256_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha256_hash_ctx
- *sha256_ctx_mgr_get_comp_ctx(struct sha256_ctx_mgr *mgr)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user. If it is not ready, resubmit the job to finish processing.
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to be
- * returned. Otherwise, all jobs currently being managed by the
- * hash_ctx_mgr still need processing.
- */
- struct sha256_hash_ctx *ctx;
-
- ctx = (struct sha256_hash_ctx *) sha256_job_mgr_get_comp_job(&mgr->mgr);
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static void sha256_ctx_mgr_init(struct sha256_ctx_mgr *mgr)
-{
- sha256_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_submit(struct sha256_ctx_mgr *mgr,
- struct sha256_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- return ctx;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- return ctx;
- }
-
- /* If we made it here, there was no error during this call to submit */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently
- * being processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA256_BLOCK_SIZE) {
- /*
- * Compute how many bytes to copy from user buffer into
- * extra block
- */
- uint32_t copy_len = SHA256_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy(
- &ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block */
- assert(ctx->partial_block_buffer_length <= SHA256_BLOCK_SIZE);
-
- /*
- * If the extra block buffer contains exactly 1 block,
- * it can be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA256_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- return sha256_ctx_mgr_resubmit(mgr, ctx);
-}
-
-static struct sha256_hash_ctx *sha256_ctx_mgr_flush(struct sha256_ctx_mgr *mgr)
-{
- struct sha256_hash_ctx *ctx;
-
- while (1) {
- ctx = (struct sha256_hash_ctx *)
- sha256_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned 0, there are no more jobs in flight. */
- if (!ctx)
- return NULL;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha256_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha256_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha256_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- return ctx;
- }
-}
-
-static int sha256_mb_init(struct ahash_request *areq)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA256_H0;
- sctx->job.result_digest[1] = SHA256_H1;
- sctx->job.result_digest[2] = SHA256_H2;
- sctx->job.result_digest[3] = SHA256_H3;
- sctx->job.result_digest[4] = SHA256_H4;
- sctx->job.result_digest[5] = SHA256_H5;
- sctx->job.result_digest[6] = SHA256_H6;
- sctx->job.result_digest[7] = SHA256_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha256_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha256_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be32 *dst = (__be32 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be32(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha256_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha256_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha256_ctx_mgr_flush(cstate->mgr);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha256_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
-
- /* remove from work list */
- spin_lock(&cstate->work_lock);
- list_del(&rctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock(&cstate->work_lock);
- list_del(&req_ctx->waiter);
- spin_unlock(&cstate->work_lock);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha256_ctx_mgr_get_comp_ctx(cstate->mgr);
- }
-
- return 0;
-}
-
-static void sha256_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock(&cstate->work_lock);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock(&cstate->work_lock);
-
- mcryptd_arm_flusher(cstate, delay);
-}
-
-static int sha256_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx, areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- sha256_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha256_mb_alg_state.alg_cstate);
-
- struct sha256_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha256_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha256_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha256_ctx_mgr_submit(cstate->mgr, sha_ctx, &data, 0,
- HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha256_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha256_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha256_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha256-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha256_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha256_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha256_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha256_hash_ctx));
-
- return 0;
-}
-
-static void sha256_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha256_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha256_mb_areq_alg = {
- .init = sha256_mb_init,
- .update = sha256_mb_update,
- .final = sha256_mb_final,
- .finup = sha256_mb_finup,
- .export = sha256_mb_export,
- .import = sha256_mb_import,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "__sha256-mb",
- .cra_driver_name = "__intel_sha256-mb",
- .cra_priority = 100,
-				/*
-				 * Use the ASYNC flag since some buffers in the
-				 * multi-buffer algorithm may not have completed
-				 * before the hashing thread sleeps.
-				 */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha256_mb_areq_init_tfm,
- .cra_exit = sha256_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_hash_ctx),
- }
- }
-};
-
-static int sha256_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha256_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha256_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha256_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha256_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha256_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha256_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha256_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha256_mb_async_alg = {
- .init = sha256_mb_async_init,
- .update = sha256_mb_async_update,
- .final = sha256_mb_async_final,
- .finup = sha256_mb_async_finup,
- .export = sha256_mb_async_export,
- .import = sha256_mb_async_import,
- .digest = sha256_mb_async_digest,
- .halg = {
- .digestsize = SHA256_DIGEST_SIZE,
- .statesize = sizeof(struct sha256_hash_ctx),
- .base = {
- .cra_name = "sha256",
- .cra_driver_name = "sha256_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA256_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha256_mb_async_alg.halg.base.cra_list),
- .cra_init = sha256_mb_async_init_tfm,
- .cra_exit = sha256_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha256_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha256_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha256_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha256_hash_ctx *)
- sha256_ctx_mgr_flush(cstate->mgr);
- kernel_fpu_end();
- if (!sha_ctx) {
-			pr_err("sha256_mb error: nothing got flushed for non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
-		/* get the next flush time from the head of the work list */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha256_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha256_mb_alg_state.alg_cstate = alloc_percpu(struct mcryptd_alg_cstate);
- if (!sha256_mb_alg_state.alg_cstate)
- return -ENOMEM;
-
- sha256_job_mgr_init = sha256_mb_mgr_init_avx2;
- sha256_job_mgr_submit = sha256_mb_mgr_submit_avx2;
- sha256_job_mgr_flush = sha256_mb_mgr_flush_avx2;
- sha256_job_mgr_get_comp_job = sha256_mb_mgr_get_comp_job_avx2;
-
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha256_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha256_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr) {
- err = -ENOMEM;
- goto err2;
- }
- sha256_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha256_mb_alg_state.flusher = &sha256_mb_flusher;
-
- err = crypto_register_ahash(&sha256_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha256_mb_async_alg);
- if (err)
- goto err1;
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha256_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
- return err;
-}
-
-static void __exit sha256_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha256_mb_async_alg);
- crypto_unregister_ahash(&sha256_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha256_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha256_mb_alg_state.alg_cstate);
-}
-
-module_init(sha256_mb_mod_init);
-module_exit(sha256_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS_CRYPTO("sha256");
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h b/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
deleted file mode 100644
index 7c432543dc7f..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_ctx.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Header file for multi buffer SHA256 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha256_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-
-#ifdef HASH_CTX_DEBUG
- HASH_CTX_ERROR_DEBUG_DIGEST_MISMATCH = -4,
-#endif
-};
-
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
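
These helpers form a small status-inspection API over the context defined further down this header. An illustrative (hypothetical) caller:

	/* Illustrative only: a freshly initialized context reports
	 * COMPLETE status and no error, which is what the submit path
	 * checks before accepting a new job.
	 */
	static int ctx_ready(struct sha256_hash_ctx *ctx)
	{
		hash_ctx_init(ctx);

		return hash_ctx_error(ctx) == HASH_CTX_ERROR_NONE &&
		       hash_ctx_complete(ctx);	/* true after init */
	}
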
-
-
-/* Hash Constants and Typedefs */
-#define SHA256_DIGEST_LENGTH 8
-#define SHA256_LOG2_BLOCK_SIZE 6
-
-#define SHA256_PADLENGTHFIELD_SIZE 8
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha256_ctx_mgr {
- struct sha256_mb_mgr mgr;
-};
-
-/* typedef struct sha256_ctx_mgr sha256_ctx_mgr; */
-
-struct sha256_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha256 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA256_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h b/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
deleted file mode 100644
index b01ae408c56d..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA256_DIGEST_WORDS 8
-
-enum job_sts { STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha256 {
- u8 *buffer;
- u32 len;
- u32 result_digest[NUM_SHA256_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-/* SHA256 out-of-order scheduler */
-
-/* typedef uint32_t sha8_digest_array[8][8]; */
-
-struct sha256_args_x8 {
- uint32_t digest[8][8];
- uint8_t *data_ptr[8];
-};
-
-struct sha256_lane_data {
- struct job_sha256 *job_in_lane;
-};
-
-struct sha256_mb_mgr {
- struct sha256_args_x8 args;
-
- uint32_t lens[8];
-
- /*
- * Each nibble is the index (0...7) of an unused lane;
- * the ninth nibble is set to 0xF as a sentinel flag.
- */
- uint64_t unused_lanes;
- struct sha256_lane_data ldata[8];
-};
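
Judging from sha256_mb_mgr_init_avx2() and the submit/flush assembly elsewhere in this diff, unused_lanes is a stack of 4-bit lane indices topped by a 0xF sentinel. A C sketch of the push/pop discipline (illustrative, not part of the API):

	#include <stdint.h>

	/* Pop the next free lane (lowest nibble) off the stack. */
	static inline int pop_lane(uint64_t *unused_lanes)
	{
		int lane = *unused_lanes & 0xF;

		*unused_lanes >>= 4;
		return lane;
	}

	/* Push a completed lane back onto the stack. */
	static inline void push_lane(uint64_t *unused_lanes, int lane)
	{
		*unused_lanes = (*unused_lanes << 4) | (unsigned int)lane;
	}
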
-
-
-#define SHA256_MB_MGR_NUM_LANES_AVX2 8
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_submit_avx2(struct sha256_mb_mgr *state,
- struct job_sha256 *job);
-struct job_sha256 *sha256_mb_mgr_flush_avx2(struct sha256_mb_mgr *state);
-struct job_sha256 *sha256_mb_mgr_get_comp_job_avx2(struct sha256_mb_mgr *state);
-
-#endif
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
deleted file mode 100644
index 5c377bac21d0..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Header file for multi buffer SHA256 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# overridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define SZ8 8*SHA256_DIGEST_WORD_SIZE
-#define ROUNDS 64*SZ8
-#define PTR_SZ 8
-#define SHA256_DIGEST_WORD_SIZE 4
-#define MAX_SHA256_LANES 8
-#define SHA256_DIGEST_WORDS 8
-#define SHA256_DIGEST_ROW_SIZE (MAX_SHA256_LANES * SHA256_DIGEST_WORD_SIZE)
-#define SHA256_DIGEST_SIZE (SHA256_DIGEST_ROW_SIZE * SHA256_DIGEST_WORDS)
-#define SHA256_BLK_SZ 64
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-########################################################################
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = \p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name, size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
-
-
-########################################################################
-#### Define SHA256 Out Of Order Data Structures
-########################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-########################################################################
-
-START_FIELDS # SHA256_ARGS_X4
-### name size align
-FIELD _digest, 4*8*8, 4 # transposed digest
-FIELD _data_ptr, 8*8, 8 # array of pointers to data
-END_FIELDS
-
- _SHA256_ARGS_X4_size = _FIELD_OFFSET
- _SHA256_ARGS_X4_align = _STRUCT_ALIGN
- _SHA256_ARGS_X8_size = _FIELD_OFFSET
- _SHA256_ARGS_X8_align = _STRUCT_ALIGN
-
-#######################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA256_ARGS_X4_size, _SHA256_ARGS_X4_align
-FIELD _lens, 4*8, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*8, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-START_FIELDS #STACK_FRAME
-### name size align
-FIELD _data, 16*SZ8, 1 # transposed message schedule
-FIELD _digest, 8*SZ8, 1 # saved transposed digest
-FIELD _ytmp, 4*SZ8, 1
-FIELD _rsp, 8, 1
-END_FIELDS
-
- _STACK_FRAME_size = _FIELD_OFFSET
- _STACK_FRAME_align = _STRUCT_ALIGN
-
-#######################################################################
-
-########################################################################
-#### Define constants
-########################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-########################################################################
-#### Define JOB_SHA256 structure
-########################################################################
-
-START_FIELDS # JOB_SHA256
-
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*4, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA256_size = _FIELD_OFFSET
- _JOB_SHA256_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
deleted file mode 100644
index d2364c55bbde..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,307 +0,0 @@
-/*
- * Flush routine for SHA256 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# Common register definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-#define idx %r8
-#define DWORD_idx %r8d
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-#define tmp2_w %ebx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 %arg1
-
-#define extra_blocks %arg2
-#define p %arg2
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB_SHA256* sha256_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rdi : state
-ENTRY(sha256_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+3) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+3, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
- offset = (_ldata + 4 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne four(%rip), idx
- offset = (_ldata + 5 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne five(%rip), idx
- offset = (_ldata + 6 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne six(%rip), idx
- offset = (_ldata + 7 * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne seven(%rip), idx
-
- # copy the data pointer of valid lane "idx" into every empty lane
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 8
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 4*I)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- vmovdqu _lens+0*16(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqu %xmm0, _lens+0*16(state)
- vmovdqu %xmm1, _lens+1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
-
- mov unused_lanes, _unused_lanes(state)
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha256_mb_mgr_flush_avx2)
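
A note on the packed-length scheme the vpminud sequence relies on: each lens[] slot holds (blocks << 4) | lane, so a single unsigned minimum locates both the shortest job and its lane, and clear_low_nibble masks the lane bits before the vpsubd so only whole-block counts are deducted from every lane. A scalar sketch of the same search (illustrative):

	#include <stdint.h>

	/* Returns the block count of the shortest job and stores its
	 * lane. 0xFFFFFFFF marks an empty lane and never wins.
	 */
	static uint32_t find_min_len(const uint32_t lens[8], int *lane)
	{
		uint32_t best = lens[0];
		int i;

		for (i = 1; i < 8; i++)
			if (lens[i] < best)
				best = lens[i];

		*lane = best & 0xF;	/* low nibble: lane index */
		return best >> 4;	/* upper bits: blocks left */
	}
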
-
-##############################################################################
-
-.align 16
-ENTRY(sha256_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- ## if bit 32+3 is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $(32+3), unused_lanes
- jc .return_null
-
- # Find min length
- vmovdqu _lens(state), %xmm0
- vmovdqu _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- test $~0xF, idx
- jnz .return_null
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state, idx, 4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- offset = (_result_digest + 1*16)
- vmovdqu %xmm1, offset(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha256_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
-.octa 0x000000000000000000000000FFFFFFF0
-
-.section .rodata.cst8, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-two:
-.quad 2
-three:
-.quad 3
-four:
-.quad 4
-five:
-.quad 5
-six:
-.quad 6
-seven:
-.quad 7
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
deleted file mode 100644
index b0c498371e67..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_init_avx2.c
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "sha256_mb_mgr.h"
-
-void sha256_mb_mgr_init_avx2(struct sha256_mb_mgr *state)
-{
- unsigned int j;
-
- state->unused_lanes = 0xF76543210ULL;
- for (j = 0; j < 8; j++) {
- state->lens[j] = 0xFFFFFFFF;
- state->ldata[j].job_in_lane = NULL;
- }
-}
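
As a sanity check on the 0xF76543210ULL seed (user-space, illustrative): popping with the nibble-stack discipline yields lanes 0 through 7 in order, and while all lanes are free the sentinel occupies bits 32-35, which is why the assembly can test bit 35 ("bt $(32+3)") for the all-lanes-empty case.

	#include <assert.h>
	#include <stdint.h>

	static void check_unused_lanes_seed(void)
	{
		uint64_t lanes = 0xF76543210ULL;
		int i;

		assert(lanes & (1ULL << 35));	/* all lanes free */

		for (i = 0; i < 8; i++) {
			assert((int)(lanes & 0xF) == i); /* pops 0..7 */
			lanes >>= 4;
		}

		assert(lanes == 0xF);	/* only the sentinel: all busy */
	}
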
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
deleted file mode 100644
index b36ae7454084..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,214 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA256 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-.extern sha256_x8_avx2
-
-# LINUX register definitions
-arg1 = %rdi
-arg2 = %rsi
-size_offset = %rcx
-tmp2 = %rcx
-extra_blocks = %rdx
-
-# Common definitions
-#define state arg1
-#define job %rsi
-#define len2 arg2
-#define p2 arg2
-
-# idx must be a register not clobbered by sha256_x8_avx2
-idx = %r8
-DWORD_idx = %r8d
-last_len = %r8
-
-p = %r11
-start_offset = %r11
-
-unused_lanes = %rbx
-BYTE_unused_lanes = %bl
-
-job_rax = %rax
-len = %rax
-DWORD_len = %eax
-
-lane = %r12
-tmp3 = %r12
-
-tmp = %r9
-DWORD_tmp = %r9d
-
-lane_data = %r10
-
-# JOB* sha256_mb_mgr_submit_avx2(MB_MGR *state, JOB_SHA256 *job)
-# arg 1 : rdi : state
-# arg 2 : rsi : job
-ENTRY(sha256_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- mov unused_lanes, lane
- and $0xF, lane
- shr $4, unused_lanes
- imul $_LANE_DATA_size, lane, lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- shl $4, len
- or lane, len
-
- movl DWORD_len, _lens(state , lane, 4)
-
- # Load digest words from result_digest
- vmovdqu _result_digest(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovd %xmm0, _args_digest(state, lane, 4)
- vpextrd $1, %xmm0, _args_digest+1*32(state, lane, 4)
- vpextrd $2, %xmm0, _args_digest+2*32(state, lane, 4)
- vpextrd $3, %xmm0, _args_digest+3*32(state, lane, 4)
- vmovd %xmm1, _args_digest+4*32(state, lane, 4)
- vpextrd $1, %xmm1, _args_digest+5*32(state, lane, 4)
- vpextrd $2, %xmm1, _args_digest+6*32(state, lane, 4)
- vpextrd $3, %xmm1, _args_digest+7*32(state, lane, 4)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xF, unused_lanes
- jne return_null
-
-start_loop:
- # Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
-
- vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
- vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has {x,x,E,F}
- vpalignr $4, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,x,E}
- vpminud %xmm3, %xmm2, %xmm2 # xmm2 has min val in low dword
-
- vmovd %xmm2, DWORD_idx
- mov idx, len2
- and $0xF, idx
- shr $4, len2
- jz len_is_0
-
- vpand clear_low_nibble(%rip), %xmm2, %xmm2
- vpshufd $0, %xmm2, %xmm2
-
- vpsubd %xmm2, %xmm0, %xmm0
- vpsubd %xmm2, %xmm1, %xmm1
-
- vmovdqa %xmm0, _lens + 0*16(state)
- vmovdqa %xmm1, _lens + 1*16(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha256_x8_avx2
-
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $4, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens(state,idx,4)
-
- vmovd _args_digest(state, idx, 4), %xmm0
- vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
- vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
- vmovd _args_digest+4*32(state, idx, 4), %xmm1
- vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
- vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-
-ENDPROC(sha256_mb_mgr_submit_avx2)
-
-.section .rodata.cst16.clear_low_nibble, "aM", @progbits, 16
-.align 16
-clear_low_nibble:
- .octa 0x000000000000000000000000FFFFFFF0
diff --git a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S b/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
deleted file mode 100644
index 1687c80c5995..000000000000
--- a/arch/x86/crypto/sha256-mb/sha256_x8_avx2.S
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Multi-buffer SHA256 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include "sha256_mb_mgr_datastruct.S"
-
-## code to compute eight SHA256 lanes using AVX2 (256-bit vectors)
-## outer calling routine takes care of save and restore of XMM registers
-## Logic designed/laid out by JDG
-
-## Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; %ymm0-15
-## Linux clobbers: rax rbx rcx rdx rsi r9 r10 r11 r12 r13 r14 r15
-## Linux preserves: rdi rbp r8
-##
-## clobbers %ymm0-15
-
-arg1 = %rdi
-arg2 = %rsi
-reg3 = %rcx
-reg4 = %rdx
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = reg3
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-inp4 = %r13
-inp5 = %r14
-inp6 = %r15
-inp7 = reg4
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-T1 = %ymm8
-
-a0 = %ymm12
-a1 = %ymm13
-a2 = %ymm14
-TMP = %ymm15
-TMP0 = %ymm6
-TMP1 = %ymm7
-
-TT0 = %ymm8
-TT1 = %ymm9
-TT2 = %ymm10
-TT3 = %ymm11
-TT4 = %ymm12
-TT5 = %ymm13
-TT6 = %ymm14
-TT7 = %ymm15
-
-# Define stack usage
-
-# Assume stack aligned to 32 bytes before call
-# Therefore FRAMESZ mod 32 must be 32-8 = 24
-
-#define FRAMESZ 0x388
-
-#define VMOVPS vmovups
-
-# TRANSPOSE8 r0, r1, r2, r3, r4, r5, r6, r7, t0, t1
-# "transpose" data in {r0...r7} using temps {t0...t1}
-# Input looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-# r4 = {e7 e6 e5 e4 e3 e2 e1 e0}
-# r5 = {f7 f6 f5 f4 f3 f2 f1 f0}
-# r6 = {g7 g6 g5 g4 g3 g2 g1 g0}
-# r7 = {h7 h6 h5 h4 h3 h2 h1 h0}
-#
-# Output looks like: {r0 r1 r2 r3 r4 r5 r6 r7}
-# r0 = {h0 g0 f0 e0 d0 c0 b0 a0}
-# r1 = {h1 g1 f1 e1 d1 c1 b1 a1}
-# r2 = {h2 g2 f2 e2 d2 c2 b2 a2}
-# r3 = {h3 g3 f3 e3 d3 c3 b3 a3}
-# r4 = {h4 g4 f4 e4 d4 c4 b4 a4}
-# r5 = {h5 g5 f5 e5 d5 c5 b5 a5}
-# r6 = {h6 g6 f6 e6 d6 c6 b6 a6}
-# r7 = {h7 g7 f7 e7 d7 c7 b7 a7}
-#
-
-.macro TRANSPOSE8 r0 r1 r2 r3 r4 r5 r6 r7 t0 t1
- # process top half (r0..r3) {a...d}
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
- vshufps $0xDD, \t1, \t0, \r3 # r3 = {d5 c5 b5 a5 d1 c1 b1 a1}
- vshufps $0x88, \r2, \r0, \r1 # r1 = {d6 c6 b6 a6 d2 c2 b2 a2}
- vshufps $0xDD, \r2, \r0, \r0 # r0 = {d7 c7 b7 a7 d3 c3 b3 a3}
- vshufps $0x88, \t1, \t0, \t0 # t0 = {d4 c4 b4 a4 d0 c0 b0 a0}
-
- # use r2 in place of t0
- # process bottom half (r4..r7) {e...h}
- vshufps $0x44, \r5, \r4, \r2 # r2 = {f5 f4 e5 e4 f1 f0 e1 e0}
- vshufps $0xEE, \r5, \r4, \r4 # r4 = {f7 f6 e7 e6 f3 f2 e3 e2}
- vshufps $0x44, \r7, \r6, \t1 # t1 = {h5 h4 g5 g4 h1 h0 g1 g0}
- vshufps $0xEE, \r7, \r6, \r6 # r6 = {h7 h6 g7 g6 h3 h2 g3 g2}
- vshufps $0xDD, \t1, \r2, \r7 # r7 = {h5 g5 f5 e5 h1 g1 f1 e1}
- vshufps $0x88, \r6, \r4, \r5 # r5 = {h6 g6 f6 e6 h2 g2 f2 e2}
- vshufps $0xDD, \r6, \r4, \r4 # r4 = {h7 g7 f7 e7 h3 g3 f3 e3}
- vshufps $0x88, \t1, \r2, \t1 # t1 = {h4 g4 f4 e4 h0 g0 f0 e0}
-
- vperm2f128 $0x13, \r1, \r5, \r6 # h6...a6
- vperm2f128 $0x02, \r1, \r5, \r2 # h2...a2
- vperm2f128 $0x13, \r3, \r7, \r5 # h5...a5
- vperm2f128 $0x02, \r3, \r7, \r1 # h1...a1
- vperm2f128 $0x13, \r0, \r4, \r7 # h7...a7
- vperm2f128 $0x02, \r0, \r4, \r3 # h3...a3
- vperm2f128 $0x13, \t0, \t1, \r4 # h4...a4
- vperm2f128 $0x02, \t0, \t1, \r0 # h0...a0
-
-.endm
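
For checking the lane layout, a scalar reference of the 8x8 dword transpose performed by TRANSPOSE8 (illustrative only):

	#include <stdint.h>

	/* out[w][lane] = in[lane][w]: turns eight per-lane word streams
	 * into eight vectors, each holding one word position across
	 * all lanes.
	 */
	static void transpose8x8(uint32_t out[8][8], const uint32_t in[8][8])
	{
		int r, c;

		for (r = 0; r < 8; r++)
			for (c = 0; c < 8; c++)
				out[r][c] = in[c][r];
	}
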
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-.macro _PRORD reg imm tmp
- vpslld $(32-\imm),\reg,\tmp
- vpsrld $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# PRORD_nd reg, imm, tmp, src
-.macro _PRORD_nd reg imm tmp src
- vpslld $(32-\imm), \src, \tmp
- vpsrld $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORD dst/src, amt
-.macro PRORD reg imm
- _PRORD \reg,\imm,TMP
-.endm
-
-# PRORD_nd dst, src, amt
-.macro PRORD_nd reg tmp imm
- _PRORD_nd \reg, \imm, TMP, \tmp
-.endm
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORD_nd a0,e,5 # sig1: a0 = (e >> 5)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORD_nd a1,e,25 # sig1: a1 = (e >> 25)
-
- vmovdqu \_T1,(SZ8*(\i & 0xf))(%rsp)
- vpaddd (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORD a0, 6 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddd a2, h, h # h = h + ch
- PRORD_nd a2,a,11 # sig0: a2 = (a >> 11)
- vpaddd \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- PRORD_nd a1,a,22 # sig0: a1 = (a >> 22)
- vpxor c, a, \_T1 # maj: T1 = a^c
- add $SZ8, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddd a0, h, h
- vpaddd h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORD a2,2 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddd a1, h, h # h = h + ch + W + K + maj
- vpaddd a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
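
ROUND_00_15 computes the same FIPS 180-4 round in all eight lanes at once; a single-lane scalar equivalent for reference (standard SHA-256, not kernel API):

	#include <stdint.h>

	static inline uint32_t rotr32(uint32_t x, int n)
	{
		return (x >> n) | (x << (32 - n));
	}

	/* One SHA-256 round; s[] holds the working variables a..h. */
	static void sha256_round(uint32_t s[8], uint32_t w, uint32_t k)
	{
		uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
		uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
		uint32_t sig1 = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
		uint32_t ch = g ^ (e & (f ^ g));	/* the vpxor/vpand trio */
		uint32_t t1 = h + sig1 + ch + k + w;
		uint32_t sig0 = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
		uint32_t maj = (a & c) | ((a ^ c) & b);	/* the maj sequence */
		uint32_t t2 = sig0 + maj;

		s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;
		s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + t2;
	}
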
-
-# arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu (SZ8*((\i-15)&0xf))(%rsp), \_T1
- vmovdqu (SZ8*((\i-2)&0xf))(%rsp), a1
- vmovdqu \_T1, a0
- PRORD \_T1,11
- vmovdqu a1, a2
- PRORD a1,2
- vpxor a0, \_T1, \_T1
- PRORD \_T1, 7
- vpxor a2, a1, a1
- PRORD a1, 17
- vpsrld $3, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrld $10, a2, a2
- vpxor a2, a1, a1
- vpaddd (SZ8*((\i-16)&0xf))(%rsp), \_T1, \_T1
- vpaddd (SZ8*((\i-7)&0xf))(%rsp), a1, a1
- vpaddd a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
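
Likewise, the vectorized message-schedule step in ROUND_16_XX corresponds to the scalar recurrence below (rotr32() as in the previous sketch):

	/* W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16] */
	static uint32_t sha256_schedule(const uint32_t w[], int i)
	{
		uint32_t s0 = rotr32(w[i - 15], 7) ^ rotr32(w[i - 15], 18) ^
			      (w[i - 15] >> 3);
		uint32_t s1 = rotr32(w[i - 2], 17) ^ rotr32(w[i - 2], 19) ^
			      (w[i - 2] >> 10);

		return w[i - 16] + s0 + w[i - 7] + s1;
	}
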
-
-# SHA256_ARGS:
-# UINT32 digest[8][8]; // transposed digests, one row per state word
-# UINT8 *data_ptr[8];
-
-# void sha256_x8_avx2(SHA256_ARGS *args, UINT64 bytes);
-# arg 1 : STATE : pointer to the SHA256_ARGS structure above
-# arg 2 : INP_SIZE : size of input in blocks
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save rsp, allocate 32-byte aligned for local variables
-ENTRY(sha256_x8_avx2)
-
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- mov %rsp, IDX
- sub $FRAMESZ, %rsp
- and $~0x1F, %rsp
- mov IDX, _rsp(%rsp)
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA256_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA256_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA256_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA256_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA256_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA256_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA256_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA256_DIGEST_ROW_SIZE(STATE),h
-
- lea K256_8(%rip),TBL
-
- # load the address of each of the 8 message lanes
- # getting ready to transpose input onto stack
- mov _args_data_ptr+0*PTR_SZ(STATE),inp0
- mov _args_data_ptr+1*PTR_SZ(STATE),inp1
- mov _args_data_ptr+2*PTR_SZ(STATE),inp2
- mov _args_data_ptr+3*PTR_SZ(STATE),inp3
- mov _args_data_ptr+4*PTR_SZ(STATE),inp4
- mov _args_data_ptr+5*PTR_SZ(STATE),inp5
- mov _args_data_ptr+6*PTR_SZ(STATE),inp6
- mov _args_data_ptr+7*PTR_SZ(STATE),inp7
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ8(%rsp)
- vmovdqu c, _digest+2*SZ8(%rsp)
- vmovdqu d, _digest+3*SZ8(%rsp)
- vmovdqu e, _digest+4*SZ8(%rsp)
- vmovdqu f, _digest+5*SZ8(%rsp)
- vmovdqu g, _digest+6*SZ8(%rsp)
- vmovdqu h, _digest+7*SZ8(%rsp)
- i = 0
-.rep 2
- VMOVPS i*32(inp0, IDX), TT0
- VMOVPS i*32(inp1, IDX), TT1
- VMOVPS i*32(inp2, IDX), TT2
- VMOVPS i*32(inp3, IDX), TT3
- VMOVPS i*32(inp4, IDX), TT4
- VMOVPS i*32(inp5, IDX), TT5
- VMOVPS i*32(inp6, IDX), TT6
- VMOVPS i*32(inp7, IDX), TT7
- vmovdqu g, _ytmp(%rsp)
- vmovdqu h, _ytmp+1*SZ8(%rsp)
- TRANSPOSE8 TT0, TT1, TT2, TT3, TT4, TT5, TT6, TT7, TMP0, TMP1
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP1
- vmovdqu _ytmp(%rsp), g
- vpshufb TMP1, TT0, TT0
- vpshufb TMP1, TT1, TT1
- vpshufb TMP1, TT2, TT2
- vpshufb TMP1, TT3, TT3
- vpshufb TMP1, TT4, TT4
- vpshufb TMP1, TT5, TT5
- vpshufb TMP1, TT6, TT6
- vpshufb TMP1, TT7, TT7
- vmovdqu _ytmp+1*SZ8(%rsp), h
- vmovdqu TT4, _ytmp(%rsp)
- vmovdqu TT5, _ytmp+1*SZ8(%rsp)
- vmovdqu TT6, _ytmp+2*SZ8(%rsp)
- vmovdqu TT7, _ytmp+3*SZ8(%rsp)
- ROUND_00_15 TT0,(i*8+0)
- vmovdqu _ytmp(%rsp), TT0
- ROUND_00_15 TT1,(i*8+1)
- vmovdqu _ytmp+1*SZ8(%rsp), TT1
- ROUND_00_15 TT2,(i*8+2)
- vmovdqu _ytmp+2*SZ8(%rsp), TT2
- ROUND_00_15 TT3,(i*8+3)
- vmovdqu _ytmp+3*SZ8(%rsp), TT3
- ROUND_00_15 TT0,(i*8+4)
- ROUND_00_15 TT1,(i*8+5)
- ROUND_00_15 TT2,(i*8+6)
- ROUND_00_15 TT3,(i*8+7)
- i = (i+1)
-.endr
- add $64, IDX
- i = (i*8)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
-
- cmp $ROUNDS,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddd _digest+0*SZ8(%rsp), a, a
- vpaddd _digest+1*SZ8(%rsp), b, b
- vpaddd _digest+2*SZ8(%rsp), c, c
- vpaddd _digest+3*SZ8(%rsp), d, d
- vpaddd _digest+4*SZ8(%rsp), e, e
- vpaddd _digest+5*SZ8(%rsp), f, f
- vpaddd _digest+6*SZ8(%rsp), g, g
- vpaddd _digest+7*SZ8(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA256_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA256_DIGEST_ROW_SIZE(STATE)
-
- # update input pointers
- add IDX, inp0
- mov inp0, _args_data_ptr+0*8(STATE)
- add IDX, inp1
- mov inp1, _args_data_ptr+1*8(STATE)
- add IDX, inp2
- mov inp2, _args_data_ptr+2*8(STATE)
- add IDX, inp3
- mov inp3, _args_data_ptr+3*8(STATE)
- add IDX, inp4
- mov inp4, _args_data_ptr+4*8(STATE)
- add IDX, inp5
- mov inp5, _args_data_ptr+5*8(STATE)
- add IDX, inp6
- mov inp6, _args_data_ptr+6*8(STATE)
- add IDX, inp7
- mov inp7, _args_data_ptr+7*8(STATE)
-
- # Postamble
- mov _rsp(%rsp), %rsp
-
- # restore callee-saved clobbered registers
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- ret
-ENDPROC(sha256_x8_avx2)
-
-.section .rodata.K256_8, "a", @progbits
-.align 64
-K256_8:
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x428a2f98428a2f98428a2f98428a2f98
- .octa 0x71374491713744917137449171374491
- .octa 0x71374491713744917137449171374491
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xb5c0fbcfb5c0fbcfb5c0fbcfb5c0fbcf
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0xe9b5dba5e9b5dba5e9b5dba5e9b5dba5
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x3956c25b3956c25b3956c25b3956c25b
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x59f111f159f111f159f111f159f111f1
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0x923f82a4923f82a4923f82a4923f82a4
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xab1c5ed5ab1c5ed5ab1c5ed5ab1c5ed5
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0xd807aa98d807aa98d807aa98d807aa98
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x12835b0112835b0112835b0112835b01
- .octa 0x243185be243185be243185be243185be
- .octa 0x243185be243185be243185be243185be
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x550c7dc3550c7dc3550c7dc3550c7dc3
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x72be5d7472be5d7472be5d7472be5d74
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x80deb1fe80deb1fe80deb1fe80deb1fe
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0x9bdc06a79bdc06a79bdc06a79bdc06a7
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xc19bf174c19bf174c19bf174c19bf174
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xe49b69c1e49b69c1e49b69c1e49b69c1
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0xefbe4786efbe4786efbe4786efbe4786
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x0fc19dc60fc19dc60fc19dc60fc19dc6
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x240ca1cc240ca1cc240ca1cc240ca1cc
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x2de92c6f2de92c6f2de92c6f2de92c6f
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x4a7484aa4a7484aa4a7484aa4a7484aa
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x5cb0a9dc5cb0a9dc5cb0a9dc5cb0a9dc
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x76f988da76f988da76f988da76f988da
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0x983e5152983e5152983e5152983e5152
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xa831c66da831c66da831c66da831c66d
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xb00327c8b00327c8b00327c8b00327c8
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xbf597fc7bf597fc7bf597fc7bf597fc7
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xc6e00bf3c6e00bf3c6e00bf3c6e00bf3
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0xd5a79147d5a79147d5a79147d5a79147
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x06ca635106ca635106ca635106ca6351
- .octa 0x14292967142929671429296714292967
- .octa 0x14292967142929671429296714292967
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x27b70a8527b70a8527b70a8527b70a85
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x2e1b21382e1b21382e1b21382e1b2138
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x4d2c6dfc4d2c6dfc4d2c6dfc4d2c6dfc
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x53380d1353380d1353380d1353380d13
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x650a7354650a7354650a7354650a7354
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x766a0abb766a0abb766a0abb766a0abb
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x81c2c92e81c2c92e81c2c92e81c2c92e
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0x92722c8592722c8592722c8592722c85
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa2bfe8a1a2bfe8a1a2bfe8a1a2bfe8a1
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xa81a664ba81a664ba81a664ba81a664b
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc24b8b70c24b8b70c24b8b70c24b8b70
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xc76c51a3c76c51a3c76c51a3c76c51a3
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd192e819d192e819d192e819d192e819
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xd6990624d6990624d6990624d6990624
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0xf40e3585f40e3585f40e3585f40e3585
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x106aa070106aa070106aa070106aa070
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x19a4c11619a4c11619a4c11619a4c116
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x1e376c081e376c081e376c081e376c08
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x2748774c2748774c2748774c2748774c
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x34b0bcb534b0bcb534b0bcb534b0bcb5
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x391c0cb3391c0cb3391c0cb3391c0cb3
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x4ed8aa4a4ed8aa4a4ed8aa4a4ed8aa4a
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x5b9cca4f5b9cca4f5b9cca4f5b9cca4f
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x682e6ff3682e6ff3682e6ff3682e6ff3
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x748f82ee748f82ee748f82ee748f82ee
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x78a5636f78a5636f78a5636f78a5636f
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x84c8781484c8781484c8781484c87814
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x8cc702088cc702088cc702088cc70208
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0x90befffa90befffa90befffa90befffa
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xa4506ceba4506ceba4506ceba4506ceb
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xbef9a3f7bef9a3f7bef9a3f7bef9a3f7
- .octa 0xc67178f2c67178f2c67178f2c67178f2
- .octa 0xc67178f2c67178f2c67178f2c67178f2
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK:
-.octa 0x0c0d0e0f08090a0b0405060700010203
-.octa 0x0c0d0e0f08090a0b0405060700010203
-
-.section .rodata.cst256.K256, "aM", @progbits, 256
-.align 64
-.global K256
-K256:
- .int 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
- .int 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
- .int 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
- .int 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
- .int 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
- .int 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
- .int 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
- .int 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
- .int 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
- .int 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
- .int 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
- .int 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
- .int 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
- .int 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
- .int 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
- .int 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
diff --git a/arch/x86/crypto/sha512-mb/Makefile b/arch/x86/crypto/sha512-mb/Makefile
deleted file mode 100644
index 90f1ef69152e..000000000000
--- a/arch/x86/crypto/sha512-mb/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-#
-# Arch-specific CryptoAPI modules.
-#
-
-avx2_supported := $(call as-instr,vpgatherdd %ymm0$(comma)(%eax$(comma)%ymm1\
- $(comma)4)$(comma)%ymm2,yes,no)
-ifeq ($(avx2_supported),yes)
- obj-$(CONFIG_CRYPTO_SHA512_MB) += sha512-mb.o
- sha512-mb-y := sha512_mb.o sha512_mb_mgr_flush_avx2.o \
- sha512_mb_mgr_init_avx2.o sha512_mb_mgr_submit_avx2.o sha512_x4_avx2.o
-endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb.c b/arch/x86/crypto/sha512-mb/sha512_mb.c
deleted file mode 100644
index 26b85678012d..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * Multi buffer SHA512 algorithm Glue Code
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <crypto/internal/hash.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/cryptohash.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <crypto/scatterwalk.h>
-#include <crypto/sha.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <asm/byteorder.h>
-#include <linux/hardirq.h>
-#include <asm/fpu/api.h>
-#include "sha512_mb_ctx.h"
-
-#define FLUSH_INTERVAL 1000 /* in usec */
-
-static struct mcryptd_alg_state sha512_mb_alg_state;
-
-struct sha512_mb_ctx {
- struct mcryptd_ahash *mcryptd_tfm;
-};
-
-static inline struct mcryptd_hash_request_ctx
- *cast_hash_to_mcryptd_ctx(struct sha512_hash_ctx *hash_ctx)
-{
- struct ahash_request *areq;
-
- areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
- return container_of(areq, struct mcryptd_hash_request_ctx, areq);
-}
-
-static inline struct ahash_request
- *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx)
-{
- return container_of((void *) ctx, struct ahash_request, __ctx);
-}
-
-static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
- struct ahash_request *areq)
-{
- rctx->flag = HASH_UPDATE;
-}
-
-static asmlinkage void (*sha512_job_mgr_init)(struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_submit)
- (struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_flush)
- (struct sha512_mb_mgr *state);
-static asmlinkage struct job_sha512* (*sha512_job_mgr_get_comp_job)
- (struct sha512_mb_mgr *state);
-
-inline uint32_t sha512_pad(uint8_t padblock[SHA512_BLOCK_SIZE * 2],
- uint64_t total_len)
-{
- uint32_t i = total_len & (SHA512_BLOCK_SIZE - 1);
-
- memset(&padblock[i], 0, SHA512_BLOCK_SIZE);
- padblock[i] = 0x80;
-
- i += ((SHA512_BLOCK_SIZE - 1) &
- (0 - (total_len + SHA512_PADLENGTHFIELD_SIZE + 1)))
- + 1 + SHA512_PADLENGTHFIELD_SIZE;
-
-#if SHA512_PADLENGTHFIELD_SIZE == 16
- *((uint64_t *) &padblock[i - 16]) = 0;
-#endif
-
- *((uint64_t *) &padblock[i - 8]) = cpu_to_be64(total_len << 3);
-
- /* Number of extra blocks to hash */
- return i >> SHA512_LOG2_BLOCK_SIZE;
-}
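
The index arithmetic in sha512_pad() above is terse: it appends the 0x80 byte, aligns up so that exactly 16 bytes remain for the big-endian bit-length field, and reports how many whole blocks the pad occupies. Below is a minimal user-space sketch of the same computation, assuming only the constants visible in this diff (128-byte blocks, a 16-byte length field, SHA512_LOG2_BLOCK_SIZE = 7); it mirrors the index math without the kernel types.

/*
 * Standalone replay of sha512_pad()'s length arithmetic.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BLOCK_SIZE 128u /* SHA512_BLOCK_SIZE */
#define PADLEN 16u      /* SHA512_PADLENGTHFIELD_SIZE */

/* Number of extra blocks the pad occupies for a given total length. */
static uint32_t pad_blocks(uint64_t total_len)
{
	uint32_t i = total_len & (BLOCK_SIZE - 1);

	/* 0x80 byte, zero fill, then the 16-byte length field. */
	i += ((BLOCK_SIZE - 1) & (0 - (total_len + PADLEN + 1)))
		+ 1 + PADLEN;
	return i >> 7; /* SHA512_LOG2_BLOCK_SIZE */
}

int main(void)
{
	/* A residual of 0..111 bytes fits one pad block, 112..127 needs two. */
	assert(pad_blocks(0) == 1);
	assert(pad_blocks(111) == 1);
	assert(pad_blocks(112) == 2);
	assert(pad_blocks(128) == 1);
	printf("pad_blocks(200) = %u\n", pad_blocks(200)); /* prints 1 */
	return 0;
}

Running it confirms the two-block boundary: a residual of 112 or more bytes cannot fit the 0x80 byte plus the 16-byte length field in the same block.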
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_resubmit
- (struct sha512_ctx_mgr *mgr, struct sha512_hash_ctx *ctx)
-{
- while (ctx) {
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Clear PROCESSING bit */
- ctx->status = HASH_CTX_STS_COMPLETE;
- return ctx;
- }
-
- /*
- * If the extra blocks are empty, begin hashing what remains
- * in the user's buffer.
- */
- if (ctx->partial_block_buffer_length == 0 &&
- ctx->incoming_buffer_length) {
-
- const void *buffer = ctx->incoming_buffer;
- uint32_t len = ctx->incoming_buffer_length;
- uint32_t copy_len;
-
- /*
- * Only entire blocks can be hashed.
- * Copy remainder to extra blocks buffer.
- */
- copy_len = len & (SHA512_BLOCK_SIZE-1);
-
- if (copy_len) {
- len -= copy_len;
- memcpy(ctx->partial_block_buffer,
- ((const char *) buffer + len),
- copy_len);
- ctx->partial_block_buffer_length = copy_len;
- }
-
- ctx->incoming_buffer_length = 0;
-
- /* len should be a multiple of the block size now */
- assert((len % SHA512_BLOCK_SIZE) == 0);
-
- /* Set len to the number of blocks to be hashed */
- len >>= SHA512_LOG2_BLOCK_SIZE;
-
- if (len) {
-
- ctx->job.buffer = (uint8_t *) buffer;
- ctx->job.len = len;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr,
- &ctx->job);
- continue;
- }
- }
-
- /*
- * If the extra blocks are not empty, then we are
- * either on the last block(s) or we need more
- * user input before continuing.
- */
- if (ctx->status & HASH_CTX_STS_LAST) {
-
- uint8_t *buf = ctx->partial_block_buffer;
- uint32_t n_extra_blocks =
- sha512_pad(buf, ctx->total_length);
-
- ctx->status = (HASH_CTX_STS_PROCESSING |
- HASH_CTX_STS_COMPLETE);
- ctx->job.buffer = buf;
- ctx->job.len = (uint32_t) n_extra_blocks;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- continue;
- }
-
- if (ctx)
- ctx->status = HASH_CTX_STS_IDLE;
- return ctx;
- }
-
- return NULL;
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_get_comp_ctx(struct mcryptd_alg_cstate *cstate)
-{
- /*
- * If get_comp_job returns NULL, there are no jobs complete.
- * If get_comp_job returns a job, verify that it is safe to return to
- * the user.
- * If it is not ready, resubmit the job to finish processing.
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to be
- * returned.
- * Otherwise, all jobs currently being managed by the hash_ctx_mgr
- * still need processing.
- */
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_get_comp_job(&mgr->mgr);
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static void sha512_ctx_mgr_init(struct sha512_ctx_mgr *mgr)
-{
- sha512_job_mgr_init(&mgr->mgr);
-}
-
-static struct sha512_hash_ctx
- *sha512_ctx_mgr_submit(struct mcryptd_alg_cstate *cstate,
- struct sha512_hash_ctx *ctx,
- const void *buffer,
- uint32_t len,
- int flags)
-{
- struct sha512_ctx_mgr *mgr;
- unsigned long irqflags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, irqflags);
- if (flags & ~(HASH_UPDATE | HASH_LAST)) {
- /* User should not pass anything other than UPDATE or LAST */
- ctx->error = HASH_CTX_ERROR_INVALID_FLAGS;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_PROCESSING) {
- /* Cannot submit to a currently processing job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING;
- goto unlock;
- }
-
- if (ctx->status & HASH_CTX_STS_COMPLETE) {
- /* Cannot update a finished job. */
- ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED;
- goto unlock;
- }
-
- /*
- * If we made it here, there were no errors during this call to
- * submit
- */
- ctx->error = HASH_CTX_ERROR_NONE;
-
- /* Store buffer ptr info from user */
- ctx->incoming_buffer = buffer;
- ctx->incoming_buffer_length = len;
-
- /*
- * Store the user's request flags and mark this ctx as currently being
- * processed.
- */
- ctx->status = (flags & HASH_LAST) ?
- (HASH_CTX_STS_PROCESSING | HASH_CTX_STS_LAST) :
- HASH_CTX_STS_PROCESSING;
-
- /* Advance byte counter */
- ctx->total_length += len;
-
- /*
- * If there is anything currently buffered in the extra blocks,
- * append to it until it contains a whole block.
- * Or if the user's buffer contains less than a whole block,
- * append as much as possible to the extra block.
- */
- if (ctx->partial_block_buffer_length || len < SHA512_BLOCK_SIZE) {
- /* Compute how many bytes to copy from user buffer into extra
- * block
- */
- uint32_t copy_len = SHA512_BLOCK_SIZE -
- ctx->partial_block_buffer_length;
- if (len < copy_len)
- copy_len = len;
-
- if (copy_len) {
- /* Copy and update relevant pointers and counters */
- memcpy
- (&ctx->partial_block_buffer[ctx->partial_block_buffer_length],
- buffer, copy_len);
-
- ctx->partial_block_buffer_length += copy_len;
- ctx->incoming_buffer = (const void *)
- ((const char *)buffer + copy_len);
- ctx->incoming_buffer_length = len - copy_len;
- }
-
- /* The extra block should never contain more than 1 block
- * here
- */
- assert(ctx->partial_block_buffer_length <= SHA512_BLOCK_SIZE);
-
- /* If the extra block buffer contains exactly 1 block, it can
- * be hashed.
- */
- if (ctx->partial_block_buffer_length >= SHA512_BLOCK_SIZE) {
- ctx->partial_block_buffer_length = 0;
-
- ctx->job.buffer = ctx->partial_block_buffer;
- ctx->job.len = 1;
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_submit(&mgr->mgr, &ctx->job);
- }
- }
-
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-unlock:
- spin_unlock_irqrestore(&cstate->work_lock, irqflags);
- return ctx;
-}
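
The partial-block handling in sha512_ctx_mgr_submit() above reduces to one invariant: only whole 128-byte blocks are handed to the job manager, and any tail is carried in partial_block_buffer until later updates complete it. The following is a toy model of that buffering with hypothetical names (struct carry, feed()) and no kernel dependencies; it is a sketch of the invariant, not the driver's code path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK 128u

struct carry {
	uint8_t buf[BLOCK];
	uint32_t len; /* bytes buffered, always < BLOCK */
};

/* Returns how many whole blocks this update made hashable. */
static uint32_t feed(struct carry *c, const uint8_t *p, uint32_t n)
{
	uint32_t blocks = 0, take;

	/* Top up the carried partial block first. */
	if (c->len) {
		take = BLOCK - c->len;
		if (take > n)
			take = n;
		memcpy(c->buf + c->len, p, take);
		c->len += take;
		p += take;
		n -= take;
		if (c->len < BLOCK)
			return blocks; /* still not a whole block */
		c->len = 0;
		blocks++; /* the carried block is now hashable */
	}
	blocks += n / BLOCK; /* whole blocks straight from the caller */
	memcpy(c->buf, p + (n / BLOCK) * BLOCK, n % BLOCK);
	c->len = n % BLOCK;
	return blocks;
}

int main(void)
{
	struct carry c = { .len = 0 };
	uint8_t data[300] = { 0 };

	printf("%u\n", feed(&c, data, 100)); /* 0, 100 bytes carried */
	printf("%u\n", feed(&c, data, 200)); /* 2, 44 bytes carried  */
	return 0;
}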
-
-static struct sha512_hash_ctx *sha512_ctx_mgr_flush(struct mcryptd_alg_cstate *cstate)
-{
- struct sha512_ctx_mgr *mgr;
- struct sha512_hash_ctx *ctx;
- unsigned long flags;
-
- mgr = cstate->mgr;
- spin_lock_irqsave(&cstate->work_lock, flags);
- while (1) {
- ctx = (struct sha512_hash_ctx *)
- sha512_job_mgr_flush(&mgr->mgr);
-
- /* If flush returned NULL, there are no more jobs in flight. */
- if (!ctx)
- break;
-
- /*
- * If flush returned a job, resubmit the job to finish
- * processing.
- */
- ctx = sha512_ctx_mgr_resubmit(mgr, ctx);
-
- /*
- * If sha512_ctx_mgr_resubmit returned a job, it is ready to
- * be returned. Otherwise, all jobs currently being managed by
- * the sha512_ctx_mgr still need processing. Loop.
- */
- if (ctx)
- break;
- }
- spin_unlock_irqrestore(&cstate->work_lock, flags);
- return ctx;
-}
-
-static int sha512_mb_init(struct ahash_request *areq)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- hash_ctx_init(sctx);
- sctx->job.result_digest[0] = SHA512_H0;
- sctx->job.result_digest[1] = SHA512_H1;
- sctx->job.result_digest[2] = SHA512_H2;
- sctx->job.result_digest[3] = SHA512_H3;
- sctx->job.result_digest[4] = SHA512_H4;
- sctx->job.result_digest[5] = SHA512_H5;
- sctx->job.result_digest[6] = SHA512_H6;
- sctx->job.result_digest[7] = SHA512_H7;
- sctx->total_length = 0;
- sctx->partial_block_buffer_length = 0;
- sctx->status = HASH_CTX_STS_IDLE;
-
- return 0;
-}
-
-static int sha512_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
-{
- int i;
- struct sha512_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
- __be64 *dst = (__be64 *) rctx->out;
-
- for (i = 0; i < 8; ++i)
- dst[i] = cpu_to_be64(sctx->job.result_digest[i]);
-
- return 0;
-}
-
-static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,
- struct mcryptd_alg_cstate *cstate, bool flush)
-{
- int flag = HASH_UPDATE;
- int nbytes, err = 0;
- struct mcryptd_hash_request_ctx *rctx = *ret_rctx;
- struct sha512_hash_ctx *sha_ctx;
-
- /* more work ? */
- while (!(rctx->flag & HASH_DONE)) {
- nbytes = crypto_ahash_walk_done(&rctx->walk, 0);
- if (nbytes < 0) {
- err = nbytes;
- goto out;
- }
- /* check if the walk is done */
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- if (rctx->flag & HASH_FINAL)
- flag |= HASH_LAST;
-
- }
- sha_ctx = (struct sha512_hash_ctx *)
- ahash_request_ctx(&rctx->areq);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx,
- rctx->walk.data, nbytes, flag);
- if (!sha_ctx) {
- if (flush)
- sha_ctx = sha512_ctx_mgr_flush(cstate);
- }
- kernel_fpu_end();
- if (sha_ctx)
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- else {
- rctx = NULL;
- goto out;
- }
- }
-
- /* copy the results */
- if (rctx->flag & HASH_FINAL)
- sha512_mb_set_results(rctx);
-
-out:
- *ret_rctx = rctx;
- return err;
-}
-
-static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate,
- int err)
-{
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- struct mcryptd_hash_request_ctx *req_ctx;
- int ret;
- unsigned long flags;
-
- /* remove from work list */
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&rctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- if (irqs_disabled())
- rctx->complete(&req->base, err);
- else {
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
-
- /* check to see if there are other jobs that are done */
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- while (sha_ctx) {
- req_ctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&req_ctx, cstate, false);
- if (req_ctx) {
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_del(&req_ctx->waiter);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- req = cast_mcryptd_ctx_to_req(req_ctx);
- if (irqs_disabled())
- req_ctx->complete(&req->base, ret);
- else {
- local_bh_disable();
- req_ctx->complete(&req->base, ret);
- local_bh_enable();
- }
- }
- sha_ctx = sha512_ctx_mgr_get_comp_ctx(cstate);
- }
-
- return 0;
-}
-
-static void sha512_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
- struct mcryptd_alg_cstate *cstate)
-{
- unsigned long next_flush;
- unsigned long delay = usecs_to_jiffies(FLUSH_INTERVAL);
- unsigned long flags;
-
- /* initialize tag */
- rctx->tag.arrival = jiffies; /* tag the arrival time */
- rctx->tag.seq_num = cstate->next_seq_num++;
- next_flush = rctx->tag.arrival + delay;
- rctx->tag.expire = next_flush;
-
- spin_lock_irqsave(&cstate->work_lock, flags);
- list_add_tail(&rctx->waiter, &cstate->work_list);
- spin_unlock_irqrestore(&cstate->work_lock, flags);
-
- mcryptd_arm_flusher(cstate, delay);
-}
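
sha512_mb_add_list() stamps each request with an arrival time and an expiry FLUSH_INTERVAL microseconds later; the flusher (further down) compares jiffies against tag.expire to decide when to force idle lanes through. A user-space sketch of that tagging follows, substituting a plain microsecond counter for jiffies; now_us, tag_init() and due() are hypothetical names.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLUSH_INTERVAL_US 1000

struct tag {
	uint64_t arrival;
	uint64_t expire;
	uint64_t seq_num;
};

static uint64_t now_us; /* stand-in for jiffies */

static void tag_init(struct tag *t, uint64_t *next_seq)
{
	t->arrival = now_us;
	t->seq_num = (*next_seq)++;
	t->expire = t->arrival + FLUSH_INTERVAL_US;
}

/* Mirrors the flusher's time_before(cur_time, rctx->tag.expire) gate. */
static bool due(const struct tag *t)
{
	return now_us >= t->expire;
}

int main(void)
{
	uint64_t seq = 0;
	struct tag t;

	tag_init(&t, &seq);
	now_us += 500;
	printf("due after 500us?  %d\n", due(&t)); /* 0 */
	now_us += 600;
	printf("due after 1100us? %d\n", due(&t)); /* 1 */
	return 0;
}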
-
-static int sha512_mb_update(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, nbytes;
-
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk))
- rctx->flag |= HASH_DONE;
-
- /* submit */
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, HASH_UPDATE);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
-
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_finup(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct ahash_request *req = cast_mcryptd_ctx_to_req(rctx);
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0, flag = HASH_UPDATE, nbytes;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- nbytes = crypto_ahash_walk_first(req, &rctx->walk);
-
- if (nbytes < 0) {
- ret = nbytes;
- goto done;
- }
-
- if (crypto_ahash_walk_last(&rctx->walk)) {
- rctx->flag |= HASH_DONE;
- flag = HASH_LAST;
- }
-
- /* submit */
- rctx->flag |= HASH_FINAL;
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- sha512_mb_add_list(rctx, cstate);
-
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, rctx->walk.data,
- nbytes, flag);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_final(struct ahash_request *areq)
-{
- struct mcryptd_hash_request_ctx *rctx =
- container_of(areq, struct mcryptd_hash_request_ctx,
- areq);
- struct mcryptd_alg_cstate *cstate =
- this_cpu_ptr(sha512_mb_alg_state.alg_cstate);
-
- struct sha512_hash_ctx *sha_ctx;
- int ret = 0;
- u8 data;
-
- /* sanity check */
- if (rctx->tag.cpu != smp_processor_id()) {
- pr_err("mcryptd error: cpu clash\n");
- goto done;
- }
-
- /* need to init context */
- req_ctx_init(rctx, areq);
-
- rctx->flag |= HASH_DONE | HASH_FINAL;
-
- sha_ctx = (struct sha512_hash_ctx *) ahash_request_ctx(areq);
- /* flag HASH_FINAL and 0 data size */
- sha512_mb_add_list(rctx, cstate);
- kernel_fpu_begin();
- sha_ctx = sha512_ctx_mgr_submit(cstate, sha_ctx, &data, 0, HASH_LAST);
- kernel_fpu_end();
-
- /* check if anything is returned */
- if (!sha_ctx)
- return -EINPROGRESS;
-
- if (sha_ctx->error) {
- ret = sha_ctx->error;
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- goto done;
- }
-
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- ret = sha_finish_walk(&rctx, cstate, false);
- if (!rctx)
- return -EINPROGRESS;
-done:
- sha_complete_job(rctx, cstate, ret);
- return ret;
-}
-
-static int sha512_mb_export(struct ahash_request *areq, void *out)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(out, sctx, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_import(struct ahash_request *areq, const void *in)
-{
- struct sha512_hash_ctx *sctx = ahash_request_ctx(areq);
-
- memcpy(sctx, in, sizeof(*sctx));
-
- return 0;
-}
-
-static int sha512_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_ahash *mcryptd_tfm;
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
- struct mcryptd_hash_ctx *mctx;
-
- mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha512-mb",
- CRYPTO_ALG_INTERNAL,
- CRYPTO_ALG_INTERNAL);
- if (IS_ERR(mcryptd_tfm))
- return PTR_ERR(mcryptd_tfm);
- mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
- mctx->alg_state = &sha512_mb_alg_state;
- ctx->mcryptd_tfm = mcryptd_tfm;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- crypto_ahash_reqsize(&mcryptd_tfm->base));
-
- return 0;
-}
-
-static void sha512_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static int sha512_mb_areq_init_tfm(struct crypto_tfm *tfm)
-{
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct ahash_request) +
- sizeof(struct sha512_hash_ctx));
-
- return 0;
-}
-
-static void sha512_mb_areq_exit_tfm(struct crypto_tfm *tfm)
-{
- struct sha512_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-
- mcryptd_free_ahash(ctx->mcryptd_tfm);
-}
-
-static struct ahash_alg sha512_mb_areq_alg = {
- .init = sha512_mb_init,
- .update = sha512_mb_update,
- .final = sha512_mb_final,
- .finup = sha512_mb_finup,
- .export = sha512_mb_export,
- .import = sha512_mb_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "__sha512-mb",
- .cra_driver_name = "__intel_sha512-mb",
- .cra_priority = 100,
- /*
- * Use the ASYNC flag, as some buffers in the multi-buffer
- * algorithm may not have completed before the hashing
- * thread sleeps.
- */
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_INTERNAL,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_areq_alg.halg.base.cra_list),
- .cra_init = sha512_mb_areq_init_tfm,
- .cra_exit = sha512_mb_areq_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_hash_ctx),
- }
- }
-};
-
-static int sha512_mb_async_init(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_init(mcryptd_req);
-}
-
-static int sha512_mb_async_update(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_update(mcryptd_req);
-}
-
-static int sha512_mb_async_finup(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_finup(mcryptd_req);
-}
-
-static int sha512_mb_async_final(struct ahash_request *req)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
-
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_final(mcryptd_req);
-}
-
-static int sha512_mb_async_digest(struct ahash_request *req)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_digest(mcryptd_req);
-}
-
-static int sha512_mb_async_export(struct ahash_request *req, void *out)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- return crypto_ahash_export(mcryptd_req, out);
-}
-
-static int sha512_mb_async_import(struct ahash_request *req, const void *in)
-{
- struct ahash_request *mcryptd_req = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct sha512_mb_ctx *ctx = crypto_ahash_ctx(tfm);
- struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
- struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
- struct mcryptd_hash_request_ctx *rctx;
- struct ahash_request *areq;
-
- memcpy(mcryptd_req, req, sizeof(*req));
- ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
- rctx = ahash_request_ctx(mcryptd_req);
-
- areq = &rctx->areq;
-
- ahash_request_set_tfm(areq, child);
- ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req);
-
- return crypto_ahash_import(mcryptd_req, in);
-}
-
-static struct ahash_alg sha512_mb_async_alg = {
- .init = sha512_mb_async_init,
- .update = sha512_mb_async_update,
- .final = sha512_mb_async_final,
- .finup = sha512_mb_async_finup,
- .digest = sha512_mb_async_digest,
- .export = sha512_mb_async_export,
- .import = sha512_mb_async_import,
- .halg = {
- .digestsize = SHA512_DIGEST_SIZE,
- .statesize = sizeof(struct sha512_hash_ctx),
- .base = {
- .cra_name = "sha512",
- .cra_driver_name = "sha512_mb",
- /*
- * Low priority, since with few concurrent hash requests
- * this is extremely slow due to the flush delay. Users
- * whose workloads would benefit from this can request
- * it explicitly by driver name, or can increase its
- * priority at runtime using NETLINK_CRYPTO.
- */
- .cra_priority = 50,
- .cra_flags = CRYPTO_ALG_ASYNC,
- .cra_blocksize = SHA512_BLOCK_SIZE,
- .cra_module = THIS_MODULE,
- .cra_list = LIST_HEAD_INIT
- (sha512_mb_async_alg.halg.base.cra_list),
- .cra_init = sha512_mb_async_init_tfm,
- .cra_exit = sha512_mb_async_exit_tfm,
- .cra_ctxsize = sizeof(struct sha512_mb_ctx),
- .cra_alignmask = 0,
- },
- },
-};
-
-static unsigned long sha512_mb_flusher(struct mcryptd_alg_cstate *cstate)
-{
- struct mcryptd_hash_request_ctx *rctx;
- unsigned long cur_time;
- unsigned long next_flush = 0;
- struct sha512_hash_ctx *sha_ctx;
-
-
- cur_time = jiffies;
-
- while (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- if (time_before(cur_time, rctx->tag.expire))
- break;
- kernel_fpu_begin();
- sha_ctx = (struct sha512_hash_ctx *)
- sha512_ctx_mgr_flush(cstate);
- kernel_fpu_end();
- if (!sha_ctx) {
- pr_err("sha512_mb error: nothing got flushed for"
- " non-empty list\n");
- break;
- }
- rctx = cast_hash_to_mcryptd_ctx(sha_ctx);
- sha_finish_walk(&rctx, cstate, true);
- sha_complete_job(rctx, cstate, 0);
- }
-
- if (!list_empty(&cstate->work_list)) {
- rctx = list_entry(cstate->work_list.next,
- struct mcryptd_hash_request_ctx, waiter);
- /* get the earliest expire time and re-arm the flusher */
- next_flush = rctx->tag.expire;
- mcryptd_arm_flusher(cstate, get_delay(next_flush));
- }
- return next_flush;
-}
-
-static int __init sha512_mb_mod_init(void)
-{
-
- int cpu;
- int err;
- struct mcryptd_alg_cstate *cpu_state;
-
- /* check for dependent cpu features */
- if (!boot_cpu_has(X86_FEATURE_AVX2) ||
- !boot_cpu_has(X86_FEATURE_BMI2))
- return -ENODEV;
-
- /* initialize multibuffer structures */
- sha512_mb_alg_state.alg_cstate =
- alloc_percpu(struct mcryptd_alg_cstate);
-
- sha512_job_mgr_init = sha512_mb_mgr_init_avx2;
- sha512_job_mgr_submit = sha512_mb_mgr_submit_avx2;
- sha512_job_mgr_flush = sha512_mb_mgr_flush_avx2;
- sha512_job_mgr_get_comp_job = sha512_mb_mgr_get_comp_job_avx2;
-
- if (!sha512_mb_alg_state.alg_cstate)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- cpu_state->next_flush = 0;
- cpu_state->next_seq_num = 0;
- cpu_state->flusher_engaged = false;
- INIT_DELAYED_WORK(&cpu_state->flush, mcryptd_flusher);
- cpu_state->cpu = cpu;
- cpu_state->alg_state = &sha512_mb_alg_state;
- cpu_state->mgr = kzalloc(sizeof(struct sha512_ctx_mgr),
- GFP_KERNEL);
- if (!cpu_state->mgr) {
- err = -ENOMEM;
- goto err2;
- }
- sha512_ctx_mgr_init(cpu_state->mgr);
- INIT_LIST_HEAD(&cpu_state->work_list);
- spin_lock_init(&cpu_state->work_lock);
- }
- sha512_mb_alg_state.flusher = &sha512_mb_flusher;
-
- err = crypto_register_ahash(&sha512_mb_areq_alg);
- if (err)
- goto err2;
- err = crypto_register_ahash(&sha512_mb_async_alg);
- if (err)
- goto err1;
-
-
- return 0;
-err1:
- crypto_unregister_ahash(&sha512_mb_areq_alg);
-err2:
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
- return err;
-}
-
-static void __exit sha512_mb_mod_fini(void)
-{
- int cpu;
- struct mcryptd_alg_cstate *cpu_state;
-
- crypto_unregister_ahash(&sha512_mb_async_alg);
- crypto_unregister_ahash(&sha512_mb_areq_alg);
- for_each_possible_cpu(cpu) {
- cpu_state = per_cpu_ptr(sha512_mb_alg_state.alg_cstate, cpu);
- kfree(cpu_state->mgr);
- }
- free_percpu(sha512_mb_alg_state.alg_cstate);
-}
-
-module_init(sha512_mb_mod_init);
-module_exit(sha512_mb_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, multi buffer accelerated");
-
-MODULE_ALIAS("sha512");
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h b/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
deleted file mode 100644
index e5c465bd821e..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_ctx.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Header file for multi buffer SHA512 context
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _SHA_MB_CTX_INTERNAL_H
-#define _SHA_MB_CTX_INTERNAL_H
-
-#include "sha512_mb_mgr.h"
-
-#define HASH_UPDATE 0x00
-#define HASH_LAST 0x01
-#define HASH_DONE 0x02
-#define HASH_FINAL 0x04
-
-#define HASH_CTX_STS_IDLE 0x00
-#define HASH_CTX_STS_PROCESSING 0x01
-#define HASH_CTX_STS_LAST 0x02
-#define HASH_CTX_STS_COMPLETE 0x04
-
-enum hash_ctx_error {
- HASH_CTX_ERROR_NONE = 0,
- HASH_CTX_ERROR_INVALID_FLAGS = -1,
- HASH_CTX_ERROR_ALREADY_PROCESSING = -2,
- HASH_CTX_ERROR_ALREADY_COMPLETED = -3,
-};
-
-#define hash_ctx_user_data(ctx) ((ctx)->user_data)
-#define hash_ctx_digest(ctx) ((ctx)->job.result_digest)
-#define hash_ctx_processing(ctx) ((ctx)->status & HASH_CTX_STS_PROCESSING)
-#define hash_ctx_complete(ctx) ((ctx)->status == HASH_CTX_STS_COMPLETE)
-#define hash_ctx_status(ctx) ((ctx)->status)
-#define hash_ctx_error(ctx) ((ctx)->error)
-#define hash_ctx_init(ctx) \
- do { \
- (ctx)->error = HASH_CTX_ERROR_NONE; \
- (ctx)->status = HASH_CTX_STS_COMPLETE; \
- } while (0)
-
-/* Hash Constants and Typedefs */
-#define SHA512_DIGEST_LENGTH 8
-#define SHA512_LOG2_BLOCK_SIZE 7
-
-#define SHA512_PADLENGTHFIELD_SIZE 16
-
-#ifdef SHA_MB_DEBUG
-#define assert(expr) \
-do { \
- if (unlikely(!(expr))) { \
- printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-#else
-#define assert(expr) do {} while (0)
-#endif
-
-struct sha512_ctx_mgr {
- struct sha512_mb_mgr mgr;
-};
-
-/* typedef struct sha512_ctx_mgr sha512_ctx_mgr; */
-
-struct sha512_hash_ctx {
- /* Must be at struct offset 0 */
- struct job_sha512 job;
- /* status flag */
- int status;
- /* error flag */
- int error;
-
- uint64_t total_length;
- const void *incoming_buffer;
- uint32_t incoming_buffer_length;
- uint8_t partial_block_buffer[SHA512_BLOCK_SIZE * 2];
- uint32_t partial_block_buffer_length;
- void *user_data;
-};
-
-#endif
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h b/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
deleted file mode 100644
index 178f17eef382..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm manager
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __SHA_MB_MGR_H
-#define __SHA_MB_MGR_H
-
-#include <linux/types.h>
-
-#define NUM_SHA512_DIGEST_WORDS 8
-
-enum job_sts {STS_UNKNOWN = 0,
- STS_BEING_PROCESSED = 1,
- STS_COMPLETED = 2,
- STS_INTERNAL_ERROR = 3,
- STS_ERROR = 4
-};
-
-struct job_sha512 {
- u8 *buffer;
- u64 len;
- u64 result_digest[NUM_SHA512_DIGEST_WORDS] __aligned(32);
- enum job_sts status;
- void *user_data;
-};
-
-struct sha512_args_x4 {
- uint64_t digest[8][4];
- uint8_t *data_ptr[4];
-};
-
-struct sha512_lane_data {
- struct job_sha512 *job_in_lane;
-};
-
-struct sha512_mb_mgr {
- struct sha512_args_x4 args;
-
- uint64_t lens[4];
-
- /* each byte is index (0...7) of unused lanes */
- uint64_t unused_lanes;
- /* byte 4 is set to FF as a flag */
- struct sha512_lane_data ldata[4];
-};
-
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_submit_avx2(struct sha512_mb_mgr *state,
- struct job_sha512 *job);
-struct job_sha512 *sha512_mb_mgr_flush_avx2(struct sha512_mb_mgr *state);
-struct job_sha512 *sha512_mb_mgr_get_comp_job_avx2(struct sha512_mb_mgr *state);
-
-#endif
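
The unused_lanes field above is a stack of free-lane indices packed one per byte, with an 0xFF sentinel above them: the submit path pops the low byte (movzb/shr $8) and the flush path pushes a freed index back (shl $8/or). Here is a standalone sketch of that encoding, assuming the manager is seeded with 0xFF03020100 (the init routine that establishes this lies outside this excerpt).

#include <assert.h>
#include <stdint.h>

#define EMPTY_SENTINEL 0xFF03020100ull /* all 4 lanes free */

static unsigned pop_lane(uint64_t *ul)
{
	unsigned lane = *ul & 0xFF;

	*ul >>= 8;
	return lane;
}

static void push_lane(uint64_t *ul, unsigned lane)
{
	*ul = (*ul << 8) | lane;
}

/* Mirrors "bt $32+7": byte 4 is 0xFF only when every lane is free. */
static int all_lanes_free(uint64_t ul)
{
	return (ul >> (32 + 7)) & 1;
}

int main(void)
{
	uint64_t ul = EMPTY_SENTINEL;

	assert(all_lanes_free(ul));
	assert(pop_lane(&ul) == 0);
	assert(pop_lane(&ul) == 1);
	assert(!all_lanes_free(ul));
	push_lane(&ul, 1);
	push_lane(&ul, 0);
	assert(all_lanes_free(ul) && ul == EMPTY_SENTINEL);
	return 0;
}

When all four lanes are occupied the stack has popped down to the bare 0xFF sentinel, which is exactly what the submit routine's "cmp $0xFF, unused_lanes" test detects before kicking off a round of hashing.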
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
deleted file mode 100644
index cf2636d4c9ba..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_datastruct.S
+++ /dev/null
@@ -1,281 +0,0 @@
-/*
- * Header file for multi buffer SHA512 algorithm data structure
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# Macros for defining data structures
-
-# Usage example
-
-#START_FIELDS # JOB_AES
-### name size align
-#FIELD _plaintext, 8, 8 # pointer to plaintext
-#FIELD _ciphertext, 8, 8 # pointer to ciphertext
-#FIELD _IV, 16, 8 # IV
-#FIELD _keys, 8, 8 # pointer to keys
-#FIELD _len, 4, 4 # length in bytes
-#FIELD _status, 4, 4 # status enumeration
-#FIELD _user_data, 8, 8 # pointer to user data
-#UNION _union, size1, align1, \
-# size2, align2, \
-# size3, align3, \
-# ...
-#END_FIELDS
-#%assign _JOB_AES_size _FIELD_OFFSET
-#%assign _JOB_AES_align _STRUCT_ALIGN
-
-#########################################################################
-
-# Alternate "struc-like" syntax:
-# STRUCT job_aes2
-# RES_Q .plaintext, 1
-# RES_Q .ciphertext, 1
-# RES_DQ .IV, 1
-# RES_B .nested, _JOB_AES_SIZE, _JOB_AES_ALIGN
-# RES_U .union, size1, align1, \
-# size2, align2, \
-# ...
-# ENDSTRUCT
-# # Following only needed if nesting
-# %assign job_aes2_size _FIELD_OFFSET
-# %assign job_aes2_align _STRUCT_ALIGN
-#
-# RES_* macros take a name, a count and an optional alignment.
-# The count is in terms of the base size of the macro, and the
-# default alignment is the base size.
-# The macros are:
-# Macro Base size
-# RES_B 1
-# RES_W 2
-# RES_D 4
-# RES_Q 8
-# RES_DQ 16
-# RES_Y 32
-# RES_Z 64
-#
-# RES_U defines a union. Its arguments are a name and two or more
-# pairs of "size, alignment"
-#
-# The two assigns are only needed if this structure is being nested
-# within another. Even if the assigns are not done, one can still use
-# STRUCT_NAME_size as the size of the structure.
-#
-# Note that for nesting, you still need to assign to STRUCT_NAME_size.
-#
-# The differences between this and using "struc" directly are that each
-# type is implicitly aligned to its natural length (although this can be
-# over-ridden with an explicit third parameter), and that the structure
-# is padded at the end to its overall alignment.
-#
-
-#########################################################################
-
-#ifndef _DATASTRUCT_ASM_
-#define _DATASTRUCT_ASM_
-
-#define PTR_SZ 8
-#define SHA512_DIGEST_WORD_SIZE 8
-#define SHA512_MB_MGR_NUM_LANES_AVX2 4
-#define NUM_SHA512_DIGEST_WORDS 8
-#define SZ4 4*SHA512_DIGEST_WORD_SIZE
-#define ROUNDS 80*SZ4
-#define SHA512_DIGEST_ROW_SIZE (SHA512_MB_MGR_NUM_LANES_AVX2 * 8)
-
-# START_FIELDS
-.macro START_FIELDS
- _FIELD_OFFSET = 0
- _STRUCT_ALIGN = 0
-.endm
-
-# FIELD name size align
-.macro FIELD name size align
- _FIELD_OFFSET = (_FIELD_OFFSET + (\align) - 1) & (~ ((\align)-1))
- \name = _FIELD_OFFSET
- _FIELD_OFFSET = _FIELD_OFFSET + (\size)
-.if (\align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = \align
-.endif
-.endm
-
-# END_FIELDS
-.macro END_FIELDS
- _FIELD_OFFSET = (_FIELD_OFFSET + _STRUCT_ALIGN-1) & (~ (_STRUCT_ALIGN-1))
-.endm
-
-.macro STRUCT p1
-START_FIELDS
-.struc \p1
-.endm
-
-.macro ENDSTRUCT
- tmp = _FIELD_OFFSET
- END_FIELDS
- tmp = (_FIELD_OFFSET - ##tmp)
-.if (tmp > 0)
- .lcomm tmp
-.endif
-.endstruc
-.endm
-
-## RES_int name size align
-.macro RES_int p1 p2 p3
- name = \p1
- size = \p2
- align = .\p3
-
- _FIELD_OFFSET = (_FIELD_OFFSET + (align) - 1) & (~ ((align)-1))
-.align align
-.lcomm name size
- _FIELD_OFFSET = _FIELD_OFFSET + (size)
-.if (align > _STRUCT_ALIGN)
- _STRUCT_ALIGN = align
-.endif
-.endm
-
-# macro RES_B name, size [, align]
-.macro RES_B _name, _size, _align=1
-RES_int _name _size _align
-.endm
-
-# macro RES_W name, size [, align]
-.macro RES_W _name, _size, _align=2
-RES_int _name 2*(_size) _align
-.endm
-
-# macro RES_D name, size [, align]
-.macro RES_D _name, _size, _align=4
-RES_int _name 4*(_size) _align
-.endm
-
-# macro RES_Q name, size [, align]
-.macro RES_Q _name, _size, _align=8
-RES_int _name 8*(_size) _align
-.endm
-
-# macro RES_DQ name, size [, align]
-.macro RES_DQ _name, _size, _align=16
-RES_int _name 16*(_size) _align
-.endm
-
-# macro RES_Y name, size [, align]
-.macro RES_Y _name, _size, _align=32
-RES_int _name 32*(_size) _align
-.endm
-
-# macro RES_Z name, size [, align]
-.macro RES_Z _name, _size, _align=64
-RES_int _name 64*(_size) _align
-.endm
-
-#endif
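
The FIELD/END_FIELDS machinery above is ordinary align-up arithmetic: round the running offset up to the field's alignment, advance by its size, and track the largest alignment seen so END_FIELDS can pad the struct to it. As a plausibility check, this user-space sketch replays the JOB_SHA512 layout defined at the end of this file.

#include <stdio.h>

static unsigned off, struct_align;

static unsigned field(unsigned size, unsigned align)
{
	unsigned pos;

	off = (off + align - 1) & ~(align - 1); /* align up */
	pos = off;
	off += size;
	if (align > struct_align)
		struct_align = align;
	return pos;
}

int main(void)
{
	printf("_buffer        = %u\n", field(8, 8));   /* 0   */
	printf("_len           = %u\n", field(8, 8));   /* 8   */
	printf("_result_digest = %u\n", field(64, 32)); /* 32  */
	printf("_status        = %u\n", field(4, 4));   /* 96  */
	printf("_user_data     = %u\n", field(8, 8));   /* 104 */

	/* END_FIELDS pads the struct to its overall alignment. */
	off = (off + struct_align - 1) & ~(struct_align - 1);
	printf("_JOB_SHA512_size = %u (align %u)\n", off, struct_align);
	return 0;
}

The 32-byte alignment of _result_digest matches the __aligned(32) attribute on result_digest in struct job_sha512, which is what lets the C and assembly sides agree on offsets.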
-
-###################################################################
-### Define SHA512 Out Of Order Data Structures
-###################################################################
-
-START_FIELDS # LANE_DATA
-### name size align
-FIELD _job_in_lane, 8, 8 # pointer to job object
-END_FIELDS
-
- _LANE_DATA_size = _FIELD_OFFSET
- _LANE_DATA_align = _STRUCT_ALIGN
-
-####################################################################
-
-START_FIELDS # SHA512_ARGS_X4
-### name size align
-FIELD _digest, 8*8*4, 4 # transposed digest
-FIELD _data_ptr, 8*4, 8 # array of pointers to data
-END_FIELDS
-
- _SHA512_ARGS_X4_size = _FIELD_OFFSET
- _SHA512_ARGS_X4_align = _STRUCT_ALIGN
-
-#####################################################################
-
-START_FIELDS # MB_MGR
-### name size align
-FIELD _args, _SHA512_ARGS_X4_size, _SHA512_ARGS_X4_align
-FIELD _lens, 8*4, 8
-FIELD _unused_lanes, 8, 8
-FIELD _ldata, _LANE_DATA_size*4, _LANE_DATA_align
-END_FIELDS
-
- _MB_MGR_size = _FIELD_OFFSET
- _MB_MGR_align = _STRUCT_ALIGN
-
-_args_digest = _args + _digest
-_args_data_ptr = _args + _data_ptr
-
-#######################################################################
-
-#######################################################################
-#### Define constants
-#######################################################################
-
-#define STS_UNKNOWN 0
-#define STS_BEING_PROCESSED 1
-#define STS_COMPLETED 2
-
-#######################################################################
-#### Define JOB_SHA512 structure
-#######################################################################
-
-START_FIELDS # JOB_SHA512
-### name size align
-FIELD _buffer, 8, 8 # pointer to buffer
-FIELD _len, 8, 8 # length in bytes
-FIELD _result_digest, 8*8, 32 # Digest (output)
-FIELD _status, 4, 4
-FIELD _user_data, 8, 8
-END_FIELDS
-
- _JOB_SHA512_size = _FIELD_OFFSET
- _JOB_SHA512_align = _STRUCT_ALIGN
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
deleted file mode 100644
index 7c629caebc05..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_flush_avx2.S
+++ /dev/null
@@ -1,297 +0,0 @@
-/*
- * Flush routine for SHA512 multibuffer
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-# LINUX register definitions
-#define arg1 %rdi
-#define arg2 %rsi
-
-# idx needs to be other than arg1, arg2, rbx, r12
-#define idx %rdx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-
-#define unused_lanes %rbx
-#define lane_data %rbx
-#define tmp2 %rbx
-
-#define job_rax %rax
-#define tmp1 %rax
-#define size_offset %rax
-#define tmp %rax
-#define start_offset %rax
-
-#define tmp3 arg1
-
-#define extra_blocks arg2
-#define p arg2
-
-#define tmp4 %r8
-#define lens0 %r8
-
-#define lens1 %r9
-#define lens2 %r10
-#define lens3 %r11
-
-.macro LABEL prefix n
-\prefix\n\():
-.endm
-
-.macro JNE_SKIP i
-jne skip_\i
-.endm
-
-.altmacro
-.macro SET_OFFSET _offset
-offset = \_offset
-.endm
-.noaltmacro
-
-# JOB* sha512_mb_mgr_flush_avx2(MB_MGR *state)
-# arg 1 : rcx : state
-ENTRY(sha512_mb_mgr_flush_avx2)
- FRAME_BEGIN
- push %rbx
-
- # If bit (32+7) is set, then all lanes are empty
- mov _unused_lanes(state), unused_lanes
- bt $32+7, unused_lanes
- jc return_null
-
- # find a lane with a non-null job
- xor idx, idx
- offset = (_ldata + 1*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne one(%rip), idx
- offset = (_ldata + 2*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne two(%rip), idx
- offset = (_ldata + 3*_LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
- cmovne three(%rip), idx
-
- # copy idx to empty lanes
-copy_lane_data:
- offset = (_args + _data_ptr)
- mov offset(state,idx,8), tmp
-
- I = 0
-.rep 4
- offset = (_ldata + I * _LANE_DATA_size + _job_in_lane)
- cmpq $0, offset(state)
-.altmacro
- JNE_SKIP %I
- offset = (_args + _data_ptr + 8*I)
- mov tmp, offset(state)
- offset = (_lens + 8*I +4)
- movl $0xFFFFFFFF, offset(state)
-LABEL skip_ %I
- I = (I+1)
-.noaltmacro
-.endr
-
- # Find min length
- mov _lens + 0*8(state),lens0
- mov lens0,idx
- mov _lens + 1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens + 2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens + 3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2, lens0
- sub len2, lens1
- sub len2, lens2
- sub len2, lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest+0*32(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
-return:
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_flush_avx2)
-.align 16
-
-ENTRY(sha512_mb_mgr_get_comp_job_avx2)
- push %rbx
-
- mov _unused_lanes(state), unused_lanes
- bt $(32+7), unused_lanes
- jc .return_null
-
- # Find min length
- mov _lens(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1,idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- test $~0xF,idx
- jnz .return_null
- and $0xF,idx
-
- #process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- mov _unused_lanes(state), unused_lanes
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF, _lens+4(state, idx, 8)
-
- vmovq _args_digest(state, idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state, idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state, idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state, idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state, idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state, idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state, idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state, idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest+0*16(job_rax)
- vmovdqu %xmm1, _result_digest+1*16(job_rax)
- vmovdqu %xmm2, _result_digest+2*16(job_rax)
- vmovdqu %xmm3, _result_digest+3*16(job_rax)
-
- pop %rbx
-
- ret
-
-.return_null:
- xor job_rax, job_rax
- pop %rbx
- ret
-ENDPROC(sha512_mb_mgr_get_comp_job_avx2)
-
-.section .rodata.cst8.one, "aM", @progbits, 8
-.align 8
-one:
-.quad 1
-
-.section .rodata.cst8.two, "aM", @progbits, 8
-.align 8
-two:
-.quad 2
-
-.section .rodata.cst8.three, "aM", @progbits, 8
-.align 8
-three:
-.quad 3
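
The min-length search in the flush and get_comp_job routines above relies on how each 64-bit lens[] entry is packed: the remaining block count lives in the high 32 bits (written by submit at _lens+4) and the lane index in the low nibble, so a single unsigned compare finds both the shortest job and the lane it sits in. A sketch of that packing, assuming the layout implied by the "and $0xF" / "shr $32" sequence:

#include <stdint.h>
#include <stdio.h>

#define NLANES 4

static uint64_t pack(uint32_t blocks, unsigned lane)
{
	return ((uint64_t)blocks << 32) | lane;
}

int main(void)
{
	uint64_t lens[NLANES] = {
		pack(5, 0), pack(3, 1), pack(9, 2), pack(3, 3),
	};
	uint64_t min = lens[0];
	int i;

	for (i = 1; i < NLANES; i++)
		if (lens[i] < min) /* the cmovb chain in the assembly */
			min = lens[i];

	printf("lane %u, %u blocks\n",
	       (unsigned)(min & 0xF), (uint32_t)(min >> 32));

	/* All four lanes then advance together by the minimum count;
	 * masking the low byte keeps each lane's own index intact. */
	for (i = 0; i < NLANES; i++)
		lens[i] -= min & ~0xFFull;
	return 0;
}

Poisoning a completed lane with 0xFFFFFFFF in the high dword, as the assembly does, simply makes it lose every subsequent min comparison.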
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S b/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
deleted file mode 100644
index 4ba709ba78e5..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_submit_avx2.S
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Buffer submit code for multi buffer SHA512 algorithm
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/linkage.h>
-#include <asm/frame.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-.extern sha512_x4_avx2
-
-#define arg1 %rdi
-#define arg2 %rsi
-
-#define idx %rdx
-#define last_len %rdx
-
-#define size_offset %rcx
-#define tmp2 %rcx
-
-# Common definitions
-#define state arg1
-#define job arg2
-#define len2 arg2
-#define p2 arg2
-
-#define p %r11
-#define start_offset %r11
-
-#define unused_lanes %rbx
-
-#define job_rax %rax
-#define len %rax
-
-#define lane %r12
-#define tmp3 %r12
-#define lens3 %r12
-
-#define extra_blocks %r8
-#define lens0 %r8
-
-#define tmp %r9
-#define lens1 %r9
-
-#define lane_data %r10
-#define lens2 %r10
-
-#define DWORD_len %eax
-
-# JOB* sha512_mb_mgr_submit_avx2(MB_MGR *state, JOB *job)
-# arg 1 : rcx : state
-# arg 2 : rdx : job
-ENTRY(sha512_mb_mgr_submit_avx2)
- FRAME_BEGIN
- push %rbx
- push %r12
-
- mov _unused_lanes(state), unused_lanes
- movzb %bl,lane
- shr $8, unused_lanes
- imul $_LANE_DATA_size, lane,lane_data
- movl $STS_BEING_PROCESSED, _status(job)
- lea _ldata(state, lane_data), lane_data
- mov unused_lanes, _unused_lanes(state)
- movl _len(job), DWORD_len
-
- mov job, _job_in_lane(lane_data)
- movl DWORD_len,_lens+4(state , lane, 8)
-
- # Load digest words from result_digest
- vmovdqu _result_digest+0*16(job), %xmm0
- vmovdqu _result_digest+1*16(job), %xmm1
- vmovdqu _result_digest+2*16(job), %xmm2
- vmovdqu _result_digest+3*16(job), %xmm3
-
- vmovq %xmm0, _args_digest(state, lane, 8)
- vpextrq $1, %xmm0, _args_digest+1*32(state , lane, 8)
- vmovq %xmm1, _args_digest+2*32(state , lane, 8)
- vpextrq $1, %xmm1, _args_digest+3*32(state , lane, 8)
- vmovq %xmm2, _args_digest+4*32(state , lane, 8)
- vpextrq $1, %xmm2, _args_digest+5*32(state , lane, 8)
- vmovq %xmm3, _args_digest+6*32(state , lane, 8)
- vpextrq $1, %xmm3, _args_digest+7*32(state , lane, 8)
-
- mov _buffer(job), p
- mov p, _args_data_ptr(state, lane, 8)
-
- cmp $0xFF, unused_lanes
- jne return_null
-
-start_loop:
-
- # Find min length
- mov _lens+0*8(state),lens0
- mov lens0,idx
- mov _lens+1*8(state),lens1
- cmp idx,lens1
- cmovb lens1, idx
- mov _lens+2*8(state),lens2
- cmp idx,lens2
- cmovb lens2,idx
- mov _lens+3*8(state),lens3
- cmp idx,lens3
- cmovb lens3,idx
- mov idx,len2
- and $0xF,idx
- and $~0xFF,len2
- jz len_is_0
-
- sub len2,lens0
- sub len2,lens1
- sub len2,lens2
- sub len2,lens3
- shr $32,len2
- mov lens0, _lens + 0*8(state)
- mov lens1, _lens + 1*8(state)
- mov lens2, _lens + 2*8(state)
- mov lens3, _lens + 3*8(state)
-
- # "state" and "args" are the same address, arg1
- # len is arg2
- call sha512_x4_avx2
- # state and idx are intact
-
-len_is_0:
-
- # process completed job "idx"
- imul $_LANE_DATA_size, idx, lane_data
- lea _ldata(state, lane_data), lane_data
-
- mov _job_in_lane(lane_data), job_rax
- mov _unused_lanes(state), unused_lanes
- movq $0, _job_in_lane(lane_data)
- movl $STS_COMPLETED, _status(job_rax)
- shl $8, unused_lanes
- or idx, unused_lanes
- mov unused_lanes, _unused_lanes(state)
-
- movl $0xFFFFFFFF,_lens+4(state,idx,8)
- vmovq _args_digest+0*32(state , idx, 8), %xmm0
- vpinsrq $1, _args_digest+1*32(state , idx, 8), %xmm0, %xmm0
- vmovq _args_digest+2*32(state , idx, 8), %xmm1
- vpinsrq $1, _args_digest+3*32(state , idx, 8), %xmm1, %xmm1
- vmovq _args_digest+4*32(state , idx, 8), %xmm2
- vpinsrq $1, _args_digest+5*32(state , idx, 8), %xmm2, %xmm2
- vmovq _args_digest+6*32(state , idx, 8), %xmm3
- vpinsrq $1, _args_digest+7*32(state , idx, 8), %xmm3, %xmm3
-
- vmovdqu %xmm0, _result_digest + 0*16(job_rax)
- vmovdqu %xmm1, _result_digest + 1*16(job_rax)
- vmovdqu %xmm2, _result_digest + 2*16(job_rax)
- vmovdqu %xmm3, _result_digest + 3*16(job_rax)
-
-return:
- pop %r12
- pop %rbx
- FRAME_END
- ret
-
-return_null:
- xor job_rax, job_rax
- jmp return
-ENDPROC(sha512_mb_mgr_submit_avx2)
-
-/* UNUSED?
-.section .rodata.cst16, "aM", @progbits, 16
-.align 16
-H0: .int 0x6a09e667
-H1: .int 0xbb67ae85
-H2: .int 0x3c6ef372
-H3: .int 0xa54ff53a
-H4: .int 0x510e527f
-H5: .int 0x9b05688c
-H6: .int 0x1f83d9ab
-H7: .int 0x5be0cd19
-*/
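The submit path above treats unused_lanes as a byte-wide stack of free lane numbers: a submit pops the low byte (movzb plus shr $8), a completed job is pushed back with shl $8 plus or, and comparing the remainder against the 0xFF filler detects that every real lane has been handed out, at which point the 4-wide hash can run. A hedged C model of that bookkeeping (illustrative, not the kernel's code):

	#include <stdint.h>

	#define ALL_BUSY 0xFF	/* filler left behind once all lanes are out */

	static int pop_lane(uint64_t *unused_lanes)	/* submit side */
	{
		int lane = (int)(*unused_lanes & 0xFF);

		*unused_lanes >>= 8;	/* caller checks for ALL_BUSY next */
		return lane;
	}

	static void push_lane(uint64_t *unused_lanes, int lane)	/* completion side */
	{
		*unused_lanes = (*unused_lanes << 8) | (uint64_t)lane;
	}
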
diff --git a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S b/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
deleted file mode 100644
index e22e907643a6..000000000000
--- a/arch/x86/crypto/sha512-mb/sha512_x4_avx2.S
+++ /dev/null
@@ -1,531 +0,0 @@
-/*
- * Multi-buffer SHA512 algorithm hash compute routine
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
- * BSD LICENSE
- *
- * Copyright(c) 2016 Intel Corporation.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-# code to compute quad SHA512 using AVX2
-# use YMMs to tackle the larger digest size
-# outer calling routine takes care of save and restore of XMM registers
-# Logic designed/laid out by JDG
-
-# Function clobbers: rax, rcx, rdx, rbx, rsi, rdi, r9-r15; ymm0-15
-# Stack must be aligned to 32 bytes before call
-# Linux clobbers: rax rbx rcx rsi r8 r9 r10 r11 r12
-# Linux preserves: rcx rdx rdi rbp r13 r14 r15
-# clobbers ymm0-15
-
-#include <linux/linkage.h>
-#include "sha512_mb_mgr_datastruct.S"
-
-arg1 = %rdi
-arg2 = %rsi
-
-# Common definitions
-STATE = arg1
-INP_SIZE = arg2
-
-IDX = %rax
-ROUND = %rbx
-TBL = %r8
-
-inp0 = %r9
-inp1 = %r10
-inp2 = %r11
-inp3 = %r12
-
-a = %ymm0
-b = %ymm1
-c = %ymm2
-d = %ymm3
-e = %ymm4
-f = %ymm5
-g = %ymm6
-h = %ymm7
-
-a0 = %ymm8
-a1 = %ymm9
-a2 = %ymm10
-
-TT0 = %ymm14
-TT1 = %ymm13
-TT2 = %ymm12
-TT3 = %ymm11
-TT4 = %ymm10
-TT5 = %ymm9
-
-T1 = %ymm14
-TMP = %ymm15
-
-# Define stack usage
-STACK_SPACE1 = SZ4*16 + NUM_SHA512_DIGEST_WORDS*SZ4 + 24
-
-#define VMOVPD vmovupd
-_digest = SZ4*16
-
-# transpose r0, r1, r2, r3, t0, t1
-# "transpose" data in {r0..r3} using temps {t0..t3}
-# Input looks like: {r0 r1 r2 r3}
-# r0 = {a7 a6 a5 a4 a3 a2 a1 a0}
-# r1 = {b7 b6 b5 b4 b3 b2 b1 b0}
-# r2 = {c7 c6 c5 c4 c3 c2 c1 c0}
-# r3 = {d7 d6 d5 d4 d3 d2 d1 d0}
-#
-# output looks like: {t0 r1 r0 r3}
-# t0 = {d1 d0 c1 c0 b1 b0 a1 a0}
-# r1 = {d3 d2 c3 c2 b3 b2 a3 a2}
-# r0 = {d5 d4 c5 c4 b5 b4 a5 a4}
-# r3 = {d7 d6 c7 c6 b7 b6 a7 a6}
-
-.macro TRANSPOSE r0 r1 r2 r3 t0 t1
- vshufps $0x44, \r1, \r0, \t0 # t0 = {b5 b4 a5 a4 b1 b0 a1 a0}
- vshufps $0xEE, \r1, \r0, \r0 # r0 = {b7 b6 a7 a6 b3 b2 a3 a2}
- vshufps $0x44, \r3, \r2, \t1 # t1 = {d5 d4 c5 c4 d1 d0 c1 c0}
- vshufps $0xEE, \r3, \r2, \r2 # r2 = {d7 d6 c7 c6 d3 d2 c3 c2}
-
- vperm2f128 $0x20, \r2, \r0, \r1 # h6...a6
- vperm2f128 $0x31, \r2, \r0, \r3 # h2...a2
- vperm2f128 $0x31, \t1, \t0, \r0 # h5...a5
- vperm2f128 $0x20, \t1, \t0, \t0 # h1...a1
-.endm
-
-.macro ROTATE_ARGS
-TMP_ = h
-h = g
-g = f
-f = e
-e = d
-d = c
-c = b
-b = a
-a = TMP_
-.endm
-
-# PRORQ reg, imm, tmp
-# packed-rotate-right-double
-# does a rotate by doing two shifts and an or
-.macro _PRORQ reg imm tmp
- vpsllq $(64-\imm),\reg,\tmp
- vpsrlq $\imm,\reg, \reg
- vpor \tmp,\reg, \reg
-.endm
-
-# non-destructive
-# PRORQ_nd reg, imm, tmp, src
-.macro _PRORQ_nd reg imm tmp src
- vpsllq $(64-\imm), \src, \tmp
- vpsrlq $\imm, \src, \reg
- vpor \tmp, \reg, \reg
-.endm
-
-# PRORQ dst/src, amt
-.macro PRORQ reg imm
- _PRORQ \reg, \imm, TMP
-.endm
-
-# PRORQ_nd dst, src, amt
-.macro PRORQ_nd reg tmp imm
- _PRORQ_nd \reg, \imm, TMP, \tmp
-.endm
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_00_15 _T1 i
- PRORQ_nd a0, e, (18-14) # sig1: a0 = (e >> 4)
-
- vpxor g, f, a2 # ch: a2 = f^g
- vpand e,a2, a2 # ch: a2 = (f^g)&e
- vpxor g, a2, a2 # a2 = ch
-
- PRORQ_nd a1,e,41 # sig1: a1 = (e >> 25)
-
- offset = SZ4*(\i & 0xf)
- vmovdqu \_T1,offset(%rsp)
- vpaddq (TBL,ROUND,1), \_T1, \_T1 # T1 = W + K
- vpxor e,a0, a0 # sig1: a0 = e ^ (e >> 5)
- PRORQ a0, 14 # sig1: a0 = (e >> 6) ^ (e >> 11)
- vpaddq a2, h, h # h = h + ch
- PRORQ_nd a2,a,6 # sig0: a2 = (a >> 11)
- vpaddq \_T1,h, h # h = h + ch + W + K
- vpxor a1, a0, a0 # a0 = sigma1
- vmovdqu a,\_T1
- PRORQ_nd a1,a,39 # sig0: a1 = (a >> 22)
- vpxor c, \_T1, \_T1 # maj: T1 = a^c
- add $SZ4, ROUND # ROUND++
- vpand b, \_T1, \_T1 # maj: T1 = (a^c)&b
- vpaddq a0, h, h
- vpaddq h, d, d
- vpxor a, a2, a2 # sig0: a2 = a ^ (a >> 11)
- PRORQ a2,28 # sig0: a2 = (a >> 2) ^ (a >> 13)
- vpxor a1, a2, a2 # a2 = sig0
- vpand c, a, a1 # maj: a1 = a&c
- vpor \_T1, a1, a1 # a1 = maj
- vpaddq a1, h, h # h = h + ch + W + K + maj
- vpaddq a2, h, h # h = h + ch + W + K + maj + sigma0
- ROTATE_ARGS
-.endm
-
-
-#; arguments passed implicitly in preprocessor symbols i, a...h
-.macro ROUND_16_XX _T1 i
- vmovdqu SZ4*((\i-15)&0xf)(%rsp), \_T1
- vmovdqu SZ4*((\i-2)&0xf)(%rsp), a1
- vmovdqu \_T1, a0
- PRORQ \_T1,7
- vmovdqu a1, a2
- PRORQ a1,42
- vpxor a0, \_T1, \_T1
- PRORQ \_T1, 1
- vpxor a2, a1, a1
- PRORQ a1, 19
- vpsrlq $7, a0, a0
- vpxor a0, \_T1, \_T1
- vpsrlq $6, a2, a2
- vpxor a2, a1, a1
- vpaddq SZ4*((\i-16)&0xf)(%rsp), \_T1, \_T1
- vpaddq SZ4*((\i-7)&0xf)(%rsp), a1, a1
- vpaddq a1, \_T1, \_T1
-
- ROUND_00_15 \_T1,\i
-.endm
-
-
-# void sha512_x4_avx2(void *STATE, const int INP_SIZE)
-# arg 1 : STATE : pointer to input data
-# arg 2 : INP_SIZE : size of data in blocks (assumed >= 1)
-ENTRY(sha512_x4_avx2)
- # general registers preserved in outer calling routine
- # outer calling routine saves all the XMM registers
- # save callee-saved clobbered registers to comply with C function ABI
- push %r12
- push %r13
- push %r14
- push %r15
-
- sub $STACK_SPACE1, %rsp
-
- # Load the pre-transposed incoming digest.
- vmovdqu 0*SHA512_DIGEST_ROW_SIZE(STATE),a
- vmovdqu 1*SHA512_DIGEST_ROW_SIZE(STATE),b
- vmovdqu 2*SHA512_DIGEST_ROW_SIZE(STATE),c
- vmovdqu 3*SHA512_DIGEST_ROW_SIZE(STATE),d
- vmovdqu 4*SHA512_DIGEST_ROW_SIZE(STATE),e
- vmovdqu 5*SHA512_DIGEST_ROW_SIZE(STATE),f
- vmovdqu 6*SHA512_DIGEST_ROW_SIZE(STATE),g
- vmovdqu 7*SHA512_DIGEST_ROW_SIZE(STATE),h
-
- lea K512_4(%rip),TBL
-
- # load the address of each of the 4 message lanes
- # getting ready to transpose input onto stack
- mov _data_ptr+0*PTR_SZ(STATE),inp0
- mov _data_ptr+1*PTR_SZ(STATE),inp1
- mov _data_ptr+2*PTR_SZ(STATE),inp2
- mov _data_ptr+3*PTR_SZ(STATE),inp3
-
- xor IDX, IDX
-lloop:
- xor ROUND, ROUND
-
- # save old digest
- vmovdqu a, _digest(%rsp)
- vmovdqu b, _digest+1*SZ4(%rsp)
- vmovdqu c, _digest+2*SZ4(%rsp)
- vmovdqu d, _digest+3*SZ4(%rsp)
- vmovdqu e, _digest+4*SZ4(%rsp)
- vmovdqu f, _digest+5*SZ4(%rsp)
- vmovdqu g, _digest+6*SZ4(%rsp)
- vmovdqu h, _digest+7*SZ4(%rsp)
- i = 0
-.rep 4
- vmovdqu PSHUFFLE_BYTE_FLIP_MASK(%rip), TMP
- VMOVPD i*32(inp0, IDX), TT2
- VMOVPD i*32(inp1, IDX), TT1
- VMOVPD i*32(inp2, IDX), TT4
- VMOVPD i*32(inp3, IDX), TT3
- TRANSPOSE TT2, TT1, TT4, TT3, TT0, TT5
- vpshufb TMP, TT0, TT0
- vpshufb TMP, TT1, TT1
- vpshufb TMP, TT2, TT2
- vpshufb TMP, TT3, TT3
- ROUND_00_15 TT0,(i*4+0)
- ROUND_00_15 TT1,(i*4+1)
- ROUND_00_15 TT2,(i*4+2)
- ROUND_00_15 TT3,(i*4+3)
- i = (i+1)
-.endr
- add $128, IDX
-
- i = (i*4)
-
- jmp Lrounds_16_xx
-.align 16
-Lrounds_16_xx:
-.rep 16
- ROUND_16_XX T1, i
- i = (i+1)
-.endr
- cmp $0xa00,ROUND
- jb Lrounds_16_xx
-
- # add old digest
- vpaddq _digest(%rsp), a, a
- vpaddq _digest+1*SZ4(%rsp), b, b
- vpaddq _digest+2*SZ4(%rsp), c, c
- vpaddq _digest+3*SZ4(%rsp), d, d
- vpaddq _digest+4*SZ4(%rsp), e, e
- vpaddq _digest+5*SZ4(%rsp), f, f
- vpaddq _digest+6*SZ4(%rsp), g, g
- vpaddq _digest+7*SZ4(%rsp), h, h
-
- sub $1, INP_SIZE # unit is blocks
- jne lloop
-
- # write back to memory (state object) the transposed digest
- vmovdqu a, 0*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu b, 1*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu c, 2*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu d, 3*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu e, 4*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu f, 5*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu g, 6*SHA512_DIGEST_ROW_SIZE(STATE)
- vmovdqu h, 7*SHA512_DIGEST_ROW_SIZE(STATE)
-
- # update input data pointers
- add IDX, inp0
- mov inp0, _data_ptr+0*PTR_SZ(STATE)
- add IDX, inp1
- mov inp1, _data_ptr+1*PTR_SZ(STATE)
- add IDX, inp2
- mov inp2, _data_ptr+2*PTR_SZ(STATE)
- add IDX, inp3
- mov inp3, _data_ptr+3*PTR_SZ(STATE)
-
- #;;;;;;;;;;;;;;;
- #; Postamble
- add $STACK_SPACE1, %rsp
- # restore callee-saved clobbered registers
-
- pop %r15
- pop %r14
- pop %r13
- pop %r12
-
- # outer calling routine restores XMM and other GP registers
- ret
-ENDPROC(sha512_x4_avx2)
-
-.section .rodata.K512_4, "a", @progbits
-.align 64
-K512_4:
- .octa 0x428a2f98d728ae22428a2f98d728ae22,\
- 0x428a2f98d728ae22428a2f98d728ae22
- .octa 0x7137449123ef65cd7137449123ef65cd,\
- 0x7137449123ef65cd7137449123ef65cd
- .octa 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f,\
- 0xb5c0fbcfec4d3b2fb5c0fbcfec4d3b2f
- .octa 0xe9b5dba58189dbbce9b5dba58189dbbc,\
- 0xe9b5dba58189dbbce9b5dba58189dbbc
- .octa 0x3956c25bf348b5383956c25bf348b538,\
- 0x3956c25bf348b5383956c25bf348b538
- .octa 0x59f111f1b605d01959f111f1b605d019,\
- 0x59f111f1b605d01959f111f1b605d019
- .octa 0x923f82a4af194f9b923f82a4af194f9b,\
- 0x923f82a4af194f9b923f82a4af194f9b
- .octa 0xab1c5ed5da6d8118ab1c5ed5da6d8118,\
- 0xab1c5ed5da6d8118ab1c5ed5da6d8118
- .octa 0xd807aa98a3030242d807aa98a3030242,\
- 0xd807aa98a3030242d807aa98a3030242
- .octa 0x12835b0145706fbe12835b0145706fbe,\
- 0x12835b0145706fbe12835b0145706fbe
- .octa 0x243185be4ee4b28c243185be4ee4b28c,\
- 0x243185be4ee4b28c243185be4ee4b28c
- .octa 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2,\
- 0x550c7dc3d5ffb4e2550c7dc3d5ffb4e2
- .octa 0x72be5d74f27b896f72be5d74f27b896f,\
- 0x72be5d74f27b896f72be5d74f27b896f
- .octa 0x80deb1fe3b1696b180deb1fe3b1696b1,\
- 0x80deb1fe3b1696b180deb1fe3b1696b1
- .octa 0x9bdc06a725c712359bdc06a725c71235,\
- 0x9bdc06a725c712359bdc06a725c71235
- .octa 0xc19bf174cf692694c19bf174cf692694,\
- 0xc19bf174cf692694c19bf174cf692694
- .octa 0xe49b69c19ef14ad2e49b69c19ef14ad2,\
- 0xe49b69c19ef14ad2e49b69c19ef14ad2
- .octa 0xefbe4786384f25e3efbe4786384f25e3,\
- 0xefbe4786384f25e3efbe4786384f25e3
- .octa 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5,\
- 0x0fc19dc68b8cd5b50fc19dc68b8cd5b5
- .octa 0x240ca1cc77ac9c65240ca1cc77ac9c65,\
- 0x240ca1cc77ac9c65240ca1cc77ac9c65
- .octa 0x2de92c6f592b02752de92c6f592b0275,\
- 0x2de92c6f592b02752de92c6f592b0275
- .octa 0x4a7484aa6ea6e4834a7484aa6ea6e483,\
- 0x4a7484aa6ea6e4834a7484aa6ea6e483
- .octa 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4,\
- 0x5cb0a9dcbd41fbd45cb0a9dcbd41fbd4
- .octa 0x76f988da831153b576f988da831153b5,\
- 0x76f988da831153b576f988da831153b5
- .octa 0x983e5152ee66dfab983e5152ee66dfab,\
- 0x983e5152ee66dfab983e5152ee66dfab
- .octa 0xa831c66d2db43210a831c66d2db43210,\
- 0xa831c66d2db43210a831c66d2db43210
- .octa 0xb00327c898fb213fb00327c898fb213f,\
- 0xb00327c898fb213fb00327c898fb213f
- .octa 0xbf597fc7beef0ee4bf597fc7beef0ee4,\
- 0xbf597fc7beef0ee4bf597fc7beef0ee4
- .octa 0xc6e00bf33da88fc2c6e00bf33da88fc2,\
- 0xc6e00bf33da88fc2c6e00bf33da88fc2
- .octa 0xd5a79147930aa725d5a79147930aa725,\
- 0xd5a79147930aa725d5a79147930aa725
- .octa 0x06ca6351e003826f06ca6351e003826f,\
- 0x06ca6351e003826f06ca6351e003826f
- .octa 0x142929670a0e6e70142929670a0e6e70,\
- 0x142929670a0e6e70142929670a0e6e70
- .octa 0x27b70a8546d22ffc27b70a8546d22ffc,\
- 0x27b70a8546d22ffc27b70a8546d22ffc
- .octa 0x2e1b21385c26c9262e1b21385c26c926,\
- 0x2e1b21385c26c9262e1b21385c26c926
- .octa 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed,\
- 0x4d2c6dfc5ac42aed4d2c6dfc5ac42aed
- .octa 0x53380d139d95b3df53380d139d95b3df,\
- 0x53380d139d95b3df53380d139d95b3df
- .octa 0x650a73548baf63de650a73548baf63de,\
- 0x650a73548baf63de650a73548baf63de
- .octa 0x766a0abb3c77b2a8766a0abb3c77b2a8,\
- 0x766a0abb3c77b2a8766a0abb3c77b2a8
- .octa 0x81c2c92e47edaee681c2c92e47edaee6,\
- 0x81c2c92e47edaee681c2c92e47edaee6
- .octa 0x92722c851482353b92722c851482353b,\
- 0x92722c851482353b92722c851482353b
- .octa 0xa2bfe8a14cf10364a2bfe8a14cf10364,\
- 0xa2bfe8a14cf10364a2bfe8a14cf10364
- .octa 0xa81a664bbc423001a81a664bbc423001,\
- 0xa81a664bbc423001a81a664bbc423001
- .octa 0xc24b8b70d0f89791c24b8b70d0f89791,\
- 0xc24b8b70d0f89791c24b8b70d0f89791
- .octa 0xc76c51a30654be30c76c51a30654be30,\
- 0xc76c51a30654be30c76c51a30654be30
- .octa 0xd192e819d6ef5218d192e819d6ef5218,\
- 0xd192e819d6ef5218d192e819d6ef5218
- .octa 0xd69906245565a910d69906245565a910,\
- 0xd69906245565a910d69906245565a910
- .octa 0xf40e35855771202af40e35855771202a,\
- 0xf40e35855771202af40e35855771202a
- .octa 0x106aa07032bbd1b8106aa07032bbd1b8,\
- 0x106aa07032bbd1b8106aa07032bbd1b8
- .octa 0x19a4c116b8d2d0c819a4c116b8d2d0c8,\
- 0x19a4c116b8d2d0c819a4c116b8d2d0c8
- .octa 0x1e376c085141ab531e376c085141ab53,\
- 0x1e376c085141ab531e376c085141ab53
- .octa 0x2748774cdf8eeb992748774cdf8eeb99,\
- 0x2748774cdf8eeb992748774cdf8eeb99
- .octa 0x34b0bcb5e19b48a834b0bcb5e19b48a8,\
- 0x34b0bcb5e19b48a834b0bcb5e19b48a8
- .octa 0x391c0cb3c5c95a63391c0cb3c5c95a63,\
- 0x391c0cb3c5c95a63391c0cb3c5c95a63
- .octa 0x4ed8aa4ae3418acb4ed8aa4ae3418acb,\
- 0x4ed8aa4ae3418acb4ed8aa4ae3418acb
- .octa 0x5b9cca4f7763e3735b9cca4f7763e373,\
- 0x5b9cca4f7763e3735b9cca4f7763e373
- .octa 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3,\
- 0x682e6ff3d6b2b8a3682e6ff3d6b2b8a3
- .octa 0x748f82ee5defb2fc748f82ee5defb2fc,\
- 0x748f82ee5defb2fc748f82ee5defb2fc
- .octa 0x78a5636f43172f6078a5636f43172f60,\
- 0x78a5636f43172f6078a5636f43172f60
- .octa 0x84c87814a1f0ab7284c87814a1f0ab72,\
- 0x84c87814a1f0ab7284c87814a1f0ab72
- .octa 0x8cc702081a6439ec8cc702081a6439ec,\
- 0x8cc702081a6439ec8cc702081a6439ec
- .octa 0x90befffa23631e2890befffa23631e28,\
- 0x90befffa23631e2890befffa23631e28
- .octa 0xa4506cebde82bde9a4506cebde82bde9,\
- 0xa4506cebde82bde9a4506cebde82bde9
- .octa 0xbef9a3f7b2c67915bef9a3f7b2c67915,\
- 0xbef9a3f7b2c67915bef9a3f7b2c67915
- .octa 0xc67178f2e372532bc67178f2e372532b,\
- 0xc67178f2e372532bc67178f2e372532b
- .octa 0xca273eceea26619cca273eceea26619c,\
- 0xca273eceea26619cca273eceea26619c
- .octa 0xd186b8c721c0c207d186b8c721c0c207,\
- 0xd186b8c721c0c207d186b8c721c0c207
- .octa 0xeada7dd6cde0eb1eeada7dd6cde0eb1e,\
- 0xeada7dd6cde0eb1eeada7dd6cde0eb1e
- .octa 0xf57d4f7fee6ed178f57d4f7fee6ed178,\
- 0xf57d4f7fee6ed178f57d4f7fee6ed178
- .octa 0x06f067aa72176fba06f067aa72176fba,\
- 0x06f067aa72176fba06f067aa72176fba
- .octa 0x0a637dc5a2c898a60a637dc5a2c898a6,\
- 0x0a637dc5a2c898a60a637dc5a2c898a6
- .octa 0x113f9804bef90dae113f9804bef90dae,\
- 0x113f9804bef90dae113f9804bef90dae
- .octa 0x1b710b35131c471b1b710b35131c471b,\
- 0x1b710b35131c471b1b710b35131c471b
- .octa 0x28db77f523047d8428db77f523047d84,\
- 0x28db77f523047d8428db77f523047d84
- .octa 0x32caab7b40c7249332caab7b40c72493,\
- 0x32caab7b40c7249332caab7b40c72493
- .octa 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc,\
- 0x3c9ebe0a15c9bebc3c9ebe0a15c9bebc
- .octa 0x431d67c49c100d4c431d67c49c100d4c,\
- 0x431d67c49c100d4c431d67c49c100d4c
- .octa 0x4cc5d4becb3e42b64cc5d4becb3e42b6,\
- 0x4cc5d4becb3e42b64cc5d4becb3e42b6
- .octa 0x597f299cfc657e2a597f299cfc657e2a,\
- 0x597f299cfc657e2a597f299cfc657e2a
- .octa 0x5fcb6fab3ad6faec5fcb6fab3ad6faec,\
- 0x5fcb6fab3ad6faec5fcb6fab3ad6faec
- .octa 0x6c44198c4a4758176c44198c4a475817,\
- 0x6c44198c4a4758176c44198c4a475817
-
-.section .rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
-.align 32
-PSHUFFLE_BYTE_FLIP_MASK: .octa 0x08090a0b0c0d0e0f0001020304050607
- .octa 0x18191a1b1c1d1e1f1011121314151617
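Since AVX2 has no packed 64-bit rotate, the _PRORQ macro above synthesizes one from two shifts and an OR per lane. The same identity in scalar C, together with the SHA-512 message-schedule sigma0 it helps compute (standard FIPS 180-4 definition; a sketch, not the kernel code):

	#include <stdint.h>

	/* right-rotate built from two shifts and an OR, as in _PRORQ;
	 * valid for 1 <= imm <= 63 */
	static inline uint64_t rotr64(uint64_t x, unsigned imm)
	{
		return (x >> imm) | (x << (64 - imm));
	}

	/* sigma0(x) = rotr(x,1) ^ rotr(x,8) ^ (x >> 7) */
	static inline uint64_t sigma0(uint64_t x)
	{
		return rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7);
	}
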
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index a0f46bdd9f24..fab4df16a3c4 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -12,38 +12,23 @@
#include <asm/user32.h>
#include <asm/unistd.h>
+#include <asm-generic/compat.h>
+
#define COMPAT_USER_HZ 100
#define COMPAT_UTS_MACHINE "i686\0\0"
-typedef u32 compat_size_t;
-typedef s32 compat_ssize_t;
-typedef s32 compat_clock_t;
-typedef s32 compat_pid_t;
typedef u16 __compat_uid_t;
typedef u16 __compat_gid_t;
typedef u32 __compat_uid32_t;
typedef u32 __compat_gid32_t;
typedef u16 compat_mode_t;
-typedef u32 compat_ino_t;
typedef u16 compat_dev_t;
-typedef s32 compat_off_t;
-typedef s64 compat_loff_t;
typedef u16 compat_nlink_t;
typedef u16 compat_ipc_pid_t;
-typedef s32 compat_daddr_t;
typedef u32 compat_caddr_t;
typedef __kernel_fsid_t compat_fsid_t;
-typedef s32 compat_timer_t;
-typedef s32 compat_key_t;
-
-typedef s32 compat_int_t;
-typedef s32 compat_long_t;
typedef s64 __attribute__((aligned(4))) compat_s64;
-typedef u32 compat_uint_t;
-typedef u32 compat_ulong_t;
-typedef u32 compat_u32;
typedef u64 __attribute__((aligned(4))) compat_u64;
-typedef u32 compat_uptr_t;
struct compat_stat {
compat_dev_t st_dev;
diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 5f26962eff42..67ed72f31cc2 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -45,6 +45,8 @@ struct vcpu_data {
#ifdef CONFIG_IRQ_REMAP
+extern raw_spinlock_t irq_2_ir_lock;
+
extern bool irq_remapping_cap(enum irq_remap_cap cap);
extern void set_irq_remapping_broken(void);
extern int irq_remapping_prepare(void);
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 09b2e3e2cf1b..55e51ff7e421 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -102,7 +102,15 @@
#define UNMAPPED_GVA (~(gpa_t)0)
/* KVM Hugepage definitions for x86 */
-#define KVM_NR_PAGE_SIZES 3
+enum {
+ PT_PAGE_TABLE_LEVEL = 1,
+ PT_DIRECTORY_LEVEL = 2,
+ PT_PDPE_LEVEL = 3,
+ /* set max level to the biggest one */
+ PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
+};
+#define KVM_NR_PAGE_SIZES (PT_MAX_HUGEPAGE_LEVEL - \
+ PT_PAGE_TABLE_LEVEL + 1)
#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x))
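The new enum keeps KVM_NR_PAGE_SIZES derived from the paging levels it names rather than a bare constant. Plugging the macros through with PAGE_SHIFT == 12 shows the three sizes they describe; a small standalone check (not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT		12
	#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
	#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
	#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))

	int main(void)
	{
		/* level 1: 4 KiB, level 2: 2 MiB, level 3: 1 GiB */
		for (int level = 1; level <= 3; level++)
			printf("level %d: %lu bytes\n", level, KVM_HPAGE_SIZE(level));
		return 0;
	}
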
@@ -177,6 +185,7 @@ enum {
#define DR6_BD (1 << 13)
#define DR6_BS (1 << 14)
+#define DR6_BT (1 << 15)
#define DR6_RTM (1 << 16)
#define DR6_FIXED_1 0xfffe0ff0
#define DR6_INIT 0xffff0ff0
@@ -247,7 +256,7 @@ struct kvm_mmu_memory_cache {
* @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
*/
union kvm_mmu_page_role {
- unsigned word;
+ u32 word;
struct {
unsigned level:4;
unsigned cr4_pae:1;
@@ -273,6 +282,34 @@ union kvm_mmu_page_role {
};
};
+union kvm_mmu_extended_role {
+/*
+ * This structure complements kvm_mmu_page_role caching everything needed for
+ * MMU configuration. If nothing in both these structures changed, MMU
+ * re-configuration can be skipped. @valid bit is set on first usage so we don't
+ * treat all-zero structure as valid data.
+ */
+ u32 word;
+ struct {
+ unsigned int valid:1;
+ unsigned int execonly:1;
+ unsigned int cr0_pg:1;
+ unsigned int cr4_pse:1;
+ unsigned int cr4_pke:1;
+ unsigned int cr4_smap:1;
+ unsigned int cr4_smep:1;
+ unsigned int cr4_la57:1;
+ };
+};
+
+union kvm_mmu_role {
+ u64 as_u64;
+ struct {
+ union kvm_mmu_page_role base;
+ union kvm_mmu_extended_role ext;
+ };
+};
+
struct kvm_rmap_head {
unsigned long val;
};
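The point of packing base and ext into one u64 is a cheap equality test: recompute the would-be role from current vCPU state and skip MMU reconfiguration when it matches the cached value. A hedged sketch of that check (the helper name is invented; assumes the union introduced above):

	static bool mmu_role_changed(union kvm_mmu_role cached,
				     union kvm_mmu_role fresh)
	{
		/* ext.valid is set on first use, so a never-initialized
		 * (all-zero) cached role can never compare equal */
		return cached.as_u64 != fresh.as_u64;
	}
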
@@ -280,18 +317,18 @@ struct kvm_rmap_head {
struct kvm_mmu_page {
struct list_head link;
struct hlist_node hash_link;
+ bool unsync;
/*
* The following two entries are used to key the shadow page in the
* hash table.
*/
- gfn_t gfn;
union kvm_mmu_page_role role;
+ gfn_t gfn;
u64 *spt;
/* hold the gfn of each spte inside spt */
gfn_t *gfns;
- bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
@@ -360,7 +397,7 @@ struct kvm_mmu {
void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, const void *pte);
hpa_t root_hpa;
- union kvm_mmu_page_role base_role;
+ union kvm_mmu_role mmu_role;
u8 root_level;
u8 shadow_root_level;
u8 ept_ad;
@@ -490,7 +527,7 @@ struct kvm_vcpu_hv {
struct kvm_hyperv_exit exit;
struct kvm_vcpu_hv_stimer stimer[HV_SYNIC_STIMER_COUNT];
DECLARE_BITMAP(stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
- cpumask_t tlb_lush;
+ cpumask_t tlb_flush;
};
struct kvm_vcpu_arch {
@@ -534,7 +571,13 @@ struct kvm_vcpu_arch {
* the paging mode of the l1 guest. This context is always used to
* handle faults.
*/
- struct kvm_mmu mmu;
+ struct kvm_mmu *mmu;
+
+ /* Non-nested MMU for L1 */
+ struct kvm_mmu root_mmu;
+
+ /* L1 MMU when running nested */
+ struct kvm_mmu guest_mmu;
/*
* Paging state of an L2 guest (used for nested npt)
@@ -585,6 +628,8 @@ struct kvm_vcpu_arch {
bool has_error_code;
u8 nr;
u32 error_code;
+ unsigned long payload;
+ bool has_payload;
u8 nested_apf;
} exception;
@@ -781,6 +826,9 @@ struct kvm_hv {
u64 hv_reenlightenment_control;
u64 hv_tsc_emulation_control;
u64 hv_tsc_emulation_status;
+
+ /* How many vCPUs have VP index != vCPU index */
+ atomic_t num_mismatched_vp_indexes;
};
enum kvm_irqchip_mode {
@@ -871,6 +919,7 @@ struct kvm_arch {
bool x2apic_broadcast_quirk_disabled;
bool guest_can_read_msr_platform_info;
+ bool exception_payload_enabled;
};
struct kvm_vm_stat {
@@ -1133,6 +1182,9 @@ struct kvm_x86_ops {
int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
int (*get_msr_feature)(struct kvm_msr_entry *entry);
+
+ int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version);
};
struct kvm_arch_async_pf {
@@ -1170,7 +1222,6 @@ void kvm_mmu_module_exit(void);
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
-void kvm_mmu_setup(struct kvm_vcpu *vcpu);
void kvm_mmu_init_vm(struct kvm *kvm);
void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
@@ -1324,7 +1375,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free);
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ ulong roots_to_free);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h
index 51c4eee00732..dc4ed8bc2382 100644
--- a/arch/x86/include/asm/unistd.h
+++ b/arch/x86/include/asm/unistd.h
@@ -24,6 +24,7 @@
# include <asm/unistd_64.h>
# include <asm/unistd_64_x32.h>
# define __ARCH_WANT_COMPAT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME32
# define __ARCH_WANT_COMPAT_SYS_PREADV64
# define __ARCH_WANT_COMPAT_SYS_PWRITEV64
# define __ARCH_WANT_COMPAT_SYS_PREADV64V2
@@ -31,13 +32,13 @@
# endif
+# define __ARCH_WANT_NEW_STAT
# define __ARCH_WANT_OLD_READDIR
# define __ARCH_WANT_OLD_STAT
# define __ARCH_WANT_SYS_ALARM
# define __ARCH_WANT_SYS_FADVISE64
# define __ARCH_WANT_SYS_GETHOSTNAME
# define __ARCH_WANT_SYS_GETPGRP
-# define __ARCH_WANT_SYS_LLSEEK
# define __ARCH_WANT_SYS_NICE
# define __ARCH_WANT_SYS_OLDUMOUNT
# define __ARCH_WANT_SYS_OLD_GETRLIMIT
diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index e05e0d309244..1fc7a0d1e877 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -40,7 +40,7 @@ static inline int cpu_has_vmx(void)
*/
static inline void cpu_vmxoff(void)
{
- asm volatile (ASM_VMX_VMXOFF : : : "cc");
+ asm volatile ("vmxoff");
cr4_clear_bits(X86_CR4_VMXE);
}
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 9527ba5d62da..ade0f153947d 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -503,19 +503,6 @@ enum vmcs_field {
#define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
-
-#define ASM_VMX_VMCLEAR_RAX ".byte 0x66, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMLAUNCH ".byte 0x0f, 0x01, 0xc2"
-#define ASM_VMX_VMRESUME ".byte 0x0f, 0x01, 0xc3"
-#define ASM_VMX_VMPTRLD_RAX ".byte 0x0f, 0xc7, 0x30"
-#define ASM_VMX_VMREAD_RDX_RAX ".byte 0x0f, 0x78, 0xd0"
-#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
-#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
-#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
-#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
-#define ASM_VMX_INVEPT ".byte 0x66, 0x0f, 0x38, 0x80, 0x08"
-#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
-
struct vmx_msr_entry {
u32 index;
u32 reserved;
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index fd23d5778ea1..dabfcf7c3941 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -299,7 +300,7 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ __u8 pending;
__u32 error_code;
} exception;
struct {
@@ -322,7 +323,9 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u32 reserved[9];
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
/* for KVM_GET/SET_DEBUGREGS */
@@ -381,6 +384,7 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+#define KVM_STATE_NESTED_EVMCS 0x00000004
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index f39f3a06c26f..7299dcbf8e85 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -140,7 +140,7 @@ static void __init dtb_cpu_setup(void)
int ret;
version = GET_APIC_VERSION(apic_read(APIC_LVR));
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
ret = of_property_read_u32(dn, "reg", &apic_id);
if (ret < 0) {
pr_warn("%pOF: missing local APIC ID\n", dn);
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 01d209ab5481..4e80080f277a 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -36,6 +36,8 @@
#include "trace.h"
+#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
+
static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
return atomic64_read(&synic->sint[sint]);
@@ -132,8 +134,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
struct kvm_vcpu *vcpu = NULL;
int i;
- if (vpidx < KVM_MAX_VCPUS)
- vcpu = kvm_get_vcpu(kvm, vpidx);
+ if (vpidx >= KVM_MAX_VCPUS)
+ return NULL;
+
+ vcpu = kvm_get_vcpu(kvm, vpidx);
if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx)
return vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
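get_vcpu_by_vpidx leans on the common case: kvm_hv_vcpu_postcreate seeds vp_index with the vcpu index, so the direct array lookup usually hits and the linear scan only runs when a guest has remapped VP indexes. A toy C model of that two-tier lookup (illustrative, not the kernel helper):

	/* vps[i] holds the VP index currently assigned to vcpu i */
	static int vcpu_idx_by_vpidx(const unsigned int *vps, int nvcpus,
				     unsigned int vpidx)
	{
		if (vpidx < (unsigned int)nvcpus && vps[vpidx] == vpidx)
			return (int)vpidx;	/* fast path: identity mapping */

		for (int i = 0; i < nvcpus; i++)	/* slow path: remapped */
			if (vps[i] == vpidx)
				return i;
		return -1;
	}
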
@@ -689,6 +693,24 @@ void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
stimer_cleanup(&hv_vcpu->stimer[i]);
}
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
+ return false;
+ return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
+}
+EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
+
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+ struct hv_vp_assist_page *assist_page)
+{
+ if (!kvm_hv_assist_page_enabled(vcpu))
+ return false;
+ return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
+ assist_page, sizeof(*assist_page));
+}
+EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
+
static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
struct hv_message *msg = &stimer->msg;
@@ -1040,21 +1062,41 @@ static u64 current_task_runtime_100ns(void)
static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
switch (msr) {
- case HV_X64_MSR_VP_INDEX:
- if (!host)
+ case HV_X64_MSR_VP_INDEX: {
+ struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
+ int vcpu_idx = kvm_vcpu_get_idx(vcpu);
+ u32 new_vp_index = (u32)data;
+
+ if (!host || new_vp_index >= KVM_MAX_VCPUS)
return 1;
- hv->vp_index = (u32)data;
+
+ if (new_vp_index == hv_vcpu->vp_index)
+ return 0;
+
+ /*
+ * The VP index is initialized to vcpu_index by
+	 * kvm_hv_vcpu_postcreate so they initially match. Now that the
+ * VP index is changing, adjust num_mismatched_vp_indexes if
+ * it now matches or no longer matches vcpu_idx.
+ */
+ if (hv_vcpu->vp_index == vcpu_idx)
+ atomic_inc(&hv->num_mismatched_vp_indexes);
+ else if (new_vp_index == vcpu_idx)
+ atomic_dec(&hv->num_mismatched_vp_indexes);
+
+ hv_vcpu->vp_index = new_vp_index;
break;
+ }
case HV_X64_MSR_VP_ASSIST_PAGE: {
u64 gfn;
unsigned long addr;
if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
- hv->hv_vapic = data;
- if (kvm_lapic_enable_pv_eoi(vcpu, 0))
+ hv_vcpu->hv_vapic = data;
+ if (kvm_lapic_enable_pv_eoi(vcpu, 0, 0))
return 1;
break;
}
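The counter maintained here lets later hypercall paths choose the fast identity mapping: every vCPU starts with vp_index equal to its vcpu index, and each write to HV_X64_MSR_VP_INDEX moves the mismatch count by at most one in either direction. A compact restatement of that invariant (C11 atomics stand in for the kernel's atomic_t):

	#include <stdatomic.h>
	#include <stdint.h>

	static void track_vp_index_change(atomic_int *num_mismatched,
					  uint32_t old_vp, uint32_t new_vp,
					  uint32_t vcpu_idx)
	{
		if (old_vp == vcpu_idx)		/* was matching, diverges now */
			atomic_fetch_add(num_mismatched, 1);
		else if (new_vp == vcpu_idx)	/* was diverging, matches now */
			atomic_fetch_sub(num_mismatched, 1);
		/* otherwise: mismatched before and after, count unchanged */
	}
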
@@ -1062,12 +1104,19 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
if (kvm_is_error_hva(addr))
return 1;
- if (__clear_user((void __user *)addr, PAGE_SIZE))
+
+ /*
+	 * Clear apic_assist portion of struct hv_vp_assist_page
+ * only, there can be valuable data in the rest which needs
+ * to be preserved e.g. on migration.
+ */
+ if (__clear_user((void __user *)addr, sizeof(u32)))
return 1;
- hv->hv_vapic = data;
+ hv_vcpu->hv_vapic = data;
kvm_vcpu_mark_page_dirty(vcpu, gfn);
if (kvm_lapic_enable_pv_eoi(vcpu,
- gfn_to_gpa(gfn) | KVM_MSR_ENABLED))
+ gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
+ sizeof(struct hv_vp_assist_page)))
return 1;
break;
}
@@ -1080,7 +1129,7 @@ static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
case HV_X64_MSR_VP_RUNTIME:
if (!host)
return 1;
- hv->runtime_offset = data - current_task_runtime_100ns();
+ hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
break;
case HV_X64_MSR_SCONTROL:
case HV_X64_MSR_SVERSION:
@@ -1172,11 +1221,11 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
bool host)
{
u64 data = 0;
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
switch (msr) {
case HV_X64_MSR_VP_INDEX:
- data = hv->vp_index;
+ data = hv_vcpu->vp_index;
break;
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
@@ -1185,10 +1234,10 @@ static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
case HV_X64_MSR_VP_ASSIST_PAGE:
- data = hv->hv_vapic;
+ data = hv_vcpu->hv_vapic;
break;
case HV_X64_MSR_VP_RUNTIME:
- data = current_task_runtime_100ns() + hv->runtime_offset;
+ data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
break;
case HV_X64_MSR_SCONTROL:
case HV_X64_MSR_SVERSION:
@@ -1255,32 +1304,47 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
return kvm_hv_get_msr(vcpu, msr, pdata, host);
}
-static __always_inline int get_sparse_bank_no(u64 valid_bank_mask, int bank_no)
+static __always_inline unsigned long *sparse_set_to_vcpu_mask(
+ struct kvm *kvm, u64 *sparse_banks, u64 valid_bank_mask,
+ u64 *vp_bitmap, unsigned long *vcpu_bitmap)
{
- int i = 0, j;
+ struct kvm_hv *hv = &kvm->arch.hyperv;
+ struct kvm_vcpu *vcpu;
+ int i, bank, sbank = 0;
- if (!(valid_bank_mask & BIT_ULL(bank_no)))
- return -1;
+ memset(vp_bitmap, 0,
+ KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
+ for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
+ KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
+ vp_bitmap[bank] = sparse_banks[sbank++];
- for (j = 0; j < bank_no; j++)
- if (valid_bank_mask & BIT_ULL(j))
- i++;
+ if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) {
+ /* for all vcpus vp_index == vcpu_idx */
+ return (unsigned long *)vp_bitmap;
+ }
- return i;
+ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index,
+ (unsigned long *)vp_bitmap))
+ __set_bit(i, vcpu_bitmap);
+ }
+ return vcpu_bitmap;
}
static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
u16 rep_cnt, bool ex)
{
struct kvm *kvm = current_vcpu->kvm;
- struct kvm_vcpu_hv *hv_current = &current_vcpu->arch.hyperv;
+ struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv;
struct hv_tlb_flush_ex flush_ex;
struct hv_tlb_flush flush;
- struct kvm_vcpu *vcpu;
- unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)] = {0};
- unsigned long valid_bank_mask = 0;
+ u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+ unsigned long *vcpu_mask;
+ u64 valid_bank_mask;
u64 sparse_banks[64];
- int sparse_banks_len, i;
+ int sparse_banks_len;
bool all_cpus;
if (!ex) {
@@ -1290,6 +1354,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
trace_kvm_hv_flush_tlb(flush.processor_mask,
flush.address_space, flush.flags);
+ valid_bank_mask = BIT_ULL(0);
sparse_banks[0] = flush.processor_mask;
all_cpus = flush.flags & HV_FLUSH_ALL_PROCESSORS;
} else {
@@ -1306,7 +1371,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
all_cpus = flush_ex.hv_vp_set.format !=
HV_GENERIC_SET_SPARSE_4K;
- sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
+ sparse_banks_len =
+ bitmap_weight((unsigned long *)&valid_bank_mask, 64) *
sizeof(sparse_banks[0]);
if (!sparse_banks_len && !all_cpus)
@@ -1321,48 +1387,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
return HV_STATUS_INVALID_HYPERCALL_INPUT;
}
- cpumask_clear(&hv_current->tlb_lush);
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- struct kvm_vcpu_hv *hv = &vcpu->arch.hyperv;
- int bank = hv->vp_index / 64, sbank = 0;
-
- if (!all_cpus) {
- /* Banks >64 can't be represented */
- if (bank >= 64)
- continue;
-
- /* Non-ex hypercalls can only address first 64 vCPUs */
- if (!ex && bank)
- continue;
-
- if (ex) {
- /*
-			 * Check if the bank of this vCPU is in the sparse
- * set and get the sparse bank number.
- */
- sbank = get_sparse_bank_no(valid_bank_mask,
- bank);
-
- if (sbank < 0)
- continue;
- }
-
- if (!(sparse_banks[sbank] & BIT_ULL(hv->vp_index % 64)))
- continue;
- }
+ cpumask_clear(&hv_vcpu->tlb_flush);
- /*
- * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we
- * can't analyze it here, flush TLB regardless of the specified
- * address space.
- */
- __set_bit(i, vcpu_bitmap);
- }
+ vcpu_mask = all_cpus ? NULL :
+ sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+ vp_bitmap, vcpu_bitmap);
+ /*
+ * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
+ * analyze it here, flush TLB regardless of the specified address space.
+ */
kvm_make_vcpus_request_mask(kvm,
KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
- vcpu_bitmap, &hv_current->tlb_lush);
+ vcpu_mask, &hv_vcpu->tlb_flush);
ret_success:
/* We always do full TLB flush, set rep_done = rep_cnt. */
@@ -1370,6 +1407,99 @@ ret_success:
((u64)rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}
+static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
+ unsigned long *vcpu_bitmap)
+{
+ struct kvm_lapic_irq irq = {
+ .delivery_mode = APIC_DM_FIXED,
+ .vector = vector
+ };
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+ continue;
+
+ /* We fail only when APIC is disabled */
+ kvm_apic_set_irq(vcpu, &irq, NULL);
+ }
+}
+
+static u64 kvm_hv_send_ipi(struct kvm_vcpu *current_vcpu, u64 ingpa, u64 outgpa,
+ bool ex, bool fast)
+{
+ struct kvm *kvm = current_vcpu->kvm;
+ struct hv_send_ipi_ex send_ipi_ex;
+ struct hv_send_ipi send_ipi;
+ u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+ unsigned long *vcpu_mask;
+ unsigned long valid_bank_mask;
+ u64 sparse_banks[64];
+ int sparse_banks_len;
+ u32 vector;
+ bool all_cpus;
+
+ if (!ex) {
+ if (!fast) {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi,
+ sizeof(send_ipi))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ sparse_banks[0] = send_ipi.cpu_mask;
+ vector = send_ipi.vector;
+ } else {
+ /* 'reserved' part of hv_send_ipi should be 0 */
+ if (unlikely(ingpa >> 32 != 0))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ sparse_banks[0] = outgpa;
+ vector = (u32)ingpa;
+ }
+ all_cpus = false;
+ valid_bank_mask = BIT_ULL(0);
+
+ trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
+ } else {
+ if (unlikely(kvm_read_guest(kvm, ingpa, &send_ipi_ex,
+ sizeof(send_ipi_ex))))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+ trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
+ send_ipi_ex.vp_set.format,
+ send_ipi_ex.vp_set.valid_bank_mask);
+
+ vector = send_ipi_ex.vector;
+ valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
+ sparse_banks_len = bitmap_weight(&valid_bank_mask, 64) *
+ sizeof(sparse_banks[0]);
+
+ all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
+
+ if (!sparse_banks_len)
+ goto ret_success;
+
+ if (!all_cpus &&
+ kvm_read_guest(kvm,
+ ingpa + offsetof(struct hv_send_ipi_ex,
+ vp_set.bank_contents),
+ sparse_banks,
+ sparse_banks_len))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+ }
+
+ if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
+ return HV_STATUS_INVALID_HYPERCALL_INPUT;
+
+ vcpu_mask = all_cpus ? NULL :
+ sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+ vp_bitmap, vcpu_bitmap);
+
+ kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
+
+ret_success:
+ return HV_STATUS_SUCCESS;
+}
+
bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
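sparse_set_to_vcpu_mask above decodes the Hyper-V sparse VP-set format now shared by the TLB-flush and send-IPI hypercalls: bit b of valid_bank_mask declares that bank b (VPs 64*b .. 64*b+63) is present, and the present banks' 64-bit masks are packed densely in bank_contents. A hedged model of the expansion step on its own:

	#include <stdint.h>
	#include <string.h>

	static void expand_sparse_set(uint64_t valid_bank_mask,
				      const uint64_t *sparse_banks,
				      uint64_t *vp_bitmap, unsigned int nbanks)
	{
		unsigned int sbank = 0;

		memset(vp_bitmap, 0, nbanks * sizeof(*vp_bitmap));
		for (unsigned int bank = 0; bank < nbanks && bank < 64; bank++)
			if (valid_bank_mask & (1ULL << bank))
				vp_bitmap[bank] = sparse_banks[sbank++];
	}
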
@@ -1539,6 +1669,20 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
}
ret = kvm_hv_flush_tlb(vcpu, ingpa, rep_cnt, true);
break;
+ case HVCALL_SEND_IPI:
+ if (unlikely(rep)) {
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+ }
+ ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, false, fast);
+ break;
+ case HVCALL_SEND_IPI_EX:
+ if (unlikely(fast || rep)) {
+ ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
+ break;
+ }
+ ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
+ break;
default:
ret = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
diff --git a/arch/x86/kvm/hyperv.h b/arch/x86/kvm/hyperv.h
index d6aa969e20f1..0e66c12ed2c3 100644
--- a/arch/x86/kvm/hyperv.h
+++ b/arch/x86/kvm/hyperv.h
@@ -62,6 +62,10 @@ void kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);
+bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
+bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
+ struct hv_vp_assist_page *assist_page);
+
static inline struct kvm_vcpu_hv_stimer *vcpu_to_stimer(struct kvm_vcpu *vcpu,
int timer_index)
{
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index fbb0e6df121b..3cd227ff807f 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -70,6 +70,11 @@
#define APIC_BROADCAST 0xFF
#define X2APIC_BROADCAST 0xFFFFFFFFul
+static bool lapic_timer_advance_adjust_done = false;
+#define LAPIC_TIMER_ADVANCE_ADJUST_DONE 100
+/* step-by-step approximation to mitigate fluctuation */
+#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
+
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -955,14 +960,14 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
map = rcu_dereference(kvm->arch.apic_map);
ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
- if (ret)
+ if (ret) {
+ *r = 0;
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
- if (*r < 0)
- *r = 0;
*r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
}
+ }
rcu_read_unlock();
return ret;
@@ -1472,7 +1477,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
void wait_lapic_expire(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
- u64 guest_tsc, tsc_deadline;
+ u64 guest_tsc, tsc_deadline, ns;
if (!lapic_in_kernel(vcpu))
return;
@@ -1492,6 +1497,24 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
if (guest_tsc < tsc_deadline)
__delay(min(tsc_deadline - guest_tsc,
nsec_to_cycles(vcpu, lapic_timer_advance_ns)));
+
+ if (!lapic_timer_advance_adjust_done) {
+ /* too early */
+ if (guest_tsc < tsc_deadline) {
+ ns = (tsc_deadline - guest_tsc) * 1000000ULL;
+ do_div(ns, vcpu->arch.virtual_tsc_khz);
+ lapic_timer_advance_ns -= min((unsigned int)ns,
+ lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ } else {
+ /* too late */
+ ns = (guest_tsc - tsc_deadline) * 1000000ULL;
+ do_div(ns, vcpu->arch.virtual_tsc_khz);
+ lapic_timer_advance_ns += min((unsigned int)ns,
+ lapic_timer_advance_ns / LAPIC_TIMER_ADVANCE_ADJUST_STEP);
+ }
+ if (abs(guest_tsc - tsc_deadline) < LAPIC_TIMER_ADVANCE_ADJUST_DONE)
+ lapic_timer_advance_adjust_done = true;
+ }
}
static void start_sw_tscdeadline(struct kvm_lapic *apic)
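The adjustment block added to wait_lapic_expire is a damped feedback loop: each observed early/late error moves lapic_timer_advance_ns toward the target by at most 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP of its current value, and tuning stops once the residual error falls inside the done threshold. A hedged scalar restatement (units simplified; in the kernel the done test compares TSC cycles):

	#include <stdint.h>

	#define ADJUST_STEP	8	/* damping: move by at most advance/8 */
	#define ADJUST_DONE	100	/* stop tuning below this residual */

	static unsigned int tune_advance(unsigned int advance, int64_t err,
					 int *done)
	{
		uint64_t mag = (uint64_t)(err < 0 ? -err : err);
		uint64_t step = advance / ADJUST_STEP;
		uint64_t delta = mag < step ? mag : step;

		if (err < 0)		/* fired too early: shrink window */
			advance -= (unsigned int)delta;
		else			/* fired too late: widen window */
			advance += (unsigned int)delta;

		if (mag < ADJUST_DONE)
			*done = 1;
		return advance;
	}
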
@@ -2621,17 +2644,25 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
return 0;
}
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
u64 addr = data & ~KVM_MSR_ENABLED;
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
+ unsigned long new_len;
+
if (!IS_ALIGNED(addr, 4))
return 1;
vcpu->arch.pv_eoi.msr_val = data;
if (!pv_eoi_enabled(vcpu))
return 0;
- return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr, sizeof(u8));
+
+ if (addr == ghc->gpa && len <= ghc->len)
+ new_len = ghc->len;
+ else
+ new_len = len;
+
+ return kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
}
void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index ed0ed39abd36..ff6ef9c3d760 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -120,7 +120,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
return vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
}
-int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
+int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len);
void kvm_lapic_init(void);
void kvm_lapic_exit(void);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e843ec46609d..cf5f572f2305 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -932,7 +932,7 @@ static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
if (!obj)
- return -ENOMEM;
+ return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = obj;
}
return 0;
@@ -960,7 +960,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
- return -ENOMEM;
+ return cache->nobjs >= min ? 0 : -ENOMEM;
cache->objects[cache->nobjs++] = page;
}
return 0;
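Both topup helpers now degrade gracefully: they still try to fill the cache to capacity, but an allocation failure only propagates -ENOMEM when fewer than the caller's minimum are banked. The pattern in isolation (a userspace stand-in, not the kernel helper):

	#include <stdlib.h>

	#define CACHE_CAP 40

	struct obj_cache {
		int nobjs;
		void *objects[CACHE_CAP];
	};

	/* fill to capacity if possible; succeed once `min` objects exist */
	static int topup(struct obj_cache *cache, int min, size_t size)
	{
		while (cache->nobjs < CACHE_CAP) {
			void *obj = calloc(1, size);

			if (!obj)
				return cache->nobjs >= min ? 0 : -1;
			cache->objects[cache->nobjs++] = obj;
		}
		return 0;
	}
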
@@ -1265,24 +1265,24 @@ pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head,
mmu_free_pte_list_desc(desc);
}
-static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
+static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc;
struct pte_list_desc *prev_desc;
int i;
if (!rmap_head->val) {
- printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
+ pr_err("%s: %p 0->BUG\n", __func__, spte);
BUG();
} else if (!(rmap_head->val & 1)) {
- rmap_printk("pte_list_remove: %p 1->0\n", spte);
+ rmap_printk("%s: %p 1->0\n", __func__, spte);
if ((u64 *)rmap_head->val != spte) {
- printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
+ pr_err("%s: %p 1->BUG\n", __func__, spte);
BUG();
}
rmap_head->val = 0;
} else {
- rmap_printk("pte_list_remove: %p many->many\n", spte);
+ rmap_printk("%s: %p many->many\n", __func__, spte);
desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
prev_desc = NULL;
while (desc) {
@@ -1296,11 +1296,17 @@ static void pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
prev_desc = desc;
desc = desc->more;
}
- pr_err("pte_list_remove: %p many->many\n", spte);
+ pr_err("%s: %p many->many\n", __func__, spte);
BUG();
}
}
+static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
+{
+ mmu_spte_clear_track_bits(sptep);
+ __pte_list_remove(sptep, rmap_head);
+}
+
static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
struct kvm_memory_slot *slot)
{
@@ -1349,7 +1355,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
sp = page_header(__pa(spte));
gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
rmap_head = gfn_to_rmap(kvm, gfn, sp);
- pte_list_remove(spte, rmap_head);
+ __pte_list_remove(spte, rmap_head);
}
/*
@@ -1685,7 +1691,7 @@ static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
while ((sptep = rmap_get_first(rmap_head, &iter))) {
rmap_printk("%s: spte %p %llx.\n", __func__, sptep, *sptep);
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
flush = true;
}
@@ -1721,7 +1727,7 @@ restart:
need_flush = 1;
if (pte_write(*ptep)) {
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
goto restart;
} else {
new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
@@ -1988,7 +1994,7 @@ static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
u64 *parent_pte)
{
- pte_list_remove(parent_pte, &sp->parent_ptes);
+ __pte_list_remove(parent_pte, &sp->parent_ptes);
}
static void drop_parent_pte(struct kvm_mmu_page *sp,
@@ -2181,7 +2187,7 @@ static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)
- || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
+ || vcpu->arch.mmu->sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
return false;
}
@@ -2375,14 +2381,14 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
int collisions = 0;
LIST_HEAD(invalid_list);
- role = vcpu->arch.mmu.base_role;
+ role = vcpu->arch.mmu->mmu_role.base;
role.level = level;
role.direct = direct;
if (role.direct)
role.cr4_pae = 0;
role.access = access;
- if (!vcpu->arch.mmu.direct_map
- && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
+ if (!vcpu->arch.mmu->direct_map
+ && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
@@ -2457,11 +2463,11 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
{
iterator->addr = addr;
iterator->shadow_addr = root;
- iterator->level = vcpu->arch.mmu.shadow_root_level;
+ iterator->level = vcpu->arch.mmu->shadow_root_level;
if (iterator->level == PT64_ROOT_4LEVEL &&
- vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL &&
- !vcpu->arch.mmu.direct_map)
+ vcpu->arch.mmu->root_level < PT64_ROOT_4LEVEL &&
+ !vcpu->arch.mmu->direct_map)
--iterator->level;
if (iterator->level == PT32E_ROOT_LEVEL) {
@@ -2469,10 +2475,10 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
* prev_root is currently only used for 64-bit hosts. So only
* the active root_hpa is valid here.
*/
- BUG_ON(root != vcpu->arch.mmu.root_hpa);
+ BUG_ON(root != vcpu->arch.mmu->root_hpa);
iterator->shadow_addr
- = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
+ = vcpu->arch.mmu->pae_root[(addr >> 30) & 3];
iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
--iterator->level;
if (!iterator->shadow_addr)
@@ -2483,7 +2489,7 @@ static void shadow_walk_init_using_root(struct kvm_shadow_walk_iterator *iterato
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
struct kvm_vcpu *vcpu, u64 addr)
{
- shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa,
+ shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu->root_hpa,
addr);
}
@@ -3095,7 +3101,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, int write, int map_writable,
int emulate = 0;
gfn_t pseudo_gfn;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return 0;
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
@@ -3301,7 +3307,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
u64 spte = 0ull;
uint retry_count = 0;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return false;
if (!page_fault_can_be_fast(error_code))
@@ -3471,11 +3477,11 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
}
/* roots_to_free must be some combination of the KVM_MMU_ROOT_* flags */
-void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, ulong roots_to_free)
+void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ ulong roots_to_free)
{
int i;
LIST_HEAD(invalid_list);
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
bool free_active_root = roots_to_free & KVM_MMU_ROOT_CURRENT;
BUILD_BUG_ON(KVM_MMU_NUM_PREV_ROOTS >= BITS_PER_LONG);
@@ -3535,20 +3541,20 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
struct kvm_mmu_page *sp;
unsigned i;
- if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) {
+ if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
spin_lock(&vcpu->kvm->mmu_lock);
if(make_mmu_pages_available(vcpu) < 0) {
spin_unlock(&vcpu->kvm->mmu_lock);
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, 0, 0,
- vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL);
+ vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.root_hpa = __pa(sp->spt);
- } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
+ vcpu->arch.mmu->root_hpa = __pa(sp->spt);
+ } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
@@ -3561,9 +3567,9 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
+ vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
} else
BUG();
@@ -3577,7 +3583,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
gfn_t root_gfn;
int i;
- root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
+ root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT;
if (mmu_check_root(vcpu, root_gfn))
return 1;
@@ -3586,8 +3592,8 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* Do we shadow a long mode page table? If so we need to
* write-protect the guests page table root.
*/
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
MMU_WARN_ON(VALID_PAGE(root));
@@ -3597,11 +3603,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
return -ENOSPC;
}
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL);
+ vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
root = __pa(sp->spt);
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.root_hpa = root;
+ vcpu->arch.mmu->root_hpa = root;
return 0;
}
@@ -3611,17 +3617,17 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
* the shadow page table may be a PAE or a long mode page table.
*/
pm_mask = PT_PRESENT_MASK;
- if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL)
+ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL)
pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
MMU_WARN_ON(VALID_PAGE(root));
- if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
- pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
+ if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
+ pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
if (!(pdptr & PT_PRESENT_MASK)) {
- vcpu->arch.mmu.pae_root[i] = 0;
+ vcpu->arch.mmu->pae_root[i] = 0;
continue;
}
root_gfn = pdptr >> PAGE_SHIFT;
@@ -3639,16 +3645,16 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
++sp->root_count;
spin_unlock(&vcpu->kvm->mmu_lock);
- vcpu->arch.mmu.pae_root[i] = root | pm_mask;
+ vcpu->arch.mmu->pae_root[i] = root | pm_mask;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
/*
* If we shadow a 32 bit page table with a long mode page
* table we enter this path.
*/
- if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) {
- if (vcpu->arch.mmu.lm_root == NULL) {
+ if (vcpu->arch.mmu->shadow_root_level == PT64_ROOT_4LEVEL) {
+ if (vcpu->arch.mmu->lm_root == NULL) {
/*
* The additional page necessary for this is only
* allocated on demand.
@@ -3660,12 +3666,12 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
if (lm_root == NULL)
return 1;
- lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
+ lm_root[0] = __pa(vcpu->arch.mmu->pae_root) | pm_mask;
- vcpu->arch.mmu.lm_root = lm_root;
+ vcpu->arch.mmu->lm_root = lm_root;
}
- vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
+ vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
}
return 0;
@@ -3673,7 +3679,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return mmu_alloc_direct_roots(vcpu);
else
return mmu_alloc_shadow_roots(vcpu);
@@ -3684,17 +3690,16 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
int i;
struct kvm_mmu_page *sp;
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
-
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
sp = page_header(root);
/*
@@ -3725,7 +3730,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
if (root && VALID_PAGE(root)) {
root &= PT64_BASE_ADDR_MASK;
@@ -3799,7 +3804,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
int root, leaf;
bool reserved = false;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
goto exit;
walk_shadow_page_lockless_begin(vcpu);
@@ -3816,7 +3821,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
if (!is_shadow_present_pte(spte))
break;
- reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte,
+ reserved |= is_shadow_zero_bits_set(vcpu->arch.mmu, spte,
iterator.level);
}
@@ -3895,7 +3900,7 @@ static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
walk_shadow_page_lockless_begin(vcpu);
@@ -3922,7 +3927,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
if (r)
return r;
- MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
return nonpaging_map(vcpu, gva & PAGE_MASK,
@@ -3935,8 +3940,8 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
arch.gfn = gfn;
- arch.direct_map = vcpu->arch.mmu.direct_map;
- arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
+ arch.direct_map = vcpu->arch.mmu->direct_map;
+ arch.cr3 = vcpu->arch.mmu->get_cr3(vcpu);
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
@@ -4042,7 +4047,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
int write = error_code & PFERR_WRITE_MASK;
bool map_writable;
- MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa));
if (page_fault_handle_page_track(vcpu, error_code, gfn))
return RET_PF_EMULATE;
@@ -4118,7 +4123,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
{
uint i;
struct kvm_mmu_root_info root;
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
root.cr3 = mmu->get_cr3(vcpu);
root.hpa = mmu->root_hpa;
@@ -4141,7 +4146,7 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
union kvm_mmu_page_role new_role,
bool skip_tlb_flush)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
/*
* For now, limit the fast switch to 64-bit hosts+VMs in order to avoid
@@ -4192,7 +4197,8 @@ static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
bool skip_tlb_flush)
{
if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
- kvm_mmu_free_roots(vcpu, KVM_MMU_ROOT_CURRENT);
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
+ KVM_MMU_ROOT_CURRENT);
}
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
@@ -4210,7 +4216,7 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
static void inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
- vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+ vcpu->arch.mmu->inject_page_fault(vcpu, fault);
}
static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
@@ -4414,7 +4420,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
- bool uses_nx = context->nx || context->base_role.smep_andnot_wp;
+ bool uses_nx = context->nx ||
+ context->mmu_role.base.smep_andnot_wp;
struct rsvd_bits_validate *shadow_zero_check;
int i;
@@ -4553,7 +4560,7 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
* SMAP:kernel-mode data accesses from user-mode
* mappings should fault. A fault is considered
* as a SMAP violation if all of the following
- * conditions are ture:
+ * conditions are true:
* - X86_CR4_SMAP is set in CR4
* - A user page is accessed
* - The access is not a fetch
@@ -4714,27 +4721,65 @@ static void paging32E_init_context(struct kvm_vcpu *vcpu,
paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}
-static union kvm_mmu_page_role
-kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu)
+static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu)
+{
+ union kvm_mmu_extended_role ext = {0};
+
+ ext.cr0_pg = !!is_paging(vcpu);
+ ext.cr4_smep = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+ ext.cr4_smap = !!kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
+ ext.cr4_pse = !!is_pse(vcpu);
+ ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE);
+ ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57);
+
+ ext.valid = 1;
+
+ return ext;
+}
+
+static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
+ bool base_only)
+{
+ union kvm_mmu_role role = {0};
+
+ role.base.access = ACC_ALL;
+ role.base.nxe = !!is_nx(vcpu);
+ role.base.cr4_pae = !!is_pae(vcpu);
+ role.base.cr0_wp = is_write_protection(vcpu);
+ role.base.smm = is_smm(vcpu);
+ role.base.guest_mode = is_guest_mode(vcpu);
+
+ if (base_only)
+ return role;
+
+ role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+ return role;
+}
+
+static union kvm_mmu_role
+kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
{
- union kvm_mmu_page_role role = {0};
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.ad_disabled = (shadow_accessed_mask == 0);
- role.level = kvm_x86_ops->get_tdp_level(vcpu);
- role.direct = true;
- role.access = ACC_ALL;
+ role.base.ad_disabled = (shadow_accessed_mask == 0);
+ role.base.level = kvm_x86_ops->get_tdp_level(vcpu);
+ role.base.direct = true;
return role;
}
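
Packing the base and extended halves into one 64-bit union lets callers compare the whole role in a single load and skip reconfiguration when nothing changed, as init_kvm_tdp_mmu() does just below. A standalone sketch of that guard with stand-in types (all names illustrative):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	union demo_mmu_role {
		uint64_t as_u64;
		struct {
			uint32_t base;	/* stands in for kvm_mmu_page_role */
			uint32_t ext;	/* stands in for kvm_mmu_extended_role */
		};
	};

	static bool maybe_reinit(union demo_mmu_role *cur, union demo_mmu_role new)
	{
		if (new.as_u64 == cur->as_u64)
			return false;		/* unchanged, skip the reinit */
		cur->as_u64 = new.as_u64;	/* commit and reinitialize */
		return true;
	}

	int main(void)
	{
		union demo_mmu_role cur = { .as_u64 = 0 };
		union demo_mmu_role next = { .base = 0x5, .ext = 0x1 };

		printf("first: %d again: %d\n",
		       maybe_reinit(&cur, next), maybe_reinit(&cur, next));
		return 0;
	}
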
static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_tdp_mmu_root_page_role(vcpu, false);
- context->base_role.word = mmu_base_role_mask.word &
- kvm_calc_tdp_mmu_root_page_role(vcpu).word;
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
+
+ context->mmu_role.as_u64 = new_role.as_u64;
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -4774,36 +4819,36 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
-static union kvm_mmu_page_role
-kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu)
-{
- union kvm_mmu_page_role role = {0};
- bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
- bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
-
- role.nxe = is_nx(vcpu);
- role.cr4_pae = !!is_pae(vcpu);
- role.cr0_wp = is_write_protection(vcpu);
- role.smep_andnot_wp = smep && !is_write_protection(vcpu);
- role.smap_andnot_wp = smap && !is_write_protection(vcpu);
- role.guest_mode = is_guest_mode(vcpu);
- role.smm = is_smm(vcpu);
- role.direct = !is_paging(vcpu);
- role.access = ACC_ALL;
+static union kvm_mmu_role
+kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu, bool base_only)
+{
+ union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
+
+ role.base.smep_andnot_wp = role.ext.cr4_smep &&
+ !is_write_protection(vcpu);
+ role.base.smap_andnot_wp = role.ext.cr4_smap &&
+ !is_write_protection(vcpu);
+ role.base.direct = !is_paging(vcpu);
if (!is_long_mode(vcpu))
- role.level = PT32E_ROOT_LEVEL;
+ role.base.level = PT32E_ROOT_LEVEL;
else if (is_la57_mode(vcpu))
- role.level = PT64_ROOT_5LEVEL;
+ role.base.level = PT64_ROOT_5LEVEL;
else
- role.level = PT64_ROOT_4LEVEL;
+ role.base.level = PT64_ROOT_4LEVEL;
return role;
}
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_shadow_mmu_root_page_role(vcpu, false);
+
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
if (!is_paging(vcpu))
nonpaging_init_context(vcpu, context);
@@ -4814,22 +4859,28 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
else
paging32_init_context(vcpu, context);
- context->base_role.word = mmu_base_role_mask.word &
- kvm_calc_shadow_mmu_root_page_role(vcpu).word;
+ context->mmu_role.as_u64 = new_role.as_u64;
reset_shadow_zero_bits_mask(vcpu, context);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
-static union kvm_mmu_page_role
-kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
+static union kvm_mmu_role
+kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
+ bool execonly)
{
- union kvm_mmu_page_role role = vcpu->arch.mmu.base_role;
+ union kvm_mmu_role role;
+
+ /* Base role is inherited from root_mmu */
+ role.base.word = vcpu->arch.root_mmu.mmu_role.base.word;
+ role.ext = kvm_calc_mmu_role_ext(vcpu);
+
+ role.base.level = PT64_ROOT_4LEVEL;
+ role.base.direct = false;
+ role.base.ad_disabled = !accessed_dirty;
+ role.base.guest_mode = true;
+ role.base.access = ACC_ALL;
- role.level = PT64_ROOT_4LEVEL;
- role.direct = false;
- role.ad_disabled = !accessed_dirty;
- role.guest_mode = true;
- role.access = ACC_ALL;
+ role.ext.execonly = execonly;
return role;
}
@@ -4837,11 +4888,17 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty)
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
bool accessed_dirty, gpa_t new_eptp)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
- union kvm_mmu_page_role root_page_role =
- kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty);
+ struct kvm_mmu *context = vcpu->arch.mmu;
+ union kvm_mmu_role new_role =
+ kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
+ execonly);
+
+ __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == context->mmu_role.as_u64)
+ return;
- __kvm_mmu_new_cr3(vcpu, new_eptp, root_page_role, false);
context->shadow_root_level = PT64_ROOT_4LEVEL;
context->nx = true;
@@ -4853,7 +4910,8 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
context->update_pte = ept_update_pte;
context->root_level = PT64_ROOT_4LEVEL;
context->direct_map = false;
- context->base_role.word = root_page_role.word & mmu_base_role_mask.word;
+ context->mmu_role.as_u64 = new_role.as_u64;
+
update_permission_bitmask(vcpu, context, true);
update_pkru_bitmask(vcpu, context, true);
update_last_nonleaf_level(vcpu, context);
@@ -4864,7 +4922,7 @@ EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
- struct kvm_mmu *context = &vcpu->arch.mmu;
+ struct kvm_mmu *context = vcpu->arch.mmu;
kvm_init_shadow_mmu(vcpu);
context->set_cr3 = kvm_x86_ops->set_cr3;
@@ -4875,14 +4933,20 @@ static void init_kvm_softmmu(struct kvm_vcpu *vcpu)
static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_role new_role = kvm_calc_mmu_role_common(vcpu, false);
struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;
+ new_role.base.word &= mmu_base_role_mask.word;
+ if (new_role.as_u64 == g_context->mmu_role.as_u64)
+ return;
+
+ g_context->mmu_role.as_u64 = new_role.as_u64;
g_context->get_cr3 = get_cr3;
g_context->get_pdptr = kvm_pdptr_read;
g_context->inject_page_fault = kvm_inject_page_fault;
/*
- * Note that arch.mmu.gva_to_gpa translates l2_gpa to l1_gpa using
+ * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
* L1's nested page tables (e.g. EPT12). The nested translation
* of l2_gva to l1_gpa is done by arch.nested_mmu.gva_to_gpa using
* L2's page tables as the first level of translation and L1's
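
A conceptual standalone sketch of the two translators this comment describes; the fixed offsets are placeholders, only the composition of the two steps matters:

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t gva_t;
	typedef uint64_t gpa_t;

	/* nested_mmu step: L2 virtual to L1 physical via L2's tables */
	static gpa_t l2_gva_to_l1_gpa(gva_t gva)  { return gva + 0x1000; }
	/* arch.mmu step: L1 physical to usable GPA via L1's nested tables */
	static gpa_t l1_gpa_to_gpa(gpa_t l1_gpa)  { return l1_gpa + 0x100000; }

	int main(void)
	{
		gva_t l2_gva = 0x400000;
		gpa_t gpa = l1_gpa_to_gpa(l2_gva_to_l1_gpa(l2_gva));

		printf("l2_gva=%#llx -> gpa=%#llx\n",
		       (unsigned long long)l2_gva, (unsigned long long)gpa);
		return 0;
	}
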
@@ -4921,10 +4985,10 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots)
if (reset_roots) {
uint i;
- vcpu->arch.mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.mmu->root_hpa = INVALID_PAGE;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+ vcpu->arch.mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
}
if (mmu_is_nested(vcpu))
@@ -4939,10 +5003,14 @@ EXPORT_SYMBOL_GPL(kvm_init_mmu);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu)
{
+ union kvm_mmu_role role;
+
if (tdp_enabled)
- return kvm_calc_tdp_mmu_root_page_role(vcpu);
+ role = kvm_calc_tdp_mmu_root_page_role(vcpu, true);
else
- return kvm_calc_shadow_mmu_root_page_role(vcpu);
+ role = kvm_calc_shadow_mmu_root_page_role(vcpu, true);
+
+ return role.base;
}
void kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
@@ -4972,8 +5040,10 @@ EXPORT_SYMBOL_GPL(kvm_mmu_load);
void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
- kvm_mmu_free_roots(vcpu, KVM_MMU_ROOTS_ALL);
- WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu, KVM_MMU_ROOTS_ALL);
+ WARN_ON(VALID_PAGE(vcpu->arch.root_mmu.root_hpa));
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+ WARN_ON(VALID_PAGE(vcpu->arch.guest_mmu.root_hpa));
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);
@@ -4987,7 +5057,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
}
++vcpu->kvm->stat.mmu_pte_updated;
- vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
+ vcpu->arch.mmu->update_pte(vcpu, sp, spte, new);
}
static bool need_remote_flush(u64 old, u64 new)
@@ -5164,10 +5234,12 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
local_flush = true;
while (npte--) {
+ u32 base_role = vcpu->arch.mmu->mmu_role.base.word;
+
entry = *spte;
mmu_page_zap_pte(vcpu->kvm, sp, spte);
if (gentry &&
- !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
+ !((sp->role.word ^ base_role)
& mmu_base_role_mask.word) && rmap_can_add(vcpu))
mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
if (need_remote_flush(entry, *spte))
@@ -5185,7 +5257,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t gpa;
int r;
- if (vcpu->arch.mmu.direct_map)
+ if (vcpu->arch.mmu->direct_map)
return 0;
gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
@@ -5221,10 +5293,10 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
{
int r, emulation_type = 0;
enum emulation_result er;
- bool direct = vcpu->arch.mmu.direct_map;
+ bool direct = vcpu->arch.mmu->direct_map;
/* With shadow page tables, fault_address contains a GVA or nGPA. */
- if (vcpu->arch.mmu.direct_map) {
+ if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2;
}
@@ -5237,8 +5309,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
}
if (r == RET_PF_INVALID) {
- r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code),
- false);
+ r = vcpu->arch.mmu->page_fault(vcpu, cr2,
+ lower_32_bits(error_code),
+ false);
WARN_ON(r == RET_PF_INVALID);
}
@@ -5254,7 +5327,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
* paging in both guests. If true, we simply unprotect the page
* and resume the guest.
*/
- if (vcpu->arch.mmu.direct_map &&
+ if (vcpu->arch.mmu->direct_map &&
(error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) {
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));
return 1;
@@ -5302,7 +5375,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
int i;
/* INVLPG on a non-canonical address is a NOP according to the SDM. */
@@ -5333,7 +5406,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
{
- struct kvm_mmu *mmu = &vcpu->arch.mmu;
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
bool tlb_flush = false;
uint i;
@@ -5377,8 +5450,8 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
- free_page((unsigned long)vcpu->arch.mmu.pae_root);
- free_page((unsigned long)vcpu->arch.mmu.lm_root);
+ free_page((unsigned long)vcpu->arch.mmu->pae_root);
+ free_page((unsigned long)vcpu->arch.mmu->lm_root);
}
static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -5398,9 +5471,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
if (!page)
return -ENOMEM;
- vcpu->arch.mmu.pae_root = page_address(page);
+ vcpu->arch.mmu->pae_root = page_address(page);
for (i = 0; i < 4; ++i)
- vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
+ vcpu->arch.mmu->pae_root[i] = INVALID_PAGE;
return 0;
}
@@ -5409,27 +5482,21 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
uint i;
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
- vcpu->arch.mmu.root_hpa = INVALID_PAGE;
- vcpu->arch.mmu.translate_gpa = translate_gpa;
- vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.root_mmu.translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
-
- return alloc_mmu_pages(vcpu);
-}
+ vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
-void kvm_mmu_setup(struct kvm_vcpu *vcpu)
-{
- MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+ vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
+ vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
+ for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+ vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
- /*
- * kvm_mmu_setup() is called only on vCPU initialization.
- * Therefore, no need to reset mmu roots as they are not yet
- * initialized.
- */
- kvm_init_mmu(vcpu, false);
+ vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
+ return alloc_mmu_pages(vcpu);
}
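
kvm_mmu_create() now seeds both persistent contexts and points arch.mmu at the active one. A standalone sketch of the arrangement (illustrative types, not the kernel structures):

	#include <stdio.h>

	struct demo_mmu {
		const char *name;
	};

	struct demo_vcpu {
		struct demo_mmu root_mmu;	/* L1 (or non-nested) context */
		struct demo_mmu guest_mmu;	/* L2 context under nested paging */
		struct demo_mmu *mmu;		/* currently active context */
	};

	int main(void)
	{
		struct demo_vcpu v = {
			.root_mmu  = { "root"  },
			.guest_mmu = { "guest" },
		};

		v.mmu = &v.root_mmu;
		printf("active: %s\n", v.mmu->name);
		v.mmu = &v.guest_mmu;	/* e.g. on nested VM-entry */
		printf("active: %s\n", v.mmu->name);
		return 0;
	}
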
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
@@ -5612,7 +5679,7 @@ restart:
if (sp->role.direct &&
!kvm_is_reserved_pfn(pfn) &&
PageTransCompoundMap(pfn_to_page(pfn))) {
- drop_spte(kvm, sptep);
+ pte_list_remove(rmap_head, sptep);
need_tlb_flush = 1;
goto restart;
}
@@ -5869,6 +5936,16 @@ int kvm_mmu_module_init(void)
{
int ret = -ENOMEM;
+ /*
+ * MMU roles use union aliasing which is, generally speaking, an
+ * undefined behavior. However, we supposedly know how compilers behave
+ * and the current status quo is unlikely to change. Guardians below are
+ * supposed to let us know if the assumption becomes false.
+ */
+ BUILD_BUG_ON(sizeof(union kvm_mmu_page_role) != sizeof(u32));
+ BUILD_BUG_ON(sizeof(union kvm_mmu_extended_role) != sizeof(u32));
+ BUILD_BUG_ON(sizeof(union kvm_mmu_role) != sizeof(u64));
+
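
A userspace analogue of the guardians above: the same union-aliasing pattern, protected by a _Static_assert (names illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	union demo_role {
		uint32_t word;
		struct {
			unsigned level:4;
			unsigned direct:1;
			unsigned access:3;
		};
	};

	_Static_assert(sizeof(union demo_role) == sizeof(uint32_t),
		       "role union must stay 32 bits");

	int main(void)
	{
		union demo_role a = { .word = 0 }, b;

		a.level = 4;
		a.direct = 1;
		b.word = a.word;	/* whole-role copy/compare in one load */
		printf("roles equal: %d\n", a.word == b.word);
		return 0;
	}
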
kvm_mmu_reset_all_pte_masks();
pte_list_desc_cache = kmem_cache_create("pte_list_desc",
@@ -5898,7 +5975,7 @@ out:
}
/*
- * Caculate mmu pages needed for kvm.
+ * Calculate mmu pages needed for kvm.
*/
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 1fab69c0b2f3..c7b333147c4a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -43,11 +43,6 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3
-#define PT_PDPE_LEVEL 3
-#define PT_DIRECTORY_LEVEL 2
-#define PT_PAGE_TABLE_LEVEL 1
-#define PT_MAX_HUGEPAGE_LEVEL (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES - 1)
-
static inline u64 rsvd_bits(int s, int e)
{
if (e < s)
@@ -80,7 +75,7 @@ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
- if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
+ if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
return 0;
return kvm_mmu_load(vcpu);
@@ -102,9 +97,9 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
{
- if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
- vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa |
- kvm_get_active_pcid(vcpu));
+ if (VALID_PAGE(vcpu->arch.mmu->root_hpa))
+ vcpu->arch.mmu->set_cr3(vcpu, vcpu->arch.mmu->root_hpa |
+ kvm_get_active_pcid(vcpu));
}
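
kvm_mmu_load_cr3() composes CR3 from the page-aligned root and the active PCID in the low 12 bits. A standalone sketch of that packing (placeholder values):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t root_hpa = 0x1234000;	/* page-aligned root table */
		uint64_t pcid = 0x5;		/* active PCID, low 12 bits */
		uint64_t cr3 = root_hpa | pcid;

		printf("cr3=%#llx root=%#llx pcid=%llu\n",
		       (unsigned long long)cr3,
		       (unsigned long long)(cr3 & ~0xfffULL),
		       (unsigned long long)(cr3 & 0xfffULL));
		return 0;
	}
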
/*
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 1272861e77b9..abac7e208853 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -59,19 +59,19 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
int i;
struct kvm_mmu_page *sp;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
- if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) {
- hpa_t root = vcpu->arch.mmu.root_hpa;
+ if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
+ hpa_t root = vcpu->arch.mmu->root_hpa;
sp = page_header(root);
- __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu.root_level);
+ __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
return;
}
for (i = 0; i < 4; ++i) {
- hpa_t root = vcpu->arch.mmu.pae_root[i];
+ hpa_t root = vcpu->arch.mmu->pae_root[i];
if (root && VALID_PAGE(root)) {
root &= PT64_BASE_ADDR_MASK;
@@ -122,7 +122,7 @@ static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
hpa = pfn << PAGE_SHIFT;
if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
audit_printk(vcpu->kvm, "levels %d pfn %llx hpa %llx "
- "ent %llxn", vcpu->arch.mmu.root_level, pfn,
+ "ent %llxn", vcpu->arch.mmu->root_level, pfn,
hpa, *sptep);
}
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 14ffd973df54..7cf2185b7eb5 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -158,14 +158,15 @@ static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp, u64 *spte,
u64 gpte)
{
- if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+ if (is_rsvd_bits_set(vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
goto no_present;
if (!FNAME(is_present_gpte)(gpte))
goto no_present;
/* if accessed bit is not supported prefetch non accessed gpte */
- if (PT_HAVE_ACCESSED_DIRTY(&vcpu->arch.mmu) && !(gpte & PT_GUEST_ACCESSED_MASK))
+ if (PT_HAVE_ACCESSED_DIRTY(vcpu->arch.mmu) &&
+ !(gpte & PT_GUEST_ACCESSED_MASK))
goto no_present;
return false;
@@ -480,7 +481,7 @@ error:
static int FNAME(walk_addr)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
- return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
+ return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr,
access);
}
@@ -509,7 +510,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access & FNAME(gpte_access)(gpte);
- FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+ FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
no_dirty_log && (pte_access & ACC_WRITE_MASK));
if (is_error_pfn(pfn))
@@ -604,7 +605,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
direct_access = gw->pte_access;
- top_level = vcpu->arch.mmu.root_level;
+ top_level = vcpu->arch.mmu->root_level;
if (top_level == PT32E_ROOT_LEVEL)
top_level = PT32_ROOT_LEVEL;
/*
@@ -616,7 +617,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
goto out_gpte_changed;
for (shadow_walk_init(&it, vcpu, addr);
@@ -1004,7 +1005,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
pte_access &= FNAME(gpte_access)(gpte);
- FNAME(protect_clean_gpte)(&vcpu->arch.mmu, &pte_access, gpte);
+ FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte);
if (sync_mmio_spte(vcpu, &sp->spt[i], gfn, pte_access,
&nr_present))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 61ccfb13899e..0e21ccc46792 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -809,6 +809,8 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu)
nested_svm_check_exception(svm, nr, has_error_code, error_code))
return;
+ kvm_deliver_exception_payload(&svm->vcpu);
+
if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
@@ -2922,18 +2924,18 @@ static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
kvm_init_shadow_mmu(vcpu);
- vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
- vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
- vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
- vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
- vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
- reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
+ vcpu->arch.mmu->set_cr3 = nested_svm_set_tdp_cr3;
+ vcpu->arch.mmu->get_cr3 = nested_svm_get_tdp_cr3;
+ vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
+ vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
+ vcpu->arch.mmu->shadow_root_level = get_npt_level(vcpu);
+ reset_shadow_zero_bits_mask(vcpu, vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
static int nested_svm_check_permissions(struct vcpu_svm *svm)
@@ -2969,16 +2971,13 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
svm->vmcb->control.exit_info_1 = error_code;
/*
- * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
- * The fix is to add the ancillary datum (CR2 or DR6) to structs
- * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
- * written only when inject_pending_event runs (DR6 would written here
- * too). This should be conditional on a new capability---if the
- * capability is disabled, kvm_multiple_exception would write the
- * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
+ * EXITINFO2 is undefined for all exception intercepts other
+ * than #PF.
*/
if (svm->vcpu.arch.exception.nested_apf)
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
+ else if (svm->vcpu.arch.exception.has_payload)
+ svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
else
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
@@ -5642,26 +5641,24 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%[svm]) \n\t"
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
-#endif
/*
* Clear host registers marked as clobbered to prevent
* speculative use.
*/
- "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
- "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
- "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
- "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
- "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
-#ifdef CONFIG_X86_64
- "xor %%r8, %%r8 \n\t"
- "xor %%r9, %%r9 \n\t"
- "xor %%r10, %%r10 \n\t"
- "xor %%r11, %%r11 \n\t"
- "xor %%r12, %%r12 \n\t"
- "xor %%r13, %%r13 \n\t"
- "xor %%r14, %%r14 \n\t"
- "xor %%r15, %%r15 \n\t"
+ "xor %%r8d, %%r8d \n\t"
+ "xor %%r9d, %%r9d \n\t"
+ "xor %%r10d, %%r10d \n\t"
+ "xor %%r11d, %%r11d \n\t"
+ "xor %%r12d, %%r12d \n\t"
+ "xor %%r13d, %%r13d \n\t"
+ "xor %%r14d, %%r14d \n\t"
+ "xor %%r15d, %%r15d \n\t"
#endif
+ "xor %%ebx, %%ebx \n\t"
+ "xor %%ecx, %%ecx \n\t"
+ "xor %%edx, %%edx \n\t"
+ "xor %%esi, %%esi \n\t"
+ "xor %%edi, %%edi \n\t"
"pop %%" _ASM_BP
:
: [svm]"a"(svm),
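
The switch to 32-bit xor relies on x86-64 zero-extension: writing a 32-bit register clears the whole 64-bit register, so the 32-bit form is the canonical zeroing idiom and never a longer encoding. A standalone demonstration (assumes an x86-64 GCC/Clang target):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t r = 0xdeadbeefcafef00dULL;

		asm volatile("movq %1, %%r8\n\t"
			     "xorl %%r8d, %%r8d\n\t"	/* clears all 64 bits of %r8 */
			     "movq %%r8, %0"
			     : "=r"(r)
			     : "r"(r)
			     : "r8");
		printf("%%r8 after 32-bit xor: %#llx\n", (unsigned long long)r);
		return 0;
	}
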
@@ -7040,6 +7037,13 @@ failed:
return ret;
}
+static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+{
+ /* Intel-only feature */
+ return -ENODEV;
+}
+
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
@@ -7169,6 +7173,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.mem_enc_op = svm_mem_enc_op,
.mem_enc_reg_region = svm_register_enc_region,
.mem_enc_unreg_region = svm_unregister_enc_region,
+
+ .nested_enable_evmcs = nested_enable_evmcs,
};
static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 0f997683404f..0659465a745c 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1418,6 +1418,48 @@ TRACE_EVENT(kvm_hv_flush_tlb_ex,
__entry->valid_bank_mask, __entry->format,
__entry->address_space, __entry->flags)
);
+
+/*
+ * Tracepoints for kvm_hv_send_ipi.
+ */
+TRACE_EVENT(kvm_hv_send_ipi,
+ TP_PROTO(u32 vector, u64 processor_mask),
+ TP_ARGS(vector, processor_mask),
+
+ TP_STRUCT__entry(
+ __field(u32, vector)
+ __field(u64, processor_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->processor_mask = processor_mask;
+ ),
+
+ TP_printk("vector %x processor_mask 0x%llx",
+ __entry->vector, __entry->processor_mask)
+);
+
+TRACE_EVENT(kvm_hv_send_ipi_ex,
+ TP_PROTO(u32 vector, u64 format, u64 valid_bank_mask),
+ TP_ARGS(vector, format, valid_bank_mask),
+
+ TP_STRUCT__entry(
+ __field(u32, vector)
+ __field(u64, format)
+ __field(u64, valid_bank_mask)
+ ),
+
+ TP_fast_assign(
+ __entry->vector = vector;
+ __entry->format = format;
+ __entry->valid_bank_mask = valid_bank_mask;
+ ),
+
+ TP_printk("vector %x format %llx valid_bank_mask 0x%llx",
+ __entry->vector, __entry->format,
+ __entry->valid_bank_mask)
+);
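
Once the tracepoints land they can be sampled from tracefs; a hedged userspace sketch (assumes tracefs mounted at /sys/kernel/tracing and the event living in the kvm group):

	#include <stdio.h>

	int main(void)
	{
		const char *en =
			"/sys/kernel/tracing/events/kvm/kvm_hv_send_ipi/enable";
		FILE *f = fopen(en, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1", f);
		fclose(f);
		/* IPI events now stream out of /sys/kernel/tracing/trace_pipe */
		return 0;
	}
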
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e665aa7167cf..4555077d69ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -20,6 +20,7 @@
#include "mmu.h"
#include "cpuid.h"
#include "lapic.h"
+#include "hyperv.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
@@ -61,7 +62,7 @@
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
- ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
+ ____kvm_handle_fault_on_reboot(x, "xor " reg ", " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
@@ -107,9 +108,12 @@ module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
* VMX and be a hypervisor for its own guests. If nested=0, guests may not
* use VMX instructions.
*/
-static bool __read_mostly nested = 0;
+static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);
+static bool __read_mostly nested_early_check = 0;
+module_param(nested_early_check, bool, S_IRUGO);
+
static u64 __read_mostly host_xss;
static bool __read_mostly enable_pml = 1;
@@ -131,7 +135,7 @@ static bool __read_mostly enable_preemption_timer = 1;
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif
-#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
+#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | \
@@ -187,6 +191,7 @@ static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);
extern const ulong vmx_return;
+extern const ulong vmx_early_consistency_check_return;
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
@@ -827,14 +832,28 @@ struct nested_vmx {
*/
struct vmcs12 *cached_shadow_vmcs12;
/*
- * Indicates if the shadow vmcs must be updated with the
- * data hold by vmcs12
+ * Indicates if the shadow vmcs or enlightened vmcs must be updated
+ * with the data held by struct vmcs12.
*/
- bool sync_shadow_vmcs;
+ bool need_vmcs12_sync;
bool dirty_vmcs12;
+ /*
+ * vmcs02 has been initialized, i.e. state that is constant for
+ * vmcs02 has been written to the backing VMCS. Initialization
+ * is delayed until L1 actually attempts to run a nested VM.
+ */
+ bool vmcs02_initialized;
+
bool change_vmcs01_virtual_apic_mode;
+ /*
+ * Enlightened VMCS has been enabled. It does not mean that L1 has to
+ * use it. However, VMX features available to L1 will be limited based
+ * on what the enlightened VMCS supports.
+ */
+ bool enlightened_vmcs_enabled;
+
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
@@ -870,6 +889,10 @@ struct nested_vmx {
/* in guest mode on SMM entry? */
bool guest_mode;
} smm;
+
+ gpa_t hv_evmcs_vmptr;
+ struct page *hv_evmcs_page;
+ struct hv_enlightened_vmcs *hv_evmcs;
};
#define POSTED_INTR_ON 0
@@ -1381,6 +1404,49 @@ DEFINE_STATIC_KEY_FALSE(enable_evmcs);
#define KVM_EVMCS_VERSION 1
+/*
+ * Enlightened VMCSv1 doesn't support these:
+ *
+ * POSTED_INTR_NV = 0x00000002,
+ * GUEST_INTR_STATUS = 0x00000810,
+ * APIC_ACCESS_ADDR = 0x00002014,
+ * POSTED_INTR_DESC_ADDR = 0x00002016,
+ * EOI_EXIT_BITMAP0 = 0x0000201c,
+ * EOI_EXIT_BITMAP1 = 0x0000201e,
+ * EOI_EXIT_BITMAP2 = 0x00002020,
+ * EOI_EXIT_BITMAP3 = 0x00002022,
+ * GUEST_PML_INDEX = 0x00000812,
+ * PML_ADDRESS = 0x0000200e,
+ * VM_FUNCTION_CONTROL = 0x00002018,
+ * EPTP_LIST_ADDRESS = 0x00002024,
+ * VMREAD_BITMAP = 0x00002026,
+ * VMWRITE_BITMAP = 0x00002028,
+ *
+ * TSC_MULTIPLIER = 0x00002032,
+ * PLE_GAP = 0x00004020,
+ * PLE_WINDOW = 0x00004022,
+ * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
+ * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
+ * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
+ *
+ * Currently unsupported in KVM:
+ * GUEST_IA32_RTIT_CTL = 0x00002814,
+ */
+#define EVMCS1_UNSUPPORTED_PINCTRL (PIN_BASED_POSTED_INTR | \
+ PIN_BASED_VMX_PREEMPTION_TIMER)
+#define EVMCS1_UNSUPPORTED_2NDEXEC \
+ (SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | \
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | \
+ SECONDARY_EXEC_APIC_REGISTER_VIRT | \
+ SECONDARY_EXEC_ENABLE_PML | \
+ SECONDARY_EXEC_ENABLE_VMFUNC | \
+ SECONDARY_EXEC_SHADOW_VMCS | \
+ SECONDARY_EXEC_TSC_SCALING | \
+ SECONDARY_EXEC_PAUSE_LOOP_EXITING)
+#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
+
#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);
@@ -1473,69 +1539,12 @@ static void evmcs_load(u64 phys_addr)
static void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
- /*
- * Enlightened VMCSv1 doesn't support these:
- *
- * POSTED_INTR_NV = 0x00000002,
- * GUEST_INTR_STATUS = 0x00000810,
- * APIC_ACCESS_ADDR = 0x00002014,
- * POSTED_INTR_DESC_ADDR = 0x00002016,
- * EOI_EXIT_BITMAP0 = 0x0000201c,
- * EOI_EXIT_BITMAP1 = 0x0000201e,
- * EOI_EXIT_BITMAP2 = 0x00002020,
- * EOI_EXIT_BITMAP3 = 0x00002022,
- */
- vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- vmcs_conf->cpu_based_2nd_exec_ctrl &=
- ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
-
- /*
- * GUEST_PML_INDEX = 0x00000812,
- * PML_ADDRESS = 0x0000200e,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_PML;
-
- /* VM_FUNCTION_CONTROL = 0x00002018, */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_ENABLE_VMFUNC;
-
- /*
- * EPTP_LIST_ADDRESS = 0x00002024,
- * VMREAD_BITMAP = 0x00002026,
- * VMWRITE_BITMAP = 0x00002028,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_SHADOW_VMCS;
-
- /*
- * TSC_MULTIPLIER = 0x00002032,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_TSC_SCALING;
+ vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmcs_conf->cpu_based_2nd_exec_ctrl &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
- /*
- * PLE_GAP = 0x00004020,
- * PLE_WINDOW = 0x00004022,
- */
- vmcs_conf->cpu_based_2nd_exec_ctrl &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-
- /*
- * VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
- */
- vmcs_conf->pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
-
- /*
- * GUEST_IA32_PERF_GLOBAL_CTRL = 0x00002808,
- * HOST_IA32_PERF_GLOBAL_CTRL = 0x00002c04,
- */
- vmcs_conf->vmexit_ctrl &= ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
- vmcs_conf->vmentry_ctrl &= ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
+ vmcs_conf->vmexit_ctrl &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmcs_conf->vmentry_ctrl &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
- /*
- * Currently unsupported in KVM:
- * GUEST_IA32_RTIT_CTL = 0x00002814,
- */
}
/* check_ept_pointer() should be under protection of ept_pointer_lock. */
@@ -1560,26 +1569,27 @@ static void check_ept_pointer_match(struct kvm *kvm)
static int vmx_hv_remote_flush_tlb(struct kvm *kvm)
{
- int ret;
+ struct kvm_vcpu *vcpu;
+ int ret = -ENOTSUPP, i;
spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);
if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
check_ept_pointer_match(kvm);
- if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
- ret = -ENOTSUPP;
- goto out;
- }
-
/*
* FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the
* base of EPT PML4 table, strip off EPT configuration information.
*/
- ret = hyperv_flush_guest_mapping(
- to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+ if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ ret |= hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, i))->ept_pointer & PAGE_MASK);
+ } else {
+ ret = hyperv_flush_guest_mapping(
+ to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK);
+ }
-out:
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
return ret;
}
@@ -1595,6 +1605,35 @@ static inline void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf) {}
static inline void evmcs_touch_msr_bitmap(void) {}
#endif /* IS_ENABLED(CONFIG_HYPERV) */
+static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
+ uint16_t *vmcs_version)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /* We don't support disabling the feature for simplicity. */
+ if (vmx->nested.enlightened_vmcs_enabled)
+ return 0;
+
+ vmx->nested.enlightened_vmcs_enabled = true;
+
+ /*
+ * vmcs_version represents the range of supported Enlightened VMCS
+ * versions: lower 8 bits is the minimal version, higher 8 bits is the
+ * maximum supported version. KVM supports versions from 1 to
+ * KVM_EVMCS_VERSION.
+ */
+ if (vmcs_version)
+ *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1;
+
+ vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
+ vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL;
+ vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL;
+ vmx->nested.msrs.secondary_ctls_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
+ vmx->nested.msrs.vmfunc_controls &= ~EVMCS1_UNSUPPORTED_VMFUNC;
+
+ return 0;
+}
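
A standalone sketch of the version packing the comment describes (DEMO_EVMCS_VERSION stands in for KVM_EVMCS_VERSION):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_EVMCS_VERSION 1

	int main(void)
	{
		uint16_t vmcs_version = (DEMO_EVMCS_VERSION << 8) | 1;

		printf("min version %u, max version %u\n",
		       vmcs_version & 0xff, vmcs_version >> 8);
		return 0;
	}
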
+
static inline bool is_exception_n(u32 intr_info, u8 vector)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -1617,11 +1656,6 @@ static inline bool is_page_fault(u32 intr_info)
return is_exception_n(intr_info, PF_VECTOR);
}
-static inline bool is_no_device(u32 intr_info)
-{
- return is_exception_n(intr_info, NM_VECTOR);
-}
-
static inline bool is_invalid_opcode(u32 intr_info)
{
return is_exception_n(intr_info, UD_VECTOR);
@@ -1632,12 +1666,6 @@ static inline bool is_gp_fault(u32 intr_info)
return is_exception_n(intr_info, GP_VECTOR);
}
-static inline bool is_external_interrupt(u32 intr_info)
-{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
- == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
-}
-
static inline bool is_machine_check(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -2063,9 +2091,6 @@ static inline bool is_nmi(u32 intr_info)
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification);
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 reason, unsigned long qualification);
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
@@ -2077,7 +2102,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
return -1;
}
-static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
@@ -2086,22 +2111,20 @@ static inline void __invvpid(int ext, u16 vpid, gva_t gva)
} operand = { vpid, 0, gva };
bool error;
- asm volatile (__ex(ASM_VMX_INVVPID) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&operand), "c"(ext)
- : "memory");
+ asm volatile (__ex("invvpid %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
BUG_ON(error);
}
-static inline void __invept(int ext, u64 eptp, gpa_t gpa)
+static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
bool error;
- asm volatile (__ex(ASM_VMX_INVEPT) CC_SET(na)
- : CC_OUT(na) (error) : "a" (&operand), "c" (ext)
- : "memory");
+ asm volatile (__ex("invept %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(ext), "m"(operand));
BUG_ON(error);
}
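
The rewritten helpers pass the operand through an "m" constraint instead of forcing its address into %rax for a .byte-encoded opcode, letting the assembler emit the real mnemonic and the compiler pick the addressing mode. A harmless standalone illustration of the constraint style (movq stands in for invvpid/invept):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		struct {
			uint64_t lo, hi;
		} operand = { 42, 0 };
		uint64_t out;

		asm("movq %1, %0"	/* stands in for "invvpid %1, %2" etc. */
		    : "=r"(out)
		    : "m"(operand));
		printf("loaded %llu through an \"m\" operand\n",
		       (unsigned long long)out);
		return 0;
	}
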
@@ -2120,9 +2143,8 @@ static void vmcs_clear(struct vmcs *vmcs)
u64 phys_addr = __pa(vmcs);
bool error;
- asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
- : "memory");
+ asm volatile (__ex("vmclear %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
if (unlikely(error))
printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
vmcs, phys_addr);
@@ -2145,9 +2167,8 @@ static void vmcs_load(struct vmcs *vmcs)
if (static_branch_unlikely(&enable_evmcs))
return evmcs_load(phys_addr);
- asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(&phys_addr), "m"(phys_addr)
- : "memory");
+ asm volatile (__ex("vmptrld %1") CC_SET(na)
+ : CC_OUT(na) (error) : "m"(phys_addr));
if (unlikely(error))
printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
@@ -2323,8 +2344,8 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
unsigned long value;
- asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
- : "=a"(value) : "d"(field) : "cc");
+ asm volatile (__ex_clear("vmread %1, %0", "%k0")
+ : "=r"(value) : "r"(field));
return value;
}
@@ -2375,8 +2396,8 @@ static __always_inline void __vmcs_writel(unsigned long field, unsigned long val
{
bool error;
- asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) CC_SET(na)
- : CC_OUT(na) (error) : "a"(value), "d"(field));
+ asm volatile (__ex("vmwrite %2, %1") CC_SET(na)
+ : CC_OUT(na) (error) : "r"(field), "rm"(value));
if (unlikely(error))
vmwrite_error(field, value);
}
@@ -2707,7 +2728,8 @@ static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
u64 guest_val, u64 host_val)
{
vmcs_write64(guest_val_vmcs, guest_val);
- vmcs_write64(host_val_vmcs, host_val);
+ if (host_val_vmcs != HOST_IA32_EFER)
+ vmcs_write64(host_val_vmcs, host_val);
vm_entry_controls_setbit(vmx, entry);
vm_exit_controls_setbit(vmx, exit);
}
@@ -2805,8 +2827,6 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
ignore_bits &= ~(u64)EFER_SCE;
#endif
- clear_atomic_switch_msr(vmx, MSR_EFER);
-
/*
* On EPT, we can't emulate NX, so we must switch EFER atomically.
* On CPUs that support "load IA32_EFER", always switch EFER
@@ -2819,8 +2839,12 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer, false);
+ else
+ clear_atomic_switch_msr(vmx, MSR_EFER);
return false;
} else {
+ clear_atomic_switch_msr(vmx, MSR_EFER);
+
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;
@@ -3272,34 +3296,30 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned int nr = vcpu->arch.exception.nr;
+ bool has_payload = vcpu->arch.exception.has_payload;
+ unsigned long payload = vcpu->arch.exception.payload;
if (nr == PF_VECTOR) {
if (vcpu->arch.exception.nested_apf) {
*exit_qual = vcpu->arch.apf.nested_apf_token;
return 1;
}
- /*
- * FIXME: we must not write CR2 when L1 intercepts an L2 #PF exception.
- * The fix is to add the ancillary datum (CR2 or DR6) to structs
- * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6
- * can be written only when inject_pending_event runs. This should be
- * conditional on a new capability---if the capability is disabled,
- * kvm_multiple_exception would write the ancillary information to
- * CR2 or DR6, for backwards ABI-compatibility.
- */
if (nested_vmx_is_page_fault_vmexit(vmcs12,
vcpu->arch.exception.error_code)) {
- *exit_qual = vcpu->arch.cr2;
- return 1;
- }
- } else {
- if (vmcs12->exception_bitmap & (1u << nr)) {
- if (nr == DB_VECTOR)
- *exit_qual = vcpu->arch.dr6;
- else
- *exit_qual = 0;
+ *exit_qual = has_payload ? payload : vcpu->arch.cr2;
return 1;
}
+ } else if (vmcs12->exception_bitmap & (1u << nr)) {
+ if (nr == DB_VECTOR) {
+ if (!has_payload) {
+ payload = vcpu->arch.dr6;
+ payload &= ~(DR6_FIXED_1 | DR6_BT);
+ payload ^= DR6_RTM;
+ }
+ *exit_qual = payload;
+ } else
+ *exit_qual = 0;
+ return 1;
}
return 0;
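
A standalone sketch of the #DB payload transform above; the DEMO_* constants mirror the architectural DR6 bits (BT is bit 15, RTM bit 16, FIXED_1 reads as 0xfffe0ff0):

	#include <stdint.h>
	#include <stdio.h>

	#define DEMO_DR6_BT		(1UL << 15)
	#define DEMO_DR6_RTM		(1UL << 16)
	#define DEMO_DR6_FIXED_1	0xfffe0ff0UL

	int main(void)
	{
		unsigned long dr6 = DEMO_DR6_FIXED_1 | 0x1;	/* B0 hit, no RTM */
		unsigned long payload = (dr6 & ~(DEMO_DR6_FIXED_1 | DEMO_DR6_BT))
					^ DEMO_DR6_RTM;	/* RTM has inverted polarity */

		printf("dr6=%#lx -> exit_qual payload=%#lx\n", dr6, payload);
		return 0;
	}
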
@@ -3326,6 +3346,8 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu)
u32 error_code = vcpu->arch.exception.error_code;
u32 intr_info = nr | INTR_INFO_VALID_MASK;
+ kvm_deliver_exception_payload(vcpu);
+
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
@@ -4397,9 +4419,7 @@ static void kvm_cpu_vmxon(u64 addr)
cr4_set_bits(X86_CR4_VMXE);
intel_pt_handle_vmx(1);
- asm volatile (ASM_VMX_VMXON_RAX
- : : "a"(&addr), "m"(addr)
- : "memory", "cc");
+ asm volatile ("vmxon %0" : : "m"(addr));
}
static int hardware_enable(void)
@@ -4468,7 +4488,7 @@ static void vmclear_local_loaded_vmcss(void)
*/
static void kvm_cpu_vmxoff(void)
{
- asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+ asm volatile (__ex("vmxoff"));
intel_pt_handle_vmx(0);
cr4_clear_bits(X86_CR4_VMXE);
@@ -5112,9 +5132,10 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
bool invalidate_gpa)
{
if (enable_ept && (invalidate_gpa || !enable_vpid)) {
- if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ if (!VALID_PAGE(vcpu->arch.mmu->root_hpa))
return;
- ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
+ ept_sync_context(construct_eptp(vcpu,
+ vcpu->arch.mmu->root_hpa));
} else {
vpid_sync_context(vpid);
}
@@ -5264,7 +5285,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
- hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
+ hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF);
if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
@@ -6339,6 +6360,9 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
rdmsr(MSR_IA32_CR_PAT, low32, high32);
vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
}
+
+ if (cpu_has_load_ia32_efer)
+ vmcs_write64(HOST_IA32_EFER, host_efer);
}
static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
@@ -6666,7 +6690,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
if (enable_pml) {
- ASSERT(vmx->pml_pg);
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
@@ -8067,35 +8090,39 @@ static int handle_monitor(struct kvm_vcpu *vcpu)
/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
- * set the success or error code of an emulated VMX instruction, as specified
- * by Vol 2B, VMX Instruction Reference, "Conventions".
+ * set the success or error code of an emulated VMX instruction (as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
+ * instruction.
*/
-static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+ return kvm_skip_emulated_instruction(vcpu);
}
-static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
| X86_EFLAGS_CF);
+ return kvm_skip_emulated_instruction(vcpu);
}
-static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
- u32 vm_instruction_error)
+static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
+ u32 vm_instruction_error)
{
- if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
- /*
- * failValid writes the error number to the current VMCS, which
- * can't be done there isn't a current VMCS.
- */
- nested_vmx_failInvalid(vcpu);
- return;
- }
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done if there isn't a current VMCS.
+ */
+ if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
+ return nested_vmx_failInvalid(vcpu);
+
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
@@ -8105,6 +8132,7 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
* We don't need to force a shadow sync because
* VM_INSTRUCTION_ERROR is not shadowed
*/
+ return kvm_skip_emulated_instruction(vcpu);
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
@@ -8292,6 +8320,7 @@ static int enter_vmx_operation(struct kvm_vcpu *vcpu)
vmx->nested.vpid02 = allocate_vpid();
+ vmx->nested.vmcs02_initialized = false;
vmx->nested.vmxon = true;
return 0;
@@ -8345,10 +8374,9 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
return 1;
}
- if (vmx->nested.vmxon) {
- nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmx->nested.vmxon)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
@@ -8367,21 +8395,17 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
* which replaces physical address width with 32
*/
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failInvalid(vcpu);
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (is_error_page(page))
+ return nested_vmx_failInvalid(vcpu);
+
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
kvm_release_page_clean(page);
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_failInvalid(vcpu);
}
kunmap(page);
kvm_release_page_clean(page);
@@ -8391,8 +8415,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
if (ret)
return ret;
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/*
@@ -8423,8 +8446,24 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
-static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
+static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!vmx->nested.hv_evmcs)
+ return;
+
+ kunmap(vmx->nested.hv_evmcs_page);
+ kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
+ vmx->nested.hv_evmcs_vmptr = -1ull;
+ vmx->nested.hv_evmcs_page = NULL;
+ vmx->nested.hv_evmcs = NULL;
+}
+
+static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (vmx->nested.current_vmptr == -1ull)
return;
@@ -8432,16 +8471,18 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
/* copy to memory all shadowed fields in case
they were modified */
copy_shadow_to_vmcs12(vmx);
- vmx->nested.sync_shadow_vmcs = false;
+ vmx->nested.need_vmcs12_sync = false;
vmx_disable_shadow_vmcs(vmx);
}
vmx->nested.posted_intr_nv = -1;
/* Flush VMCS12 to guest memory */
- kvm_vcpu_write_guest_page(&vmx->vcpu,
+ kvm_vcpu_write_guest_page(vcpu,
vmx->nested.current_vmptr >> PAGE_SHIFT,
vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
vmx->nested.current_vmptr = -1ull;
}
@@ -8449,8 +8490,10 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
* Free whatever needs to be freed from vmx->nested when L1 goes down, or
* just stops using VMX.
*/
-static void free_nested(struct vcpu_vmx *vmx)
+static void free_nested(struct kvm_vcpu *vcpu)
{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
@@ -8483,6 +8526,10 @@ static void free_nested(struct vcpu_vmx *vmx)
vmx->nested.pi_desc = NULL;
}
+ kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
+
+ nested_release_evmcs(vcpu);
+
free_loaded_vmcs(&vmx->nested.vmcs02);
}
@@ -8491,9 +8538,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
{
if (!nested_vmx_check_permission(vcpu))
return 1;
- free_nested(to_vmx(vcpu));
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ free_nested(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/* Emulate the VMCLEAR instruction */
@@ -8509,25 +8555,28 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_INVALID_ADDRESS);
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMCLEAR_VMXON_POINTER);
- if (vmptr == vmx->nested.current_vmptr)
- nested_release_vmcs12(vmx);
+ if (vmx->nested.hv_evmcs_page) {
+ if (vmptr == vmx->nested.hv_evmcs_vmptr)
+ nested_release_evmcs(vcpu);
+ } else {
+ if (vmptr == vmx->nested.current_vmptr)
+ nested_release_vmcs12(vcpu);
- kvm_vcpu_write_guest(vcpu,
- vmptr + offsetof(struct vmcs12, launch_state),
- &zero, sizeof(zero));
+ kvm_vcpu_write_guest(vcpu,
+ vmptr + offsetof(struct vmcs12,
+ launch_state),
+ &zero, sizeof(zero));
+ }
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
@@ -8610,6 +8659,395 @@ static inline int vmcs12_write_any(struct vmcs12 *vmcs12,
}
+static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ vmcs12->hdr.revision_id = evmcs->revision_id;
+
+ /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
+ vmcs12->tpr_threshold = evmcs->tpr_threshold;
+ vmcs12->guest_rip = evmcs->guest_rip;
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
+ vmcs12->guest_rsp = evmcs->guest_rsp;
+ vmcs12->guest_rflags = evmcs->guest_rflags;
+ vmcs12->guest_interruptibility_info =
+ evmcs->guest_interruptibility_info;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
+ vmcs12->cpu_based_vm_exec_control =
+ evmcs->cpu_based_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
+ vmcs12->exception_bitmap = evmcs->exception_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
+ vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
+ vmcs12->vm_entry_intr_info_field =
+ evmcs->vm_entry_intr_info_field;
+ vmcs12->vm_entry_exception_error_code =
+ evmcs->vm_entry_exception_error_code;
+ vmcs12->vm_entry_instruction_len =
+ evmcs->vm_entry_instruction_len;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
+ vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
+ vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
+ vmcs12->host_cr0 = evmcs->host_cr0;
+ vmcs12->host_cr3 = evmcs->host_cr3;
+ vmcs12->host_cr4 = evmcs->host_cr4;
+ vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
+ vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
+ vmcs12->host_rip = evmcs->host_rip;
+ vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
+ vmcs12->host_es_selector = evmcs->host_es_selector;
+ vmcs12->host_cs_selector = evmcs->host_cs_selector;
+ vmcs12->host_ss_selector = evmcs->host_ss_selector;
+ vmcs12->host_ds_selector = evmcs->host_ds_selector;
+ vmcs12->host_fs_selector = evmcs->host_fs_selector;
+ vmcs12->host_gs_selector = evmcs->host_gs_selector;
+ vmcs12->host_tr_selector = evmcs->host_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
+ vmcs12->pin_based_vm_exec_control =
+ evmcs->pin_based_vm_exec_control;
+ vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
+ vmcs12->secondary_vm_exec_control =
+ evmcs->secondary_vm_exec_control;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
+ vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
+ vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
+ vmcs12->msr_bitmap = evmcs->msr_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
+ vmcs12->guest_es_base = evmcs->guest_es_base;
+ vmcs12->guest_cs_base = evmcs->guest_cs_base;
+ vmcs12->guest_ss_base = evmcs->guest_ss_base;
+ vmcs12->guest_ds_base = evmcs->guest_ds_base;
+ vmcs12->guest_fs_base = evmcs->guest_fs_base;
+ vmcs12->guest_gs_base = evmcs->guest_gs_base;
+ vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
+ vmcs12->guest_tr_base = evmcs->guest_tr_base;
+ vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
+ vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
+ vmcs12->guest_es_limit = evmcs->guest_es_limit;
+ vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
+ vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
+ vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
+ vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
+ vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
+ vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
+ vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
+ vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
+ vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
+ vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
+ vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
+ vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
+ vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
+ vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
+ vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
+ vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
+ vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
+ vmcs12->guest_es_selector = evmcs->guest_es_selector;
+ vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
+ vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
+ vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
+ vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
+ vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
+ vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
+ vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
+ vmcs12->tsc_offset = evmcs->tsc_offset;
+ vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
+ vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
+ vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
+ vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
+ vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
+ vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
+ vmcs12->guest_cr0 = evmcs->guest_cr0;
+ vmcs12->guest_cr3 = evmcs->guest_cr3;
+ vmcs12->guest_cr4 = evmcs->guest_cr4;
+ vmcs12->guest_dr7 = evmcs->guest_dr7;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
+ vmcs12->host_fs_base = evmcs->host_fs_base;
+ vmcs12->host_gs_base = evmcs->host_gs_base;
+ vmcs12->host_tr_base = evmcs->host_tr_base;
+ vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
+ vmcs12->host_idtr_base = evmcs->host_idtr_base;
+ vmcs12->host_rsp = evmcs->host_rsp;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
+ vmcs12->ept_pointer = evmcs->ept_pointer;
+ vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
+ }
+
+ if (unlikely(!(evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
+ vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
+ vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
+ vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
+ vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
+ vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
+ vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
+ vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
+ vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
+ vmcs12->guest_pending_dbg_exceptions =
+ evmcs->guest_pending_dbg_exceptions;
+ vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
+ vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
+ vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
+ vmcs12->guest_activity_state = evmcs->guest_activity_state;
+ vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
+ }
+
+ /*
+ * Not used?
+ * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
+ * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
+ * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
+ * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
+ * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
+ * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
+ * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
+ * vmcs12->page_fault_error_code_mask =
+ * evmcs->page_fault_error_code_mask;
+ * vmcs12->page_fault_error_code_match =
+ * evmcs->page_fault_error_code_match;
+ * vmcs12->cr3_target_count = evmcs->cr3_target_count;
+ * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
+ * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
+ * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
+ */
+
+ /*
+ * Read only fields:
+ * vmcs12->guest_physical_address = evmcs->guest_physical_address;
+ * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
+ * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
+ * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
+ * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
+ * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
+ * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
+ * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
+ * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
+ * vmcs12->exit_qualification = evmcs->exit_qualification;
+ * vmcs12->guest_linear_address = evmcs->guest_linear_address;
+ *
+ * Not present in struct vmcs12:
+ * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
+ * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
+ * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
+ * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
+ */
+
+ return 0;
+}
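
The cascade of hv_clean_fields tests implements the Hyper-V clean-fields protocol: the guest hypervisor clears a group's bit whenever it rewrites a field in that group, so a still-set bit lets KVM skip copying the whole group. A hypothetical helper (not in the patch, shown only to make the pattern explicit):

    /* Hypothetical helper, for illustration only. */
    static inline bool evmcs_group_dirty(const struct hv_enlightened_vmcs *evmcs,
    				     u32 clean_field_bit)
    {
    	/* A clear bit means the guest touched the group since KVM last saw it. */
    	return !(evmcs->hv_clean_fields & clean_field_bit);
    }

With it, the GUEST_BASIC block above reads as: if the group is dirty, re-copy guest_rsp, guest_rflags and the interruptibility info, otherwise keep the cached vmcs12 values.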
+
+static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
+{
+ struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
+
+ /*
+ * Should not be changed by KVM:
+ *
+ * evmcs->host_es_selector = vmcs12->host_es_selector;
+ * evmcs->host_cs_selector = vmcs12->host_cs_selector;
+ * evmcs->host_ss_selector = vmcs12->host_ss_selector;
+ * evmcs->host_ds_selector = vmcs12->host_ds_selector;
+ * evmcs->host_fs_selector = vmcs12->host_fs_selector;
+ * evmcs->host_gs_selector = vmcs12->host_gs_selector;
+ * evmcs->host_tr_selector = vmcs12->host_tr_selector;
+ * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
+ * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
+ * evmcs->host_cr0 = vmcs12->host_cr0;
+ * evmcs->host_cr3 = vmcs12->host_cr3;
+ * evmcs->host_cr4 = vmcs12->host_cr4;
+ * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
+ * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
+ * evmcs->host_rip = vmcs12->host_rip;
+ * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
+ * evmcs->host_fs_base = vmcs12->host_fs_base;
+ * evmcs->host_gs_base = vmcs12->host_gs_base;
+ * evmcs->host_tr_base = vmcs12->host_tr_base;
+ * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
+ * evmcs->host_idtr_base = vmcs12->host_idtr_base;
+ * evmcs->host_rsp = vmcs12->host_rsp;
+ * sync_vmcs12() doesn't read these:
+ * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
+ * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
+ * evmcs->msr_bitmap = vmcs12->msr_bitmap;
+ * evmcs->ept_pointer = vmcs12->ept_pointer;
+ * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
+ * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
+ * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
+ * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
+ * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
+ * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
+ * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
+ * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
+ * evmcs->tpr_threshold = vmcs12->tpr_threshold;
+ * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
+ * evmcs->exception_bitmap = vmcs12->exception_bitmap;
+ * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
+ * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
+ * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
+ * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
+ * evmcs->page_fault_error_code_mask =
+ * vmcs12->page_fault_error_code_mask;
+ * evmcs->page_fault_error_code_match =
+ * vmcs12->page_fault_error_code_match;
+ * evmcs->cr3_target_count = vmcs12->cr3_target_count;
+ * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
+ * evmcs->tsc_offset = vmcs12->tsc_offset;
+ * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
+ * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
+ * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
+ * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
+ * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
+ * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
+ * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
+ * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
+ *
+ * Not present in struct vmcs12:
+ * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
+ * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
+ * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
+ * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
+ */
+
+ evmcs->guest_es_selector = vmcs12->guest_es_selector;
+ evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
+ evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
+ evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
+ evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
+ evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
+ evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
+ evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
+
+ evmcs->guest_es_limit = vmcs12->guest_es_limit;
+ evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
+ evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
+ evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
+ evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
+ evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
+ evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
+ evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
+ evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
+ evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
+
+ evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
+ evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
+ evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
+ evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
+ evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
+ evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
+ evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
+ evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
+
+ evmcs->guest_es_base = vmcs12->guest_es_base;
+ evmcs->guest_cs_base = vmcs12->guest_cs_base;
+ evmcs->guest_ss_base = vmcs12->guest_ss_base;
+ evmcs->guest_ds_base = vmcs12->guest_ds_base;
+ evmcs->guest_fs_base = vmcs12->guest_fs_base;
+ evmcs->guest_gs_base = vmcs12->guest_gs_base;
+ evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
+ evmcs->guest_tr_base = vmcs12->guest_tr_base;
+ evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
+ evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
+
+ evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
+ evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
+
+ evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
+ evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
+ evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
+ evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
+
+ evmcs->guest_pending_dbg_exceptions =
+ vmcs12->guest_pending_dbg_exceptions;
+ evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
+ evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
+
+ evmcs->guest_activity_state = vmcs12->guest_activity_state;
+ evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
+
+ evmcs->guest_cr0 = vmcs12->guest_cr0;
+ evmcs->guest_cr3 = vmcs12->guest_cr3;
+ evmcs->guest_cr4 = vmcs12->guest_cr4;
+ evmcs->guest_dr7 = vmcs12->guest_dr7;
+
+ evmcs->guest_physical_address = vmcs12->guest_physical_address;
+
+ evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
+ evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
+ evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
+ evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
+ evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
+ evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
+ evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
+ evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
+
+ evmcs->exit_qualification = vmcs12->exit_qualification;
+
+ evmcs->guest_linear_address = vmcs12->guest_linear_address;
+ evmcs->guest_rsp = vmcs12->guest_rsp;
+ evmcs->guest_rflags = vmcs12->guest_rflags;
+
+ evmcs->guest_interruptibility_info =
+ vmcs12->guest_interruptibility_info;
+ evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
+ evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
+ evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
+ evmcs->vm_entry_exception_error_code =
+ vmcs12->vm_entry_exception_error_code;
+ evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
+
+ evmcs->guest_rip = vmcs12->guest_rip;
+
+ evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
+
+ return 0;
+}
+
/*
* Copy the writable VMCS shadow fields back to the VMCS12, in case
* they have been modified by the L1 guest. Note that the "read-only"
@@ -8683,20 +9121,6 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(vmx->loaded_vmcs->vmcs);
}
-/*
- * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
- * used before) all generate the same failure when it is missing.
- */
-static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
-{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- if (vmx->nested.current_vmptr == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return 0;
- }
- return 1;
-}
-
static int handle_vmread(struct kvm_vcpu *vcpu)
{
unsigned long field;
@@ -8709,8 +9133,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- return kvm_skip_emulated_instruction(vcpu);
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
if (!is_guest_mode(vcpu))
vmcs12 = get_vmcs12(vcpu);
@@ -8719,20 +9143,18 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
* When vmcs->vmcs_link_pointer is -1ull, any VMREAD
* to shadowed-field sets the ALU flags for VMfailInvalid.
*/
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_shadow_vmcs12(vcpu);
}
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
- if (vmcs12_read_any(vmcs12, field, &field_value) < 0) {
- nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
/*
* Now copy part of this value to register or memory, as requested.
* Note that the number of bits actually copied is 32 or 64 depending
@@ -8750,8 +9172,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
(is_long_mode(vcpu) ? 8 : 4), NULL);
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
@@ -8776,8 +9197,8 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- return kvm_skip_emulated_instruction(vcpu);
+ if (vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
@@ -8800,11 +9221,9 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
* VMCS," then the "read-only" fields are actually read/write.
*/
if (vmcs_field_readonly(field) &&
- !nested_cpu_has_vmwrite_any_field(vcpu)) {
- nested_vmx_failValid(vcpu,
+ !nested_cpu_has_vmwrite_any_field(vcpu))
+ return nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
if (!is_guest_mode(vcpu))
vmcs12 = get_vmcs12(vcpu);
@@ -8813,18 +9232,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
* When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
* to shadowed-field sets the ALU flags for VMfailInvalid.
*/
- if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_shadow_vmcs12(vcpu);
-
}
- if (vmcs12_write_any(vmcs12, field, field_value) < 0) {
- nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmcs12_write_any(vmcs12, field, field_value) < 0)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_UNSUPPORTED_VMCS_COMPONENT);
/*
* Do not track vmcs12 dirty-state if in guest-mode
@@ -8846,8 +9261,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
}
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
@@ -8858,7 +9272,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
__pa(vmx->vmcs01.shadow_vmcs));
- vmx->nested.sync_shadow_vmcs = true;
+ vmx->nested.need_vmcs12_sync = true;
}
vmx->nested.dirty_vmcs12 = true;
}
@@ -8875,36 +9289,37 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
if (nested_vmx_get_vmptr(vcpu, &vmptr))
return 1;
- if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) {
- nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_INVALID_ADDRESS);
- if (vmptr == vmx->nested.vmxon_ptr) {
- nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (vmptr == vmx->nested.vmxon_ptr)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_VMPTRLD_VMXON_POINTER);
+
+ /* Forbid normal VMPTRLD if Enlightened version was used */
+ if (vmx->nested.hv_evmcs)
+ return 1;
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
struct page *page;
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
- if (is_error_page(page)) {
- nested_vmx_failInvalid(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ if (is_error_page(page))
+ return nested_vmx_failInvalid(vcpu);
+
new_vmcs12 = kmap(page);
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
(new_vmcs12->hdr.shadow_vmcs &&
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
kunmap(page);
kvm_release_page_clean(page);
- nested_vmx_failValid(vcpu,
+ return nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
- return kvm_skip_emulated_instruction(vcpu);
}
- nested_release_vmcs12(vmx);
+ nested_release_vmcs12(vcpu);
+
/*
* Load VMCS12 from guest memory since it is not already
* cached.
@@ -8916,8 +9331,71 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
set_current_vmptr(vmx, vmptr);
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
+}
+
+/*
+ * This is the equivalent of the nested hypervisor executing the vmptrld
+ * instruction.
+ */
+static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
+ bool from_launch)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_vp_assist_page assist_page;
+
+ if (likely(!vmx->nested.enlightened_vmcs_enabled))
+ return 1;
+
+ if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
+ return 1;
+
+ if (unlikely(!assist_page.enlighten_vmentry))
+ return 1;
+
+ if (unlikely(assist_page.current_nested_vmcs !=
+ vmx->nested.hv_evmcs_vmptr)) {
+
+ if (!vmx->nested.hv_evmcs)
+ vmx->nested.current_vmptr = -1ull;
+
+ nested_release_evmcs(vcpu);
+
+ vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
+ vcpu, assist_page.current_nested_vmcs);
+
+ if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
+ return 0;
+
+ vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
+
+ if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) {
+ nested_release_evmcs(vcpu);
+ return 0;
+ }
+
+ vmx->nested.dirty_vmcs12 = true;
+ /*
+ * As we keep L2 state for one guest only, the 'hv_clean_fields' mask
+ * can't be used when we switch between them. Reset it here for
+ * simplicity.
+ */
+ vmx->nested.hv_evmcs->hv_clean_fields &=
+ ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
+
+ /*
+ * Unlike normal vmcs12, enlightened vmcs12 is not fully
+ * reloaded from guest's memory (read only fields, fields not
+ * present in struct hv_enlightened_vmcs, ...). Make sure there
+ * are no leftovers.
+ */
+ if (from_launch)
+ memset(vmx->nested.cached_vmcs12, 0,
+ sizeof(*vmx->nested.cached_vmcs12));
+
+ }
+ return 1;
}
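
nested_vmx_handle_enlightened_vmptrld() consumes two fields of the Hyper-V VP assist page. For orientation, the layout assumed here is roughly the contemporaneous asm/hyperv-tlfs.h definition; treat the field order and padding as an assumption, not a quotation:

    /* Approximate layout, assumed from this era's hyperv-tlfs.h. */
    struct hv_vp_assist_page {
    	__u32 apic_assist;
    	__u32 reserved;
    	__u64 vtl_control[2];
    	__u64 nested_enlightenments_control[2];
    	__u32 enlighten_vmentry;       /* guest requested an enlightened entry */
    	__u64 current_nested_vmcs;     /* GPA of the active eVMCS */
    };
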
/* Emulate the VMPTRST instruction */
@@ -8932,6 +9410,9 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
+ if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
+ return 1;
+
if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
return 1;
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
@@ -8940,8 +9421,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
kvm_inject_page_fault(vcpu, &e);
return 1;
}
- nested_vmx_succeed(vcpu);
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
/* Emulate the INVEPT instruction */
@@ -8971,11 +9451,9 @@ static int handle_invept(struct kvm_vcpu *vcpu)
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
- if (type >= 32 || !(types & (1 << type))) {
- nested_vmx_failValid(vcpu,
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
/* According to the Intel VMX instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
@@ -8997,14 +9475,20 @@ static int handle_invept(struct kvm_vcpu *vcpu)
case VMX_EPT_EXTENT_CONTEXT:
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- nested_vmx_succeed(vcpu);
break;
default:
BUG_ON(1);
break;
}
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
+}
+
+static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
@@ -9018,6 +9502,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
u64 vpid;
u64 gla;
} operand;
+ u16 vpid02;
if (!(vmx->nested.msrs.secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
@@ -9035,11 +9520,9 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
types = (vmx->nested.msrs.vpid_caps &
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
- if (type >= 32 || !(types & (1 << type))) {
- nested_vmx_failValid(vcpu,
+ if (type >= 32 || !(types & (1 << type)))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
/* according to the intel vmx instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
@@ -9051,47 +9534,39 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
kvm_inject_page_fault(vcpu, &e);
return 1;
}
- if (operand.vpid >> 16) {
- nested_vmx_failValid(vcpu,
+ if (operand.vpid >> 16)
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
+ vpid02 = nested_get_vpid02(vcpu);
switch (type) {
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
if (!operand.vpid ||
- is_noncanonical_address(operand.gla, vcpu)) {
- nested_vmx_failValid(vcpu,
+ is_noncanonical_address(operand.gla, vcpu))
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
- if (cpu_has_vmx_invvpid_individual_addr() &&
- vmx->nested.vpid02) {
+ if (cpu_has_vmx_invvpid_individual_addr()) {
__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
- vmx->nested.vpid02, operand.gla);
+ vpid02, operand.gla);
} else
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
- if (!operand.vpid) {
- nested_vmx_failValid(vcpu,
+ if (!operand.vpid)
+ return nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
- return kvm_skip_emulated_instruction(vcpu);
- }
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
case VMX_VPID_EXTENT_ALL_CONTEXT:
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, vpid02, false);
break;
default:
WARN_ON_ONCE(1);
return kvm_skip_emulated_instruction(vcpu);
}
- nested_vmx_succeed(vcpu);
-
- return kvm_skip_emulated_instruction(vcpu);
+ return nested_vmx_succeed(vcpu);
}
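
Each INVVPID flavor now passes invalidate_gpa=false where it used to pass true. Assuming __vmx_flush_tlb() keeps the shape it has elsewhere in this file (a sketch, not a quotation), the flag decides between an EPT context sync and a plain VPID sync, and INVVPID emulation only needs the latter:

    /* Sketch of the assumed callee; the real definition may differ. */
    static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid,
    				   bool invalidate_gpa)
    {
    	if (enable_ept && (invalidate_gpa || !enable_vpid))
    		/* Flush guest-physical mappings via an EPT sync. */
    		ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu->root_hpa));
    	else
    		/* Flush only linear mappings tagged with this VPID. */
    		vpid_sync_context(vpid);
    }
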
static int handle_invpcid(struct kvm_vcpu *vcpu)
@@ -9162,11 +9637,11 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
}
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu.prev_roots[i].cr3)
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
== operand.pcid)
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
- kvm_mmu_free_roots(vcpu, roots_to_free);
+ kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
/*
* If neither the current cr3 nor any of the prev_roots use the
* given PCID, then nothing needs to be done here because a
@@ -9293,7 +9768,7 @@ static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
kvm_mmu_unload(vcpu);
mmu->ept_ad = accessed_dirty;
- mmu->base_role.ad_disabled = !accessed_dirty;
+ mmu->mmu_role.base.ad_disabled = !accessed_dirty;
vmcs12->ept_pointer = address;
/*
* TODO: Check what's the correct approach in case
@@ -9652,9 +10127,6 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
return false;
else if (is_page_fault(intr_info))
return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
- else if (is_no_device(intr_info) &&
- !(vmcs12->guest_cr0 & X86_CR0_TS))
- return false;
else if (is_debug(intr_info) &&
vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
@@ -10676,9 +11148,25 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
- if (vmx->nested.sync_shadow_vmcs) {
- copy_vmcs12_to_shadow(vmx);
- vmx->nested.sync_shadow_vmcs = false;
+ if (vmx->nested.need_vmcs12_sync) {
+ /*
+ * hv_evmcs may end up being not mapped after migration (when
+ * L2 was running), map it here to make sure vmcs12 changes are
+ * properly reflected.
+ */
+ if (vmx->nested.enlightened_vmcs_enabled &&
+ !vmx->nested.hv_evmcs)
+ nested_vmx_handle_enlightened_vmptrld(vcpu, false);
+
+ if (vmx->nested.hv_evmcs) {
+ copy_vmcs12_to_enlightened(vmx);
+ /* All fields are clean */
+ vmx->nested.hv_evmcs->hv_clean_fields |=
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ } else {
+ copy_vmcs12_to_shadow(vmx);
+ }
+ vmx->nested.need_vmcs12_sync = false;
}
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -10745,7 +11233,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%" _ASM_SP ", (%%" _ASM_SI ") \n\t"
"jmp 1f \n\t"
"2: \n\t"
- __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
"1: \n\t"
/* Reload cr2 if changed */
"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
@@ -10777,9 +11265,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Enter guest mode */
"jne 1f \n\t"
- __ex(ASM_VMX_VMLAUNCH) "\n\t"
+ __ex("vmlaunch") "\n\t"
"jmp 2f \n\t"
- "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
+ "1: " __ex("vmresume") "\n\t"
"2: "
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
@@ -10801,6 +11289,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
+ /*
+ * Clear host registers marked as clobbered to prevent
+ * speculative use.
+ */
"xor %%r8d, %%r8d \n\t"
"xor %%r9d, %%r9d \n\t"
"xor %%r10d, %%r10d \n\t"
@@ -10958,6 +11450,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
vmx->loaded_vmcs = vmcs;
vmx_vcpu_load(vcpu, cpu);
put_cpu();
+
+ vm_entry_controls_reset_shadow(vmx);
+ vm_exit_controls_reset_shadow(vmx);
+ vmx_segment_cache_clear(vmx);
}
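
The added resets matter because the entry/exit controls are cached in shadow variables to avoid VMREADs, and after a switch the cache still describes the outgoing VMCS. A sketch of the shadow pattern, assuming the accessors this file already generates:

    /* Sketch: refresh the cached value from the newly loaded VMCS. */
    static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx)
    {
    	vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS);
    }

    static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
    {
    	vmcs_write32(VM_ENTRY_CONTROLS, val);
    	vmx->vm_entry_controls_shadow = val;  /* avoid a later VMREAD */
    }
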
/*
@@ -10966,12 +11462,10 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
*/
static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
- vcpu_load(vcpu);
- vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- free_nested(vmx);
- vcpu_put(vcpu);
+ vcpu_load(vcpu);
+ vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
+ free_nested(vcpu);
+ vcpu_put(vcpu);
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
@@ -11334,28 +11828,28 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
return get_vmcs12(vcpu)->ept_pointer;
}
-static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
- if (!valid_ept_address(vcpu, nested_ept_get_cr3(vcpu)))
- return 1;
+ vcpu->arch.mmu = &vcpu->arch.guest_mmu;
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.msrs.ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT,
nested_ept_ad_enabled(vcpu),
nested_ept_get_cr3(vcpu));
- vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
- vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
- vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+ vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
+ vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
+ vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
+ vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
- return 0;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
- vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+ vcpu->arch.mmu = &vcpu->arch.root_mmu;
+ vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}
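
The uninit path now restores vcpu->arch.mmu to a dedicated root_mmu rather than resetting a single embedded context. A purely illustrative (hypothetical) summary of the arrangement this series assumes:

    /* Illustrative only; not a real kernel struct. */
    struct mmu_contexts {
    	struct kvm_mmu root_mmu;    /* L1, or L2 without nested EPT */
    	struct kvm_mmu guest_mmu;   /* shadow EPT context for L2 */
    	struct kvm_mmu *mmu;        /* active context, points at one of the above */
    	struct kvm_mmu *walk_mmu;   /* context used for gva->gpa translation */
    };
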
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
@@ -11716,7 +12210,7 @@ static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
!nested_exit_intr_ack_set(vcpu) ||
(vmcs12->posted_intr_nv & 0xff00) ||
(vmcs12->posted_intr_desc_addr & 0x3f) ||
- (!page_address_valid(vcpu, vmcs12->posted_intr_desc_addr))))
+ (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
@@ -11772,15 +12266,12 @@ static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
- u64 address = vmcs12->pml_address;
- int maxphyaddr = cpuid_maxphyaddr(vcpu);
+ if (!nested_cpu_has_pml(vmcs12))
+ return 0;
- if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_PML)) {
- if (!nested_cpu_has_ept(vmcs12) ||
- !IS_ALIGNED(address, 4096) ||
- address >> maxphyaddr)
- return -EINVAL;
- }
+ if (!nested_cpu_has_ept(vmcs12) ||
+ !page_address_valid(vcpu, vmcs12->pml_address))
+ return -EINVAL;
return 0;
}
@@ -11960,112 +12451,87 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool ne
return 0;
}
-static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+/*
+ * Returns true if KVM is able to configure the CPU to tag TLB entries
+ * populated by L2 differently from TLB entries populated
+ * by L1.
+ *
+ * If L1 uses EPT, then TLB entries are tagged with different EPTP.
+ *
+ * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
+ * with different VPID (L1 entries are tagged with vmx->vpid
+ * while L2 entries are tagged with vmx->nested.vpid02).
+ */
+static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
- vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
- vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
- vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
- vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
- vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
- vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
- vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
- vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
- vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
- vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
- vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
- vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
- vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
- vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
- vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
- vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
- vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
- vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
- vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
- vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
- vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
- vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
- vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
- vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
- vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
- vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
- vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
- vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
- vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
- vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
-
- vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
- vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
- vmcs12->guest_pending_dbg_exceptions);
- vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
- vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+ return nested_cpu_has_ept(vmcs12) ||
+ (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
+}
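
A hypothetical caller shows why this predicate matters: if L2 cannot be tagged differently from L1, any L1<->L2 transition must flush to avoid stale translations (this call site is an illustration, not part of the hunk shown):

    /* Hypothetical illustration of a consumer. */
    if (!nested_has_guest_tlb_tag(vcpu))
    	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
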
- if (nested_cpu_has_xsaves(vmcs12))
- vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
- vmcs_write64(VMCS_LINK_POINTER, -1ull);
+static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
+ return vmcs12->guest_ia32_efer;
+ else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
+ return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
+ else
+ return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
+}
- if (cpu_has_vmx_posted_intr())
- vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
+static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
+{
+ /*
+ * If vmcs02 hasn't been initialized, set the constant vmcs02 state
+ * according to L0's settings (vmcs12 is irrelevant here). Host
+ * fields that come from L0 and are not constant, e.g. HOST_CR3,
+ * will be set as needed prior to VMLAUNCH/VMRESUME.
+ */
+ if (vmx->nested.vmcs02_initialized)
+ return;
+ vmx->nested.vmcs02_initialized = true;
/*
- * Whether page-faults are trapped is determined by a combination of
- * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
- * If enable_ept, L0 doesn't care about page faults and we should
- * set all of these to L1's desires. However, if !enable_ept, L0 does
- * care about (at least some) page faults, and because it is not easy
- * (if at all possible?) to merge L0 and L1's desires, we simply ask
- * to exit on each and every L2 page fault. This is done by setting
- * MASK=MATCH=0 and (see below) EB.PF=1.
- * Note that below we don't need special code to set EB.PF beyond the
- * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
- * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
- * !enable_ept, EB.PF is 1, so the "or" will always be 1.
+ * We don't care what the EPTP value is; we just need to guarantee
+ * it's valid so we don't get a false positive when doing early
+ * consistency checks.
*/
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
- enable_ept ? vmcs12->page_fault_error_code_mask : 0);
- vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
- enable_ept ? vmcs12->page_fault_error_code_match : 0);
+ if (enable_ept && nested_early_check)
+ vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
/* All VMFUNCs are currently emulated through L0 vmexits. */
if (cpu_has_vmx_vmfunc())
vmcs_write64(VM_FUNCTION_CONTROL, 0);
- if (cpu_has_vmx_apicv()) {
- vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
- vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
- vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
- vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
- }
+ if (cpu_has_vmx_posted_intr())
+ vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
- /*
- * Set host-state according to L0's settings (vmcs12 is irrelevant here)
- * Some constant fields are set here by vmx_set_constant_host_state().
- * Other fields are different per CPU, and will be set later when
- * vmx_vcpu_load() is called, and when vmx_prepare_switch_to_guest()
- * is called.
- */
- vmx_set_constant_host_state(vmx);
+ if (cpu_has_vmx_msr_bitmap())
+ vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
+
+ if (enable_pml)
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
/*
- * Set the MSR load/store lists to match L0's settings.
+ * Set the MSR load/store lists to match L0's settings. Only the
+ * addresses are constant (for vmcs02); the counts can change based
+ * on L2's behavior, e.g. switching to/from long mode.
*/
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
- vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
- vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
- set_cr4_guest_host_mask(vmx);
+ vmx_set_constant_host_state(vmx);
+}
- if (kvm_mpx_supported()) {
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
- vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
- else
- vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
- }
+static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
+ struct vmcs12 *vmcs12)
+{
+ prepare_vmcs02_constant_state(vmx);
+
+ vmcs_write64(VMCS_LINK_POINTER, -1ull);
if (enable_vpid) {
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
@@ -12073,78 +12539,30 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
}
-
- /*
- * L1 may access the L2's PDPTR, so save them to construct vmcs12
- */
- if (enable_ept) {
- vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
- vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
- vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
- vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
- }
-
- if (cpu_has_vmx_msr_bitmap())
- vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
}
-/*
- * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
- * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
- * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
- * guest in a way that will both be appropriate to L1's requests, and our
- * needs. In addition to modifying the active vmcs (which is vmcs02), this
- * function also has additional necessary side-effects, like setting various
- * vcpu->arch fields.
- * Returns 0 on success, 1 on failure. Invalid state exit qualification code
- * is assigned to entry_failure_code on failure.
- */
-static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
- u32 *entry_failure_code)
+static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
{
- struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control, vmcs12_exec_ctrl;
+ u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
- if (vmx->nested.dirty_vmcs12) {
- prepare_vmcs02_full(vcpu, vmcs12);
- vmx->nested.dirty_vmcs12 = false;
- }
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
+ prepare_vmcs02_early_full(vmx, vmcs12);
/*
- * First, the fields that are shadowed. This must be kept in sync
- * with vmx_shadow_fields.h.
+ * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
+ * entry, but only if the current (host) sp changed from the value
+ * we wrote last (vmx->host_rsp). This cache is no longer relevant
+ * if we switch vmcs, and rather than hold a separate cache per vmcs,
+ * here we just force the write to happen on entry. host_rsp will
+ * also be written unconditionally by nested_vmx_check_vmentry_hw()
+ * if we are doing early consistency checks via hardware.
*/
+ vmx->host_rsp = 0;
- vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
- vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
- vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
- vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
- vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
-
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
- kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
- } else {
- kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
- vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
- }
- if (vmx->nested.nested_run_pending) {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
- vmcs12->vm_entry_intr_info_field);
- vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
- vmcs12->vm_entry_exception_error_code);
- vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
- vmcs12->vm_entry_instruction_len);
- vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
- vmcs12->guest_interruptibility_info);
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
- } else {
- vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
- }
- vmx_set_rflags(vcpu, vmcs12->guest_rflags);
-
+ /*
+ * PIN CONTROLS
+ */
exec_control = vmcs12->pin_based_vm_exec_control;
/* Preemption timer setting is computed directly in vmx_vcpu_run. */
@@ -12159,13 +12577,43 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
} else {
exec_control &= ~PIN_BASED_POSTED_INTR;
}
-
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
- vmx->nested.preemption_timer_expired = false;
- if (nested_cpu_has_preemption_timer(vmcs12))
- vmx_start_preemption_timer(vcpu);
+ /*
+ * EXEC CONTROLS
+ */
+ exec_control = vmx_exec_control(vmx); /* L0's desires */
+ exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
+ exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
+ exec_control &= ~CPU_BASED_TPR_SHADOW;
+ exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
+ * nested_get_vmcs12_pages can't fix it up, the illegal value
+ * will result in a VM entry failure.
+ */
+ if (exec_control & CPU_BASED_TPR_SHADOW) {
+ vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
+ vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ } else {
+#ifdef CONFIG_X86_64
+ exec_control |= CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING;
+#endif
+ }
+
+ /*
+ * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
+ * for I/O port accesses.
+ */
+ exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
+ exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+ vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+
+ /*
+ * SECONDARY EXEC CONTROLS
+ */
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx->secondary_exec_control;
@@ -12206,43 +12654,214 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
}
/*
- * HOST_RSP is normally set correctly in vmx_vcpu_run() just before
- * entry, but only if the current (host) sp changed from the value
- * we wrote last (vmx->host_rsp). This cache is no longer relevant
- * if we switch vmcs, and rather than hold a separate cache per vmcs,
- * here we just force the write to happen on entry.
+ * ENTRY CONTROLS
+ *
+ * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
+ * are emulated by vmx_set_efer() in prepare_vmcs02(), but we speculatively
+ * set the related bits (if supported by the CPU) in the hope that
+ * we can avoid VMWrites during vmx_set_efer().
+ */
+ exec_control = (vmcs12->vm_entry_controls | vmcs_config.vmentry_ctrl) &
+ ~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
+ if (cpu_has_load_ia32_efer) {
+ if (guest_efer & EFER_LMA)
+ exec_control |= VM_ENTRY_IA32E_MODE;
+ if (guest_efer != host_efer)
+ exec_control |= VM_ENTRY_LOAD_IA32_EFER;
+ }
+ vm_entry_controls_init(vmx, exec_control);
+
+ /*
+ * EXIT CONTROLS
+ *
+ * L2->L1 exit controls are emulated - the hardware exit is to L0 so
+ * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
+ * bits may be modified by vmx_set_efer() in prepare_vmcs02().
*/
- vmx->host_rsp = 0;
+ exec_control = vmcs_config.vmexit_ctrl;
+ if (cpu_has_load_ia32_efer && guest_efer != host_efer)
+ exec_control |= VM_EXIT_LOAD_IA32_EFER;
+ vm_exit_controls_init(vmx, exec_control);
- exec_control = vmx_exec_control(vmx); /* L0's desires */
- exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
- exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
- exec_control &= ~CPU_BASED_TPR_SHADOW;
- exec_control |= vmcs12->cpu_based_vm_exec_control;
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit and never change
+ * the PML address (once set), this happens to be equivalent to
+ * simply resetting the index in vmcs02.
+ */
+ if (enable_pml)
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
/*
- * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
- * nested_get_vmcs12_pages can't fix it up, the illegal value
- * will result in a VM entry failure.
+ * Interrupt/Exception Fields
*/
- if (exec_control & CPU_BASED_TPR_SHADOW) {
- vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
- vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
+ if (vmx->nested.nested_run_pending) {
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
+ vmcs12->vm_entry_intr_info_field);
+ vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
+ vmcs12->vm_entry_exception_error_code);
+ vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
+ vmcs12->vm_entry_instruction_len);
+ vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
+ vmcs12->guest_interruptibility_info);
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
} else {
-#ifdef CONFIG_X86_64
- exec_control |= CPU_BASED_CR8_LOAD_EXITING |
- CPU_BASED_CR8_STORE_EXITING;
-#endif
+ vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
+}
+
+static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
+{
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
+ vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
+ vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
+ vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
+ vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
+ vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
+ vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
+ vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
+ vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
+ vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
+ vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
+ vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
+ vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
+ vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
+ vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
+ vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
+ vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
+ vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
+ vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
+ vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
+ vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
+ vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
+ vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
+ vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
+ vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
+ vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
+ vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
+ vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
+ vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
+ vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
+ vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
+ vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
+ }
+
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
+ vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs12->guest_pending_dbg_exceptions);
+ vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
+ vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
+
+ /*
+ * L1 may access the L2's PDPTR, so save them to construct
+ * vmcs12
+ */
+ if (enable_ept) {
+ vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
+ vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
+ vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
+ vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ }
+ }
+
+ if (nested_cpu_has_xsaves(vmcs12))
+ vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
/*
- * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
- * for I/O port accesses.
+ * Whether page-faults are trapped is determined by a combination of
+ * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
+ * If enable_ept, L0 doesn't care about page faults and we should
+ * set all of these to L1's desires. However, if !enable_ept, L0 does
+ * care about (at least some) page faults, and because it is not easy
+ * (if at all possible?) to merge L0 and L1's desires, we simply ask
+ * to exit on each and every L2 page fault. This is done by setting
+ * MASK=MATCH=0 and (see below) EB.PF=1.
+ * Note that below we don't need special code to set EB.PF beyond the
+ * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
+ * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
+ * !enable_ept, EB.PF is 1, so the "or" will always be 1.
*/
- exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
- exec_control |= CPU_BASED_UNCOND_IO_EXITING;
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
+ enable_ept ? vmcs12->page_fault_error_code_mask : 0);
+ vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
+ enable_ept ? vmcs12->page_fault_error_code_match : 0);
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
+ if (cpu_has_vmx_apicv()) {
+ vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
+ vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
+ vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
+ vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
+ }
+
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ set_cr4_guest_host_mask(vmx);
+
+ if (kvm_mpx_supported()) {
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
+ vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
+ else
+ vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
+ }
+}
+
+/*
+ * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
+ * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
+ * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
+ * guest in a way that will both be appropriate to L1's requests, and our
+ * needs. In addition to modifying the active vmcs (which is vmcs02), this
+ * function also has necessary side effects, like setting various
+ * vcpu->arch fields.
+ * Returns 0 on success, 1 on failure. On failure, the invalid-state exit
+ * qualification code is assigned to *entry_failure_code.
+ */
+static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ u32 *entry_failure_code)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
+
+ if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
+ prepare_vmcs02_full(vmx, vmcs12);
+ vmx->nested.dirty_vmcs12 = false;
+ }
+
+ /*
+ * First, the fields that are shadowed. This must be kept in sync
+ * with vmx_shadow_fields.h.
+ */
+ if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
+ HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
+ vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
+ vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
+ }
+
+ if (vmx->nested.nested_run_pending &&
+ (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
+ kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+ } else {
+ kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+ vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+ }
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
+
+ vmx->nested.preemption_timer_expired = false;
+ if (nested_cpu_has_preemption_timer(vmcs12))
+ vmx_start_preemption_timer(vcpu);
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
@@ -12252,20 +12871,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
- /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
- * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
- * bits are further modified by vmx_set_efer() below.
- */
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
-
- /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
- * emulated by vmx_set_efer(), below.
- */
- vm_entry_controls_init(vmx,
- (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
- ~VM_ENTRY_IA32E_MODE) |
- (vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
-
if (vmx->nested.nested_run_pending &&
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
@@ -12288,37 +12893,29 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
	 * influence global bitmap (for vpid01 and vpid02 allocation)
	 * even if we spawn a lot of nested vCPUs.
*/
- if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
+ if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
- __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true);
+ __vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
}
} else {
- vmx_flush_tlb(vcpu, true);
+ /*
+			 * If L1 uses EPT, then L0 needs to execute INVEPT on
+ * EPTP02 instead of EPTP01. Therefore, delay TLB
+ * flush until vmcs02->eptp is fully updated by
+ * KVM_REQ_LOAD_CR3. Note that this assumes
+ * KVM_REQ_TLB_FLUSH is evaluated after
+ * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
}
- if (enable_pml) {
- /*
- * Conceptually we want to copy the PML address and index from
- * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
- * since we always flush the log on each vmexit, this happens
- * to be equivalent to simply resetting the fields in vmcs02.
- */
- ASSERT(vmx->pml_pg);
- vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
- vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
- }
-
- if (nested_cpu_has_ept(vmcs12)) {
- if (nested_ept_init_mmu_context(vcpu)) {
- *entry_failure_code = ENTRY_FAIL_DEFAULT;
- return 1;
- }
- } else if (nested_cpu_has2(vmcs12,
- SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+ if (nested_cpu_has_ept(vmcs12))
+ nested_ept_init_mmu_context(vcpu);
+ else if (nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
vmx_flush_tlb(vcpu, true);
- }
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
@@ -12334,14 +12931,8 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
- if (vmx->nested.nested_run_pending &&
- (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
- vcpu->arch.efer = vmcs12->guest_ia32_efer;
- else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
- vcpu->arch.efer |= (EFER_LMA | EFER_LME);
- else
- vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
- /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
+ vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
+ /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
/*
@@ -12383,6 +12974,7 @@ static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool ia32e;
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
@@ -12457,6 +13049,21 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
/*
+ * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+ * IA32_EFER MSR must be 0 in the field for that register. In addition,
+ * the values of the LMA and LME bits in the field must each be that of
+ * the host address-space size VM-exit control.
+ */
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+ ia32e = (vmcs12->vm_exit_controls &
+ VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+ if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+ ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
+ return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+ }
+
+ /*
* From the Intel SDM, volume 3:
* Fields relevant to VM-entry event injection must be set properly.
* These fields are the VM-entry interruption-information field, the
@@ -12512,6 +13119,10 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
}
}
+ if (nested_cpu_has_ept(vmcs12) &&
+ !valid_ept_address(vcpu, vmcs12->ept_pointer))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
return 0;
}
@@ -12577,21 +13188,6 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 1;
}
- /*
- * If the load IA32_EFER VM-exit control is 1, bits reserved in the
- * IA32_EFER MSR must be 0 in the field for that register. In addition,
- * the values of the LMA and LME bits in the field must each be that of
- * the host address-space size VM-exit control.
- */
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
- ia32e = (vmcs12->vm_exit_controls &
- VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
- if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
- ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
- return 1;
- }
-
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
(vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
@@ -12600,26 +13196,139 @@ static int check_vmentry_postreqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
return 0;
}
+static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ unsigned long cr3, cr4;
+
+ if (!nested_early_check)
+ return 0;
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+
+ preempt_disable();
+
+ vmx_prepare_switch_to_guest(vcpu);
+
+ /*
+ * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
+ * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
+	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
+ * there is no need to preserve other bits or save/restore the field.
+ */
+ vmcs_writel(GUEST_RFLAGS, 0);
+
+ vmcs_writel(HOST_RIP, vmx_early_consistency_check_return);
+
+ cr3 = __get_current_cr3_fast();
+ if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ vmx->loaded_vmcs->host_state.cr3 = cr3;
+ }
+
+ cr4 = cr4_read_shadow();
+ if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
+ vmcs_writel(HOST_CR4, cr4);
+ vmx->loaded_vmcs->host_state.cr4 = cr4;
+ }
+
+ vmx->__launched = vmx->loaded_vmcs->launched;
+
+ asm(
+ /* Set HOST_RSP */
+ __ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
+
+		/* Check if vmlaunch or vmresume is needed */
+ "cmpl $0, %c[launched](%0)\n\t"
+ "je 1f\n\t"
+ __ex("vmresume") "\n\t"
+ "jmp 2f\n\t"
+ "1: " __ex("vmlaunch") "\n\t"
+ "jmp 2f\n\t"
+ "2: "
+
+ /* Set vmx->fail accordingly */
+ "setbe %c[fail](%0)\n\t"
+
+ ".pushsection .rodata\n\t"
+ ".global vmx_early_consistency_check_return\n\t"
+ "vmx_early_consistency_check_return: " _ASM_PTR " 2b\n\t"
+ ".popsection"
+ :
+ : "c"(vmx), "d"((unsigned long)HOST_RSP),
+ [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
+ [fail]"i"(offsetof(struct vcpu_vmx, fail)),
+ [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp))
+ : "rax", "cc", "memory"
+ );
+
+ vmcs_writel(HOST_RIP, vmx_return);
+
+ preempt_enable();
+
+ if (vmx->msr_autoload.host.nr)
+ vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
+ if (vmx->msr_autoload.guest.nr)
+ vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
+
+ if (vmx->fail) {
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ vmx->fail = 0;
+ return 1;
+ }
+
+ /*
+ * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
+ */
+ local_irq_enable();
+ if (hw_breakpoint_active())
+ set_debugreg(__this_cpu_read(cpu_dr7), 7);
+
+ /*
+ * A non-failing VMEntry means we somehow entered guest mode with
+ * an illegal RIP, and that's just the tip of the iceberg. There
+ * is no telling what memory has been modified or what state has
+ * been exposed to unknown code. Hitting this all but guarantees
+ * a (very critical) hardware issue.
+ */
+ WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
+ VMX_EXIT_REASONS_FAILED_VMENTRY));
+
+ return 0;
+}
+STACK_FRAME_NON_STANDARD(nested_vmx_check_vmentry_hw);
+
+static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12);
+
/*
- * If exit_qual is NULL, this is being called from state restore (either RSM
+ * If from_vmentry is false, this is being called from state restore (either RSM
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
+ *
+ * Returns:
+ *	0 - success, i.e. proceed with actual VMEnter
+ *	1 - consistency check VMExit
+ *	-1 - consistency check VMFail
*/
-static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
+static int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+ bool from_vmentry)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- bool from_vmentry = !!exit_qual;
- u32 dummy_exit_qual;
bool evaluate_pending_interrupts;
- int r = 0;
+ u32 exit_reason = EXIT_REASON_INVALID_STATE;
+ u32 exit_qual;
evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
- enter_guest_mode(vcpu);
-
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
if (kvm_mpx_supported() &&
@@ -12627,24 +13336,35 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
- vmx_segment_cache_clear(vmx);
+ prepare_vmcs02_early(vmx, vmcs12);
+
+ if (from_vmentry) {
+ nested_get_vmcs12_pages(vcpu);
+
+ if (nested_vmx_check_vmentry_hw(vcpu)) {
+ vmx_switch_vmcs(vcpu, &vmx->vmcs01);
+ return -1;
+ }
+
+ if (check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit;
+ }
+
+ enter_guest_mode(vcpu);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
- r = EXIT_REASON_INVALID_STATE;
- if (prepare_vmcs02(vcpu, vmcs12, from_vmentry ? exit_qual : &dummy_exit_qual))
- goto fail;
+ if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
+ goto vmentry_fail_vmexit_guest_mode;
if (from_vmentry) {
- nested_get_vmcs12_pages(vcpu);
-
- r = EXIT_REASON_MSR_LOAD_FAIL;
- *exit_qual = nested_vmx_load_msr(vcpu,
- vmcs12->vm_entry_msr_load_addr,
- vmcs12->vm_entry_msr_load_count);
- if (*exit_qual)
- goto fail;
+ exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
+ exit_qual = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (exit_qual)
+ goto vmentry_fail_vmexit_guest_mode;
} else {
/*
* The MMU is not initialized to point at the right entities yet and
@@ -12681,12 +13401,28 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
*/
return 0;
-fail:
+ /*
+ * A failed consistency check that leads to a VMExit during L1's
+ * VMEnter to L2 is a variation of a normal VMexit, as explained in
+ * 26.7 "VM-entry failures during or after loading guest state".
+ */
+vmentry_fail_vmexit_guest_mode:
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
leave_guest_mode(vcpu);
+
+vmentry_fail_vmexit:
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- return r;
+
+ if (!from_vmentry)
+ return 1;
+
+ load_vmcs12_host_state(vcpu, vmcs12);
+ vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
+ vmcs12->exit_qualification = exit_qual;
+ if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
+ vmx->nested.need_vmcs12_sync = true;
+ return 1;
}
/*
@@ -12698,14 +13434,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
- u32 exit_qual;
int ret;
if (!nested_vmx_check_permission(vcpu))
return 1;
- if (!nested_vmx_check_vmcs12(vcpu))
- goto out;
+ if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
+ return 1;
+
+ if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
+ return nested_vmx_failInvalid(vcpu);
vmcs12 = get_vmcs12(vcpu);
@@ -12715,13 +13453,16 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* rather than RFLAGS.ZF, and no error number is stored to the
* VM-instruction error field.
*/
- if (vmcs12->hdr.shadow_vmcs) {
- nested_vmx_failInvalid(vcpu);
- goto out;
- }
+ if (vmcs12->hdr.shadow_vmcs)
+ return nested_vmx_failInvalid(vcpu);
- if (enable_shadow_vmcs)
+ if (vmx->nested.hv_evmcs) {
+ copy_enlightened_to_vmcs12(vmx);
+ /* Enlightened VMCS doesn't have launch state */
+ vmcs12->launch_state = !launch;
+ } else if (enable_shadow_vmcs) {
copy_shadow_to_vmcs12(vmx);
+ }
/*
* The nested entry process starts with enforcing various prerequisites
@@ -12733,59 +13474,37 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
- if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS) {
- nested_vmx_failValid(vcpu,
- VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
- goto out;
- }
+ if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
- if (vmcs12->launch_state == launch) {
- nested_vmx_failValid(vcpu,
+ if (vmcs12->launch_state == launch)
+ return nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
- goto out;
- }
ret = check_vmentry_prereqs(vcpu, vmcs12);
- if (ret) {
- nested_vmx_failValid(vcpu, ret);
- goto out;
- }
-
- /*
- * After this point, the trap flag no longer triggers a singlestep trap
- * on the vm entry instructions; don't call kvm_skip_emulated_instruction.
- * This is not 100% correct; for performance reasons, we delegate most
- * of the checks on host state to the processor. If those fail,
- * the singlestep trap is missed.
- */
- skip_emulated_instruction(vcpu);
-
- ret = check_vmentry_postreqs(vcpu, vmcs12, &exit_qual);
- if (ret) {
- nested_vmx_entry_failure(vcpu, vmcs12,
- EXIT_REASON_INVALID_STATE, exit_qual);
- return 1;
- }
+ if (ret)
+ return nested_vmx_failValid(vcpu, ret);
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
*/
-
vmx->nested.nested_run_pending = 1;
- ret = enter_vmx_non_root_mode(vcpu, &exit_qual);
- if (ret) {
- nested_vmx_entry_failure(vcpu, vmcs12, ret, exit_qual);
- vmx->nested.nested_run_pending = 0;
+ ret = nested_vmx_enter_non_root_mode(vcpu, true);
+ vmx->nested.nested_run_pending = !ret;
+ if (ret > 0)
return 1;
- }
+ else if (ret)
+ return nested_vmx_failValid(vcpu,
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
/* Hide L1D cache contents from the nested guest. */
vmx->vcpu.arch.l1tf_flush_l1d = true;
/*
- * Must happen outside of enter_vmx_non_root_mode() as it will
+ * Must happen outside of nested_vmx_enter_non_root_mode() as it will
* also be used as part of restoring nVMX state for
* snapshot restore (migration).
*
@@ -12806,9 +13525,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return kvm_vcpu_halt(vcpu);
}
return 1;
-
-out:
- return kvm_skip_emulated_instruction(vcpu);
}
/*
@@ -13122,24 +13838,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_clear_interrupt_queue(vcpu);
}
-static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
-{
- u32 entry_failure_code;
-
- nested_ept_uninit_mmu_context(vcpu);
-
- /*
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
- * couldn't have changed.
- */
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
-}
-
/*
 * A part of what we need to do when the nested L2 guest exits and we want
 * to run its L1 parent is to reset L1's guest state to the host state specified
@@ -13153,6 +13851,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
+ u32 entry_failure_code;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -13165,6 +13864,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
+ vmx_set_interrupt_shadow(vcpu, 0);
+
/*
* Note that calling vmx_set_cr0 is important, even if cr0 hasn't
* actually changed, because vmx_set_cr0 refers to efer set above.
@@ -13179,23 +13880,35 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vmx_set_cr4(vcpu, vmcs12->host_cr4);
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
+ nested_ept_uninit_mmu_context(vcpu);
/*
- * If vmcs01 don't use VPID, CPU flushes TLB on every
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+
+ /*
+ * If vmcs01 doesn't use VPID, CPU flushes TLB on every
* VMEntry/VMExit. Thus, no need to flush TLB.
*
- * If vmcs12 uses VPID, TLB entries populated by L2 are
- * tagged with vmx->nested.vpid02 while L1 entries are tagged
- * with vmx->vpid. Thus, no need to flush TLB.
+ * If vmcs12 doesn't use VPID, L1 expects TLB to be
+ * flushed on every VMEntry/VMExit.
+ *
+ * Otherwise, we can preserve TLB entries as long as we are
+ * able to tag L1 TLB entries differently than L2 TLB entries.
*
- * Therefore, flush TLB only in case vmcs01 uses VPID and
- * vmcs12 don't use VPID as in this case L1 & L2 TLB entries
- * are both tagged with vmx->vpid.
+ * If vmcs12 uses EPT, we need to execute this flush on EPTP01
+ * and therefore we request the TLB flush to happen only after VMCS EPTP
+ * has been set by KVM_REQ_LOAD_CR3.
*/
if (enable_vpid &&
- !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) {
- vmx_flush_tlb(vcpu, true);
+ (!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
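The comment above condenses to a small predicate; a hedged restatement with illustrative names (not kernel API), mirroring the condition used in the if() above:

static bool nested_vmexit_needs_tlb_flush(bool enable_vpid,
					  bool vmcs12_has_vpid,
					  bool has_guest_tlb_tag)
{
	/* Without VPID the CPU flushes on every VMEntry/VMExit anyway. */
	if (!enable_vpid)
		return false;

	/* Flush unless L1 and L2 entries carry distinct TLB tags. */
	return !vmcs12_has_vpid || !has_guest_tlb_tag;
}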
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
@@ -13275,6 +13988,140 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
+static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
+{
+ struct shared_msr_entry *efer_msr;
+ unsigned int i;
+
+ if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
+ return vmcs_read64(GUEST_IA32_EFER);
+
+ if (cpu_has_load_ia32_efer)
+ return host_efer;
+
+ for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
+ if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
+ return vmx->msr_autoload.guest.val[i].value;
+ }
+
+ efer_msr = find_msr_entry(vmx, MSR_EFER);
+ if (efer_msr)
+ return efer_msr->data;
+
+ return host_efer;
+}
+
+static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct vmx_msr_entry g, h;
+ struct msr_data msr;
+ gpa_t gpa;
+ u32 i, j;
+
+ vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
+
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ /*
+ * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
+ * as vmcs01.GUEST_DR7 contains a userspace defined value
+ * and vcpu->arch.dr7 is not squirreled away before the
+ * nested VMENTER (not worth adding a variable in nested_vmx).
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+ kvm_set_dr(vcpu, 7, DR7_FIXED_1);
+ else
+ WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
+ }
+
+ /*
+ * Note that calling vmx_set_{efer,cr0,cr4} is important as they
+ * handle a variety of side effects to KVM's software model.
+ */
+ vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
+
+ vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+ vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
+
+ vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
+ vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
+
+ nested_ept_uninit_mmu_context(vcpu);
+ vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
+ __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
+
+ /*
+ * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
+ * from vmcs01 (if necessary). The PDPTRs are not loaded on
+ * VMFail, like everything else we just need to ensure our
+ * software model is up-to-date.
+ */
+ ept_save_pdptrs(vcpu);
+
+ kvm_mmu_reset_context(vcpu);
+
+ if (cpu_has_vmx_msr_bitmap())
+ vmx_update_msr_bitmap(vcpu);
+
+ /*
+ * This nasty bit of open coding is a compromise between blindly
+ * loading L1's MSRs using the exit load lists (incorrect emulation
+ * of VMFail), leaving the nested VM's MSRs in the software model
+ * (incorrect behavior) and snapshotting the modified MSRs (too
+	 * expensive since the lists are unbounded by hardware). For each
+ * MSR that was (prematurely) loaded from the nested VMEntry load
+ * list, reload it from the exit load list if it exists and differs
+ * from the guest value. The intent is to stuff host state as
+ * silently as possible, not to fully process the exit load list.
+ */
+ msr.host_initiated = false;
+ for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
+ gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
+ pr_debug_ratelimited(
+ "%s read MSR index failed (%u, 0x%08llx)\n",
+ __func__, i, gpa);
+ goto vmabort;
+ }
+
+ for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
+ gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
+ if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
+ pr_debug_ratelimited(
+ "%s read MSR failed (%u, 0x%08llx)\n",
+ __func__, j, gpa);
+ goto vmabort;
+ }
+ if (h.index != g.index)
+ continue;
+ if (h.value == g.value)
+ break;
+
+ if (nested_vmx_load_msr_check(vcpu, &h)) {
+ pr_debug_ratelimited(
+ "%s check failed (%u, 0x%x, 0x%x)\n",
+ __func__, j, h.index, h.reserved);
+ goto vmabort;
+ }
+
+ msr.index = h.index;
+ msr.data = h.value;
+ if (kvm_set_msr(vcpu, &msr)) {
+ pr_debug_ratelimited(
+ "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
+ __func__, j, h.index, h.value);
+ goto vmabort;
+ }
+ }
+ }
+
+ return;
+
+vmabort:
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
+}
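For context, each VM-entry/VM-exit MSR list walked by the loop above is an array of 16-byte entries in guest memory (SDM vol. 3, 24.8.2). A sketch of the layout, mirroring struct vmx_msr_entry, to make the gpa arithmetic concrete:

struct vmx_msr_entry {		/* 16 bytes per list element */
	u32 index;		/* MSR number, e.g. MSR_EFER */
	u32 reserved;		/* must be zero */
	u64 value;		/* value to load into the MSR */
};

/* Entry i of a list starting at list_gpa lives at: */
/*	gpa = list_gpa + i * sizeof(struct vmx_msr_entry);	*/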
+
/*
* Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
* and modify vmcs12 to make it see what it would expect to see there if
@@ -13290,14 +14137,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
- /*
- * The only expected VM-instruction error is "VM entry with
- * invalid control field(s)." Anything else indicates a
- * problem with L0.
- */
- WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) !=
- VMXERR_ENTRY_INVALID_CONTROL_FIELD));
-
leave_guest_mode(vcpu);
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
@@ -13324,12 +14163,19 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ } else {
+ /*
+ * The only expected VM-instruction error is "VM entry with
+ * invalid control field(s)." Anything else indicates a
+ * problem with L0. And we should never get here with a
+ * VMFail of any type if early consistency checks are enabled.
+ */
+ WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
+ VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+ WARN_ON_ONCE(nested_early_check);
}
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
- vm_entry_controls_reset_shadow(vmx);
- vm_exit_controls_reset_shadow(vmx);
- vmx_segment_cache_clear(vmx);
/* Update any VMCS fields that might have changed while L2 ran */
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
@@ -13373,8 +14219,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
*/
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
- if (enable_shadow_vmcs && exit_reason != -1)
- vmx->nested.sync_shadow_vmcs = true;
+ if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
+ vmx->nested.need_vmcs12_sync = true;
/* in case we halted in L2 */
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -13409,24 +14255,24 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
return;
}
-
+
/*
* After an early L2 VM-entry failure, we're now back
* in L1 which thinks it just finished a VMLAUNCH or
* VMRESUME instruction, so we need to set the failure
* flag and the VM-instruction error field of the VMCS
- * accordingly.
+ * accordingly, and skip the emulated instruction.
*/
- nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-
- load_vmcs12_mmu_host_state(vcpu, vmcs12);
+ (void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
/*
- * The emulated instruction was already skipped in
- * nested_vmx_run, but the updated RIP was never
- * written back to the vmcs01.
+ * Restore L1's host state to KVM's software model. We're here
+ * because a consistency check was caught by hardware, which
+ * means some amount of guest state has been propagated to KVM's
+ * model and needs to be unwound to the host's state.
*/
- skip_emulated_instruction(vcpu);
+ nested_vmx_restore_host_state(vcpu);
+
vmx->fail = 0;
}
@@ -13439,26 +14285,7 @@ static void vmx_leave_nested(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->nested.nested_run_pending = 0;
nested_vmx_vmexit(vcpu, -1, 0, 0);
}
- free_nested(to_vmx(vcpu));
-}
-
-/*
- * L1's failure to enter L2 is a subset of a normal exit, as explained in
- * 23.7 "VM-entry failures during or after loading guest state" (this also
- * lists the acceptable exit-reason and exit-qualification parameters).
- * It should only be called before L2 actually succeeded to run, and when
- * vmcs01 is current (it doesn't leave_guest_mode() or switch vmcss).
- */
-static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12,
- u32 reason, unsigned long qualification)
-{
- load_vmcs12_host_state(vcpu, vmcs12);
- vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
- vmcs12->exit_qualification = qualification;
- nested_vmx_succeed(vcpu);
- if (enable_shadow_vmcs)
- to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
+ free_nested(vcpu);
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
@@ -13884,7 +14711,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
if (vmx->nested.smm.guest_mode) {
vcpu->arch.hflags &= ~HF_SMM_MASK;
- ret = enter_vmx_non_root_mode(vcpu, NULL);
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
vcpu->arch.hflags |= HF_SMM_MASK;
if (ret)
return ret;
@@ -13899,6 +14726,20 @@ static int enable_smi_window(struct kvm_vcpu *vcpu)
return 0;
}
+static inline int vmx_has_valid_vmcs12(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ /*
+ * In case we do two consecutive get/set_nested_state()s while L2 was
+	 * running, hv_evmcs may end up not being mapped (we map it from
+ * nested_vmx_run()/vmx_vcpu_run()). Check is_guest_mode() as we always
+ * have vmcs12 if it is true.
+ */
+ return is_guest_mode(vcpu) || vmx->nested.current_vmptr != -1ull ||
+ vmx->nested.hv_evmcs;
+}
+
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
u32 user_data_size)
@@ -13918,12 +14759,16 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
vmx = to_vmx(vcpu);
vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
+ kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
+
if (nested_vmx_allowed(vcpu) &&
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
- if (vmx->nested.current_vmptr != -1ull) {
+ if (vmx_has_valid_vmcs12(vcpu)) {
kvm_state.size += VMCS12_SIZE;
if (is_guest_mode(vcpu) &&
@@ -13952,20 +14797,24 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
return -EFAULT;
- if (vmx->nested.current_vmptr == -1ull)
+ if (!vmx_has_valid_vmcs12(vcpu))
goto out;
/*
* When running L2, the authoritative vmcs12 state is in the
* vmcs02. When running L1, the authoritative vmcs12 state is
- * in the shadow vmcs linked to vmcs01, unless
- * sync_shadow_vmcs is set, in which case, the authoritative
+ * in the shadow or enlightened vmcs linked to vmcs01, unless
+ * need_vmcs12_sync is set, in which case, the authoritative
* vmcs12 state is in the vmcs12 already.
*/
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
sync_vmcs12(vcpu, vmcs12);
- else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs)
- copy_shadow_to_vmcs12(vmx);
+ } else if (!vmx->nested.need_vmcs12_sync) {
+ if (vmx->nested.hv_evmcs)
+ copy_enlightened_to_vmcs12(vmx);
+ else if (enable_shadow_vmcs)
+ copy_shadow_to_vmcs12(vmx);
+ }
if (copy_to_user(user_kvm_nested_state->data, vmcs12, sizeof(*vmcs12)))
return -EFAULT;
@@ -13993,6 +14842,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (kvm_state->format != 0)
return -EINVAL;
+ if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
+ nested_enable_evmcs(vcpu, NULL);
+
if (!nested_vmx_allowed(vcpu))
return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
@@ -14010,13 +14862,6 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
return -EINVAL;
- if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
- return -EINVAL;
-
- if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
- !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
- return -EINVAL;
-
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
return -EINVAL;
@@ -14046,7 +14891,25 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
if (ret)
return ret;
- set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+ /* Empty 'VMXON' state is permitted */
+ if (kvm_state->size < sizeof(kvm_state) + sizeof(*vmcs12))
+ return 0;
+
+ if (kvm_state->vmx.vmcs_pa != -1ull) {
+ if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
+ !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
+ return -EINVAL;
+
+ set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
+ } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
+ /*
+ * Sync eVMCS upon entry as we may not have
+ * HV_X64_MSR_VP_ASSIST_PAGE set up yet.
+ */
+ vmx->nested.need_vmcs12_sync = true;
+ } else {
+ return -EINVAL;
+ }
if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
vmx->nested.smm.vmxon = true;
@@ -14090,7 +14953,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
vmx->nested.dirty_vmcs12 = true;
- ret = enter_vmx_non_root_mode(vcpu, NULL);
+ ret = nested_vmx_enter_non_root_mode(vcpu, false);
if (ret)
return -EINVAL;
@@ -14242,6 +15105,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.pre_enter_smm = vmx_pre_enter_smm,
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
+
+ .nested_enable_evmcs = nested_enable_evmcs,
};
static void vmx_cleanup_l1d_flush(void)
diff --git a/arch/x86/kvm/vmx_shadow_fields.h b/arch/x86/kvm/vmx_shadow_fields.h
index cd0c75f6d037..132432f375c2 100644
--- a/arch/x86/kvm/vmx_shadow_fields.h
+++ b/arch/x86/kvm/vmx_shadow_fields.h
@@ -28,7 +28,6 @@
*/
/* 16-bits */
-SHADOW_FIELD_RW(GUEST_CS_SELECTOR)
SHADOW_FIELD_RW(GUEST_INTR_STATUS)
SHADOW_FIELD_RW(GUEST_PML_INDEX)
SHADOW_FIELD_RW(HOST_FS_SELECTOR)
@@ -47,8 +46,8 @@ SHADOW_FIELD_RW(VM_ENTRY_EXCEPTION_ERROR_CODE)
SHADOW_FIELD_RW(VM_ENTRY_INTR_INFO_FIELD)
SHADOW_FIELD_RW(VM_ENTRY_INSTRUCTION_LEN)
SHADOW_FIELD_RW(TPR_THRESHOLD)
-SHADOW_FIELD_RW(GUEST_CS_LIMIT)
SHADOW_FIELD_RW(GUEST_CS_AR_BYTES)
+SHADOW_FIELD_RW(GUEST_SS_AR_BYTES)
SHADOW_FIELD_RW(GUEST_INTERRUPTIBILITY_INFO)
SHADOW_FIELD_RW(VMX_PREEMPTION_TIMER_VALUE)
@@ -61,8 +60,6 @@ SHADOW_FIELD_RW(GUEST_CR0)
SHADOW_FIELD_RW(GUEST_CR3)
SHADOW_FIELD_RW(GUEST_CR4)
SHADOW_FIELD_RW(GUEST_RFLAGS)
-SHADOW_FIELD_RW(GUEST_CS_BASE)
-SHADOW_FIELD_RW(GUEST_ES_BASE)
SHADOW_FIELD_RW(CR0_GUEST_HOST_MASK)
SHADOW_FIELD_RW(CR0_READ_SHADOW)
SHADOW_FIELD_RW(CR4_READ_SHADOW)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ca717737347e..66d66d77caee 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -136,7 +136,7 @@ static u32 __read_mostly tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
/* lapic timer advance (tscdeadline mode only) in nanoseconds */
-unsigned int __read_mostly lapic_timer_advance_ns = 0;
+unsigned int __read_mostly lapic_timer_advance_ns = 1000;
module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
EXPORT_SYMBOL_GPL(lapic_timer_advance_ns);
@@ -400,9 +400,51 @@ static int exception_type(int vector)
return EXCPT_FAULT;
}
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
+{
+ unsigned nr = vcpu->arch.exception.nr;
+ bool has_payload = vcpu->arch.exception.has_payload;
+ unsigned long payload = vcpu->arch.exception.payload;
+
+ if (!has_payload)
+ return;
+
+ switch (nr) {
+ case DB_VECTOR:
+ /*
+		 * "Certain debug exceptions may clear bits 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~DR_TRAP_BITS;
+ /*
+ * DR6.RTM is set by all #DB exceptions that don't clear it.
+ */
+ vcpu->arch.dr6 |= DR6_RTM;
+ vcpu->arch.dr6 |= payload;
+ /*
+ * Bit 16 should be set in the payload whenever the #DB
+ * exception should clear DR6.RTM. This makes the payload
+ * compatible with the pending debug exceptions under VMX.
+ * Though not currently documented in the SDM, this also
+ * makes the payload compatible with the exit qualification
+ * for #DB exceptions under VMX.
+ */
+ vcpu->arch.dr6 ^= payload & DR6_RTM;
+ break;
+ case PF_VECTOR:
+ vcpu->arch.cr2 = payload;
+ break;
+ }
+
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
+}
+EXPORT_SYMBOL_GPL(kvm_deliver_exception_payload);
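A compact restatement of the DR6 merge above, useful when reasoning about #DB payload values (illustrative helper, not kernel API). For example, a single-step trap queued with payload DR6_BS leaves DR6.RTM set, while a payload with bit 16 set flips DR6.RTM clear:

static unsigned long dr6_after_db_payload(unsigned long dr6,
					  unsigned long payload)
{
	dr6 &= ~DR_TRAP_BITS;		/* low trap bits cleared first */
	dr6 |= DR6_RTM;			/* set unless the payload clears it */
	dr6 |= payload;
	dr6 ^= payload & DR6_RTM;	/* payload bit 16 set => clear RTM */
	return dr6;
}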
+
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
- bool reinject)
+ bool has_payload, unsigned long payload, bool reinject)
{
u32 prev_nr;
int class1, class2;
@@ -424,6 +466,14 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
*/
WARN_ON_ONCE(vcpu->arch.exception.pending);
vcpu->arch.exception.injected = true;
+ if (WARN_ON_ONCE(has_payload)) {
+ /*
+ * A reinjected event has already
+ * delivered its payload.
+ */
+ has_payload = false;
+ payload = 0;
+ }
} else {
vcpu->arch.exception.pending = true;
vcpu->arch.exception.injected = false;
@@ -431,6 +481,22 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
+ vcpu->arch.exception.has_payload = has_payload;
+ vcpu->arch.exception.payload = payload;
+ /*
+ * In guest mode, payload delivery should be deferred,
+ * so that the L1 hypervisor can intercept #PF before
+ * CR2 is modified (or intercept #DB before DR6 is
+ * modified under nVMX). However, for ABI
+ * compatibility with KVM_GET_VCPU_EVENTS and
+ * KVM_SET_VCPU_EVENTS, we can't delay payload
+ * delivery unless userspace has enabled this
+ * functionality via the per-VM capability,
+ * KVM_CAP_EXCEPTION_PAYLOAD.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled ||
+ !is_guest_mode(vcpu))
+ kvm_deliver_exception_payload(vcpu);
return;
}
@@ -455,6 +521,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.has_error_code = true;
vcpu->arch.exception.nr = DF_VECTOR;
vcpu->arch.exception.error_code = 0;
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
} else
/* replace previous exception with a new one in a hope
that instruction re-execution will regenerate lost
@@ -464,16 +532,29 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0, false);
+ kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
- kvm_multiple_exception(vcpu, nr, false, 0, true);
+ kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
+static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+ unsigned long payload)
+{
+ kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
+}
+
+static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
+ u32 error_code, unsigned long payload)
+{
+ kvm_multiple_exception(vcpu, nr, true, error_code,
+ true, payload, false);
+}
+
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
@@ -490,11 +571,13 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
++vcpu->stat.pf_guest;
vcpu->arch.exception.nested_apf =
is_guest_mode(vcpu) && fault->async_page_fault;
- if (vcpu->arch.exception.nested_apf)
+ if (vcpu->arch.exception.nested_apf) {
vcpu->arch.apf.nested_apf_token = fault->address;
- else
- vcpu->arch.cr2 = fault->address;
- kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
+ kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
+ } else {
+ kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code,
+ fault->address);
+ }
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
@@ -503,7 +586,7 @@ static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fau
if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
- vcpu->arch.mmu.inject_page_fault(vcpu, fault);
+ vcpu->arch.mmu->inject_page_fault(vcpu, fault);
return fault->nested_page_fault;
}
@@ -517,13 +600,13 @@ EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code, false);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
- kvm_multiple_exception(vcpu, nr, true, error_code, true);
+ kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
@@ -602,7 +685,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if ((pdpte[i] & PT_PRESENT_MASK) &&
(pdpte[i] &
- vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
+ vcpu->arch.mmu->guest_rsvd_check.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
}
@@ -2477,7 +2560,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_PV_EOI_EN:
- if (kvm_lapic_enable_pv_eoi(vcpu, data))
+ if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8)))
return 1;
break;
@@ -2912,6 +2995,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_HYPERV_VP_INDEX:
case KVM_CAP_HYPERV_EVENTFD:
case KVM_CAP_HYPERV_TLBFLUSH:
+ case KVM_CAP_HYPERV_SEND_IPI:
+ case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
@@ -2930,6 +3015,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_IMMEDIATE_EXIT:
case KVM_CAP_GET_MSR_FEATURES:
case KVM_CAP_MSR_PLATFORM_INFO:
+ case KVM_CAP_EXCEPTION_PAYLOAD:
r = 1;
break;
case KVM_CAP_SYNC_REGS:
@@ -3362,19 +3448,33 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
+
/*
- * FIXME: pass injected and pending separately. This is only
- * needed for nested virtualization, whose state cannot be
- * migrated yet. For now we can combine them.
+ * The API doesn't provide the instruction length for software
+ * exceptions, so don't report them. As long as the guest RIP
+ * isn't advanced, we should expect to encounter the exception
+ * again.
*/
- events->exception.injected =
- (vcpu->arch.exception.pending ||
- vcpu->arch.exception.injected) &&
- !kvm_exception_is_soft(vcpu->arch.exception.nr);
+ if (kvm_exception_is_soft(vcpu->arch.exception.nr)) {
+ events->exception.injected = 0;
+ events->exception.pending = 0;
+ } else {
+ events->exception.injected = vcpu->arch.exception.injected;
+ events->exception.pending = vcpu->arch.exception.pending;
+ /*
+ * For ABI compatibility, deliberately conflate
+ * pending and injected exceptions when
+ * KVM_CAP_EXCEPTION_PAYLOAD isn't enabled.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled)
+ events->exception.injected |=
+ vcpu->arch.exception.pending;
+ }
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
- events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
+ events->exception_has_payload = vcpu->arch.exception.has_payload;
+ events->exception_payload = vcpu->arch.exception.payload;
events->interrupt.injected =
vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft;
@@ -3398,6 +3498,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SHADOW
| KVM_VCPUEVENT_VALID_SMM);
+ if (vcpu->kvm->arch.exception_payload_enabled)
+ events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
+
memset(&events->reserved, 0, sizeof(events->reserved));
}
@@ -3409,12 +3512,24 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW
- | KVM_VCPUEVENT_VALID_SMM))
+ | KVM_VCPUEVENT_VALID_SMM
+ | KVM_VCPUEVENT_VALID_PAYLOAD))
return -EINVAL;
- if (events->exception.injected &&
- (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR ||
- is_guest_mode(vcpu)))
+ if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
+ if (!vcpu->kvm->arch.exception_payload_enabled)
+ return -EINVAL;
+ if (events->exception.pending)
+ events->exception.injected = 0;
+ else
+ events->exception_has_payload = 0;
+ } else {
+ events->exception.pending = 0;
+ events->exception_has_payload = 0;
+ }
+
+ if ((events->exception.injected || events->exception.pending) &&
+ (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
return -EINVAL;
/* INITs are latched while in SMM */
@@ -3424,11 +3539,13 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
return -EINVAL;
process_nmi(vcpu);
- vcpu->arch.exception.injected = false;
- vcpu->arch.exception.pending = events->exception.injected;
+ vcpu->arch.exception.injected = events->exception.injected;
+ vcpu->arch.exception.pending = events->exception.pending;
vcpu->arch.exception.nr = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu->arch.exception.error_code = events->exception.error_code;
+ vcpu->arch.exception.has_payload = events->exception_has_payload;
+ vcpu->arch.exception.payload = events->exception_payload;
vcpu->arch.interrupt.injected = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
@@ -3694,6 +3811,10 @@ static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
struct kvm_enable_cap *cap)
{
+ int r;
+ uint16_t vmcs_version;
+ void __user *user_ptr;
+
if (cap->flags)
return -EINVAL;
@@ -3706,6 +3827,16 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
return -EINVAL;
return kvm_hv_activate_synic(vcpu, cap->cap ==
KVM_CAP_HYPERV_SYNIC2);
+ case KVM_CAP_HYPERV_ENLIGHTENED_VMCS:
+ r = kvm_x86_ops->nested_enable_evmcs(vcpu, &vmcs_version);
+ if (!r) {
+ user_ptr = (void __user *)(uintptr_t)cap->args[0];
+ if (copy_to_user(user_ptr, &vmcs_version,
+ sizeof(vmcs_version)))
+ r = -EFAULT;
+ }
+ return r;
+
default:
return -EINVAL;
}
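A hedged userspace sketch of driving this path: enabling the enlightened VMCS on a vCPU and reading back the supported eVMCS version through args[0]. vcpu_fd is assumed to be an open KVM vCPU descriptor; error handling is elided:

uint16_t vmcs_version = 0;
struct kvm_enable_cap cap = {
	.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
	/* the kernel copies the supported eVMCS version to this address */
	.args = { (uint64_t)(uintptr_t)&vmcs_version },
};
ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);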
@@ -4047,11 +4178,13 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
if (kvm_state.flags &
- ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE))
+ ~(KVM_STATE_NESTED_RUN_PENDING | KVM_STATE_NESTED_GUEST_MODE
+ | KVM_STATE_NESTED_EVMCS))
break;
/* nested_run_pending implies guest_mode. */
- if (kvm_state.flags == KVM_STATE_NESTED_RUN_PENDING)
+ if ((kvm_state.flags & KVM_STATE_NESTED_RUN_PENDING)
+ && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
break;
r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
@@ -4363,6 +4496,10 @@ split_irqchip_unlock:
kvm->arch.guest_can_read_msr_platform_info = cap->args[0];
r = 0;
break;
+ case KVM_CAP_EXCEPTION_PAYLOAD:
+ kvm->arch.exception_payload_enabled = cap->args[0];
+ r = 0;
+ break;
default:
r = -EINVAL;
break;
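Correspondingly, a hedged sketch of opting into deferred exception payloads from userspace; KVM_CAP_EXCEPTION_PAYLOAD is a VM-level capability, so it is enabled on the VM descriptor (assumed open; error handling elided):

struct kvm_enable_cap cap = {
	.cap = KVM_CAP_EXCEPTION_PAYLOAD,
	.args = { 1 },	/* a non-zero args[0] enables deferred payloads */
};
ioctl(vm_fd, KVM_ENABLE_CAP, &cap);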
@@ -4803,7 +4940,7 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
- t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception);
+ t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
return t_gpa;
}
@@ -5889,7 +6026,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
if (WARN_ON_ONCE(is_guest_mode(vcpu)))
return false;
- if (!vcpu->arch.mmu.direct_map) {
+ if (!vcpu->arch.mmu->direct_map) {
/*
* Write permission should be allowed since only
* write access need to be emulated.
@@ -5922,7 +6059,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
- if (vcpu->arch.mmu.direct_map) {
+ if (vcpu->arch.mmu->direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
@@ -5989,7 +6126,7 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
- if (!vcpu->arch.mmu.direct_map)
+ if (!vcpu->arch.mmu->direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
@@ -6049,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
kvm_run->exit_reason = KVM_EXIT_DEBUG;
*r = EMULATE_USER_EXIT;
} else {
- /*
- * "Certain debug exceptions may clear bit 0-3. The
- * remaining contents of the DR6 register are never
- * cleared by the processor".
- */
- vcpu->arch.dr6 &= ~15;
- vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
- kvm_queue_exception(vcpu, DB_VECTOR);
+ kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
}
}
@@ -6995,10 +7125,22 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
X86_EFLAGS_RF);
- if (vcpu->arch.exception.nr == DB_VECTOR &&
- (vcpu->arch.dr7 & DR7_GD)) {
- vcpu->arch.dr7 &= ~DR7_GD;
- kvm_update_dr7(vcpu);
+ if (vcpu->arch.exception.nr == DB_VECTOR) {
+ /*
+ * This code assumes that nSVM doesn't use
+ * check_nested_events(). If it does, the
+ * DR6/DR7 changes should happen before L1
+ * gets a #VMEXIT for an intercepted #DB in
+ * L2. (Under VMX, on the other hand, the
+ * DR6/DR7 changes should not happen in the
+ * event of a VM-exit to L1 for an intercepted
+ * #DB in L2.)
+ */
+ kvm_deliver_exception_payload(vcpu);
+ if (vcpu->arch.dr7 & DR7_GD) {
+ vcpu->arch.dr7 &= ~DR7_GD;
+ kvm_update_dr7(vcpu);
+ }
}
kvm_x86_ops->queue_exception(vcpu);
@@ -8478,7 +8620,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_vcpu_mtrr_init(vcpu);
vcpu_load(vcpu);
kvm_vcpu_reset(vcpu, false);
- kvm_mmu_setup(vcpu);
+ kvm_init_mmu(vcpu, false);
vcpu_put(vcpu);
return 0;
}
@@ -9327,7 +9469,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
- if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
+ if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) ||
work->wakeup_all)
return;
@@ -9335,11 +9477,11 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
if (unlikely(r))
return;
- if (!vcpu->arch.mmu.direct_map &&
- work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+ if (!vcpu->arch.mmu->direct_map &&
+ work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
return;
- vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+ vcpu->arch.mmu->page_fault(vcpu, work->gva, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
@@ -9463,6 +9605,8 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
vcpu->arch.exception.nr = 0;
vcpu->arch.exception.has_error_code = false;
vcpu->arch.exception.error_code = 0;
+ vcpu->arch.exception.has_payload = false;
+ vcpu->arch.exception.payload = 0;
} else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 67b9568613f3..224cd0a47568 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -266,6 +266,8 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu,
int handle_ud(struct kvm_vcpu *vcpu);
+void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu);
+
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu);
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 5559dcaddd5e..948656069cdd 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -356,7 +356,7 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
} else {
struct pci_root_info *info;
- info = kzalloc_node(sizeof(*info), GFP_KERNEL, node);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
dev_err(&root->device->dev,
"pci_bus %04x:%02x: ignored (out of memory)\n",
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 13f4485ca388..30a5111ae5fd 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -629,17 +629,11 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
static void quirk_no_aersid(struct pci_dev *pdev)
{
/* VMD Domain */
- if (is_vmd(pdev->bus))
+ if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2030, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334a, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
+DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);
#ifdef CONFIG_PHYS_ADDR_T_64BIT
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index d67e30faff9c..be060dfb1cc3 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -80,28 +80,18 @@ LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
head-y := arch/xtensa/kernel/head.o
core-y += arch/xtensa/kernel/ arch/xtensa/mm/
core-y += $(buildvar) $(buildplf)
+core-y += arch/xtensa/boot/dts/
libs-y += arch/xtensa/lib/ $(LIBGCC)
drivers-$(CONFIG_OPROFILE) += arch/xtensa/oprofile/
-ifneq ($(CONFIG_BUILTIN_DTB),"")
-core-$(CONFIG_OF) += arch/xtensa/boot/dts/
-endif
-
boot := arch/xtensa/boot
all Image zImage uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
-%.dtb:
- $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
-
-dtbs: scripts
- $(Q)$(MAKE) $(build)=$(boot)/dts
-
define archhelp
@echo '* Image - Kernel ELF image with reset vector'
@echo '* zImage - Compressed kernel image (arch/xtensa/boot/images/zImage.*)'
@echo '* uImage - U-Boot wrapped image'
- @echo ' dtbs - Build device tree blobs for enabled boards'
endef
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index ed66db3bc9bb..574e5520968c 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -5,9 +5,9 @@
#define __ARCH_WANT_SYS_CLONE
#include <uapi/asm/unistd.h>
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_STAT64
#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_LLSEEK
#define __ARCH_WANT_SYS_GETPGRP
/*
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 42285f35d313..820e8738af11 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -94,7 +94,7 @@ static void __init xtfpga_clk_setup(struct device_node *np)
u32 freq;
if (!base) {
- pr_err("%s: invalid address\n", np->name);
+ pr_err("%pOFn: invalid address\n", np);
return;
}
@@ -103,12 +103,12 @@ static void __init xtfpga_clk_setup(struct device_node *np)
clk = clk_register_fixed_rate(NULL, np->name, NULL, 0, freq);
if (IS_ERR(clk)) {
- pr_err("%s: clk registration failed\n", np->name);
+ pr_err("%pOFn: clk registration failed\n", np);
return;
}
if (of_clk_add_provider(np, of_clk_src_simple_get, clk)) {
- pr_err("%s: clk provider registration failed\n", np->name);
+ pr_err("%pOFn: clk provider registration failed\n", np);
return;
}
}
diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
index 476b5a90a5a4..4b0d5fb69160 100644
--- a/block/bfq-wf2q.c
+++ b/block/bfq-wf2q.c
@@ -792,24 +792,18 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
* queue, remove the entity from its old weight counter (if
* there is a counter associated with the entity).
*/
- if (prev_weight != new_weight) {
- if (bfqq) {
- root = &bfqd->queue_weights_tree;
- __bfq_weights_tree_remove(bfqd, bfqq, root);
- } else
- bfqd->num_active_groups--;
+ if (prev_weight != new_weight && bfqq) {
+ root = &bfqd->queue_weights_tree;
+ __bfq_weights_tree_remove(bfqd, bfqq, root);
}
entity->weight = new_weight;
/*
* Add the entity, if it is not a weight-raised queue,
* to the counter associated with its new weight.
*/
- if (prev_weight != new_weight) {
- if (bfqq && bfqq->wr_coeff == 1) {
- /* If we get here, root has been initialized. */
- bfq_weights_tree_add(bfqd, bfqq, root);
- } else
- bfqd->num_active_groups++;
+ if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) {
+ /* If we get here, root has been initialized. */
+ bfq_weights_tree_add(bfqd, bfqq, root);
}
new_st->wsum += entity->weight;
diff --git a/block/blk-core.c b/block/blk-core.c
index 3ed60723e242..bc6ea87d10e0 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2300,7 +2300,6 @@ generic_make_request_checks(struct bio *bio)
if (!q->limits.max_write_same_sectors)
goto not_supported;
break;
- case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET:
if (!blk_queue_is_zoned(q))
goto not_supported;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index bbd44666f2b5..76f867ea9a9b 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -10,8 +10,7 @@
#include "blk.h"
-static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
- gfp_t gfp)
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
struct bio *new = bio_alloc(gfp, nr_pages);
@@ -63,7 +62,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
end_sect = sector + req_sects;
- bio = next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0);
@@ -165,7 +164,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = UINT_MAX >> 9;
while (nr_sects) {
- bio = next_bio(bio, 1, gfp_mask);
+ bio = blk_next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_vcnt = 1;
@@ -241,7 +240,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP;
while (nr_sects) {
- bio = next_bio(bio, 0, gfp_mask);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES;
@@ -292,8 +291,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM;
while (nr_sects != 0) {
- bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
- gfp_mask);
+ bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
+ gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 41b86f50d126..10b284a1f18d 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -283,7 +283,6 @@ static const char *const op_name[] = {
REQ_OP_NAME(WRITE),
REQ_OP_NAME(FLUSH),
REQ_OP_NAME(DISCARD),
- REQ_OP_NAME(ZONE_REPORT),
REQ_OP_NAME(SECURE_ERASE),
REQ_OP_NAME(ZONE_RESET),
REQ_OP_NAME(WRITE_SAME),
diff --git a/block/blk-mq.c b/block/blk-mq.c
index dcf10e39995a..3f91c6e5b17a 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1850,8 +1850,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
rq_qos_throttle(q, bio, NULL);
- trace_block_getrq(q, bio, bio->bi_opf);
-
rq = blk_mq_get_request(q, bio, bio->bi_opf, &data);
if (unlikely(!rq)) {
rq_qos_cleanup(q, bio);
@@ -1860,6 +1858,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE;
}
+ trace_block_getrq(q, bio, bio->bi_opf);
+
rq_qos_track(q, rq, bio);
cookie = request_to_qc_t(data.hctx, rq);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3772671cf2bc..0641533597f1 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -300,6 +300,11 @@ static ssize_t queue_zoned_show(struct request_queue *q, char *page)
}
}
+static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
+{
+ return queue_var_show(blk_queue_nr_zones(q), page);
+}
+
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
return queue_var_show((blk_queue_nomerges(q) << 1) |
@@ -637,6 +642,11 @@ static struct queue_sysfs_entry queue_zoned_entry = {
.show = queue_zoned_show,
};
+static struct queue_sysfs_entry queue_nr_zones_entry = {
+ .attr = {.name = "nr_zones", .mode = 0444 },
+ .show = queue_nr_zones_show,
+};
+
static struct queue_sysfs_entry queue_nomerges_entry = {
.attr = {.name = "nomerges", .mode = 0644 },
.show = queue_nomerges_show,
@@ -727,6 +737,7 @@ static struct attribute *default_attrs[] = {
&queue_write_zeroes_max_entry.attr,
&queue_nonrot_entry.attr,
&queue_zoned_entry.attr,
+ &queue_nr_zones_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
&queue_iostats_entry.attr,
@@ -841,6 +852,8 @@ static void __blk_release_queue(struct work_struct *work)
if (q->queue_tags)
__blk_queue_free_tags(q);
+ blk_queue_free_zone_bitmaps(q);
+
if (!q->mq_ops) {
if (q->exit_rq_fn)
q->exit_rq_fn(q, q->fq->flush_rq);
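
The new read-only attribute surfaces as /sys/block/<disk>/queue/nr_zones. For illustration, a minimal user-space sketch that reads it; the disk name is a placeholder, not part of the patch:

	/* Read the new nr_zones queue attribute ("sdX" is a placeholder). */
	#include <stdio.h>

	int main(void)
	{
		unsigned int nr_zones = 0;
		FILE *f = fopen("/sys/block/sdX/queue/nr_zones", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &nr_zones) == 1)
			printf("nr_zones = %u\n", nr_zones);
		fclose(f);
		return 0;
	}
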
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index c461cf63f1f4..13ba2011a306 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -12,6 +12,9 @@
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
+
+#include "blk.h"
static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
@@ -63,14 +66,38 @@ void __blk_req_zone_write_unlock(struct request *rq)
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
+static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
+ sector_t nr_sectors)
+{
+ unsigned long zone_sectors = blk_queue_zone_sectors(q);
+
+ return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
+}
+
+/**
+ * blkdev_nr_zones - Get number of zones
+ * @bdev: Target block device
+ *
+ * Description:
+ * Return the total number of zones of a zoned block device.
+ * For a regular block device, the number of zones is always 0.
+ */
+unsigned int blkdev_nr_zones(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (!blk_queue_is_zoned(q))
+ return 0;
+
+ return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+}
+EXPORT_SYMBOL_GPL(blkdev_nr_zones);
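
As a reading aid, a minimal sketch of a kernel-side caller of the new helper; the function name and its error policy are assumptions, not part of the patch:

	#include <linux/blkdev.h>

	/* Hypothetical caller; only blkdev_nr_zones() comes from this patch. */
	static int my_check_zoned(struct block_device *bdev)
	{
		unsigned int nr_zones = blkdev_nr_zones(bdev);

		/* blkdev_nr_zones() returns 0 for regular (non-zoned) devices. */
		if (!nr_zones)
			return -EOPNOTSUPP;

		pr_info("device has %u zones\n", nr_zones);
		return 0;
	}
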
+
/*
- * Check that a zone report belongs to the partition.
- * If yes, fix its start sector and write pointer, copy it in the
- * zone information array and return true. Return false otherwise.
+ * Check that a zone report belongs to this partition, and if yes, fix its start
+ * sector and write pointer and return true. Return false otherwise.
*/
-static bool blkdev_report_zone(struct block_device *bdev,
- struct blk_zone *rep,
- struct blk_zone *zone)
+static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
sector_t offset = get_start_sect(bdev);
@@ -85,11 +112,36 @@ static bool blkdev_report_zone(struct block_device *bdev,
rep->wp = rep->start + rep->len;
else
rep->wp -= offset;
- memcpy(zone, rep, sizeof(struct blk_zone));
-
return true;
}
+static int blk_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int z = 0, n, nrz = *nr_zones;
+ sector_t capacity = get_capacity(disk);
+ int ret;
+
+ while (z < nrz && sector < capacity) {
+ n = nrz - z;
+ ret = disk->fops->report_zones(disk, sector, &zones[z], &n,
+ gfp_mask);
+ if (ret)
+ return ret;
+ if (!n)
+ break;
+ sector += blk_queue_zone_sectors(q) * n;
+ z += n;
+ }
+
+ WARN_ON(z > *nr_zones);
+ *nr_zones = z;
+
+ return 0;
+}
+
/**
* blkdev_report_zones - Get zones information
* @bdev: Target block device
@@ -104,130 +156,46 @@ static bool blkdev_report_zone(struct block_device *bdev,
* requested by @nr_zones. The number of zones actually reported is
* returned in @nr_zones.
*/
-int blkdev_report_zones(struct block_device *bdev,
- sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones,
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
gfp_t gfp_mask)
{
struct request_queue *q = bdev_get_queue(bdev);
- struct blk_zone_report_hdr *hdr;
- unsigned int nrz = *nr_zones;
- struct page *page;
- unsigned int nr_rep;
- size_t rep_bytes;
- unsigned int nr_pages;
- struct bio *bio;
- struct bio_vec *bv;
- unsigned int i, n, nz;
- unsigned int ofst;
- void *addr;
+ unsigned int i, nrz;
int ret;
- if (!q)
- return -ENXIO;
-
if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP;
- if (!nrz)
- return 0;
-
- if (sector > bdev->bd_part->nr_sects) {
- *nr_zones = 0;
- return 0;
- }
-
/*
- * The zone report has a header. So make room for it in the
- * payload. Also make sure that the report fits in a single BIO
- * that will not be split down the stack.
+	 * A block device that advertised itself as zoned must have a
+ * report_zones method. If it does not have one defined, the device
+ * driver has a bug. So warn about that.
*/
- rep_bytes = sizeof(struct blk_zone_report_hdr) +
- sizeof(struct blk_zone) * nrz;
- rep_bytes = (rep_bytes + PAGE_SIZE - 1) & PAGE_MASK;
- if (rep_bytes > (queue_max_sectors(q) << 9))
- rep_bytes = queue_max_sectors(q) << 9;
-
- nr_pages = min_t(unsigned int, BIO_MAX_PAGES,
- rep_bytes >> PAGE_SHIFT);
- nr_pages = min_t(unsigned int, nr_pages,
- queue_max_segments(q));
-
- bio = bio_alloc(gfp_mask, nr_pages);
- if (!bio)
- return -ENOMEM;
-
- bio_set_dev(bio, bdev);
- bio->bi_iter.bi_sector = blk_zone_start(q, sector);
- bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
+ if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+ return -EOPNOTSUPP;
- for (i = 0; i < nr_pages; i++) {
- page = alloc_page(gfp_mask);
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
- if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
- __free_page(page);
- break;
- }
+ if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
+ *nr_zones = 0;
+ return 0;
}
- if (i == 0)
- ret = -ENOMEM;
- else
- ret = submit_bio_wait(bio);
+ nrz = min(*nr_zones,
+ __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
+ ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
+ zones, &nrz, gfp_mask);
if (ret)
- goto out;
-
- /*
- * Process the report result: skip the header and go through the
- * reported zones to fixup and fixup the zone information for
- * partitions. At the same time, return the zone information into
- * the zone array.
- */
- n = 0;
- nz = 0;
- nr_rep = 0;
- bio_for_each_segment_all(bv, bio, i) {
-
- if (!bv->bv_page)
- break;
-
- addr = kmap_atomic(bv->bv_page);
-
- /* Get header in the first page */
- ofst = 0;
- if (!nr_rep) {
- hdr = addr;
- nr_rep = hdr->nr_zones;
- ofst = sizeof(struct blk_zone_report_hdr);
- }
-
- /* Fixup and report zones */
- while (ofst < bv->bv_len &&
- n < nr_rep && nz < nrz) {
- if (blkdev_report_zone(bdev, addr + ofst, &zones[nz]))
- nz++;
- ofst += sizeof(struct blk_zone);
- n++;
- }
-
- kunmap_atomic(addr);
+ return ret;
- if (n >= nr_rep || nz >= nrz)
+ for (i = 0; i < nrz; i++) {
+ if (!blkdev_report_zone(bdev, zones))
break;
-
+ zones++;
}
- *nr_zones = nz;
-out:
- bio_for_each_segment_all(bv, bio, i)
- __free_page(bv->bv_page);
- bio_put(bio);
+ *nr_zones = i;
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
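
A sketch of the simplified calling convention: the caller passes in an array, and *nr_zones is updated to the number of entries actually filled. The helper name and the on-stack batch size are illustrative assumptions (a real caller would likely allocate the array):

	static int report_first_zones(struct block_device *bdev)
	{
		struct blk_zone zones[16];	/* arbitrary batch size for the sketch */
		unsigned int nr_zones = ARRAY_SIZE(zones);
		int ret;

		ret = blkdev_report_zones(bdev, 0, zones, &nr_zones, GFP_KERNEL);
		if (ret)
			return ret;

		/* nr_zones now holds the number of zones actually reported. */
		return nr_zones;
	}
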
@@ -250,16 +218,17 @@ int blkdev_reset_zones(struct block_device *bdev,
struct request_queue *q = bdev_get_queue(bdev);
sector_t zone_sectors;
sector_t end_sector = sector + nr_sectors;
- struct bio *bio;
+ struct bio *bio = NULL;
+ struct blk_plug plug;
int ret;
- if (!q)
- return -ENXIO;
-
if (!blk_queue_is_zoned(q))
return -EOPNOTSUPP;
- if (end_sector > bdev->bd_part->nr_sects)
+ if (bdev_read_only(bdev))
+ return -EPERM;
+
+ if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
/* Out of range */
return -EINVAL;
@@ -272,19 +241,14 @@ int blkdev_reset_zones(struct block_device *bdev,
end_sector != bdev->bd_part->nr_sects)
return -EINVAL;
+ blk_start_plug(&plug);
while (sector < end_sector) {
- bio = bio_alloc(gfp_mask, 0);
+ bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
- ret = submit_bio_wait(bio);
- bio_put(bio);
-
- if (ret)
- return ret;
-
sector += zone_sectors;
/* This may take a while, so be nice to others */
@@ -292,7 +256,12 @@ int blkdev_reset_zones(struct block_device *bdev,
}
- return 0;
+ ret = submit_bio_wait(bio);
+ bio_put(bio);
+
+ blk_finish_plug(&plug);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);
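
The rework batches the per-zone reset bios: blk_next_bio() chains each new bio to the previous one and submits it, so the single submit_bio_wait() above returns only once the entire chain has completed, and the plug lets the requests coalesce. For reference, the helper's full body (the hunk in blk-lib.c above elides its unchanged lines) is:

	struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
	{
		struct bio *new = bio_alloc(gfp, nr_pages);

		if (bio) {
			bio_chain(bio, new);	/* "new" completes only after "bio" does */
			submit_bio(bio);
		}

		return new;
	}
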
@@ -328,8 +297,7 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
if (!rep.nr_zones)
return -EINVAL;
- if (rep.nr_zones > INT_MAX / sizeof(struct blk_zone))
- return -ERANGE;
+ rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
GFP_KERNEL | __GFP_ZERO);
@@ -392,3 +360,138 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
GFP_KERNEL);
}
+
+static inline unsigned long *blk_alloc_zone_bitmap(int node,
+ unsigned int nr_zones)
+{
+ return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
+ GFP_NOIO, node);
+}
+
+/*
+ * Allocate an array of struct blk_zone to hold nr_zones zone descriptors.
+ * The allocated array may be smaller than nr_zones.
+ */
+static struct blk_zone *blk_alloc_zones(int node, unsigned int *nr_zones)
+{
+ size_t size = *nr_zones * sizeof(struct blk_zone);
+ struct page *page;
+ int order;
+
+ for (order = get_order(size); order > 0; order--) {
+ page = alloc_pages_node(node, GFP_NOIO | __GFP_ZERO, order);
+ if (page) {
+ *nr_zones = min_t(unsigned int, *nr_zones,
+ (PAGE_SIZE << order) / sizeof(struct blk_zone));
+ return page_address(page);
+ }
+ }
+
+ return NULL;
+}
+
+void blk_queue_free_zone_bitmaps(struct request_queue *q)
+{
+ kfree(q->seq_zones_bitmap);
+ q->seq_zones_bitmap = NULL;
+ kfree(q->seq_zones_wlock);
+ q->seq_zones_wlock = NULL;
+}
+
+/**
+ * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
+ * @disk: Target disk
+ *
+ * Helper function for low-level device drivers to (re)allocate and initialize
+ * a disk request queue's zone bitmaps. This function should normally be called
+ * within the disk ->revalidate method. For BIO based queues, no zone bitmap
+ * is allocated.
+ */
+int blk_revalidate_disk_zones(struct gendisk *disk)
+{
+ struct request_queue *q = disk->queue;
+ unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
+ unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
+ unsigned int i, rep_nr_zones = 0, z = 0, nrz;
+ struct blk_zone *zones = NULL;
+ sector_t sector = 0;
+ int ret = 0;
+
+ /*
+ * BIO based queues do not use a scheduler so only q->nr_zones
+ * needs to be updated so that the sysfs exposed value is correct.
+ */
+ if (!queue_is_rq_based(q)) {
+ q->nr_zones = nr_zones;
+ return 0;
+ }
+
+ if (!blk_queue_is_zoned(q) || !nr_zones) {
+ nr_zones = 0;
+ goto update;
+ }
+
+ /* Allocate bitmaps */
+ ret = -ENOMEM;
+ seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!seq_zones_wlock)
+ goto out;
+ seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
+ if (!seq_zones_bitmap)
+ goto out;
+
+ /* Get zone information and initialize seq_zones_bitmap */
+ rep_nr_zones = nr_zones;
+ zones = blk_alloc_zones(q->node, &rep_nr_zones);
+ if (!zones)
+ goto out;
+
+ while (z < nr_zones) {
+ nrz = min(nr_zones - z, rep_nr_zones);
+ ret = blk_report_zones(disk, sector, zones, &nrz, GFP_NOIO);
+ if (ret)
+ goto out;
+ if (!nrz)
+ break;
+ for (i = 0; i < nrz; i++) {
+ if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
+ set_bit(z, seq_zones_bitmap);
+ z++;
+ }
+ sector += nrz * blk_queue_zone_sectors(q);
+ }
+
+ if (WARN_ON(z != nr_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+
+update:
+ /*
+ * Install the new bitmaps, making sure the queue is stopped and
+ * all I/Os are completed (i.e. a scheduler is not referencing the
+ * bitmaps).
+ */
+ blk_mq_freeze_queue(q);
+ q->nr_zones = nr_zones;
+ swap(q->seq_zones_wlock, seq_zones_wlock);
+ swap(q->seq_zones_bitmap, seq_zones_bitmap);
+ blk_mq_unfreeze_queue(q);
+
+out:
+ free_pages((unsigned long)zones,
+ get_order(rep_nr_zones * sizeof(struct blk_zone)));
+ kfree(seq_zones_wlock);
+ kfree(seq_zones_bitmap);
+
+ if (ret) {
+ pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
+ blk_mq_freeze_queue(q);
+ blk_queue_free_zone_bitmaps(q);
+ blk_mq_unfreeze_queue(q);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
+
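
As the kernel-doc above notes, drivers are expected to invoke this from their disk revalidation path; a hedged sketch of such a hook (the driver function itself is hypothetical):

	static int my_revalidate_disk(struct gendisk *disk)
	{
		if (!blk_queue_is_zoned(disk->queue))
			return 0;

		/* (Re)build q->nr_zones and the zone bitmaps for this disk. */
		return blk_revalidate_disk_zones(disk);
	}
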
diff --git a/block/blk.h b/block/blk.h
index 3d2aecba96a4..a1841b8ff129 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -488,4 +488,12 @@ extern int blk_iolatency_init(struct request_queue *q);
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif
+struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp);
+
+#ifdef CONFIG_BLK_DEV_ZONED
+void blk_queue_free_zone_bitmaps(struct request_queue *q);
+#else
+static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
+#endif
+
#endif /* BLK_INTERNAL_H */
diff --git a/block/ioctl.c b/block/ioctl.c
index 3884d810efd2..4825c78a6baa 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -532,6 +532,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return blkdev_report_zones_ioctl(bdev, mode, cmd, arg);
case BLKRESETZONE:
return blkdev_reset_zones_ioctl(bdev, mode, cmd, arg);
+ case BLKGETZONESZ:
+ return put_uint(arg, bdev_zone_sectors(bdev));
+ case BLKGETNRZONES:
+ return put_uint(arg, blkdev_nr_zones(bdev));
case HDIO_GETGEO:
return blkdev_getgeo(bdev, argp);
case BLKRAGET:
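
For illustration, a user-space sketch exercising the two new ioctls; the device path is a placeholder and the program structure is an assumption:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/blkzoned.h>

	int main(void)
	{
		unsigned int zone_sectors = 0, nr_zones = 0;
		int fd = open("/dev/sdX", O_RDONLY);	/* placeholder device */

		if (fd < 0)
			return 1;
		if (!ioctl(fd, BLKGETZONESZ, &zone_sectors) &&
		    !ioctl(fd, BLKGETNRZONES, &nr_zones))
			printf("%u zones of %u sectors\n", nr_zones, zone_sectors);
		close(fd);
		return 0;
	}
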
diff --git a/crypto/Kconfig b/crypto/Kconfig
index f3e40ac56d93..f7a235db56aa 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -213,20 +213,6 @@ config CRYPTO_CRYPTD
converts an arbitrary synchronous software crypto algorithm
into an asynchronous algorithm that executes in a kernel thread.
-config CRYPTO_MCRYPTD
- tristate "Software async multi-buffer crypto daemon"
- select CRYPTO_BLKCIPHER
- select CRYPTO_HASH
- select CRYPTO_MANAGER
- select CRYPTO_WORKQUEUE
- help
- This is a generic software asynchronous crypto daemon that
- provides the kernel thread to assist multi-buffer crypto
- algorithms for submitting jobs and flushing jobs in multi-buffer
- crypto algorithms. Multi-buffer crypto algorithms are executed
- in the context of this kernel thread and drivers can post
- their crypto request asynchronously to be processed by this daemon.
-
config CRYPTO_AUTHENC
tristate "Authenc support"
select CRYPTO_AEAD
@@ -470,6 +456,18 @@ config CRYPTO_LRW
The first 128, 192 or 256 bits in the key are used for AES and the
rest is used to tie each cipher block to its logical position.
+config CRYPTO_OFB
+ tristate "OFB support"
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_MANAGER
+ help
+ OFB: the Output Feedback mode makes a block cipher into a synchronous
+ stream cipher. It generates keystream blocks, which are then XORed
+ with the plaintext blocks to get the ciphertext. Flipping a bit in the
+ ciphertext produces a flipped bit in the plaintext at the same
+ location. This property allows many error correcting codes to function
+ normally even when applied before encryption.
+
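
Assuming the new template registers under the usual "ofb(<cipher>)" naming, a minimal kernel-side allocation sketch (key setup and request handling elided):

	#include <linux/err.h>
	#include <crypto/skcipher.h>

	static int ofb_demo(void)
	{
		/* "ofb(aes)" name assumed from the template convention. */
		struct crypto_skcipher *tfm = crypto_alloc_skcipher("ofb(aes)", 0, 0);

		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		/* ... crypto_skcipher_setkey(), request setup, encrypt/decrypt ... */
		crypto_free_skcipher(tfm);
		return 0;
	}
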
config CRYPTO_PCBC
tristate "PCBC support"
select CRYPTO_BLKCIPHER
@@ -848,54 +846,6 @@ config CRYPTO_SHA1_PPC_SPE
SHA-1 secure hash standard (DFIPS 180-4) implemented
using powerpc SPE SIMD instruction set.
-config CRYPTO_SHA1_MB
- tristate "SHA1 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA1
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA256_MB
- tristate "SHA256 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA256
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-256 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
-config CRYPTO_SHA512_MB
- tristate "SHA512 digest algorithm (x86_64 Multi-Buffer, Experimental)"
- depends on X86 && 64BIT
- select CRYPTO_SHA512
- select CRYPTO_HASH
- select CRYPTO_MCRYPTD
- help
- SHA-512 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
- using multi-buffer technique. This algorithm computes on
- multiple data lanes concurrently with SIMD instructions for
- better throughput. It should not be enabled by default but
- used when there is significant amount of work to keep the keep
- the data lanes filled to get performance benefit. If the data
- lanes remain unfilled, a flush operation will be initiated to
- process the crypto jobs, adding a slight latency.
-
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@@ -1133,7 +1083,7 @@ config CRYPTO_AES_NI_INTEL
In addition to AES cipher algorithm support, the acceleration
for some popular block cipher mode is supported too, including
- ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
+ ECB, CBC, LRW, XTS. The 64 bit version has additional
acceleration for CTR.
config CRYPTO_AES_SPARC64
@@ -1590,20 +1540,6 @@ config CRYPTO_SM4
If unsure, say N.
-config CRYPTO_SPECK
- tristate "Speck cipher algorithm"
- select CRYPTO_ALGAPI
- help
- Speck is a lightweight block cipher that is tuned for optimal
- performance in software (rather than hardware).
-
- Speck may not be as secure as AES, and should only be used on systems
- where AES is not fast enough.
-
- See also: <https://eprint.iacr.org/2013/404.pdf>
-
- If unsure, say N.
-
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
@@ -1875,6 +1811,17 @@ config CRYPTO_USER_API_AEAD
This option enables the user-spaces interface for AEAD
cipher algorithms.
+config CRYPTO_STATS
+ bool "Crypto usage statistics for User-space"
+ help
+ This option enables the gathering of crypto stats.
+ This will collect:
+	   - encrypt/decrypt size and numbers of symmetric operations
+ - compress/decompress size and numbers of compress operations
+ - size and numbers of hash operations
+ - encrypt/decrypt/sign/verify numbers for asymmetric operations
+ - generate/seed numbers for rng operations
+
config CRYPTO_HASH_INFO
bool
diff --git a/crypto/Makefile b/crypto/Makefile
index 6d1d40eeb964..5c207c76abf7 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -54,6 +54,7 @@ cryptomgr-y := algboss.o testmgr.o
obj-$(CONFIG_CRYPTO_MANAGER2) += cryptomgr.o
obj-$(CONFIG_CRYPTO_USER) += crypto_user.o
+crypto_user-y := crypto_user_base.o crypto_user_stat.o
obj-$(CONFIG_CRYPTO_CMAC) += cmac.o
obj-$(CONFIG_CRYPTO_HMAC) += hmac.o
obj-$(CONFIG_CRYPTO_VMAC) += vmac.o
@@ -93,7 +94,6 @@ obj-$(CONFIG_CRYPTO_MORUS640) += morus640.o
obj-$(CONFIG_CRYPTO_MORUS1280) += morus1280.o
obj-$(CONFIG_CRYPTO_PCRYPT) += pcrypt.o
obj-$(CONFIG_CRYPTO_CRYPTD) += cryptd.o
-obj-$(CONFIG_CRYPTO_MCRYPTD) += mcryptd.o
obj-$(CONFIG_CRYPTO_DES) += des_generic.o
obj-$(CONFIG_CRYPTO_FCRYPT) += fcrypt.o
obj-$(CONFIG_CRYPTO_BLOWFISH) += blowfish_generic.o
@@ -115,7 +115,6 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
@@ -143,6 +142,7 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
obj-$(CONFIG_CRYPTO_USER_API_RNG) += algif_rng.o
obj-$(CONFIG_CRYPTO_USER_API_AEAD) += algif_aead.o
obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o
+obj-$(CONFIG_CRYPTO_OFB) += ofb.o
ecdh_generic-y := ecc.o
ecdh_generic-y += ecdh.o
diff --git a/crypto/aegis.h b/crypto/aegis.h
index f1c6900ddb80..405e025fc906 100644
--- a/crypto/aegis.h
+++ b/crypto/aegis.h
@@ -21,7 +21,7 @@
union aegis_block {
__le64 words64[AEGIS_BLOCK_SIZE / sizeof(__le64)];
- u32 words32[AEGIS_BLOCK_SIZE / sizeof(u32)];
+ __le32 words32[AEGIS_BLOCK_SIZE / sizeof(__le32)];
u8 bytes[AEGIS_BLOCK_SIZE];
};
@@ -57,24 +57,22 @@ static void crypto_aegis_aesenc(union aegis_block *dst,
const union aegis_block *src,
const union aegis_block *key)
{
- u32 *d = dst->words32;
const u8 *s = src->bytes;
- const u32 *k = key->words32;
const u32 *t0 = crypto_ft_tab[0];
const u32 *t1 = crypto_ft_tab[1];
const u32 *t2 = crypto_ft_tab[2];
const u32 *t3 = crypto_ft_tab[3];
u32 d0, d1, d2, d3;
- d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]] ^ k[0];
- d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]] ^ k[1];
- d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]] ^ k[2];
- d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]] ^ k[3];
+ d0 = t0[s[ 0]] ^ t1[s[ 5]] ^ t2[s[10]] ^ t3[s[15]];
+ d1 = t0[s[ 4]] ^ t1[s[ 9]] ^ t2[s[14]] ^ t3[s[ 3]];
+ d2 = t0[s[ 8]] ^ t1[s[13]] ^ t2[s[ 2]] ^ t3[s[ 7]];
+ d3 = t0[s[12]] ^ t1[s[ 1]] ^ t2[s[ 6]] ^ t3[s[11]];
- d[0] = d0;
- d[1] = d1;
- d[2] = d2;
- d[3] = d3;
+ dst->words32[0] = cpu_to_le32(d0) ^ key->words32[0];
+ dst->words32[1] = cpu_to_le32(d1) ^ key->words32[1];
+ dst->words32[2] = cpu_to_le32(d2) ^ key->words32[2];
+ dst->words32[3] = cpu_to_le32(d3) ^ key->words32[3];
}
#endif /* _CRYPTO_AEGIS_H */
diff --git a/crypto/ahash.c b/crypto/ahash.c
index a64c143165b1..e21667b4e10a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -364,24 +364,35 @@ static int crypto_ahash_op(struct ahash_request *req,
int crypto_ahash_final(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ int ret;
+
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);
int crypto_ahash_finup(struct ahash_request *req)
{
- return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ int ret;
+
+ ret = crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);
int crypto_ahash_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ int ret;
if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_ahash_op(req, tfm->digest);
+ ret = -ENOKEY;
+ else
+ ret = crypto_ahash_op(req, tfm->digest);
+ crypto_stat_ahash_final(req, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
@@ -550,8 +561,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
{
struct crypto_alg *base = &alg->halg.base;
- if (alg->halg.digestsize > PAGE_SIZE / 8 ||
- alg->halg.statesize > PAGE_SIZE / 8 ||
+ if (alg->halg.digestsize > HASH_MAX_DIGESTSIZE ||
+ alg->halg.statesize > HASH_MAX_STATESIZE ||
alg->halg.statesize == 0)
return -EINVAL;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index c0755cf4f53f..2545c5f89c4c 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -57,9 +57,14 @@ static int crypto_check_alg(struct crypto_alg *alg)
if (alg->cra_alignmask & (alg->cra_alignmask + 1))
return -EINVAL;
- if (alg->cra_blocksize > PAGE_SIZE / 8)
+ /* General maximums for all algs. */
+ if (alg->cra_alignmask > MAX_ALGAPI_ALIGNMASK)
return -EINVAL;
+ if (alg->cra_blocksize > MAX_ALGAPI_BLOCKSIZE)
+ return -EINVAL;
+
+ /* Lower maximums for specific alg types. */
if (!alg->cra_type && (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) ==
CRYPTO_ALG_TYPE_CIPHER) {
if (alg->cra_alignmask > MAX_CIPHER_ALIGNMASK)
@@ -253,6 +258,14 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
list_add(&alg->cra_list, &crypto_alg_list);
list_add(&larval->alg.cra_list, &crypto_alg_list);
+ atomic_set(&alg->encrypt_cnt, 0);
+ atomic_set(&alg->decrypt_cnt, 0);
+ atomic64_set(&alg->encrypt_tlen, 0);
+ atomic64_set(&alg->decrypt_tlen, 0);
+ atomic_set(&alg->verify_cnt, 0);
+ atomic_set(&alg->cipher_err_cnt, 0);
+ atomic_set(&alg->sign_cnt, 0);
+
out:
return larval;
@@ -367,6 +380,8 @@ static void crypto_wait_for_test(struct crypto_larval *larval)
err = wait_for_completion_killable(&larval->completion);
WARN_ON(err);
+ if (!err)
+ crypto_probing_notify(CRYPTO_MSG_ALG_LOADED, larval);
out:
crypto_larval_kill(&larval->alg);
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 5e6df2a087fa..527b44d0af21 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -274,6 +274,8 @@ static int cryptomgr_notify(struct notifier_block *this, unsigned long msg,
return cryptomgr_schedule_probe(data);
case CRYPTO_MSG_ALG_REGISTER:
return cryptomgr_schedule_test(data);
+ case CRYPTO_MSG_ALG_LOADED:
+ break;
}
return NOTIFY_DONE;
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index c40a8c7ee8ae..eb100a04ce9f 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -42,7 +42,7 @@
struct aead_tfm {
struct crypto_aead *aead;
- struct crypto_skcipher *null_tfm;
+ struct crypto_sync_skcipher *null_tfm;
};
static inline bool aead_sufficient_data(struct sock *sk)
@@ -75,13 +75,13 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
return af_alg_sendmsg(sock, msg, size, ivsize);
}
-static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
+static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
{
- SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
- skcipher_request_set_tfm(skreq, null_tfm);
+ skcipher_request_set_sync_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
@@ -99,7 +99,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
- struct crypto_skcipher *null_tfm = aeadc->null_tfm;
+ struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
@@ -478,7 +478,7 @@ static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
- struct crypto_skcipher *null_tfm;
+ struct crypto_sync_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index bfcf595fd8f9..d0cde541beb6 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -239,7 +239,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
+ char state[HASH_MAX_STATESIZE];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 4fa8d40d947b..37f54d1b2f66 100644
--- a/crypto/authenc.c
+++ b/crypto/authenc.c
@@ -33,7 +33,7 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
};
struct authenc_request_ctx {
@@ -185,9 +185,9 @@ static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
@@ -318,7 +318,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 50b804747e20..80a25cc04aec 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -36,7 +36,7 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -183,9 +183,9 @@ static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
- SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
- skcipher_request_set_tfm(skreq, ctx->null);
+ skcipher_request_set_sync_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
@@ -341,7 +341,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 0a083342ec8c..b242fd0d3262 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -50,7 +50,10 @@ struct crypto_ccm_req_priv_ctx {
u32 flags;
struct scatterlist src[3];
struct scatterlist dst[3];
- struct skcipher_request skreq;
+ union {
+ struct ahash_request ahreq;
+ struct skcipher_request skreq;
+ };
};
struct cbcmac_tfm_ctx {
@@ -181,7 +184,7 @@ static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain,
struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
struct crypto_aead *aead = crypto_aead_reqtfm(req);
struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
- AHASH_REQUEST_ON_STACK(ahreq, ctx->mac);
+ struct ahash_request *ahreq = &pctx->ahreq;
unsigned int assoclen = req->assoclen;
struct scatterlist sg[3];
u8 *odata = pctx->odata;
@@ -427,7 +430,7 @@ static int crypto_ccm_init_tfm(struct crypto_aead *tfm)
crypto_aead_set_reqsize(
tfm,
align + sizeof(struct crypto_ccm_req_priv_ctx) +
- crypto_skcipher_reqsize(ctr));
+ max(crypto_ahash_reqsize(mac), crypto_skcipher_reqsize(ctr)));
return 0;
diff --git a/crypto/chacha20_generic.c b/crypto/chacha20_generic.c
index e451c3cb6a56..3ae96587caf9 100644
--- a/crypto/chacha20_generic.c
+++ b/crypto/chacha20_generic.c
@@ -18,20 +18,21 @@
static void chacha20_docrypt(u32 *state, u8 *dst, const u8 *src,
unsigned int bytes)
{
- u32 stream[CHACHA20_BLOCK_WORDS];
+ /* aligned to potentially speed up crypto_xor() */
+ u8 stream[CHACHA20_BLOCK_SIZE] __aligned(sizeof(long));
if (dst != src)
memcpy(dst, src, bytes);
while (bytes >= CHACHA20_BLOCK_SIZE) {
chacha20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, CHACHA20_BLOCK_SIZE);
+ crypto_xor(dst, stream, CHACHA20_BLOCK_SIZE);
bytes -= CHACHA20_BLOCK_SIZE;
dst += CHACHA20_BLOCK_SIZE;
}
if (bytes) {
chacha20_block(state, stream);
- crypto_xor(dst, (const u8 *)stream, bytes);
+ crypto_xor(dst, stream, bytes);
}
}
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index addca7bae33f..7118fb5efbaa 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -76,7 +76,7 @@ struct cryptd_blkcipher_request_ctx {
struct cryptd_skcipher_ctx {
atomic_t refcnt;
- struct crypto_skcipher *child;
+ struct crypto_sync_skcipher *child;
};
struct cryptd_skcipher_request_ctx {
@@ -449,14 +449,16 @@ static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
const u8 *key, unsigned int keylen)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
- struct crypto_skcipher *child = ctx->child;
+ struct crypto_sync_skcipher *child = ctx->child;
int err;
- crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(child,
+ crypto_skcipher_get_flags(parent) &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(child, key, keylen);
- crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
+ err = crypto_sync_skcipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent,
+ crypto_sync_skcipher_get_flags(child) &
CRYPTO_TFM_RES_MASK);
return err;
}
@@ -483,13 +485,13 @@ static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ struct crypto_sync_skcipher *child = ctx->child;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
if (unlikely(err == -EINPROGRESS))
goto out;
- skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_sync_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -511,13 +513,13 @@ static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_skcipher *child = ctx->child;
- SKCIPHER_REQUEST_ON_STACK(subreq, child);
+ struct crypto_sync_skcipher *child = ctx->child;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child);
if (unlikely(err == -EINPROGRESS))
goto out;
- skcipher_request_set_tfm(subreq, child);
+ skcipher_request_set_sync_tfm(subreq, child);
skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
@@ -568,7 +570,7 @@ static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
if (IS_ERR(cipher))
return PTR_ERR(cipher);
- ctx->child = cipher;
+ ctx->child = (struct crypto_sync_skcipher *)cipher;
crypto_skcipher_set_reqsize(
tfm, sizeof(struct cryptd_skcipher_request_ctx));
return 0;
@@ -578,7 +580,7 @@ static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->child);
+ crypto_free_sync_skcipher(ctx->child);
}
static void cryptd_skcipher_free(struct skcipher_instance *inst)
@@ -1243,7 +1245,7 @@ struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);
- return ctx->child;
+ return &ctx->child->base;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);
diff --git a/crypto/crypto_null.c b/crypto/crypto_null.c
index 0959b268966c..0bae59922a80 100644
--- a/crypto/crypto_null.c
+++ b/crypto/crypto_null.c
@@ -26,7 +26,7 @@
#include <linux/string.h>
static DEFINE_MUTEX(crypto_default_null_skcipher_lock);
-static struct crypto_skcipher *crypto_default_null_skcipher;
+static struct crypto_sync_skcipher *crypto_default_null_skcipher;
static int crypto_default_null_skcipher_refcnt;
static int null_compress(struct crypto_tfm *tfm, const u8 *src,
@@ -152,16 +152,15 @@ MODULE_ALIAS_CRYPTO("compress_null");
MODULE_ALIAS_CRYPTO("digest_null");
MODULE_ALIAS_CRYPTO("cipher_null");
-struct crypto_skcipher *crypto_get_default_null_skcipher(void)
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void)
{
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
mutex_lock(&crypto_default_null_skcipher_lock);
tfm = crypto_default_null_skcipher;
if (!tfm) {
- tfm = crypto_alloc_skcipher("ecb(cipher_null)",
- 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
if (IS_ERR(tfm))
goto unlock;
@@ -181,7 +180,7 @@ void crypto_put_default_null_skcipher(void)
{
mutex_lock(&crypto_default_null_skcipher_lock);
if (!--crypto_default_null_skcipher_refcnt) {
- crypto_free_skcipher(crypto_default_null_skcipher);
+ crypto_free_sync_skcipher(crypto_default_null_skcipher);
crypto_default_null_skcipher = NULL;
}
mutex_unlock(&crypto_default_null_skcipher_lock);
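
The conversions in these hunks all follow one pattern: a crypto_sync_skcipher (guaranteed non-async, so its request fits on the stack) replaces a plain crypto_skcipher that was allocated with CRYPTO_ALG_ASYNC masked out. A self-contained sketch of the pattern; the function and its scatterlist parameters are assumptions:

	#include <linux/err.h>
	#include <crypto/skcipher.h>

	static int null_copy_demo(struct scatterlist *src, struct scatterlist *dst,
				  unsigned int len)
	{
		struct crypto_sync_skcipher *tfm;
		int err;

		tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

			skcipher_request_set_sync_tfm(req, tfm);
			skcipher_request_set_callback(req, 0, NULL, NULL);
			skcipher_request_set_crypt(req, src, dst, len, NULL);
			err = crypto_skcipher_encrypt(req);
		}

		crypto_free_sync_skcipher(tfm);
		return err;
	}
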
diff --git a/crypto/crypto_user.c b/crypto/crypto_user_base.c
index 0e89b5457cab..e41f6cc33fff 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user_base.c
@@ -29,6 +29,7 @@
#include <crypto/internal/rng.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
#include "internal.h"
@@ -37,7 +38,7 @@
static DEFINE_MUTEX(crypto_cfg_mutex);
/* The crypto netlink socket */
-static struct sock *crypto_nlsk;
+struct sock *crypto_nlsk;
struct crypto_dump_info {
struct sk_buff *in_skb;
@@ -46,7 +47,7 @@ struct crypto_dump_info {
u16 nlmsg_flags;
};
-static struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact)
{
struct crypto_alg *q, *alg = NULL;
@@ -461,6 +462,7 @@ static const int crypto_msg_min[CRYPTO_NR_MSGTYPES] = {
[CRYPTO_MSG_UPDATEALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = 0,
+ [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = MSGSIZE(crypto_user_alg),
};
static const struct nla_policy crypto_policy[CRYPTOCFGA_MAX+1] = {
@@ -481,6 +483,9 @@ static const struct crypto_link {
.dump = crypto_dump_report,
.done = crypto_dump_report_done},
[CRYPTO_MSG_DELRNG - CRYPTO_MSG_BASE] = { .doit = crypto_del_rng },
+ [CRYPTO_MSG_GETSTAT - CRYPTO_MSG_BASE] = { .doit = crypto_reportstat,
+ .dump = crypto_dump_reportstat,
+ .done = crypto_dump_reportstat_done},
};
static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
new file mode 100644
index 000000000000..021ad06bbb62
--- /dev/null
+++ b/crypto/crypto_user_stat.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Crypto user configuration API.
+ *
+ * Copyright (C) 2017-2018 Corentin Labbe <clabbe@baylibre.com>
+ *
+ */
+
+#include <linux/crypto.h>
+#include <linux/cryptouser.h>
+#include <linux/sched.h>
+#include <net/netlink.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/rng.h>
+#include <crypto/akcipher.h>
+#include <crypto/kpp.h>
+#include <crypto/internal/cryptouser.h>
+
+#include "internal.h"
+
+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
+
+static DEFINE_MUTEX(crypto_cfg_mutex);
+
+extern struct sock *crypto_nlsk;
+
+struct crypto_dump_info {
+ struct sk_buff *in_skb;
+ struct sk_buff *out_skb;
+ u32 nlmsg_seq;
+ u16 nlmsg_flags;
+};
+
+static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat raead;
+ u64 v64;
+ u32 v32;
+
+ strncpy(raead.type, "aead", sizeof(raead.type));
+
+ v32 = atomic_read(&alg->encrypt_cnt);
+ raead.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ raead.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ raead.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ raead.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->aead_err_cnt);
+ raead.stat_aead_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_AEAD,
+ sizeof(struct crypto_stat), &raead))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rcipher;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+
+ v32 = atomic_read(&alg->encrypt_cnt);
+ rcipher.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ rcipher.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ rcipher.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ rcipher.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ rcipher.stat_cipher_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER,
+ sizeof(struct crypto_stat), &rcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rcomp;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+ v32 = atomic_read(&alg->compress_cnt);
+ rcomp.stat_compress_cnt = v32;
+ v64 = atomic64_read(&alg->compress_tlen);
+ rcomp.stat_compress_tlen = v64;
+ v32 = atomic_read(&alg->decompress_cnt);
+ rcomp.stat_decompress_cnt = v32;
+ v64 = atomic64_read(&alg->decompress_tlen);
+ rcomp.stat_decompress_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ rcomp.stat_compress_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS,
+ sizeof(struct crypto_stat), &rcomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat racomp;
+ u64 v64;
+ u32 v32;
+
+ strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+ v32 = atomic_read(&alg->compress_cnt);
+ racomp.stat_compress_cnt = v32;
+ v64 = atomic64_read(&alg->compress_tlen);
+ racomp.stat_compress_tlen = v64;
+ v32 = atomic_read(&alg->decompress_cnt);
+ racomp.stat_decompress_cnt = v32;
+ v64 = atomic64_read(&alg->decompress_tlen);
+ racomp.stat_decompress_tlen = v64;
+ v32 = atomic_read(&alg->cipher_err_cnt);
+ racomp.stat_compress_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP,
+ sizeof(struct crypto_stat), &racomp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rakcipher;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+ v32 = atomic_read(&alg->encrypt_cnt);
+ rakcipher.stat_encrypt_cnt = v32;
+ v64 = atomic64_read(&alg->encrypt_tlen);
+ rakcipher.stat_encrypt_tlen = v64;
+ v32 = atomic_read(&alg->decrypt_cnt);
+ rakcipher.stat_decrypt_cnt = v32;
+ v64 = atomic64_read(&alg->decrypt_tlen);
+ rakcipher.stat_decrypt_tlen = v64;
+ v32 = atomic_read(&alg->sign_cnt);
+ rakcipher.stat_sign_cnt = v32;
+ v32 = atomic_read(&alg->verify_cnt);
+ rakcipher.stat_verify_cnt = v32;
+ v32 = atomic_read(&alg->akcipher_err_cnt);
+ rakcipher.stat_akcipher_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER,
+ sizeof(struct crypto_stat), &rakcipher))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rkpp;
+ u32 v;
+
+ strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+
+ v = atomic_read(&alg->setsecret_cnt);
+ rkpp.stat_setsecret_cnt = v;
+ v = atomic_read(&alg->generate_public_key_cnt);
+ rkpp.stat_generate_public_key_cnt = v;
+ v = atomic_read(&alg->compute_shared_secret_cnt);
+ rkpp.stat_compute_shared_secret_cnt = v;
+ v = atomic_read(&alg->kpp_err_cnt);
+ rkpp.stat_kpp_err_cnt = v;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_KPP,
+ sizeof(struct crypto_stat), &rkpp))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rhash;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
+
+ v32 = atomic_read(&alg->hash_cnt);
+ rhash.stat_hash_cnt = v32;
+ v64 = atomic64_read(&alg->hash_tlen);
+ rhash.stat_hash_tlen = v64;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rhash.stat_hash_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+ sizeof(struct crypto_stat), &rhash))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rhash;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rhash.type, "shash", sizeof(rhash.type));
+
+ v32 = atomic_read(&alg->hash_cnt);
+ rhash.stat_hash_cnt = v32;
+ v64 = atomic64_read(&alg->hash_tlen);
+ rhash.stat_hash_tlen = v64;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rhash.stat_hash_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_HASH,
+ sizeof(struct crypto_stat), &rhash))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
+{
+ struct crypto_stat rrng;
+ u64 v64;
+ u32 v32;
+
+ strncpy(rrng.type, "rng", sizeof(rrng.type));
+
+ v32 = atomic_read(&alg->generate_cnt);
+ rrng.stat_generate_cnt = v32;
+ v64 = atomic64_read(&alg->generate_tlen);
+ rrng.stat_generate_tlen = v64;
+ v32 = atomic_read(&alg->seed_cnt);
+ rrng.stat_seed_cnt = v32;
+ v32 = atomic_read(&alg->hash_err_cnt);
+ rrng.stat_rng_err_cnt = v32;
+
+ if (nla_put(skb, CRYPTOCFGA_STAT_RNG,
+ sizeof(struct crypto_stat), &rrng))
+ goto nla_put_failure;
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_reportstat_one(struct crypto_alg *alg,
+ struct crypto_user_alg *ualg,
+ struct sk_buff *skb)
+{
+ strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+ strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+ sizeof(ualg->cru_driver_name));
+ strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+ sizeof(ualg->cru_module_name));
+
+ ualg->cru_type = 0;
+ ualg->cru_mask = 0;
+ ualg->cru_flags = alg->cra_flags;
+ ualg->cru_refcnt = refcount_read(&alg->cra_refcnt);
+
+ if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
+ goto nla_put_failure;
+ if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
+ struct crypto_stat rl;
+
+ strlcpy(rl.type, "larval", sizeof(rl.type));
+ if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
+ sizeof(struct crypto_stat), &rl))
+ goto nla_put_failure;
+ goto out;
+ }
+
+ switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) {
+ case CRYPTO_ALG_TYPE_AEAD:
+ if (crypto_report_aead(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_BLKCIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_CIPHER:
+ if (crypto_report_cipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_COMPRESS:
+ if (crypto_report_comp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_ACOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_SCOMPRESS:
+ if (crypto_report_acomp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_AKCIPHER:
+ if (crypto_report_akcipher(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_KPP:
+ if (crypto_report_kpp(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_AHASH:
+ if (crypto_report_ahash(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_HASH:
+ if (crypto_report_shash(skb, alg))
+ goto nla_put_failure;
+ break;
+ case CRYPTO_ALG_TYPE_RNG:
+ if (crypto_report_rng(skb, alg))
+ goto nla_put_failure;
+ break;
+ default:
+ pr_err("ERROR: Unhandled alg %d in %s\n",
+ alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL),
+ __func__);
+ }
+
+out:
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static int crypto_reportstat_alg(struct crypto_alg *alg,
+ struct crypto_dump_info *info)
+{
+ struct sk_buff *in_skb = info->in_skb;
+ struct sk_buff *skb = info->out_skb;
+ struct nlmsghdr *nlh;
+ struct crypto_user_alg *ualg;
+ int err = 0;
+
+ nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq,
+ CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags);
+ if (!nlh) {
+ err = -EMSGSIZE;
+ goto out;
+ }
+
+ ualg = nlmsg_data(nlh);
+
+ err = crypto_reportstat_one(alg, ualg, skb);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ goto out;
+ }
+
+ nlmsg_end(skb, nlh);
+
+out:
+ return err;
+}
+
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ struct nlattr **attrs)
+{
+ struct crypto_user_alg *p = nlmsg_data(in_nlh);
+ struct crypto_alg *alg;
+ struct sk_buff *skb;
+ struct crypto_dump_info info;
+ int err;
+
+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
+ return -EINVAL;
+
+ alg = crypto_alg_match(p, 0);
+ if (!alg)
+ return -ENOENT;
+
+ err = -ENOMEM;
+ skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!skb)
+ goto drop_alg;
+
+ info.in_skb = in_skb;
+ info.out_skb = skb;
+ info.nlmsg_seq = in_nlh->nlmsg_seq;
+ info.nlmsg_flags = 0;
+
+ err = crypto_reportstat_alg(alg, &info);
+
+drop_alg:
+ crypto_mod_put(alg);
+
+ if (err)
+ return err;
+
+ return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+}
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct crypto_alg *alg;
+ struct crypto_dump_info info;
+ int err;
+
+ if (cb->args[0])
+ goto out;
+
+ cb->args[0] = 1;
+
+ info.in_skb = cb->skb;
+ info.out_skb = skb;
+ info.nlmsg_seq = cb->nlh->nlmsg_seq;
+ info.nlmsg_flags = NLM_F_MULTI;
+
+ list_for_each_entry(alg, &crypto_alg_list, cra_list) {
+ err = crypto_reportstat_alg(alg, &info);
+ if (err)
+ goto out_err;
+ }
+
+out:
+ return skb->len;
+out_err:
+ return err;
+}
+
+int crypto_dump_reportstat_done(struct netlink_callback *cb)
+{
+ return 0;
+}
+
+MODULE_LICENSE("GPL");
diff --git a/crypto/echainiv.c b/crypto/echainiv.c
index 45819e6015bf..77e607fdbfb7 100644
--- a/crypto/echainiv.c
+++ b/crypto/echainiv.c
@@ -47,9 +47,9 @@ static int echainiv_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
- skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 0ad879e1f9b2..e438492db2ca 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -50,7 +50,7 @@ struct crypto_rfc4543_instance_ctx {
struct crypto_rfc4543_ctx {
struct crypto_aead *child;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
u8 nonce[4];
};
@@ -1067,9 +1067,9 @@ static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc)
unsigned int authsize = crypto_aead_authsize(aead);
unsigned int nbytes = req->assoclen + req->cryptlen -
(enc ? 0 : authsize);
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
- skcipher_request_set_tfm(nreq, ctx->null);
+ skcipher_request_set_sync_tfm(nreq, ctx->null);
skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
@@ -1093,7 +1093,7 @@ static int crypto_rfc4543_init_tfm(struct crypto_aead *tfm)
struct crypto_aead_spawn *spawn = &ictx->aead;
struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_aead *aead;
- struct crypto_skcipher *null;
+ struct crypto_sync_skcipher *null;
unsigned long align;
int err = 0;
diff --git a/crypto/internal.h b/crypto/internal.h
index 9a3f39939fba..ef769b5e8ad3 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -26,12 +26,6 @@
#include <linux/rwsem.h>
#include <linux/slab.h>
-/* Crypto notification events. */
-enum {
- CRYPTO_MSG_ALG_REQUEST,
- CRYPTO_MSG_ALG_REGISTER,
-};
-
struct crypto_instance;
struct crypto_template;
@@ -90,8 +84,6 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask);
-int crypto_register_notifier(struct notifier_block *nb);
-int crypto_unregister_notifier(struct notifier_block *nb);
int crypto_probing_notify(unsigned long val, void *v);
unsigned int crypto_alg_extsize(struct crypto_alg *alg);
diff --git a/crypto/lrw.c b/crypto/lrw.c
index 393a782679c7..0430ccd08728 100644
--- a/crypto/lrw.c
+++ b/crypto/lrw.c
@@ -29,8 +29,6 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#define LRW_BUFFER_SIZE 128u
-
#define LRW_BLOCK_SIZE 16
struct priv {
@@ -56,19 +54,7 @@ struct priv {
};
struct rctx {
- be128 buf[LRW_BUFFER_SIZE / sizeof(be128)];
-
be128 t;
-
- be128 *ext;
-
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
-
- unsigned int left;
-
struct skcipher_request subreq;
};
@@ -120,112 +106,68 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
return 0;
}
-static inline void inc(be128 *iv)
-{
- be64_add_cpu(&iv->b, 1);
- if (!iv->b)
- be64_add_cpu(&iv->a, 1);
-}
-
-/* this returns the number of consequative 1 bits starting
- * from the right, get_index128(00 00 00 00 00 00 ... 00 00 10 FB) = 2 */
-static inline int get_index128(be128 *block)
+/*
+ * Returns the number of trailing '1' bits in the words of the counter, which is
+ * represented by 4 32-bit words, arranged from least to most significant.
+ * At the same time, increments the counter by one.
+ *
+ * For example:
+ *
+ * u32 counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };
+ * int i = next_index(counter);
+ * // i == 33, counter == { 0x0, 0x2, 0x0, 0x0 }
+ */
+static int next_index(u32 *counter)
{
- int x;
- __be32 *p = (__be32 *) block;
+ int i, res = 0;
- for (p += 3, x = 0; x < 128; p--, x += 32) {
- u32 val = be32_to_cpup(p);
+ for (i = 0; i < 4; i++) {
+ if (counter[i] + 1 != 0)
+ return res + ffz(counter[i]++);
- if (!~val)
- continue;
-
- return x + ffz(val);
+ counter[i] = 0;
+ res += 32;
}
- return x;
+ /*
+ * If we get here, then res == 128 and we are incrementing the counter
+ * from all ones to all zeros. This means we must return index 127, i.e.
+ * the one corresponding to key2*{ 1,...,1 }.
+ */
+ return 127;
}
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the next_index() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
- struct rctx *rctx = skcipher_request_ctx(req);
- be128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
const int bs = LRW_BLOCK_SIZE;
- struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned offset;
- int err;
-
- subreq = &rctx->subreq;
- err = skcipher_walk_virt(&w, subreq, false);
-
- while (w.nbytes) {
- unsigned int avail = w.nbytes;
- be128 *wdst;
-
- wdst = w.dst.virt.addr;
-
- do {
- be128_xor(wdst, buf++, wdst);
- wdst++;
- } while ((avail -= bs) >= bs);
-
- err = skcipher_walk_done(&w, avail);
- }
-
- rctx->left -= subreq->cryptlen;
-
- if (err || !rctx->left)
- goto out;
-
- rctx->dst = rctx->dstbuf;
-
- scatterwalk_done(&w.out, 0, 1);
- sg = w.out.sg;
- offset = w.out.offset;
-
- if (rctx->dst != sg) {
- rctx->dst[0] = *sg;
- sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
- }
- rctx->dst[0].length -= offset - sg->offset;
- rctx->dst[0].offset = offset;
-
-out:
- return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
- struct rctx *rctx = skcipher_request_ctx(req);
struct priv *ctx = crypto_skcipher_ctx(tfm);
- be128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
- const int bs = LRW_BLOCK_SIZE;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ be128 t = rctx->t;
struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned cryptlen;
- unsigned offset;
- be128 *iv;
- bool more;
+ __be32 *iv;
+ u32 counter[4];
int err;
- subreq = &rctx->subreq;
- skcipher_request_set_tfm(subreq, tfm);
-
- cryptlen = subreq->cryptlen;
- more = rctx->left > cryptlen;
- if (!more)
- cryptlen = rctx->left;
+ if (second_pass) {
+ req = &rctx->subreq;
+ /* set to our TFM to enforce correct alignment: */
+ skcipher_request_set_tfm(req, tfm);
+ }
- skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
- cryptlen, req->iv);
+ err = skcipher_walk_virt(&w, req, false);
+ iv = (__be32 *)w.iv;
- err = skcipher_walk_virt(&w, subreq, false);
- iv = w.iv;
+ counter[0] = be32_to_cpu(iv[3]);
+ counter[1] = be32_to_cpu(iv[2]);
+ counter[2] = be32_to_cpu(iv[1]);
+ counter[3] = be32_to_cpu(iv[0]);
while (w.nbytes) {
unsigned int avail = w.nbytes;
@@ -236,188 +178,85 @@ static int pre_crypt(struct skcipher_request *req)
wdst = w.dst.virt.addr;
do {
- *buf++ = rctx->t;
- be128_xor(wdst++, &rctx->t, wsrc++);
+ be128_xor(wdst++, &t, wsrc++);
/* T <- I*Key2, using the optimization
* discussed in the specification */
- be128_xor(&rctx->t, &rctx->t,
- &ctx->mulinc[get_index128(iv)]);
- inc(iv);
+ be128_xor(&t, &t, &ctx->mulinc[next_index(counter)]);
} while ((avail -= bs) >= bs);
- err = skcipher_walk_done(&w, avail);
- }
-
- skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
- cryptlen, NULL);
-
- if (err || !more)
- goto out;
-
- rctx->src = rctx->srcbuf;
-
- scatterwalk_done(&w.in, 0, 1);
- sg = w.in.sg;
- offset = w.in.offset;
+ if (second_pass && w.nbytes == w.total) {
+ iv[0] = cpu_to_be32(counter[3]);
+ iv[1] = cpu_to_be32(counter[2]);
+ iv[2] = cpu_to_be32(counter[1]);
+ iv[3] = cpu_to_be32(counter[0]);
+ }
- if (rctx->src != sg) {
- rctx->src[0] = *sg;
- sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
+ err = skcipher_walk_done(&w, avail);
}
- rctx->src[0].length -= offset - sg->offset;
- rctx->src[0].offset = offset;
-out:
return err;
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
- gfp_t gfp;
-
- subreq = &rctx->subreq;
- skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
- gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- rctx->ext = NULL;
-
- subreq->cryptlen = LRW_BUFFER_SIZE;
- if (req->cryptlen > LRW_BUFFER_SIZE) {
- unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
- rctx->ext = kmalloc(n, gfp);
- if (rctx->ext)
- subreq->cryptlen = n;
- }
-
- rctx->src = req->src;
- rctx->dst = req->dst;
- rctx->left = req->cryptlen;
-
- /* calculate first value of T */
- memcpy(&rctx->t, req->iv, sizeof(rctx->t));
-
- /* T <- I*Key2 */
- gf128mul_64k_bbe(&rctx->t, ctx->table);
-
- return 0;
+ return xor_tweak(req, false);
}
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
-
- rctx->left = 0;
-
- if (rctx->ext)
- kzfree(rctx->ext);
+ return xor_tweak(req, true);
}
-static int do_encrypt(struct skcipher_request *req, int err)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
-
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_encrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
-
- exit_crypt(req);
- return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
- rctx = skcipher_request_ctx(req);
+ if (!err)
+ err = xor_tweak_post(req);
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_encrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
-
-out:
skcipher_request_complete(req, err);
}
-static int encrypt(struct skcipher_request *req)
-{
- return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
+ struct skcipher_request *subreq = &rctx->subreq;
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_decrypt(subreq) ?:
- post_crypt(req);
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ /* pass req->iv as IV (will be used by xor_tweak, ECB will ignore it) */
+ skcipher_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen, req->iv);
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
+ /* calculate first value of T */
+ memcpy(&rctx->t, req->iv, sizeof(rctx->t));
- exit_crypt(req);
- return err;
+ /* T <- I*Key2 */
+ gf128mul_64k_bbe(&rctx->t, ctx->table);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
{
- struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_decrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
-out:
- skcipher_request_complete(req, err);
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int decrypt(struct skcipher_request *req)
{
- return do_decrypt(req, init_crypt(req, decrypt_done));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int init_tfm(struct crypto_skcipher *tfm)
@@ -543,7 +382,7 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb)
inst->alg.base.cra_priority = alg->base.cra_priority;
inst->alg.base.cra_blocksize = LRW_BLOCK_SIZE;
inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
- (__alignof__(u64) - 1);
+ (__alignof__(__be32) - 1);
inst->alg.ivsize = LRW_BLOCK_SIZE;
inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) +
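
The rewritten lrw.c above replaces the old get_index128()/inc() pair with
next_index(), which reports the number of trailing one bits and increments the
counter in a single pass. A standalone userspace sketch of that helper (not
from the patch; it uses __builtin_ctz() of the complement in place of the
kernel's ffz(), an equivalence that holds for any non-all-ones word) verifies
the example from the comment:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int next_index(uint32_t *counter)
{
	int i, res = 0;

	for (i = 0; i < 4; i++) {
		/* lowest zero bit of the old value == count of trailing ones */
		if (counter[i] + 1 != 0)
			return res + __builtin_ctz(~counter[i]++);	/* ffz() */
		counter[i] = 0;
		res += 32;
	}
	return 127;	/* counter wrapped from all ones to all zeros */
}

int main(void)
{
	uint32_t counter[4] = { 0xFFFFFFFF, 0x1, 0x0, 0x0 };

	assert(next_index(counter) == 33);
	assert(counter[0] == 0x0 && counter[1] == 0x2);
	printf("next_index() example checks out\n");
	return 0;
}
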
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
deleted file mode 100644
index f14152147ce8..000000000000
--- a/crypto/mcryptd.c
+++ /dev/null
@@ -1,675 +0,0 @@
-/*
- * Software multibuffer async crypto daemon.
- *
- * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Adapted from crypto daemon.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-
-#include <crypto/algapi.h>
-#include <crypto/internal/hash.h>
-#include <crypto/internal/aead.h>
-#include <crypto/mcryptd.h>
-#include <crypto/crypto_wq.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <linux/sched/stat.h>
-#include <linux/slab.h>
-
-#define MCRYPTD_MAX_CPU_QLEN 100
-#define MCRYPTD_BATCH 9
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail);
-
-struct mcryptd_flush_list {
- struct list_head list;
- struct mutex lock;
-};
-
-static struct mcryptd_flush_list __percpu *mcryptd_flist;
-
-struct hashd_instance_ctx {
- struct crypto_ahash_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-static void mcryptd_queue_worker(struct work_struct *work);
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
-{
- struct mcryptd_flush_list *flist;
-
- if (!cstate->flusher_engaged) {
- /* put the flusher on the flush list */
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- mutex_lock(&flist->lock);
- list_add_tail(&cstate->flush_list, &flist->list);
- cstate->flusher_engaged = true;
- cstate->next_flush = jiffies + delay;
- queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
- &cstate->flush, delay);
- mutex_unlock(&flist->lock);
- }
-}
-EXPORT_SYMBOL(mcryptd_arm_flusher);
-
-static int mcryptd_init_queue(struct mcryptd_queue *queue,
- unsigned int max_cpu_qlen)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
- pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
- if (!queue->cpu_queue)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
- crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
- INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
- spin_lock_init(&cpu_queue->q_lock);
- }
- return 0;
-}
-
-static void mcryptd_fini_queue(struct mcryptd_queue *queue)
-{
- int cpu;
- struct mcryptd_cpu_queue *cpu_queue;
-
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- BUG_ON(cpu_queue->queue.qlen);
- }
- free_percpu(queue->cpu_queue);
-}
-
-static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
- struct crypto_async_request *request,
- struct mcryptd_hash_request_ctx *rctx)
-{
- int cpu, err;
- struct mcryptd_cpu_queue *cpu_queue;
-
- cpu_queue = raw_cpu_ptr(queue->cpu_queue);
- spin_lock(&cpu_queue->q_lock);
- cpu = smp_processor_id();
- rctx->tag.cpu = smp_processor_id();
-
- err = crypto_enqueue_request(&cpu_queue->queue, request);
- pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
- cpu, cpu_queue, request);
- spin_unlock(&cpu_queue->q_lock);
- queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
-
- return err;
-}
-
-/*
- * Try to opportunisticlly flush the partially completed jobs if
- * crypto daemon is the only task running.
- */
-static void mcryptd_opportunistic_flush(void)
-{
- struct mcryptd_flush_list *flist;
- struct mcryptd_alg_cstate *cstate;
-
- flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
- while (single_task_running()) {
- mutex_lock(&flist->lock);
- cstate = list_first_entry_or_null(&flist->list,
- struct mcryptd_alg_cstate, flush_list);
- if (!cstate || !cstate->flusher_engaged) {
- mutex_unlock(&flist->lock);
- return;
- }
- list_del(&cstate->flush_list);
- cstate->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- cstate->alg_state->flusher(cstate);
- }
-}
-
-/*
- * Called in workqueue context, do one real cryption work (via
- * req->complete) and reschedule itself if there are more work to
- * do.
- */
-static void mcryptd_queue_worker(struct work_struct *work)
-{
- struct mcryptd_cpu_queue *cpu_queue;
- struct crypto_async_request *req, *backlog;
- int i;
-
- /*
- * Need to loop through more than once for multi-buffer to
- * be effective.
- */
-
- cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
- i = 0;
- while (i < MCRYPTD_BATCH || single_task_running()) {
-
- spin_lock_bh(&cpu_queue->q_lock);
- backlog = crypto_get_backlog(&cpu_queue->queue);
- req = crypto_dequeue_request(&cpu_queue->queue);
- spin_unlock_bh(&cpu_queue->q_lock);
-
- if (!req) {
- mcryptd_opportunistic_flush();
- return;
- }
-
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
- if (!cpu_queue->queue.qlen)
- return;
- ++i;
- }
- if (cpu_queue->queue.qlen)
- queue_work_on(smp_processor_id(), kcrypto_wq, &cpu_queue->work);
-}
-
-void mcryptd_flusher(struct work_struct *__work)
-{
- struct mcryptd_alg_cstate *alg_cpu_state;
- struct mcryptd_alg_state *alg_state;
- struct mcryptd_flush_list *flist;
- int cpu;
-
- cpu = smp_processor_id();
- alg_cpu_state = container_of(to_delayed_work(__work),
- struct mcryptd_alg_cstate, flush);
- alg_state = alg_cpu_state->alg_state;
- if (alg_cpu_state->cpu != cpu)
- pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
- cpu, alg_cpu_state->cpu);
-
- if (alg_cpu_state->flusher_engaged) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- mutex_lock(&flist->lock);
- list_del(&alg_cpu_state->flush_list);
- alg_cpu_state->flusher_engaged = false;
- mutex_unlock(&flist->lock);
- alg_state->flusher(alg_cpu_state);
- }
-}
-EXPORT_SYMBOL_GPL(mcryptd_flusher);
-
-static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
-
- return ictx->queue;
-}
-
-static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
-{
- char *p;
- struct crypto_instance *inst;
- int err;
-
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
-
- inst = (void *)(p + head);
-
- err = -ENAMETOOLONG;
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- goto out_free_inst;
-
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
-
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
-
-out:
- return p;
-
-out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
-}
-
-static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return false;
-
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
-
- if (*type & *mask & CRYPTO_ALG_INTERNAL)
- return true;
- else
- return false;
-}
-
-static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_ahash_spawn *spawn = &ictx->spawn;
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_ahash *hash;
-
- hash = crypto_spawn_ahash(spawn);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
-
- ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct mcryptd_hash_request_ctx) +
- crypto_ahash_reqsize(hash));
- return 0;
-}
-
-static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
-
- crypto_free_ahash(ctx->child);
-}
-
-static int mcryptd_hash_setkey(struct crypto_ahash *parent,
- const u8 *key, unsigned int keylen)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_ahash *child = ctx->child;
- int err;
-
- crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_ahash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
-}
-
-static int mcryptd_hash_enqueue(struct ahash_request *req,
- crypto_completion_t complete)
-{
- int ret;
-
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct mcryptd_queue *queue =
- mcryptd_get_queue(crypto_ahash_tfm(tfm));
-
- rctx->complete = req->base.complete;
- req->base.complete = complete;
-
- ret = mcryptd_enqueue_request(queue, &req->base, rctx);
-
- return ret;
-}
-
-static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_init_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_init);
-}
-
-static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_update(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_update_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_update);
-}
-
-static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- rctx->out = req->result;
- err = crypto_ahash_final(&rctx->areq);
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_final_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_final);
-}
-
-static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
-{
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
- rctx->out = req->result;
- err = crypto_ahash_finup(&rctx->areq);
-
- if (err) {
- req->base.complete = rctx->complete;
- goto out;
- }
-
- return;
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
-}
-
-static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
-{
- struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_ahash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct ahash_request *desc = &rctx->areq;
-
- if (unlikely(err == -EINPROGRESS))
- goto out;
-
- ahash_request_set_tfm(desc, child);
- ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
- rctx->complete, req_async);
-
- rctx->out = req->result;
- err = crypto_ahash_init(desc) ?: crypto_ahash_finup(desc);
-
-out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
-}
-
-static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
-{
- return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
-}
-
-static int mcryptd_hash_export(struct ahash_request *req, void *out)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_export(&rctx->areq, out);
-}
-
-static int mcryptd_hash_import(struct ahash_request *req, const void *in)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
-
- return crypto_ahash_import(&rctx->areq, in);
-}
-
-static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
- struct mcryptd_queue *queue)
-{
- struct hashd_instance_ctx *ctx;
- struct ahash_instance *inst;
- struct hash_alg_common *halg;
- struct crypto_alg *alg;
- u32 type = 0;
- u32 mask = 0;
- int err;
-
- if (!mcryptd_check_internal(tb, &type, &mask))
- return -EINVAL;
-
- halg = ahash_attr_alg(tb[1], type, mask);
- if (IS_ERR(halg))
- return PTR_ERR(halg);
-
- alg = &halg->base;
- pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
- inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
-
- ctx = ahash_instance_ctx(inst);
- ctx->queue = queue;
-
- err = crypto_init_ahash_spawn(&ctx->spawn, halg,
- ahash_crypto_instance(inst));
- if (err)
- goto out_free_inst;
-
- inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->cra_flags & (CRYPTO_ALG_INTERNAL |
- CRYPTO_ALG_OPTIONAL_KEY));
-
- inst->alg.halg.digestsize = halg->digestsize;
- inst->alg.halg.statesize = halg->statesize;
- inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
-
- inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;
-
- inst->alg.init = mcryptd_hash_init_enqueue;
- inst->alg.update = mcryptd_hash_update_enqueue;
- inst->alg.final = mcryptd_hash_final_enqueue;
- inst->alg.finup = mcryptd_hash_finup_enqueue;
- inst->alg.export = mcryptd_hash_export;
- inst->alg.import = mcryptd_hash_import;
- if (crypto_hash_alg_has_setkey(halg))
- inst->alg.setkey = mcryptd_hash_setkey;
- inst->alg.digest = mcryptd_hash_digest_enqueue;
-
- err = ahash_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_ahash(&ctx->spawn);
-out_free_inst:
- kfree(inst);
- }
-
-out_put_alg:
- crypto_mod_put(alg);
- return err;
-}
-
-static struct mcryptd_queue mqueue;
-
-static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
-{
- struct crypto_attr_type *algt;
-
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
-
- switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_DIGEST:
- return mcryptd_create_hash(tmpl, tb, &mqueue);
- break;
- }
-
- return -EINVAL;
-}
-
-static void mcryptd_free(struct crypto_instance *inst)
-{
- struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
-
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_ahash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
-}
-
-static struct crypto_template mcryptd_tmpl = {
- .name = "mcryptd",
- .create = mcryptd_create,
- .free = mcryptd_free,
- .module = THIS_MODULE,
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
-{
- char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_ahash *tfm;
-
- if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
-
- return __mcryptd_ahash_cast(tfm);
-}
-EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
-
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
-{
- struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
-
- return ctx->child;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_child);
-
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
-{
- struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->areq;
-}
-EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);
-
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
-{
- crypto_free_ahash(&tfm->base);
-}
-EXPORT_SYMBOL_GPL(mcryptd_free_ahash);
-
-static int __init mcryptd_init(void)
-{
- int err, cpu;
- struct mcryptd_flush_list *flist;
-
- mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
- for_each_possible_cpu(cpu) {
- flist = per_cpu_ptr(mcryptd_flist, cpu);
- INIT_LIST_HEAD(&flist->list);
- mutex_init(&flist->lock);
- }
-
- err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
- if (err) {
- free_percpu(mcryptd_flist);
- return err;
- }
-
- err = crypto_register_template(&mcryptd_tmpl);
- if (err) {
- mcryptd_fini_queue(&mqueue);
- free_percpu(mcryptd_flist);
- }
-
- return err;
-}
-
-static void __exit mcryptd_exit(void)
-{
- mcryptd_fini_queue(&mqueue);
- crypto_unregister_template(&mcryptd_tmpl);
- free_percpu(mcryptd_flist);
-}
-
-subsys_initcall(mcryptd_init);
-module_exit(mcryptd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
-MODULE_ALIAS_CRYPTO("mcryptd");
diff --git a/crypto/morus1280.c b/crypto/morus1280.c
index d057cf5ac4a8..3889c188f266 100644
--- a/crypto/morus1280.c
+++ b/crypto/morus1280.c
@@ -385,14 +385,11 @@ static void crypto_morus1280_final(struct morus1280_state *state,
struct morus1280_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
- u64 assocbits = assoclen * 8;
- u64 cryptbits = cryptlen * 8;
-
struct morus1280_block tmp;
unsigned int i;
- tmp.words[0] = cpu_to_le64(assocbits);
- tmp.words[1] = cpu_to_le64(cryptbits);
+ tmp.words[0] = assoclen * 8;
+ tmp.words[1] = cryptlen * 8;
tmp.words[2] = 0;
tmp.words[3] = 0;
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 1ca76e54281b..da06ec2f6a80 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -384,21 +384,13 @@ static void crypto_morus640_final(struct morus640_state *state,
struct morus640_block *tag_xor,
u64 assoclen, u64 cryptlen)
{
- u64 assocbits = assoclen * 8;
- u64 cryptbits = cryptlen * 8;
-
- u32 assocbits_lo = (u32)assocbits;
- u32 assocbits_hi = (u32)(assocbits >> 32);
- u32 cryptbits_lo = (u32)cryptbits;
- u32 cryptbits_hi = (u32)(cryptbits >> 32);
-
struct morus640_block tmp;
unsigned int i;
- tmp.words[0] = cpu_to_le32(assocbits_lo);
- tmp.words[1] = cpu_to_le32(assocbits_hi);
- tmp.words[2] = cpu_to_le32(cryptbits_lo);
- tmp.words[3] = cpu_to_le32(cryptbits_hi);
+ tmp.words[0] = lower_32_bits(assoclen * 8);
+ tmp.words[1] = upper_32_bits(assoclen * 8);
+ tmp.words[2] = lower_32_bits(cryptlen * 8);
+ tmp.words[3] = upper_32_bits(cryptlen * 8);
for (i = 0; i < MORUS_BLOCK_WORDS; i++)
state->s[4].words[i] ^= state->s[0].words[i];
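
As in morus1280.c, the explicit cpu_to_le32() conversions are dropped and the
64-bit bit counts are split with lower_32_bits()/upper_32_bits(). Those helpers
are plain truncation and shift; a userspace sketch of the same split, assuming
the macros behave as defined in <linux/kernel.h>:

#include <assert.h>
#include <stdint.h>

/* userspace stand-ins for the kernel's <linux/kernel.h> helpers */
#define lower_32_bits(n) ((uint32_t)(n))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	uint64_t assocbits = 0x0123456789abcdefULL;	/* e.g. assoclen * 8 */

	assert(lower_32_bits(assocbits) == 0x89abcdefU);
	assert(upper_32_bits(assocbits) == 0x01234567U);
	return 0;
}
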
diff --git a/crypto/ofb.c b/crypto/ofb.c
new file mode 100644
index 000000000000..886631708c5e
--- /dev/null
+++ b/crypto/ofb.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * OFB: Output FeedBack mode
+ *
+ * Copyright (C) 2018 ARM Limited or its affiliates.
+ * All rights reserved.
+ *
+ * Based loosely on public domain code gleaned from libtomcrypt
+ * (https://github.com/libtom/libtomcrypt).
+ */
+
+#include <crypto/algapi.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+struct crypto_ofb_ctx {
+ struct crypto_cipher *child;
+ int cnt;
+};
+
+
+static int crypto_ofb_setkey(struct crypto_skcipher *parent, const u8 *key,
+ unsigned int keylen)
+{
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(parent);
+ struct crypto_cipher *child = ctx->child;
+ int err;
+
+ crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
+ crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
+ CRYPTO_TFM_REQ_MASK);
+ err = crypto_cipher_setkey(child, key, keylen);
+ crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
+ CRYPTO_TFM_RES_MASK);
+ return err;
+}
+
+static int crypto_ofb_encrypt_segment(struct crypto_ofb_ctx *ctx,
+ struct skcipher_walk *walk,
+ struct crypto_cipher *tfm)
+{
+ int bsize = crypto_cipher_blocksize(tfm);
+ int nbytes = walk->nbytes;
+
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+ u8 *iv = walk->iv;
+
+ do {
+ if (ctx->cnt == bsize) {
+ if (nbytes < bsize)
+ break;
+ crypto_cipher_encrypt_one(tfm, iv, iv);
+ ctx->cnt = 0;
+ }
+ *dst = *src ^ iv[ctx->cnt];
+ src++;
+ dst++;
+ ctx->cnt++;
+ } while (--nbytes);
+ return nbytes;
+}
+
+static int crypto_ofb_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_walk walk;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ unsigned int bsize;
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *child = ctx->child;
+ int ret = 0;
+
+ bsize = crypto_cipher_blocksize(child);
+ ctx->cnt = bsize;
+
+ ret = skcipher_walk_virt(&walk, req, false);
+
+ while (walk.nbytes) {
+ ret = crypto_ofb_encrypt_segment(ctx, &walk, child);
+ ret = skcipher_walk_done(&walk, ret);
+ }
+
+ return ret;
+}
+
+/* OFB encrypt and decrypt are identical */
+static int crypto_ofb_decrypt(struct skcipher_request *req)
+{
+ return crypto_ofb_encrypt(req);
+}
+
+static int crypto_ofb_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct skcipher_instance *inst = skcipher_alg_instance(tfm);
+ struct crypto_spawn *spawn = skcipher_instance_ctx(inst);
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct crypto_cipher *cipher;
+
+ cipher = crypto_spawn_cipher(spawn);
+ if (IS_ERR(cipher))
+ return PTR_ERR(cipher);
+
+ ctx->child = cipher;
+ return 0;
+}
+
+static void crypto_ofb_exit_tfm(struct crypto_skcipher *tfm)
+{
+ struct crypto_ofb_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ crypto_free_cipher(ctx->child);
+}
+
+static void crypto_ofb_free(struct skcipher_instance *inst)
+{
+ crypto_drop_skcipher(skcipher_instance_ctx(inst));
+ kfree(inst);
+}
+
+static int crypto_ofb_create(struct crypto_template *tmpl, struct rtattr **tb)
+{
+ struct skcipher_instance *inst;
+ struct crypto_attr_type *algt;
+ struct crypto_spawn *spawn;
+ struct crypto_alg *alg;
+ u32 mask;
+ int err;
+
+ err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER);
+ if (err)
+ return err;
+
+ inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ algt = crypto_get_attr_type(tb);
+ err = PTR_ERR(algt);
+ if (IS_ERR(algt))
+ goto err_free_inst;
+
+ mask = CRYPTO_ALG_TYPE_MASK |
+ crypto_requires_off(algt->type, algt->mask,
+ CRYPTO_ALG_NEED_FALLBACK);
+
+ alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask);
+ err = PTR_ERR(alg);
+ if (IS_ERR(alg))
+ goto err_free_inst;
+
+ spawn = skcipher_instance_ctx(inst);
+ err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst),
+ CRYPTO_ALG_TYPE_MASK);
+ crypto_mod_put(alg);
+ if (err)
+ goto err_free_inst;
+
+ err = crypto_inst_setname(skcipher_crypto_instance(inst), "ofb", alg);
+ if (err)
+ goto err_drop_spawn;
+
+ inst->alg.base.cra_priority = alg->cra_priority;
+ inst->alg.base.cra_blocksize = alg->cra_blocksize;
+ inst->alg.base.cra_alignmask = alg->cra_alignmask;
+
+ /* We access the data as u32s when xoring. */
+ inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
+
+ inst->alg.ivsize = alg->cra_blocksize;
+ inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize;
+ inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize;
+
+ inst->alg.base.cra_ctxsize = sizeof(struct crypto_ofb_ctx);
+
+ inst->alg.init = crypto_ofb_init_tfm;
+ inst->alg.exit = crypto_ofb_exit_tfm;
+
+ inst->alg.setkey = crypto_ofb_setkey;
+ inst->alg.encrypt = crypto_ofb_encrypt;
+ inst->alg.decrypt = crypto_ofb_decrypt;
+
+ inst->free = crypto_ofb_free;
+
+ err = skcipher_register_instance(tmpl, inst);
+ if (err)
+ goto err_drop_spawn;
+
+out:
+ return err;
+
+err_drop_spawn:
+ crypto_drop_spawn(spawn);
+err_free_inst:
+ kfree(inst);
+ goto out;
+}
+
+static struct crypto_template crypto_ofb_tmpl = {
+ .name = "ofb",
+ .create = crypto_ofb_create,
+ .module = THIS_MODULE,
+};
+
+static int __init crypto_ofb_module_init(void)
+{
+ return crypto_register_template(&crypto_ofb_tmpl);
+}
+
+static void __exit crypto_ofb_module_exit(void)
+{
+ crypto_unregister_template(&crypto_ofb_tmpl);
+}
+
+module_init(crypto_ofb_module_init);
+module_exit(crypto_ofb_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("OFB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("ofb");
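
The new "ofb" template registers a plain skcipher, so it is consumed through
the usual skcipher API. Below is a minimal in-kernel sketch of a one-shot user,
assuming an AES implementation is available to instantiate "ofb(aes)"; the
wrapper function and its buffer handling are illustrative, not part of the
patch.

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* hypothetical helper: encrypt `buf` in place (buf must not be on the stack) */
static int ofb_encrypt_buf(u8 *buf, unsigned int len, const u8 *key,
			   unsigned int keylen, u8 *iv /* 16 bytes for AES */)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("ofb(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}

Since OFB XORs the plaintext with a key stream, decryption calls the same code
path, which is why crypto_ofb_decrypt() above simply forwards to the encrypt
routine.
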
diff --git a/crypto/rng.c b/crypto/rng.c
index b4a618668161..547f16ecbfb0 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -50,6 +50,7 @@ int crypto_rng_reset(struct crypto_rng *tfm, const u8 *seed, unsigned int slen)
}
err = crypto_rng_alg(tfm)->seed(tfm, seed, slen);
+ crypto_stat_rng_seed(tfm, err);
out:
kzfree(buf);
return err;
diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
index 9893dbfc1af4..812476e46821 100644
--- a/crypto/rsa-pkcs1pad.c
+++ b/crypto/rsa-pkcs1pad.c
@@ -261,15 +261,6 @@ static int pkcs1pad_encrypt(struct akcipher_request *req)
pkcs1pad_sg_set_buf(req_ctx->in_sg, req_ctx->in_buf,
ctx->key_size - 1 - req->src_len, req->src);
- req_ctx->out_buf = kmalloc(ctx->key_size, GFP_KERNEL);
- if (!req_ctx->out_buf) {
- kfree(req_ctx->in_buf);
- return -ENOMEM;
- }
-
- pkcs1pad_sg_set_buf(req_ctx->out_sg, req_ctx->out_buf,
- ctx->key_size, NULL);
-
akcipher_request_set_tfm(&req_ctx->child_req, ctx->child);
akcipher_request_set_callback(&req_ctx->child_req, req->base.flags,
pkcs1pad_encrypt_sign_complete_cb, req);
diff --git a/crypto/seqiv.c b/crypto/seqiv.c
index 39dbf2f7e5f5..64a412be255e 100644
--- a/crypto/seqiv.c
+++ b/crypto/seqiv.c
@@ -73,9 +73,9 @@ static int seqiv_aead_encrypt(struct aead_request *req)
info = req->iv;
if (req->src != req->dst) {
- SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
- skcipher_request_set_tfm(nreq, ctx->sknull);
+ skcipher_request_set_sync_tfm(nreq, ctx->sknull);
skcipher_request_set_callback(nreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(nreq, req->src, req->dst,
diff --git a/crypto/shash.c b/crypto/shash.c
index 5d732c6bb4b2..d21f04d70dce 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -73,13 +73,6 @@ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
}
EXPORT_SYMBOL_GPL(crypto_shash_setkey);
-static inline unsigned int shash_align_buffer_size(unsigned len,
- unsigned long mask)
-{
- typedef u8 __aligned_largest u8_aligned;
- return len + (mask & ~(__alignof__(u8_aligned) - 1));
-}
-
static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
@@ -88,11 +81,17 @@ static int shash_update_unaligned(struct shash_desc *desc, const u8 *data,
unsigned long alignmask = crypto_shash_alignmask(tfm);
unsigned int unaligned_len = alignmask + 1 -
((unsigned long)data & alignmask);
- u8 ubuf[shash_align_buffer_size(unaligned_len, alignmask)]
- __aligned_largest;
+ /*
+ * We cannot count on __aligned() working for large values:
+ * https://patchwork.kernel.org/patch/9507697/
+ */
+ u8 ubuf[MAX_ALGAPI_ALIGNMASK * 2];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
+ if (WARN_ON(buf + unaligned_len > ubuf + sizeof(ubuf)))
+ return -EINVAL;
+
if (unaligned_len > len)
unaligned_len = len;
@@ -124,11 +123,17 @@ static int shash_final_unaligned(struct shash_desc *desc, u8 *out)
unsigned long alignmask = crypto_shash_alignmask(tfm);
struct shash_alg *shash = crypto_shash_alg(tfm);
unsigned int ds = crypto_shash_digestsize(tfm);
- u8 ubuf[shash_align_buffer_size(ds, alignmask)]
- __aligned_largest;
+ /*
+ * We cannot count on __aligned() working for large values:
+ * https://patchwork.kernel.org/patch/9507697/
+ */
+ u8 ubuf[MAX_ALGAPI_ALIGNMASK + HASH_MAX_DIGESTSIZE];
u8 *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);
int err;
+ if (WARN_ON(buf + ds > ubuf + sizeof(ubuf)))
+ return -EINVAL;
+
err = shash->final(desc, buf);
if (err)
goto out;
@@ -458,9 +463,9 @@ static int shash_prepare_alg(struct shash_alg *alg)
{
struct crypto_alg *base = &alg->base;
- if (alg->digestsize > PAGE_SIZE / 8 ||
- alg->descsize > PAGE_SIZE / 8 ||
- alg->statesize > PAGE_SIZE / 8)
+ if (alg->digestsize > HASH_MAX_DIGESTSIZE ||
+ alg->descsize > HASH_MAX_DESCSIZE ||
+ alg->statesize > HASH_MAX_STATESIZE)
return -EINVAL;
base->cra_type = &crypto_shash_type;
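
The shash.c change above trades the old VLA for a fixed-size stack buffer plus
PTR_ALIGN(). A userspace sketch of the alignment arithmetic it relies on
(PTR_ALIGN() reimplemented here, and MAX_ALGAPI_ALIGNMASK assumed to be 63 as
in <linux/crypto.h> at the time of this series):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define MAX_ALGAPI_ALIGNMASK 63	/* assumed value */
/* userspace stand-in for the kernel's PTR_ALIGN() */
#define PTR_ALIGN(p, a) \
	((uint8_t *)(((uintptr_t)(p) + ((uintptr_t)(a) - 1)) & \
		     ~((uintptr_t)(a) - 1)))

int main(void)
{
	uint8_t ubuf[MAX_ALGAPI_ALIGNMASK * 2];
	unsigned long alignmask = MAX_ALGAPI_ALIGNMASK;	/* worst case */
	uint8_t *buf = PTR_ALIGN(&ubuf[0], alignmask + 1);

	/* rounding up skips at most `alignmask` leading bytes... */
	assert(buf - ubuf <= (ptrdiff_t)alignmask);
	/*
	 * ...so at least MAX_ALGAPI_ALIGNMASK usable bytes remain, which
	 * covers the at-most-alignmask unaligned bytes handled per call;
	 * the WARN_ON() added in the patch guards the same invariant.
	 */
	assert(sizeof(ubuf) - (buf - ubuf) >= MAX_ALGAPI_ALIGNMASK);
	return 0;
}
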
diff --git a/crypto/skcipher.c b/crypto/skcipher.c
index 0bd8c6caa498..4caab81d2d02 100644
--- a/crypto/skcipher.c
+++ b/crypto/skcipher.c
@@ -949,6 +949,30 @@ struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
+ const char *alg_name, u32 type, u32 mask)
+{
+ struct crypto_skcipher *tfm;
+
+ /* Only sync algorithms allowed. */
+ mask |= CRYPTO_ALG_ASYNC;
+
+ tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
+
+ /*
+ * Make sure we do not allocate something that might get used with
+ * an on-stack request: check the request size.
+ */
+ if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
+ MAX_SYNC_SKCIPHER_REQSIZE)) {
+ crypto_free_skcipher(tfm);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return (struct crypto_sync_skcipher *)tfm;
+}
+EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
+
int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask)
{
return crypto_type_has_alg(alg_name, &crypto_skcipher_type2,
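
crypto_alloc_sync_skcipher() is what the echainiv, gcm and seqiv conversions
earlier in this patch rely on: because a sync tfm's request size is bounded by
MAX_SYNC_SKCIPHER_REQSIZE at allocation time, SYNC_SKCIPHER_REQUEST_ON_STACK()
can declare the request on the stack without a VLA. A minimal sketch of the
pattern, mirroring those call sites ("ecb(cipher_null)" is the same null
transform they use; the wrapper function itself is hypothetical):

#include <crypto/skcipher.h>
#include <linux/err.h>

/* hypothetical helper: copy src to dst through the null cipher */
static int null_copy(struct scatterlist *src, struct scatterlist *dst,
		     unsigned int len)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("ecb(cipher_null)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* no VLA: the request size was bounded at allocation time */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, NULL);
		err = crypto_skcipher_encrypt(req);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
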
diff --git a/crypto/speck.c b/crypto/speck.c
deleted file mode 100644
index 58aa9f7f91f7..000000000000
--- a/crypto/speck.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Speck: a lightweight block cipher
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Speck has 10 variants, including 5 block sizes. For now we only implement
- * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
- * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
- * and a key size of K bits. The Speck128 variants are believed to be the most
- * secure variants, and they use the same block size and key sizes as AES. The
- * Speck64 variants are less secure, but on 32-bit processors are usually
- * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
- * secure and/or not as well suited for implementation on either 32-bit or
- * 64-bit processors, so are omitted.
- *
- * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * In a correspondence, the Speck designers have also clarified that the words
- * should be interpreted in little-endian format, and the words should be
- * ordered such that the first word of each block is 'y' rather than 'x', and
- * the first key word (rather than the last) becomes the first round key.
- */
-
-#include <asm/unaligned.h>
-#include <crypto/speck.h>
-#include <linux/bitops.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-/* Speck128 */
-
-static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
-{
- *x = ror64(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol64(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
-{
- *y ^= *x;
- *y = ror64(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol64(*x, 8);
-}
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck128_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
-
-static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck128_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
-
-static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u64 l[3];
- u64 k;
- int i;
-
- switch (keylen) {
- case SPECK128_128_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- ctx->nrounds = SPECK128_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[0], &k, i);
- }
- break;
- case SPECK128_192_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- ctx->nrounds = SPECK128_192_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK128_256_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- l[2] = get_unaligned_le64(key + 24);
- ctx->nrounds = SPECK128_256_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
-
-static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Speck64 */
-
-static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
-{
- *x = ror32(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol32(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
-{
- *y ^= *x;
- *y = ror32(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol32(*x, 8);
-}
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck64_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
-
-static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck64_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
-
-static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u32 l[3];
- u32 k;
- int i;
-
- switch (keylen) {
- case SPECK64_96_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- ctx->nrounds = SPECK64_96_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK64_128_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- l[2] = get_unaligned_le32(key + 12);
- ctx->nrounds = SPECK64_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
-
-static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Algorithm definitions */
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "speck128",
- .cra_driver_name = "speck128-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK128_128_KEY_SIZE,
- .cia_max_keysize = SPECK128_256_KEY_SIZE,
- .cia_setkey = speck128_setkey,
- .cia_encrypt = speck128_encrypt,
- .cia_decrypt = speck128_decrypt
- }
- }
- }, {
- .cra_name = "speck64",
- .cra_driver_name = "speck64-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK64_96_KEY_SIZE,
- .cia_max_keysize = SPECK64_128_KEY_SIZE,
- .cia_setkey = speck64_setkey,
- .cia_encrypt = speck64_encrypt,
- .cia_decrypt = speck64_decrypt
- }
- }
- }
-};
-
-static int __init speck_module_init(void)
-{
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_module_init);
-module_exit(speck_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (generic)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("speck128");
-MODULE_ALIAS_CRYPTO("speck128-generic");
-MODULE_ALIAS_CRYPTO("speck64");
-MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index bdde95e8d369..c20c9f5c18f2 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -76,8 +76,7 @@ static char *check[] = {
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "tnepres", "xeta", "fcrypt",
"camellia", "seed", "salsa20", "rmd128", "rmd160", "rmd256", "rmd320",
- "lzo", "cts", "zlib", "sha3-224", "sha3-256", "sha3-384", "sha3-512",
- NULL
+ "lzo", "cts", "sha3-224", "sha3-256", "sha3-384", "sha3-512", NULL
};
static u32 block_sizes[] = { 16, 64, 256, 1024, 8192, 0 };
@@ -1103,6 +1102,9 @@ static void test_ahash_speed_common(const char *algo, unsigned int secs,
break;
}
+ if (speed[i].klen)
+ crypto_ahash_setkey(tfm, tvmem[0], speed[i].klen);
+
pr_info("test%3u "
"(%5u byte blocks,%5u bytes per update,%4u updates): ",
i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
@@ -1733,6 +1735,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("xts(aes)");
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
+ ret += tcrypt_test("ofb(aes)");
break;
case 11:
@@ -1878,10 +1881,6 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("ecb(seed)");
break;
- case 44:
- ret += tcrypt_test("zlib");
- break;
-
case 45:
ret += tcrypt_test("rfc4309(ccm(aes))");
break;
@@ -2033,6 +2032,8 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
break;
case 191:
ret += tcrypt_test("ecb(sm4)");
+ ret += tcrypt_test("cbc(sm4)");
+ ret += tcrypt_test("ctr(sm4)");
break;
case 200:
test_cipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
@@ -2282,6 +2283,20 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
num_mb);
break;
+ case 218:
+ test_cipher_speed("ecb(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ecb(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("cbc(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("cbc(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ctr(sm4)", ENCRYPT, sec, NULL, 0,
+ speed_template_16);
+ test_cipher_speed("ctr(sm4)", DECRYPT, sec, NULL, 0,
+ speed_template_16);
+ break;
case 300:
if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template);
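
For reference, the new SM4 speed tests added as case 218 can be exercised with
the usual tcrypt invocation, e.g. "modprobe tcrypt mode=218 sec=1" (mode and
sec are existing tcrypt module parameters); tcrypt deliberately fails to stay
loaded once the tests have run, so results are read from the kernel log.
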
diff --git a/crypto/tcrypt.h b/crypto/tcrypt.h
index f0bfee1bb293..d09ea8b10b4f 100644
--- a/crypto/tcrypt.h
+++ b/crypto/tcrypt.h
@@ -51,6 +51,7 @@ static struct cipher_speed_template des3_speed_template[] = {
* Cipher speed tests
*/
static u8 speed_template_8[] = {8, 0};
+static u8 speed_template_16[] = {16, 0};
static u8 speed_template_24[] = {24, 0};
static u8 speed_template_8_16[] = {8, 16, 0};
static u8 speed_template_8_32[] = {8, 32, 0};
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index a1d42245082a..b1f79c6bf409 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1400,8 +1400,8 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
- memset(output, 0, sizeof(COMP_BUF_SIZE));
- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
+ memset(output, 0, COMP_BUF_SIZE);
+ memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = ctemplate[i].inlen;
ret = crypto_comp_compress(tfm, ctemplate[i].input,
@@ -1445,7 +1445,7 @@ static int test_comp(struct crypto_comp *tfm,
int ilen;
unsigned int dlen = COMP_BUF_SIZE;
- memset(decomp_output, 0, sizeof(COMP_BUF_SIZE));
+ memset(decomp_output, 0, COMP_BUF_SIZE);
ilen = dtemplate[i].inlen;
ret = crypto_comp_decompress(tfm, dtemplate[i].input,
@@ -2662,6 +2662,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_cbc_tv_template)
},
}, {
+ .alg = "cbc(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_cbc_tv_template)
+ }
+ }, {
.alg = "cbc(twofish)",
.test = alg_test_skcipher,
.suite = {
@@ -2785,6 +2791,12 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_ctr_tv_template)
}
}, {
+ .alg = "ctr(sm4)",
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(sm4_ctr_tv_template)
+ }
+ }, {
.alg = "ctr(twofish)",
.test = alg_test_skcipher,
.suite = {
@@ -3038,18 +3050,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(sm4_tv_template)
}
}, {
- .alg = "ecb(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck128_tv_template)
- }
- }, {
- .alg = "ecb(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck64_tv_template)
- }
- }, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
@@ -3577,18 +3577,6 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(serpent_xts_tv_template)
}
}, {
- .alg = "xts(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck128_xts_tv_template)
- }
- }, {
- .alg = "xts(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = __VECS(speck64_xts_tv_template)
- }
- }, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 173111c70746..1fe7b97ba03f 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -24,8 +24,6 @@
#ifndef _CRYPTO_TESTMGR_H
#define _CRYPTO_TESTMGR_H
-#include <linux/netlink.h>
-
#define MAX_DIGEST_SIZE 64
#define MAX_TAP 8
@@ -10133,12 +10131,13 @@ static const struct cipher_testvec serpent_xts_tv_template[] = {
};
/*
- * SM4 test vector taken from the draft RFC
- * https://tools.ietf.org/html/draft-crypto-sm4-00#ref-GBT.32907-2016
+ * SM4 test vectors taken from the "The SM4 Blockcipher Algorithm And Its
+ * Modes Of Operations" draft RFC
+ * https://datatracker.ietf.org/doc/draft-ribose-cfrg-sm4
*/
static const struct cipher_testvec sm4_tv_template[] = {
- { /* SM4 Appendix A: Example Calculations. Example 1. */
+ { /* GB/T 32907-2016 Example 1. */
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
@@ -10147,10 +10146,7 @@ static const struct cipher_testvec sm4_tv_template[] = {
.ctext = "\x68\x1E\xDF\x34\xD2\x06\x96\x5E"
"\x86\xB3\xE9\x4F\x53\x6E\x42\x46",
.len = 16,
- }, { /*
- * SM4 Appendix A: Example Calculations.
- * Last 10 iterations of Example 2.
- */
+ }, { /* Last 10 iterations of GB/T 32907-2016 Example 2. */
.key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
@@ -10195,744 +10191,116 @@ static const struct cipher_testvec sm4_tv_template[] = {
"\x59\x52\x98\xc7\xc6\xfd\x27\x1f"
"\x4\x2\xf8\x4\xc3\x3d\x3f\x66",
.len = 160
+ }, { /* A.2.1.1 SM4-ECB Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\x5e\xc8\x14\x3d\xe5\x09\xcf\xf7"
+ "\xb5\x17\x9f\x8f\x47\x4b\x86\x19"
+ "\x2f\x1d\x30\x5a\x7f\xb1\x7d\xf9"
+ "\x85\xf8\x1c\x84\x82\x19\x23\x04",
+ .len = 32,
+ }, { /* A.2.1.2 SM4-ECB Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .ctext = "\xC5\x87\x68\x97\xE4\xA5\x9B\xBB"
+ "\xA7\x2A\x10\xC8\x38\x72\x24\x5B"
+ "\x12\xDD\x90\xBC\x2D\x20\x06\x92"
+ "\xB5\x29\xA4\x15\x5A\xC9\xE6\x00",
+ .len = 32,
}
};
-/*
- * Speck test vectors taken from the original paper:
- * "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * Note that the paper does not make byte and word order clear. But it was
- * confirmed with the authors that the intended orders are little endian byte
- * order and (y, x) word order. Equivalently, the printed test vectors, when
- * looking at only the bytes (ignoring the whitespace that divides them into
- * words), are backwards: the left-most byte is actually the one with the
- * highest memory address, while the right-most byte is actually the one with
- * the lowest memory address.
- */
-
-static const struct cipher_testvec speck128_tv_template[] = {
- { /* Speck128/128 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+static const struct cipher_testvec sm4_cbc_tv_template[] = {
+ { /* A.2.2.1 SM4-CBC Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
- .ptext = "\x20\x6d\x61\x64\x65\x20\x69\x74"
- "\x20\x65\x71\x75\x69\x76\x61\x6c",
- .ctext = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
- "\x65\x32\x78\x79\x51\x98\x5d\xa6",
- .len = 16,
- }, { /* Speck128/192 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17",
- .klen = 24,
- .ptext = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
- "\x68\x69\x65\x66\x20\x48\x61\x72",
- .ctext = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
- "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
- .len = 16,
- }, { /* Speck128/256 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .ptext = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
- "\x49\x6e\x20\x74\x68\x6f\x73\x65",
- .ctext = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
- "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
- .len = 16,
- },
-};
-
-/*
- * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
- * ciphertext recomputed with Speck128 as the cipher
- */
-static const struct cipher_testvec speck128_xts_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
- "\x3b\x99\x4a\x64\x74\x77\xac\xed"
- "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
- "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
- .len = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
- "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
- "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
- "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x78\xEB\xB1\x1C\xC4\x0B\x0A\x48"
+ "\x31\x2A\xAE\xB2\x04\x02\x44\xCB"
+ "\x4C\xB7\x01\x69\x51\x90\x92\x26"
+ "\x97\x9B\x0D\x15\xDC\x6A\x8F\x6D",
.len = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
- "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
- "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
- "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
+ }, { /* A.2.2.2 SM4-CBC Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x0d\x3a\x6d\xdc\x2d\x21\xc6\x98"
+ "\x85\x72\x15\x58\x7b\x7b\xb5\x9a"
+ "\x91\xf2\xc1\x47\x91\x1a\x41\x44"
+ "\x66\x5e\x1f\xa1\xd4\x0b\xae\x38",
.len = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
- "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
- "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
- "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
- "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
- "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
- "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
- "\x19\xc5\x58\x84\x63\xb9\x12\x68"
- "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
- "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
- "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
- "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
- "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
- "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
- "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
- "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
- "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
- "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
- "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
- "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
- "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
- "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
- "\xa5\x20\xac\x24\x1c\x73\x59\x73"
- "\x58\x61\x3a\x87\x58\xb3\x20\x56"
- "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
- "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
- "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
- "\x09\x35\x71\x50\x65\xac\x92\xe3"
- "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
- "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
- "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
- "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
- "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
- "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
- "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
- "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
- "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
- "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
- "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
- "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
- "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
- "\x87\x30\xac\xd5\xea\x73\x49\x10"
- "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
- "\x66\x02\x35\x3d\x60\x06\x36\x4f"
- "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
- "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
- "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
- "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
- "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
- "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
- "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
- "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
- "\x51\x66\x02\x69\x04\x97\x36\xd4"
- "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
- "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
- "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
- "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
- "\x03\xe7\x05\x39\xf5\x05\x26\xee"
- "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
- "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
- "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
- "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
- "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
- "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
- .len = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95"
- "\x02\x88\x41\x97\x16\x93\x99\x37"
- "\x51\x05\x82\x09\x74\x94\x45\x92",
- .klen = 64,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
- "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
- "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
- "\x92\x99\xde\xd3\x76\xed\xcd\x63"
- "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
- "\x79\x36\x31\x19\x62\xae\x10\x7e"
- "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
- "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
- "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
- "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
- "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
- "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
- "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
- "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
- "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
- "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
- "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
- "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
- "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
- "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
- "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
- "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
- "\xac\xa5\xd0\x38\xef\x19\x56\x53"
- "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
- "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
- "\xed\x22\x34\x1c\x5d\xed\x17\x06"
- "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
- "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
- "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
- "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
- "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
- "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
- "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
- "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
- "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
- "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
- "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
- "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
- "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
- "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
- "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
- "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
- "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
- "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
- "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
- "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
- "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
- "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
- "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
- "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
- "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
- "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
- "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
- "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
- "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
- "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
- "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
- "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
- "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
- "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
- "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
- "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
- "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
- "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
- .len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
}
};
-static const struct cipher_testvec speck64_tv_template[] = {
- { /* Speck64/96 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13",
- .klen = 12,
- .ptext = "\x65\x61\x6e\x73\x20\x46\x61\x74",
- .ctext = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
- .len = 8,
- }, { /* Speck64/128 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13\x18\x19\x1a\x1b",
+static const struct cipher_testvec sm4_ctr_tv_template[] = {
+ { /* A.2.5.1 SM4-CTR Example 1 */
+ .key = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
+ "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
.klen = 16,
- .ptext = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
- .ctext = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
- .len = 8,
- },
-};
-
-/*
- * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the
- * ciphertext recomputed with Speck64 as the cipher, and key lengths adjusted
- */
-static const struct cipher_testvec speck64_xts_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ctext = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
- "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
- "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
- "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
- .len = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
- "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
- "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
- "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
- .len = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ctext = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
- "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
- "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
- "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
- .len = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
- "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
- "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
- "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
- "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
- "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
- "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
- "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
- "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
- "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
- "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
- "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
- "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
- "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
- "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
- "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
- "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
- "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
- "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
- "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
- "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
- "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
- "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
- "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
- "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
- "\x07\xff\xf3\x72\x74\x48\xb5\x40"
- "\x50\xb5\xdd\x90\x43\x31\x18\x15"
- "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
- "\x29\x93\x90\x8b\xda\x07\xf0\x35"
- "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
- "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
- "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
- "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
- "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
- "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
- "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
- "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
- "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
- "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
- "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
- "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
- "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
- "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
- "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
- "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
- "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
- "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
- "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
- "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
- "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
- "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
- "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
- "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
- "\x59\xda\xee\x1a\x22\x18\xda\x0d"
- "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
- "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
- "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
- "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
- "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
- "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
- "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
- "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
- "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
- "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
- .len = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27",
- .klen = 32,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ptext = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ctext = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
- "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
- "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
- "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
- "\x78\x16\xea\x80\xdb\x33\x75\x94"
- "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
- "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
- "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
- "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
- "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
- "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
- "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
- "\x84\x5e\x46\xed\x20\x89\xb6\x44"
- "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
- "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
- "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
- "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
- "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
- "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
- "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
- "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
- "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
- "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
- "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
- "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
- "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
- "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
- "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
- "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
- "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
- "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
- "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
- "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
- "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
- "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
- "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
- "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
- "\x52\x7f\x29\x51\x94\x20\x67\x3c"
- "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
- "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
- "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
- "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
- "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
- "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
- "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
- "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
- "\x65\x53\x0f\x41\x11\xbd\x98\x99"
- "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
- "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
- "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
- "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
- "\x63\x51\x34\x9d\x96\x12\xae\xce"
- "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
- "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
- "\x54\x27\x74\xbb\x10\x86\x57\x46"
- "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
- "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
- "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
- "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
- "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
- "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
- "\x9b\x63\x76\x32\x2f\x19\x72\x10"
- "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
- "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
- .len = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
+ .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xee\xee\xee\xee"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\xac\x32\x36\xcb\x97\x0c\xc2\x07"
+ "\x91\x36\x4c\x39\x5a\x13\x42\xd1"
+ "\xa3\xcb\xc1\x87\x8c\x6f\x30\xcd"
+ "\x07\x4c\xce\x38\x5c\xdd\x70\xc7"
+ "\xf2\x34\xbc\x0e\x24\xc1\x19\x80"
+ "\xfd\x12\x86\x31\x0c\xe3\x7b\x92"
+ "\x6e\x02\xfc\xd0\xfa\xa0\xba\xf3"
+ "\x8b\x29\x33\x85\x1d\x82\x45\x14",
+ .len = 64,
+ }, { /* A.2.5.2 SM4-CTR Example 2 */
+ .key = "\xFE\xDC\xBA\x98\x76\x54\x32\x10"
+ "\x01\x23\x45\x67\x89\xAB\xCD\xEF",
+ .klen = 16,
+ .ptext = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb"
+ "\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xcc"
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+ "\xee\xee\xee\xee\xee\xee\xee\xee"
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ "\xbb\xbb\xbb\xbb\xbb\xbb\xbb\xbb",
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F",
+ .ctext = "\x5d\xcc\xcd\x25\xb9\x5a\xb0\x74"
+ "\x17\xa0\x85\x12\xee\x16\x0e\x2f"
+ "\x8f\x66\x15\x21\xcb\xba\xb4\x4c"
+ "\xc8\x71\x38\x44\x5b\xc2\x9e\x5c"
+ "\x0a\xe0\x29\x72\x05\xd6\x27\x04"
+ "\x17\x3b\x21\x23\x9b\x88\x7f\x6c"
+ "\x8c\xb5\xb8\x00\x91\x7a\x24\x88"
+ "\x28\x4b\xde\x9e\x16\xea\x29\x06",
+ .len = 64,
}
};
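Editor's note: the CTR vectors above can also be exercised from user space once this patch is applied, using the kernel's AF_ALG socket interface. The following is a minimal sketch (not part of the patch, error handling omitted) that encrypts the A.2.5.1 plaintext with "ctr(sm4)" and prints the leading ciphertext bytes; it assumes a kernel with CONFIG_CRYPTO_SM4 and CONFIG_CRYPTO_USER_API_SKCIPHER enabled.

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
    	struct sockaddr_alg sa = {
    		.salg_family = AF_ALG,
    		.salg_type   = "skcipher",
    		.salg_name   = "ctr(sm4)",
    	};
    	static const unsigned char key[16] = {
    		0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF,
    		0xFE, 0xDC, 0xBA, 0x98, 0x76, 0x54, 0x32, 0x10,
    	};
    	static const unsigned char iv[16] = {
    		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    		0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
    	};
    	unsigned char pt[64], ct[64];
    	char cbuf[CMSG_SPACE(sizeof(__u32)) +
    		  CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))] = { 0 };
    	struct msghdr msg = { 0 };
    	struct af_alg_iv *algiv;
    	struct cmsghdr *cmsg;
    	struct iovec iov;
    	int tfmfd, opfd, i;

    	/* A.2.5.1 plaintext: eight-byte runs of aa bb cc dd ee ff aa bb */
    	for (i = 0; i < 64; i++)
    		pt[i] = 0xaa + 0x11 * ((i / 8) % 6);

    	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
    	opfd = accept(tfmfd, NULL, 0);

    	iov.iov_base = pt;
    	iov.iov_len  = sizeof(pt);
    	msg.msg_iov = &iov;
    	msg.msg_iovlen = 1;
    	msg.msg_control = cbuf;
    	msg.msg_controllen = sizeof(cbuf);

    	cmsg = CMSG_FIRSTHDR(&msg);
    	cmsg->cmsg_level = SOL_ALG;
    	cmsg->cmsg_type  = ALG_SET_OP;
    	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
    	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

    	cmsg = CMSG_NXTHDR(&msg, cmsg);
    	cmsg->cmsg_level = SOL_ALG;
    	cmsg->cmsg_type  = ALG_SET_IV;
    	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
    	algiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
    	algiv->ivlen = sizeof(iv);
    	memcpy(algiv->iv, iv, sizeof(iv));

    	sendmsg(opfd, &msg, 0);
    	read(opfd, ct, sizeof(ct));

    	/* Expect ac 32 36 cb ... per the .ctext above */
    	printf("ct[0..3] = %02x %02x %02x %02x\n", ct[0], ct[1], ct[2], ct[3]);
    	close(opfd);
    	close(tfmfd);
    	return 0;
    }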
@@ -13883,6 +13251,27 @@ static const struct cipher_testvec aes_lrw_tv_template[] = {
.ctext = "\x5b\x90\x8e\xc1\xab\xdd\x67\x5f"
"\x3d\x69\x8a\x95\x53\xc8\x9c\xe5",
.len = 16,
+ }, { /* Test counter wrap-around, modified from LRW-32-AES 1 */
+ .key = "\x45\x62\xac\x25\xf8\x28\x17\x6d"
+ "\x4c\x26\x84\x14\xb5\x68\x01\x85"
+ "\x25\x8e\x2a\x05\xe7\x3e\x9d\x03"
+ "\xee\x5a\x83\x0c\xcc\x09\x4c\x87",
+ .klen = 32,
+ .iv = "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .ptext = "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46"
+ "\x30\x31\x32\x33\x34\x35\x36\x37"
+ "\x38\x39\x41\x42\x43\x44\x45\x46",
+ .ctext = "\x47\x90\x50\xf6\xf4\x8d\x5c\x7f"
+ "\x84\xc7\x83\x95\x2d\xa2\x02\xc0"
+ "\xda\x7f\xa3\xc0\x88\x2a\x0a\x50"
+ "\xfb\xc1\x78\x03\x39\xfe\x1d\xe5"
+ "\xf1\xb2\x73\xcd\x65\xa3\xdf\x5f"
+ "\xe9\x5d\x48\x92\x54\x63\x4e\xb8",
+ .len = 48,
}, {
/* http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html */
.key = "\xf8\xd4\x76\xff\xd6\x46\xee\x6c"
diff --git a/crypto/xcbc.c b/crypto/xcbc.c
index 25c75af50d3f..c055f57fab11 100644
--- a/crypto/xcbc.c
+++ b/crypto/xcbc.c
@@ -57,15 +57,17 @@ struct xcbc_desc_ctx {
u8 ctx[];
};
+#define XCBC_BLOCKSIZE 16
+
static int crypto_xcbc_digest_setkey(struct crypto_shash *parent,
const u8 *inkey, unsigned int keylen)
{
unsigned long alignmask = crypto_shash_alignmask(parent);
struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent);
- int bs = crypto_shash_blocksize(parent);
u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1);
int err = 0;
- u8 key1[bs];
+ u8 key1[XCBC_BLOCKSIZE];
+ int bs = sizeof(key1);
if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen)))
return err;
@@ -212,7 +214,7 @@ static int xcbc_create(struct crypto_template *tmpl, struct rtattr **tb)
return PTR_ERR(alg);
switch(alg->cra_blocksize) {
- case 16:
+ case XCBC_BLOCKSIZE:
break;
default:
goto out_put_alg;
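Editor's note: the key1 change above is part of the kernel-wide removal of variable-length arrays from the stack; the switch on cra_blocksize already guarantees a 16-byte block cipher, so the buffer can be sized with a compile-time constant. A reduced, standalone illustration of the pattern (not kernel code):

    /* Before: stack size depends on a runtime value, i.e. a VLA
     * (rejected by -Wvla, which the kernel now builds with).
     */
    int vla_version(unsigned int bs)
    {
    	unsigned char key1[bs];
    	return sizeof(key1);
    }

    /* After: a compile-time bound, with the runtime value derived from
     * it, mirroring what crypto_xcbc_digest_setkey() does above.
     */
    #define XCBC_BLOCKSIZE 16

    int fixed_version(void)
    {
    	unsigned char key1[XCBC_BLOCKSIZE];
    	int bs = sizeof(key1);

    	return bs;
    }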
diff --git a/crypto/xts.c b/crypto/xts.c
index ccf55fbb8bc2..847f54f76789 100644
--- a/crypto/xts.c
+++ b/crypto/xts.c
@@ -26,8 +26,6 @@
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
-#define XTS_BUFFER_SIZE 128u
-
struct priv {
struct crypto_skcipher *child;
struct crypto_cipher *tweak;
@@ -39,19 +37,7 @@ struct xts_instance_ctx {
};
struct rctx {
- le128 buf[XTS_BUFFER_SIZE / sizeof(le128)];
-
le128 t;
-
- le128 *ext;
-
- struct scatterlist srcbuf[2];
- struct scatterlist dstbuf[2];
- struct scatterlist *src;
- struct scatterlist *dst;
-
- unsigned int left;
-
struct skcipher_request subreq;
};
@@ -96,81 +82,27 @@ static int setkey(struct crypto_skcipher *parent, const u8 *key,
return err;
}
-static int post_crypt(struct skcipher_request *req)
+/*
+ * We compute the tweak masks twice (both before and after the ECB encryption or
+ * decryption) to avoid having to allocate a temporary buffer and/or make
+ * multiple calls to the 'ecb(..)' instance, which usually would be slower than
+ * just doing the gf128mul_x_ble() calls again.
+ */
+static int xor_tweak(struct skcipher_request *req, bool second_pass)
{
struct rctx *rctx = skcipher_request_ctx(req);
- le128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
const int bs = XTS_BLOCK_SIZE;
struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned offset;
+ le128 t = rctx->t;
int err;
- subreq = &rctx->subreq;
- err = skcipher_walk_virt(&w, subreq, false);
-
- while (w.nbytes) {
- unsigned int avail = w.nbytes;
- le128 *wdst;
-
- wdst = w.dst.virt.addr;
-
- do {
- le128_xor(wdst, buf++, wdst);
- wdst++;
- } while ((avail -= bs) >= bs);
-
- err = skcipher_walk_done(&w, avail);
+ if (second_pass) {
+ req = &rctx->subreq;
+ /* set to our TFM to enforce correct alignment: */
+ skcipher_request_set_tfm(req, tfm);
}
-
- rctx->left -= subreq->cryptlen;
-
- if (err || !rctx->left)
- goto out;
-
- rctx->dst = rctx->dstbuf;
-
- scatterwalk_done(&w.out, 0, 1);
- sg = w.out.sg;
- offset = w.out.offset;
-
- if (rctx->dst != sg) {
- rctx->dst[0] = *sg;
- sg_unmark_end(rctx->dst);
- scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 2);
- }
- rctx->dst[0].length -= offset - sg->offset;
- rctx->dst[0].offset = offset;
-
-out:
- return err;
-}
-
-static int pre_crypt(struct skcipher_request *req)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- le128 *buf = rctx->ext ?: rctx->buf;
- struct skcipher_request *subreq;
- const int bs = XTS_BLOCK_SIZE;
- struct skcipher_walk w;
- struct scatterlist *sg;
- unsigned cryptlen;
- unsigned offset;
- bool more;
- int err;
-
- subreq = &rctx->subreq;
- cryptlen = subreq->cryptlen;
-
- more = rctx->left > cryptlen;
- if (!more)
- cryptlen = rctx->left;
-
- skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
- cryptlen, NULL);
-
- err = skcipher_walk_virt(&w, subreq, false);
+ err = skcipher_walk_virt(&w, req, false);
while (w.nbytes) {
unsigned int avail = w.nbytes;
@@ -181,180 +113,71 @@ static int pre_crypt(struct skcipher_request *req)
wdst = w.dst.virt.addr;
do {
- *buf++ = rctx->t;
- le128_xor(wdst++, &rctx->t, wsrc++);
- gf128mul_x_ble(&rctx->t, &rctx->t);
+ le128_xor(wdst++, &t, wsrc++);
+ gf128mul_x_ble(&t, &t);
} while ((avail -= bs) >= bs);
err = skcipher_walk_done(&w, avail);
}
- skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
- cryptlen, NULL);
-
- if (err || !more)
- goto out;
-
- rctx->src = rctx->srcbuf;
-
- scatterwalk_done(&w.in, 0, 1);
- sg = w.in.sg;
- offset = w.in.offset;
-
- if (rctx->src != sg) {
- rctx->src[0] = *sg;
- sg_unmark_end(rctx->src);
- scatterwalk_crypto_chain(rctx->src, sg_next(sg), 2);
- }
- rctx->src[0].length -= offset - sg->offset;
- rctx->src[0].offset = offset;
-
-out:
return err;
}
-static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
+static int xor_tweak_pre(struct skcipher_request *req)
{
- struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
- gfp_t gfp;
-
- subreq = &rctx->subreq;
- skcipher_request_set_tfm(subreq, ctx->child);
- skcipher_request_set_callback(subreq, req->base.flags, done, req);
-
- gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
- GFP_ATOMIC;
- rctx->ext = NULL;
-
- subreq->cryptlen = XTS_BUFFER_SIZE;
- if (req->cryptlen > XTS_BUFFER_SIZE) {
- unsigned int n = min(req->cryptlen, (unsigned int)PAGE_SIZE);
-
- rctx->ext = kmalloc(n, gfp);
- if (rctx->ext)
- subreq->cryptlen = n;
- }
-
- rctx->src = req->src;
- rctx->dst = req->dst;
- rctx->left = req->cryptlen;
-
- /* calculate first value of T */
- crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
-
- return 0;
+ return xor_tweak(req, false);
}
-static void exit_crypt(struct skcipher_request *req)
+static int xor_tweak_post(struct skcipher_request *req)
{
- struct rctx *rctx = skcipher_request_ctx(req);
-
- rctx->left = 0;
-
- if (rctx->ext)
- kzfree(rctx->ext);
+ return xor_tweak(req, true);
}
-static int do_encrypt(struct skcipher_request *req, int err)
-{
- struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
-
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_encrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
-
- exit_crypt(req);
- return err;
-}
-
-static void encrypt_done(struct crypto_async_request *areq, int err)
+static void crypt_done(struct crypto_async_request *areq, int err)
{
struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
- err = do_encrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ if (!err)
+ err = xor_tweak_post(req);
-out:
skcipher_request_complete(req, err);
}
-static int encrypt(struct skcipher_request *req)
-{
- return do_encrypt(req, init_crypt(req, encrypt_done));
-}
-
-static int do_decrypt(struct skcipher_request *req, int err)
+static void init_crypt(struct skcipher_request *req)
{
+ struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
struct rctx *rctx = skcipher_request_ctx(req);
- struct skcipher_request *subreq;
-
- subreq = &rctx->subreq;
+ struct skcipher_request *subreq = &rctx->subreq;
- while (!err && rctx->left) {
- err = pre_crypt(req) ?:
- crypto_skcipher_decrypt(subreq) ?:
- post_crypt(req);
-
- if (err == -EINPROGRESS || err == -EBUSY)
- return err;
- }
+ skcipher_request_set_tfm(subreq, ctx->child);
+ skcipher_request_set_callback(subreq, req->base.flags, crypt_done, req);
+ skcipher_request_set_crypt(subreq, req->dst, req->dst,
+ req->cryptlen, NULL);
- exit_crypt(req);
- return err;
+ /* calculate first value of T */
+ crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);
}
-static void decrypt_done(struct crypto_async_request *areq, int err)
+static int encrypt(struct skcipher_request *req)
{
- struct skcipher_request *req = areq->data;
- struct skcipher_request *subreq;
- struct rctx *rctx;
-
- rctx = skcipher_request_ctx(req);
-
- if (err == -EINPROGRESS) {
- if (rctx->left != req->cryptlen)
- return;
- goto out;
- }
-
- subreq = &rctx->subreq;
- subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;
-
- err = do_decrypt(req, err ?: post_crypt(req));
- if (rctx->left)
- return;
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
-out:
- skcipher_request_complete(req, err);
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_encrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int decrypt(struct skcipher_request *req)
{
- return do_decrypt(req, init_crypt(req, decrypt_done));
+ struct rctx *rctx = skcipher_request_ctx(req);
+ struct skcipher_request *subreq = &rctx->subreq;
+
+ init_crypt(req);
+ return xor_tweak_pre(req) ?:
+ crypto_skcipher_decrypt(subreq) ?:
+ xor_tweak_post(req);
}
static int init_tfm(struct crypto_skcipher *tfm)
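Editor's note: the rewrite above drops the per-request tweak buffer (and the old pre_crypt()/post_crypt() scatterlist juggling) and instead recomputes the tweak sequence on a second pass, since one GF(2^128) doubling per block is cheaper than buffering. A standalone sketch of the doubling that gf128mul_x_ble() performs; the struct layout and names here are illustrative, not the kernel's le128:

    #include <stdint.h>

    /* 128-bit XTS tweak, low 64 bits first (little-endian block order). */
    struct tweak128 {
    	uint64_t lo;
    	uint64_t hi;
    };

    /* Multiply the tweak by x in GF(2^128), reducing by the XTS
     * polynomial x^128 + x^7 + x^2 + x + 1 (the 0x87 constant) when a
     * bit falls off the top. xor_tweak() runs this once per 16-byte
     * block on both passes.
     */
    static void tweak_mul_x(struct tweak128 *t)
    {
    	uint64_t carry = t->hi >> 63;

    	t->hi = (t->hi << 1) | (t->lo >> 63);
    	t->lo = (t->lo << 1) ^ (carry * 0x87);
    }

Both passes start from the same 16-byte seed rctx->t, so they walk an identical tweak sequence with no saved state beyond that seed.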
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 9705fc986da9..365e6c1a729e 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -492,6 +492,9 @@ config ACPI_EXTLOG
driver adds support for that functionality with corresponding
tracepoint which carries that information to userspace.
+config ACPI_ADXL
+ bool
+
menuconfig PMIC_OPREGION
bool "PMIC (Power Management Integrated Circuit) operation region support"
help
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 6d59aa109a91..edc039313cd6 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,6 +61,9 @@ acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
acpi-$(CONFIG_ACPI_GENERIC_GSI) += irq.o
acpi-$(CONFIG_ACPI_WATCHDOG) += acpi_watchdog.o
+# Address translation
+acpi-$(CONFIG_ACPI_ADXL) += acpi_adxl.o
+
# These are (potentially) separate modules
# IPMI may be used by other drivers, so it has to initialise before them
diff --git a/drivers/acpi/acpi_adxl.c b/drivers/acpi/acpi_adxl.c
new file mode 100644
index 000000000000..13c8f7b50c46
--- /dev/null
+++ b/drivers/acpi/acpi_adxl.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Specification for this interface is available at:
+ *
+ * https://cdrdv2.intel.com/v1/dl/getContent/603354
+ */
+
+#include <linux/acpi.h>
+#include <linux/adxl.h>
+
+#define ADXL_REVISION 0x1
+#define ADXL_IDX_GET_ADDR_PARAMS 0x1
+#define ADXL_IDX_FORWARD_TRANSLATE 0x2
+#define ACPI_ADXL_PATH "\\_SB.ADXL"
+
+/*
+ * The specification doesn't provide a limit on how many
+ * components are in a memory address. But since we allocate
+ * memory based on the number the BIOS tells us, we should
+ * defend against insane values.
+ */
+#define ADXL_MAX_COMPONENTS 500
+
+#undef pr_fmt
+#define pr_fmt(fmt) "ADXL: " fmt
+
+static acpi_handle handle;
+static union acpi_object *params;
+static const guid_t adxl_guid =
+ GUID_INIT(0xAA3C050A, 0x7EA4, 0x4C1F,
+ 0xAF, 0xDA, 0x12, 0x67, 0xDF, 0xD3, 0xD4, 0x8D);
+
+static int adxl_count;
+static char **adxl_component_names;
+
+static union acpi_object *adxl_dsm(int cmd, union acpi_object argv[])
+{
+ union acpi_object *obj, *o;
+
+ obj = acpi_evaluate_dsm_typed(handle, &adxl_guid, ADXL_REVISION,
+ cmd, argv, ACPI_TYPE_PACKAGE);
+ if (!obj) {
+ pr_info("DSM call failed for cmd=%d\n", cmd);
+ return NULL;
+ }
+
+ if (obj->package.count != 2) {
+ pr_info("Bad pkg count %d\n", obj->package.count);
+ goto err;
+ }
+
+ o = obj->package.elements;
+ if (o->type != ACPI_TYPE_INTEGER) {
+ pr_info("Bad 1st element type %d\n", o->type);
+ goto err;
+ }
+ if (o->integer.value) {
+ pr_info("Bad ret val %llu\n", o->integer.value);
+ goto err;
+ }
+
+ o = obj->package.elements + 1;
+ if (o->type != ACPI_TYPE_PACKAGE) {
+ pr_info("Bad 2nd element type %d\n", o->type);
+ goto err;
+ }
+ return obj;
+
+err:
+ ACPI_FREE(obj);
+ return NULL;
+}
+
+/**
+ * adxl_get_component_names - get list of memory component names
+ * Returns a NULL-terminated list of string names
+ *
+ * Give the caller a pointer to the list of memory component names
+ * e.g. { "SystemAddress", "ProcessorSocketId", "ChannelId", ... NULL }
+ * Caller should count the strings in order to size the buffer it
+ * allocates for the return from adxl_decode().
+ */
+const char * const *adxl_get_component_names(void)
+{
+ return (const char * const *)adxl_component_names;
+}
+EXPORT_SYMBOL_GPL(adxl_get_component_names);
+
+/**
+ * adxl_decode - ask BIOS to decode a system address to memory address
+ * @addr: the address to decode
+ * @component_values: pointer to array of values for each component
+ * Returns 0 on success, negative error code otherwise
+ *
+ * The index of each value returned in the array matches the index of
+ * each component name returned by adxl_get_component_names().
+ * Components that are not defined for this address translation (e.g.
+ * mirror channel number for a non-mirrored address) are set to ~0ull.
+ */
+int adxl_decode(u64 addr, u64 component_values[])
+{
+ union acpi_object argv4[2], *results, *r;
+ int i, cnt;
+
+ if (!adxl_component_names)
+ return -EOPNOTSUPP;
+
+ argv4[0].type = ACPI_TYPE_PACKAGE;
+ argv4[0].package.count = 1;
+ argv4[0].package.elements = &argv4[1];
+ argv4[1].integer.type = ACPI_TYPE_INTEGER;
+ argv4[1].integer.value = addr;
+
+ results = adxl_dsm(ADXL_IDX_FORWARD_TRANSLATE, argv4);
+ if (!results)
+ return -EINVAL;
+
+ r = results->package.elements + 1;
+ cnt = r->package.count;
+ if (cnt != adxl_count) {
+ ACPI_FREE(results);
+ return -EINVAL;
+ }
+ r = r->package.elements;
+
+ for (i = 0; i < cnt; i++)
+ component_values[i] = r[i].integer.value;
+
+ ACPI_FREE(results);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adxl_decode);
+
+static int __init adxl_init(void)
+{
+ char *path = ACPI_ADXL_PATH;
+ union acpi_object *p;
+ acpi_status status;
+ int i;
+
+ status = acpi_get_handle(NULL, path, &handle);
+ if (ACPI_FAILURE(status)) {
+ pr_debug("No ACPI handle for path %s\n", path);
+ return -ENODEV;
+ }
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ pr_info("No DSM method\n");
+ return -ENODEV;
+ }
+
+ if (!acpi_check_dsm(handle, &adxl_guid, ADXL_REVISION,
+ ADXL_IDX_GET_ADDR_PARAMS |
+ ADXL_IDX_FORWARD_TRANSLATE)) {
+ pr_info("DSM method does not support forward translate\n");
+ return -ENODEV;
+ }
+
+ params = adxl_dsm(ADXL_IDX_GET_ADDR_PARAMS, NULL);
+ if (!params) {
+ pr_info("Failed to get component names\n");
+ return -ENODEV;
+ }
+
+ p = params->package.elements + 1;
+ adxl_count = p->package.count;
+ if (adxl_count > ADXL_MAX_COMPONENTS) {
+ pr_info("Insane number of address component names %d\n", adxl_count);
+ ACPI_FREE(params);
+ return -ENODEV;
+ }
+ p = p->package.elements;
+
+ /*
+ * Allocate one extra for NULL termination.
+ */
+ adxl_component_names = kcalloc(adxl_count + 1, sizeof(char *), GFP_KERNEL);
+ if (!adxl_component_names) {
+ ACPI_FREE(params);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < adxl_count; i++)
+ adxl_component_names[i] = p[i].string.pointer;
+
+ return 0;
+}
+subsys_initcall(adxl_init);
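Editor's note: a consumer of this interface (an EDAC driver is the intended first user) would size its output buffer from the NULL-terminated name list and then call adxl_decode(). A hedged sketch of such a caller, with illustrative function and variable names:

    #include <linux/adxl.h>
    #include <linux/slab.h>
    #include <linux/printk.h>

    /* Hypothetical consumer: translate a system address from an error
     * record into its memory components. Error handling abbreviated.
     */
    static int example_translate(u64 sys_addr)
    {
    	const char * const *names = adxl_get_component_names();
    	u64 *values;
    	int n = 0, i, rc;

    	if (!names)
    		return -EOPNOTSUPP;

    	while (names[n])		/* list is NULL terminated */
    		n++;

    	values = kcalloc(n, sizeof(*values), GFP_KERNEL);
    	if (!values)
    		return -ENOMEM;

    	rc = adxl_decode(sys_addr, values);
    	if (!rc)
    		for (i = 0; i < n; i++)
    			if (values[i] != ~0ull)	/* ~0ull: not defined here */
    				pr_info("%s = 0x%llx\n", names[i], values[i]);

    	kfree(values);
    	return rc;
    }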
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index b072cfc5f20e..f8c638f3c946 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <acpi/nfit.h>
#include "nfit.h"
+#include "intel.h"
/*
* For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
@@ -191,18 +192,20 @@ static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd
* In the _LSI, _LSR, _LSW case the locked status is
* communicated via the read/write commands
*/
- if (nfit_mem->has_lsr)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
break;
if (status >> 16 & ND_CONFIG_LOCKED)
return -EACCES;
break;
case ND_CMD_GET_CONFIG_DATA:
- if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
case ND_CMD_SET_CONFIG_DATA:
- if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags)
+ && status == ACPI_LABELS_LOCKED)
return -EACCES;
break;
default:
@@ -480,14 +483,16 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
min_t(u32, 256, in_buf.buffer.length), true);
/* call the BIOS, prefer the named methods over _DSM if available */
- if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsr)
+ if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags))
out_obj = acpi_label_info(handle);
- else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
+ else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA
+ && test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
struct nd_cmd_get_config_data_hdr *p = buf;
out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA
- && nfit_mem->has_lsw) {
+ && test_bit(NFIT_MEM_LSW, &nfit_mem->flags)) {
struct nd_cmd_set_config_hdr *p = buf;
out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
@@ -1547,7 +1552,12 @@ static DEVICE_ATTR_RO(dsm_mask);
static ssize_t flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 flags = to_nfit_memdev(dev)->flags;
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ u16 flags = __to_nfit_memdev(nfit_mem)->flags;
+
+ if (test_bit(NFIT_MEM_DIRTY, &nfit_mem->flags))
+ flags |= ACPI_NFIT_MEM_FLUSH_FAILED;
return sprintf(buf, "%s%s%s%s%s%s%s\n",
flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
@@ -1578,6 +1588,16 @@ static ssize_t id_show(struct device *dev,
}
static DEVICE_ATTR_RO(id);
+static ssize_t dirty_shutdown_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+
+ return sprintf(buf, "%d\n", nfit_mem->dirty_shutdown);
+}
+static DEVICE_ATTR_RO(dirty_shutdown);
+
static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_handle.attr,
&dev_attr_phys_id.attr,
@@ -1595,6 +1615,7 @@ static struct attribute *acpi_nfit_dimm_attributes[] = {
&dev_attr_id.attr,
&dev_attr_family.attr,
&dev_attr_dsm_mask.attr,
+ &dev_attr_dirty_shutdown.attr,
NULL,
};
@@ -1603,6 +1624,7 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct nvdimm *nvdimm = to_nvdimm(dev);
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
if (!to_nfit_dcr(dev)) {
/* Without a dcr only the memdev attributes can be surfaced */
@@ -1616,6 +1638,11 @@ static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
return 0;
+
+ if (!test_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags)
+ && a == &dev_attr_dirty_shutdown.attr)
+ return 0;
+
return a->mode;
}
@@ -1694,6 +1721,56 @@ static bool acpi_nvdimm_has_method(struct acpi_device *adev, char *method)
return false;
}
+__weak void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ struct nd_intel_smart smart = { 0 };
+ union acpi_object in_buf = {
+ .type = ACPI_TYPE_BUFFER,
+ .buffer.pointer = (char *) &smart,
+ .buffer.length = sizeof(smart),
+ };
+ union acpi_object in_obj = {
+ .type = ACPI_TYPE_PACKAGE,
+ .package.count = 1,
+ .package.elements = &in_buf,
+ };
+ const u8 func = ND_INTEL_SMART;
+ const guid_t *guid = to_nfit_uuid(nfit_mem->family);
+ u8 revid = nfit_dsm_revid(nfit_mem->family, func);
+ struct acpi_device *adev = nfit_mem->adev;
+ acpi_handle handle = adev->handle;
+ union acpi_object *out_obj;
+
+ if ((nfit_mem->dsm_mask & (1 << func)) == 0)
+ return;
+
+ out_obj = acpi_evaluate_dsm(handle, guid, revid, func, &in_obj);
+ if (!out_obj)
+ return;
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_VALID) {
+ if (smart.shutdown_state)
+ set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
+ }
+
+ if (smart.flags & ND_INTEL_SMART_SHUTDOWN_COUNT_VALID) {
+ set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+ nfit_mem->dirty_shutdown = smart.shutdown_count;
+ }
+ ACPI_FREE(out_obj);
+}
+
+static void populate_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ /*
+ * For DIMMs that provide a dynamic facility to retrieve a
+ * dirty-shutdown status and/or a dirty-shutdown count, cache
+ * these values in nfit_mem.
+ */
+ if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
+ nfit_intel_shutdown_status(nfit_mem);
+}
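Editor's note: nfit_intel_shutdown_status() is deliberately __weak so the nvdimm unit tests under tools/testing/nvdimm can substitute their own definition. A sketch of what such an override might look like (the count value is illustrative; only identifiers introduced by this patch are used):

    /* Hypothetical unit-test override: report a valid dirty-shutdown
     * state and count without talking to any real DIMM firmware.
     */
    void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
    {
    	set_bit(NFIT_MEM_DIRTY, &nfit_mem->flags);
    	set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
    	nfit_mem->dirty_shutdown = 42;
    }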
+
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, u32 device_handle)
{
@@ -1708,8 +1785,11 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
nfit_mem->family = NVDIMM_FAMILY_INTEL;
adev = to_acpi_dev(acpi_desc);
- if (!adev)
+ if (!adev) {
+ /* unit test case */
+ populate_shutdown_status(nfit_mem);
return 0;
+ }
adev_dimm = acpi_find_child_device(adev, device_handle, false);
nfit_mem->adev = adev_dimm;
@@ -1784,14 +1864,17 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
if (acpi_nvdimm_has_method(adev_dimm, "_LSI")
&& acpi_nvdimm_has_method(adev_dimm, "_LSR")) {
dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsr = true;
+ set_bit(NFIT_MEM_LSR, &nfit_mem->flags);
}
- if (nfit_mem->has_lsr && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)
+ && acpi_nvdimm_has_method(adev_dimm, "_LSW")) {
dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
- nfit_mem->has_lsw = true;
+ set_bit(NFIT_MEM_LSW, &nfit_mem->flags);
}
+ populate_shutdown_status(nfit_mem);
+
return 0;
}
@@ -1878,11 +1961,11 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
cmd_mask |= nfit_mem->dsm_mask & NVDIMM_STANDARD_CMDMASK;
}
- if (nfit_mem->has_lsr) {
+ if (test_bit(NFIT_MEM_LSR, &nfit_mem->flags)) {
set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
}
- if (nfit_mem->has_lsw)
+ if (test_bit(NFIT_MEM_LSW, &nfit_mem->flags))
set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);
flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
@@ -2466,7 +2549,8 @@ static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
return cmd_rc;
}
-static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
+static int ars_start(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa, enum nfit_ars_state req_type)
{
int rc;
int cmd_rc;
@@ -2477,7 +2561,7 @@ static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa
memset(&ars_start, 0, sizeof(ars_start));
ars_start.address = spa->address;
ars_start.length = spa->length;
- if (test_bit(ARS_SHORT, &nfit_spa->ars_state))
+ if (req_type == ARS_REQ_SHORT)
ars_start.flags = ND_ARS_RETURN_PREV_DATA;
if (nfit_spa_type(spa) == NFIT_SPA_PM)
ars_start.type = ND_ARS_PERSISTENT;
@@ -2534,6 +2618,15 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
struct nd_region *nd_region = nfit_spa->nd_region;
struct device *dev;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+ /*
+ * Only advance the ARS state for ARS runs initiated by the
+ * kernel; ignore ARS results from BIOS-initiated runs for scrub
+ * completion tracking.
+ */
+ if (acpi_desc->scrub_spa != nfit_spa)
+ return;
+
if ((ars_status->address >= spa->address && ars_status->address
< spa->address + spa->length)
|| (ars_status->address < spa->address)) {
@@ -2553,28 +2646,13 @@ static void ars_complete(struct acpi_nfit_desc *acpi_desc,
} else
return;
- if (test_bit(ARS_DONE, &nfit_spa->ars_state))
- return;
-
- if (!test_and_clear_bit(ARS_REQ, &nfit_spa->ars_state))
- return;
-
+ acpi_desc->scrub_spa = NULL;
if (nd_region) {
dev = nd_region_dev(nd_region);
nvdimm_region_notify(nd_region, NVDIMM_REVALIDATE_POISON);
} else
dev = acpi_desc->dev;
-
- dev_dbg(dev, "ARS: range %d %s complete\n", spa->range_index,
- test_bit(ARS_SHORT, &nfit_spa->ars_state)
- ? "short" : "long");
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- if (test_and_clear_bit(ARS_REQ_REDO, &nfit_spa->ars_state)) {
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- dev_dbg(dev, "ARS: processing scrub request received while in progress\n");
- } else
- set_bit(ARS_DONE, &nfit_spa->ars_state);
+ dev_dbg(dev, "ARS: range %d complete\n", spa->range_index);
}
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc)
@@ -2855,46 +2933,55 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
return 0;
}
-static int ars_register(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa,
- int *query_rc)
+static int ars_register(struct acpi_nfit_desc *acpi_desc,
+ struct nfit_spa *nfit_spa)
{
- int rc = *query_rc;
+ int rc;
- if (no_init_ars)
+ if (no_init_ars || test_bit(ARS_FAILED, &nfit_spa->ars_state))
return acpi_nfit_register_region(acpi_desc, nfit_spa);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ set_bit(ARS_REQ_LONG, &nfit_spa->ars_state);
- switch (rc) {
+ switch (acpi_nfit_query_poison(acpi_desc)) {
case 0:
case -EAGAIN:
- rc = ars_start(acpi_desc, nfit_spa);
- if (rc == -EBUSY) {
- *query_rc = rc;
+ rc = ars_start(acpi_desc, nfit_spa, ARS_REQ_SHORT);
+ /* shouldn't happen, try again later */
+ if (rc == -EBUSY)
break;
- } else if (rc == 0) {
- rc = acpi_nfit_query_poison(acpi_desc);
- } else {
+ if (rc) {
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (rc == -EAGAIN)
- clear_bit(ARS_SHORT, &nfit_spa->ars_state);
- else if (rc == 0)
- ars_complete(acpi_desc, nfit_spa);
+ clear_bit(ARS_REQ_SHORT, &nfit_spa->ars_state);
+ rc = acpi_nfit_query_poison(acpi_desc);
+ if (rc)
+ break;
+ acpi_desc->scrub_spa = nfit_spa;
+ ars_complete(acpi_desc, nfit_spa);
+ /*
+ * If ars_complete() says we didn't complete the
+ * short scrub, we'll try again with a long
+ * request.
+ */
+ acpi_desc->scrub_spa = NULL;
break;
case -EBUSY:
+ case -ENOMEM:
case -ENOSPC:
+ /*
+ * BIOS was using ARS, wait for it to complete (or
+ * resources to become available) and then perform our
+ * own scrubs.
+ */
break;
default:
set_bit(ARS_FAILED, &nfit_spa->ars_state);
break;
}
- if (test_and_clear_bit(ARS_DONE, &nfit_spa->ars_state))
- set_bit(ARS_REQ, &nfit_spa->ars_state);
-
return acpi_nfit_register_region(acpi_desc, nfit_spa);
}
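Editor's note: with ARS_REQ split into short and long variants, a re-scrub request from elsewhere in the driver reduces to setting the appropriate bit under init_mutex and kicking the scrub worker. Roughly, under the assumption of a workqueue and delayed work item like the ones this driver uses (names here are illustrative):

    static void example_ars_rescan(struct acpi_nfit_desc *acpi_desc,
    			       enum nfit_ars_state req_type)
    {
    	struct nfit_spa *nfit_spa;

    	mutex_lock(&acpi_desc->init_mutex);
    	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
    		if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
    			continue;
    		/* req_type is ARS_REQ_SHORT or ARS_REQ_LONG */
    		set_bit(req_type, &nfit_spa->ars_state);
    	}
    	/* illustrative: poke the scrub worker to pick up the request */
    	queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
    	mutex_unlock(&acpi_desc->init_mutex);
    }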
@@ -2916,6 +3003,8 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
struct device *dev = acpi_desc->dev;
struct nfit_spa *nfit_spa;
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
if (acpi_desc->cancel)
return 0;
@@ -2939,21 +3028,49 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
ars_complete_all(acpi_desc);
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+ enum nfit_ars_state req_type;
+ int rc;
+
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_bit(ARS_REQ, &nfit_spa->ars_state)) {
- int rc = ars_start(acpi_desc, nfit_spa);
-
- clear_bit(ARS_DONE, &nfit_spa->ars_state);
- dev = nd_region_dev(nfit_spa->nd_region);
- dev_dbg(dev, "ARS: range %d ARS start (%d)\n",
- nfit_spa->spa->range_index, rc);
- if (rc == 0 || rc == -EBUSY)
- return 1;
- dev_err(dev, "ARS: range %d ARS failed (%d)\n",
- nfit_spa->spa->range_index, rc);
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
+
+ /* prefer short ARS requests first */
+ if (test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state))
+ req_type = ARS_REQ_SHORT;
+ else if (test_bit(ARS_REQ_LONG, &nfit_spa->ars_state))
+ req_type = ARS_REQ_LONG;
+ else
+ continue;
+ rc = ars_start(acpi_desc, nfit_spa, req_type);
+
+ dev = nd_region_dev(nfit_spa->nd_region);
+ dev_dbg(dev, "ARS: range %d ARS start %s (%d)\n",
+ nfit_spa->spa->range_index,
+ req_type == ARS_REQ_SHORT ? "short" : "long",
+ rc);
+ /*
+ * We raced another agent starting ARS; try again in
+ * a bit.
+ */
+ if (rc == -EBUSY)
+ return 1;
+ if (rc == 0) {
+ dev_WARN_ONCE(dev, acpi_desc->scrub_spa,
+ "scrub start while range %d active\n",
+ acpi_desc->scrub_spa->spa->range_index);
+ clear_bit(req_type, &nfit_spa->ars_state);
+ acpi_desc->scrub_spa = nfit_spa;
+ /*
+ * Consider this spa last for future scrub
+ * requests
+ */
+ list_move_tail(&nfit_spa->list, &acpi_desc->spas);
+ return 1;
}
+
+ dev_err(dev, "ARS: range %d ARS failed (%d)\n",
+ nfit_spa->spa->range_index, rc);
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
}
return 0;
}
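
Note: the worker above always consumes ARS_REQ_SHORT before ARS_REQ_LONG. A tiny self-contained model of that ordering (names are illustrative, not the driver's):

    #include <stdio.h>

    enum { REQ_SHORT = 1 << 0, REQ_LONG = 1 << 1 };

    /* Mirrors the preference logic above: short requests win ties. */
    static unsigned int next_request(unsigned int state)
    {
        if (state & REQ_SHORT)
            return REQ_SHORT;
        if (state & REQ_LONG)
            return REQ_LONG;
        return 0; /* nothing pending */
    }

    int main(void)
    {
        unsigned int state = REQ_SHORT | REQ_LONG; /* as ars_register() sets them */
        unsigned int req;

        while ((req = next_request(state))) {
            printf("run %s ARS\n", req == REQ_SHORT ? "short" : "long");
            state &= ~req; /* ars_start() succeeded; clear the request bit */
        }
        return 0;
    }
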
@@ -3009,6 +3126,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
struct nd_cmd_ars_cap ars_cap;
int rc;
+ set_bit(ARS_FAILED, &nfit_spa->ars_state);
memset(&ars_cap, 0, sizeof(ars_cap));
rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
if (rc < 0)
@@ -3025,16 +3143,14 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc,
nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
acpi_desc->max_ars = max(nfit_spa->max_ars, acpi_desc->max_ars);
clear_bit(ARS_FAILED, &nfit_spa->ars_state);
- set_bit(ARS_REQ, &nfit_spa->ars_state);
}
static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
struct nfit_spa *nfit_spa;
- int rc, query_rc;
+ int rc;
list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
- set_bit(ARS_FAILED, &nfit_spa->ars_state);
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
@@ -3043,20 +3159,12 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
}
}
- /*
- * Reap any results that might be pending before starting new
- * short requests.
- */
- query_rc = acpi_nfit_query_poison(acpi_desc);
- if (query_rc == 0)
- ars_complete_all(acpi_desc);
-
list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
switch (nfit_spa_type(nfit_spa->spa)) {
case NFIT_SPA_VOLATILE:
case NFIT_SPA_PM:
/* register regions and kick off initial ARS run */
- rc = ars_register(acpi_desc, nfit_spa, &query_rc);
+ rc = ars_register(acpi_desc, nfit_spa);
if (rc)
return rc;
break;
@@ -3233,6 +3341,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd)
{
struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
+ struct nfit_spa *nfit_spa;
+ int rc = 0;
if (nvdimm)
return 0;
@@ -3242,16 +3352,24 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
/*
* The kernel and userspace may race to initiate a scrub, but
* the scrub thread is prepared to lose that initial race. It
- * just needs guarantees that any ars it initiates are not
- * interrupted by any intervening start reqeusts from userspace.
+ * just needs guarantees that any ARS it initiates are not
+ * interrupted by any intervening start requests from userspace.
*/
- if (work_busy(&acpi_desc->dwork.work))
- return -EBUSY;
+ mutex_lock(&acpi_desc->init_mutex);
+ list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
+ if (acpi_desc->scrub_spa
+ || test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
+ || test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
+ rc = -EBUSY;
+ break;
+ }
+ mutex_unlock(&acpi_desc->init_mutex);
- return 0;
+ return rc;
}
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type)
{
struct device *dev = acpi_desc->dev;
int scheduled = 0, busy = 0;
@@ -3271,14 +3389,10 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
if (test_bit(ARS_FAILED, &nfit_spa->ars_state))
continue;
- if (test_and_set_bit(ARS_REQ, &nfit_spa->ars_state)) {
+ if (test_and_set_bit(req_type, &nfit_spa->ars_state))
busy++;
- set_bit(ARS_REQ_REDO, &nfit_spa->ars_state);
- } else {
- if (test_bit(ARS_SHORT, &flags))
- set_bit(ARS_SHORT, &nfit_spa->ars_state);
+ else
scheduled++;
- }
}
if (scheduled) {
sched_ars(acpi_desc);
@@ -3464,10 +3578,11 @@ static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
- unsigned long flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
- 0 : 1 << ARS_SHORT;
- acpi_nfit_ars_rescan(acpi_desc, flags);
+ if (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON)
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_LONG);
+ else
+ acpi_nfit_ars_rescan(acpi_desc, ARS_REQ_SHORT);
}
void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h
new file mode 100644
index 000000000000..86746312381f
--- /dev/null
+++ b/drivers/acpi/nfit/intel.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright(c) 2018 Intel Corporation. All rights reserved.
+ * Intel-specific definitions for NVDIMM Firmware Interface Table - NFIT
+ */
+#ifndef _NFIT_INTEL_H_
+#define _NFIT_INTEL_H_
+
+#define ND_INTEL_SMART 1
+
+#define ND_INTEL_SMART_SHUTDOWN_COUNT_VALID (1 << 5)
+#define ND_INTEL_SMART_SHUTDOWN_VALID (1 << 10)
+
+struct nd_intel_smart {
+ u32 status;
+ union {
+ struct {
+ u32 flags;
+ u8 reserved0[4];
+ u8 health;
+ u8 spares;
+ u8 life_used;
+ u8 alarm_flags;
+ u16 media_temperature;
+ u16 ctrl_temperature;
+ u32 shutdown_count;
+ u8 ait_status;
+ u16 pmic_temperature;
+ u8 reserved1[8];
+ u8 shutdown_state;
+ u32 vendor_size;
+ u8 vendor_data[92];
+ } __packed;
+ u8 data[128];
+ };
+} __packed;
+
+#endif
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index d1274ea2d251..df0f6b8407e7 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -118,10 +118,8 @@ enum nfit_dimm_notifiers {
};
enum nfit_ars_state {
- ARS_REQ,
- ARS_REQ_REDO,
- ARS_DONE,
- ARS_SHORT,
+ ARS_REQ_SHORT,
+ ARS_REQ_LONG,
ARS_FAILED,
};
@@ -159,6 +157,13 @@ struct nfit_memdev {
struct acpi_nfit_memory_map memdev[0];
};
+enum nfit_mem_flags {
+ NFIT_MEM_LSR,
+ NFIT_MEM_LSW,
+ NFIT_MEM_DIRTY,
+ NFIT_MEM_DIRTY_COUNT,
+};
+
/* assembled tables for a given dimm/memory-device */
struct nfit_mem {
struct nvdimm *nvdimm;
@@ -178,9 +183,9 @@ struct nfit_mem {
struct acpi_nfit_desc *acpi_desc;
struct resource *flush_wpq;
unsigned long dsm_mask;
+ unsigned long flags;
+ u32 dirty_shutdown;
int family;
- bool has_lsr;
- bool has_lsw;
};
struct acpi_nfit_desc {
@@ -198,6 +203,7 @@ struct acpi_nfit_desc {
struct device *dev;
u8 ars_start_flags;
struct nd_cmd_ars_status *ars_status;
+ struct nfit_spa *scrub_spa;
struct delayed_work dwork;
struct list_head list;
struct kernfs_node *scrub_count_state;
@@ -252,7 +258,8 @@ struct nfit_blk {
extern struct list_head acpi_descs;
extern struct mutex acpi_desc_lock;
-int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags);
+int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
+ enum nfit_ars_state req_type);
#ifdef CONFIG_X86_MCE
void nfit_mce_register(void);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7433035ded95..707aafc7c2aa 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -421,7 +421,8 @@ out:
}
EXPORT_SYMBOL(acpi_pci_osc_control_set);
-static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
+static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm,
+ bool is_pcie)
{
u32 support, control, requested;
acpi_status status;
@@ -455,9 +456,15 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
decode_osc_support(root, "OS supports", support);
status = acpi_pci_osc_support(root, support);
if (ACPI_FAILURE(status)) {
- dev_info(&device->dev, "_OSC failed (%s); disabling ASPM\n",
- acpi_format_exception(status));
*no_aspm = 1;
+
+ /* _OSC is optional for PCI host bridges */
+ if ((status == AE_NOT_FOUND) && !is_pcie)
+ return;
+
+ dev_info(&device->dev, "_OSC failed (%s)%s\n",
+ acpi_format_exception(status),
+ pcie_aspm_support_enabled() ? "; disabling ASPM" : "");
return;
}
@@ -533,6 +540,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_handle handle = device->handle;
int no_aspm = 0;
bool hotadd = system_state == SYSTEM_RUNNING;
+ bool is_pcie;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -590,7 +598,8 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->mcfg_addr = acpi_pci_root_get_mcfg_addr(handle);
- negotiate_os_control(root, &no_aspm);
+ is_pcie = strcmp(acpi_device_hid(device), "PNP0A08") == 0;
+ negotiate_os_control(root, &no_aspm, is_pcie);
/*
* TBD: Need PCI interface for enumeration/configuration of roots.
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 693cf05b0cc4..8c7c4583b52d 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -24,11 +24,15 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const guid_t prp_guid =
+static const guid_t prp_guids[] = {
+ /* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
-/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01),
+ /* Hotplug in D3 GUID: 6211e2c0-58a3-4af3-90e1-927a4e0c55a4 */
+ GUID_INIT(0x6211e2c0, 0x58a3, 0x4af3,
+ 0x90, 0xe1, 0x92, 0x7a, 0x4e, 0x0c, 0x55, 0xa4),
+};
+
static const guid_t ads_guid =
GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
@@ -56,6 +60,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
dn->name = link->package.elements[0].string.pointer;
dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
+ INIT_LIST_HEAD(&dn->data.properties);
INIT_LIST_HEAD(&dn->data.subnodes);
result = acpi_extract_properties(desc, &dn->data);
@@ -288,6 +293,35 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
adev->flags.of_compatible_ok = 1;
}
+static bool acpi_is_property_guid(const guid_t *guid)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(prp_guids); i++) {
+ if (guid_equal(guid, &prp_guids[i]))
+ return true;
+ }
+
+ return false;
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties)
+{
+ struct acpi_device_properties *props;
+
+ props = kzalloc(sizeof(*props), GFP_KERNEL);
+ if (props) {
+ INIT_LIST_HEAD(&props->list);
+ props->guid = guid;
+ props->properties = properties;
+ list_add_tail(&props->list, &data->properties);
+ }
+
+ return props;
+}
+
static bool acpi_extract_properties(const union acpi_object *desc,
struct acpi_device_data *data)
{
@@ -312,7 +346,7 @@ static bool acpi_extract_properties(const union acpi_object *desc,
properties->type != ACPI_TYPE_PACKAGE)
break;
- if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
+ if (!acpi_is_property_guid((guid_t *)guid->buffer.pointer))
continue;
/*
@@ -320,13 +354,13 @@ static bool acpi_extract_properties(const union acpi_object *desc,
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
- break;
+ continue;
- data->properties = properties;
- return true;
+ acpi_data_add_props(data, (const guid_t *)guid->buffer.pointer,
+ properties);
}
- return false;
+ return !list_empty(&data->properties);
}
void acpi_init_properties(struct acpi_device *adev)
@@ -336,6 +370,7 @@ void acpi_init_properties(struct acpi_device *adev)
acpi_status status;
bool acpi_of = false;
+ INIT_LIST_HEAD(&adev->data.properties);
INIT_LIST_HEAD(&adev->data.subnodes);
if (!adev->handle)
@@ -398,11 +433,16 @@ static void acpi_destroy_nondev_subnodes(struct list_head *list)
void acpi_free_properties(struct acpi_device *adev)
{
+ struct acpi_device_properties *props, *tmp;
+
acpi_destroy_nondev_subnodes(&adev->data.subnodes);
ACPI_FREE((void *)adev->data.pointer);
adev->data.of_compatible = NULL;
adev->data.pointer = NULL;
- adev->data.properties = NULL;
+ list_for_each_entry_safe(props, tmp, &adev->data.properties, list) {
+ list_del(&props->list);
+ kfree(props);
+ }
}
/**
@@ -427,32 +467,37 @@ static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
- const union acpi_object *properties;
- int i;
+ const struct acpi_device_properties *props;
if (!data || !name)
return -EINVAL;
- if (!data->pointer || !data->properties)
+ if (!data->pointer || list_empty(&data->properties))
return -EINVAL;
- properties = data->properties;
- for (i = 0; i < properties->package.count; i++) {
- const union acpi_object *propname, *propvalue;
- const union acpi_object *property;
+ list_for_each_entry(props, &data->properties, list) {
+ const union acpi_object *properties;
+ unsigned int i;
- property = &properties->package.elements[i];
+ properties = props->properties;
+ for (i = 0; i < properties->package.count; i++) {
+ const union acpi_object *propname, *propvalue;
+ const union acpi_object *property;
- propname = &property->package.elements[0];
- propvalue = &property->package.elements[1];
+ property = &properties->package.elements[i];
- if (!strcmp(name, propname->string.pointer)) {
- if (type != ACPI_TYPE_ANY && propvalue->type != type)
- return -EPROTO;
- if (obj)
- *obj = propvalue;
+ propname = &property->package.elements[0];
+ propvalue = &property->package.elements[1];
- return 0;
+ if (!strcmp(name, propname->string.pointer)) {
+ if (type != ACPI_TYPE_ANY &&
+ propvalue->type != type)
+ return -EPROTO;
+ if (obj)
+ *obj = propvalue;
+
+ return 0;
+ }
}
}
return -EINVAL;
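
Note: with device properties now kept as a list of per-GUID sets, lookup degrades to a walk over all recognized sets, as the rewritten loop above shows. A compact userspace model of that walk (all names and the one-property-per-set simplification are illustrative):

    #include <stdio.h>
    #include <string.h>

    /* One recognized GUID's property package, reduced to a single
     * name/value pair for brevity. */
    struct prop_set {
        const char *guid;  /* stand-in for guid_t */
        const char *name;
        int value;
        struct prop_set *next;
    };

    static int get_property(const struct prop_set *sets, const char *name,
                            int *out)
    {
        for (; sets; sets = sets->next) {
            if (strcmp(sets->name, name) == 0) {
                *out = sets->value;
                return 0;
            }
        }
        return -1; /* mirrors the -EINVAL fall-through above */
    }

    int main(void)
    {
        struct prop_set hotplug = { "6211e2c0-...", "HotPlugSupportInD3", 1, NULL };
        struct prop_set dsd = { "daffd814-...", "clock-frequency", 100, &hotplug };
        int v;

        if (get_property(&dsd, "HotPlugSupportInD3", &v) == 0)
            printf("HotPlugSupportInD3 = %d\n", v);
        return 0;
    }
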
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
index bb1984f6c9fe..b7c98ff82d78 100644
--- a/drivers/acpi/x86/apple.c
+++ b/drivers/acpi/x86/apple.c
@@ -132,8 +132,8 @@ void acpi_extract_apple_properties(struct acpi_device *adev)
}
WARN_ON(free_space != (void *)newprops + newsize);
- adev->data.properties = newprops;
adev->data.pointer = newprops;
+ acpi_data_add_props(&adev->data, &apple_prp_guid, newprops);
out_free:
ACPI_FREE(props);
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 432e9ad77070..51e8250d113f 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -10,7 +10,7 @@ if ANDROID
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
- depends on MMU
+ depends on MMU && !CPU_CACHE_VIVT
default n
---help---
Binder is used in Android for both communication between processes,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d58763b6b009..cb30a524d16d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -71,6 +71,7 @@
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
+#include <linux/syscalls.h>
#include <uapi/linux/android/binder.h>
@@ -457,9 +458,8 @@ struct binder_ref {
};
enum binder_deferred_state {
- BINDER_DEFERRED_PUT_FILES = 0x01,
- BINDER_DEFERRED_FLUSH = 0x02,
- BINDER_DEFERRED_RELEASE = 0x04,
+ BINDER_DEFERRED_FLUSH = 0x01,
+ BINDER_DEFERRED_RELEASE = 0x02,
};
/**
@@ -480,9 +480,6 @@ enum binder_deferred_state {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
- * @files files_struct for process
- * (protected by @files_lock)
- * @files_lock mutex to protect @files
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -527,8 +524,6 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
- struct files_struct *files;
- struct mutex files_lock;
struct hlist_node deferred_work_node;
int deferred_work;
bool is_dead;
@@ -611,6 +606,23 @@ struct binder_thread {
bool is_dead;
};
+/**
+ * struct binder_txn_fd_fixup - transaction fd fixup list element
+ * @fixup_entry: list entry
+ * @file: struct file to be associated with new fd
+ * @offset: offset in buffer data to this fixup
+ *
+ * List element for fd fixups in a transaction. Since file
+ * descriptors need to be allocated in the context of the
+ * target process, we pass each fd to be processed in this
+ * struct.
+ */
+struct binder_txn_fd_fixup {
+ struct list_head fixup_entry;
+ struct file *file;
+ size_t offset;
+};
+
struct binder_transaction {
int debug_id;
struct binder_work work;
@@ -628,6 +640,7 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ struct list_head fd_fixups;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -822,6 +835,7 @@ static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
}
@@ -839,6 +853,7 @@ static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
struct binder_work *work)
{
+ WARN_ON(!list_empty(&thread->waiting_thread_node));
binder_enqueue_work_ilocked(work, &thread->todo);
thread->process_todo = true;
}
@@ -920,66 +935,6 @@ static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
- unsigned long rlim_cur;
- unsigned long irqs;
- int ret;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- ret = -ESRCH;
- goto err;
- }
- if (!lock_task_sighand(proc->tsk, &irqs)) {
- ret = -EMFILE;
- goto err;
- }
- rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
- unlock_task_sighand(proc->tsk, &irqs);
-
- ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
-err:
- mutex_unlock(&proc->files_lock);
- return ret;
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
- struct binder_proc *proc, unsigned int fd, struct file *file)
-{
- mutex_lock(&proc->files_lock);
- if (proc->files)
- __fd_install(proc->files, fd, file);
- mutex_unlock(&proc->files_lock);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
- int retval;
-
- mutex_lock(&proc->files_lock);
- if (proc->files == NULL) {
- retval = -ESRCH;
- goto err;
- }
- retval = __close_fd(proc->files, fd);
- /* can't restart close syscall because file table entry was cleared */
- if (unlikely(retval == -ERESTARTSYS ||
- retval == -ERESTARTNOINTR ||
- retval == -ERESTARTNOHAND ||
- retval == -ERESTART_RESTARTBLOCK))
- retval = -EINTR;
-err:
- mutex_unlock(&proc->files_lock);
- return retval;
-}
-
static bool binder_has_work_ilocked(struct binder_thread *thread,
bool do_proc_work)
{
@@ -1270,19 +1225,12 @@ static int binder_inc_node_nilocked(struct binder_node *node, int strong,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
+ struct binder_thread *thread = container_of(target_list,
+ struct binder_thread, todo);
binder_dequeue_work_ilocked(&node->work);
- /*
- * Note: this function is the only place where we queue
- * directly to a thread->todo without using the
- * corresponding binder_enqueue_thread_work() helper
- * functions; in this case it's ok to not set the
- * process_todo flag, since we know this node work will
- * always be followed by other work that starts queue
- * processing: in case of synchronous transactions, a
- * BR_REPLY or BR_ERROR; in case of oneway
- * transactions, a BR_TRANSACTION_COMPLETE.
- */
- binder_enqueue_work_ilocked(&node->work, target_list);
+ BUG_ON(&thread->todo != target_list);
+ binder_enqueue_deferred_thread_work_ilocked(thread,
+ &node->work);
}
} else {
if (!internal)
@@ -1958,10 +1906,32 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
return NULL;
}
+/**
+ * binder_free_txn_fixups() - free unprocessed fd fixups
+ * @t: binder transaction for t->from
+ *
+ * If the transaction is being torn down prior to being
+ * processed by the target process, free all of the
+ * fd fixups and fput the file structs. It is safe to
+ * call this function after the fixups have been
+ * processed -- in that case, the list will be empty.
+ */
+static void binder_free_txn_fixups(struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ fput(fixup->file);
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
+}
+
static void binder_free_transaction(struct binder_transaction *t)
{
if (t->buffer)
t->buffer->transaction = NULL;
+ binder_free_txn_fixups(t);
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -2262,12 +2232,17 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_FD: {
- struct binder_fd_object *fp = to_binder_fd_object(hdr);
-
- binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %d\n", fp->fd);
- if (failed_at)
- task_close_fd(proc, fp->fd);
+ /*
+ * No need to close the file here since user-space
+ * closes it for successfully delivered
+ * transactions. For transactions that weren't
+ * delivered, the new fd was never allocated so
+ * there is no need to close and the fput on the
+ * file is done when the transaction is torn
+ * down.
+ */
+ WARN_ON(failed_at &&
+ proc->tsk == current->group_leader);
} break;
case BINDER_TYPE_PTR:
/*
@@ -2283,6 +2258,15 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
size_t fd_index;
binder_size_t fd_buf_size;
+ if (proc->tsk != current->group_leader) {
+ /*
+ * Nothing to do if running in sender context
+ * The fd fixups have not been applied so no
+ * fds need to be closed.
+ */
+ continue;
+ }
+
fda = to_binder_fd_array_object(hdr);
parent = binder_validate_ptr(buffer, fda->parent,
off_start,
@@ -2315,7 +2299,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
- task_close_fd(proc, fd_array[fd_index]);
+ ksys_close(fd_array[fd_index]);
} break;
default:
pr_err("transaction release %d bad object type %x\n",
@@ -2447,17 +2431,18 @@ done:
return ret;
}
-static int binder_translate_fd(int fd,
+static int binder_translate_fd(u32 *fdp,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
- int target_fd;
+ struct binder_txn_fd_fixup *fixup;
struct file *file;
- int ret;
+ int ret = 0;
bool target_allows_fd;
+ int fd = *fdp;
if (in_reply_to)
target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
@@ -2485,19 +2470,24 @@ static int binder_translate_fd(int fd,
goto err_security;
}
- target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
- if (target_fd < 0) {
+ /*
+ * Add fixup record for this transaction. The allocation
+ * of the fd in the target needs to be done from a
+ * target thread.
+ */
+ fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
+ if (!fixup) {
ret = -ENOMEM;
- goto err_get_unused_fd;
+ goto err_alloc;
}
- task_fd_install(target_proc, target_fd, file);
- trace_binder_transaction_fd(t, fd, target_fd);
- binder_debug(BINDER_DEBUG_TRANSACTION, " fd %d -> %d\n",
- fd, target_fd);
+ fixup->file = file;
+ fixup->offset = (uintptr_t)fdp - (uintptr_t)t->buffer->data;
+ trace_binder_transaction_fd_send(t, fd, fixup->offset);
+ list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
- return target_fd;
+ return ret;
-err_get_unused_fd:
+err_alloc:
err_security:
fput(file);
err_fget:
@@ -2511,8 +2501,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
- binder_size_t fdi, fd_buf_size, num_installed_fds;
- int target_fd;
+ binder_size_t fdi, fd_buf_size;
uintptr_t parent_buffer;
u32 *fd_array;
struct binder_proc *proc = thread->proc;
@@ -2544,23 +2533,12 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
return -EINVAL;
}
for (fdi = 0; fdi < fda->num_fds; fdi++) {
- target_fd = binder_translate_fd(fd_array[fdi], t, thread,
+ int ret = binder_translate_fd(&fd_array[fdi], t, thread,
in_reply_to);
- if (target_fd < 0)
- goto err_translate_fd_failed;
- fd_array[fdi] = target_fd;
+ if (ret < 0)
+ return ret;
}
return 0;
-
-err_translate_fd_failed:
- /*
- * Failed to allocate fd or security error, free fds
- * installed so far.
- */
- num_installed_fds = fdi;
- for (fdi = 0; fdi < num_installed_fds; fdi++)
- task_close_fd(target_proc, fd_array[fdi]);
- return target_fd;
}
static int binder_fixup_parent(struct binder_transaction *t,
@@ -2723,6 +2701,7 @@ static void binder_transaction(struct binder_proc *proc,
{
int ret;
struct binder_transaction *t;
+ struct binder_work *w;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
@@ -2864,6 +2843,29 @@ static void binder_transaction(struct binder_proc *proc,
goto err_invalid_target_handle;
}
binder_inner_proc_lock(proc);
+
+ w = list_first_entry_or_null(&thread->todo,
+ struct binder_work, entry);
+ if (!(tr->flags & TF_ONE_WAY) && w &&
+ w->type == BINDER_WORK_TRANSACTION) {
+ /*
+ * Do not allow new outgoing transaction from a
+ * thread that has a transaction at the head of
+ * its todo list. Only need to check the head
+ * because binder_select_thread_ilocked picks a
+ * thread from proc->waiting_threads to enqueue
+ * the transaction, and nothing is queued to the
+ * todo list while the thread is on waiting_threads.
+ */
+ binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
+ proc->pid, thread->pid);
+ binder_inner_proc_unlock(proc);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
+ goto err_bad_todo_list;
+ }
+
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
@@ -2911,6 +2913,7 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_alloc_t_failed;
}
+ INIT_LIST_HEAD(&t->fd_fixups);
binder_stats_created(BINDER_STAT_TRANSACTION);
spin_lock_init(&t->lock);
@@ -3066,17 +3069,16 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_FD: {
struct binder_fd_object *fp = to_binder_fd_object(hdr);
- int target_fd = binder_translate_fd(fp->fd, t, thread,
- in_reply_to);
+ int ret = binder_translate_fd(&fp->fd, t, thread,
+ in_reply_to);
- if (target_fd < 0) {
+ if (ret < 0) {
return_error = BR_FAILED_REPLY;
- return_error_param = target_fd;
+ return_error_param = ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
- fp->fd = target_fd;
} break;
case BINDER_TYPE_FDA: {
struct binder_fd_array_object *fda =
@@ -3233,6 +3235,7 @@ err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
if (target_node)
@@ -3247,6 +3250,7 @@ err_alloc_tcomplete_failed:
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
err_alloc_t_failed:
+err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
err_dead_binder:
@@ -3294,6 +3298,47 @@ err_invalid_target_handle:
}
}
+/**
+ * binder_free_buf() - free the specified buffer
+ * @proc: binder proc that owns buffer
+ * @buffer: buffer to be freed
+ *
+ * If the buffer is for an async transaction, enqueue the next async
+ * transaction from the node.
+ *
+ * Clean up the buffer and free it.
+ */
+static void
+binder_free_buf(struct binder_proc *proc, struct binder_buffer *buffer)
+{
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = false;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
+ }
+ trace_binder_transaction_buffer_release(buffer);
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_alloc_free_buf(&proc->alloc, buffer);
+}
+
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
@@ -3480,33 +3525,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
-
- if (buffer->transaction) {
- buffer->transaction->buffer = NULL;
- buffer->transaction = NULL;
- }
- if (buffer->async_transaction && buffer->target_node) {
- struct binder_node *buf_node;
- struct binder_work *w;
-
- buf_node = buffer->target_node;
- binder_node_inner_lock(buf_node);
- BUG_ON(!buf_node->has_async_transaction);
- BUG_ON(buf_node->proc != proc);
- w = binder_dequeue_work_head_ilocked(
- &buf_node->async_todo);
- if (!w) {
- buf_node->has_async_transaction = false;
- } else {
- binder_enqueue_work_ilocked(
- w, &proc->todo);
- binder_wakeup_proc_ilocked(proc);
- }
- binder_node_inner_unlock(buf_node);
- }
- trace_binder_transaction_buffer_release(buffer);
- binder_transaction_buffer_release(proc, buffer, NULL);
- binder_alloc_free_buf(&proc->alloc, buffer);
+ binder_free_buf(proc, buffer);
break;
}
@@ -3829,6 +3848,76 @@ static int binder_wait_for_work(struct binder_thread *thread,
return ret;
}
+/**
+ * binder_apply_fd_fixups() - finish fd translation
+ * @t: binder transaction with list of fd fixups
+ *
+ * Now that we are in the context of the transaction target
+ * process, we can allocate and install fds. Process the
+ * list of fds to translate and fixup the buffer with the
+ * new fds.
+ *
+ * If we fail to allocate an fd, then free the resources by
+ * fput'ing files that have not been processed and ksys_close'ing
+ * any fds that have already been allocated.
+ */
+static int binder_apply_fd_fixups(struct binder_transaction *t)
+{
+ struct binder_txn_fd_fixup *fixup, *tmp;
+ int ret = 0;
+
+ list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ u32 *fdp;
+
+ if (fd < 0) {
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "failed fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ ret = -ENOMEM;
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "fd fixup txn %d fd %d\n",
+ t->debug_id, fd);
+ trace_binder_transaction_fd_recv(t, fd, fixup->offset);
+ fd_install(fd, fixup->file);
+ fixup->file = NULL;
+ fdp = (u32 *)(t->buffer->data + fixup->offset);
+ /*
+ * This store can cause problems for CPUs with a
+ * VIVT cache (e.g. ARMv5) since the cache cannot
+ * detect virtual aliases to the same physical cacheline.
+ * To support VIVT, this address and the user-space VA
+ * would both need to be flushed. Since this kernel
+ * VA is not constructed via page_to_virt(), we can't
+ * use flush_dcache_page() on it, so we'd have to use
+ * an internal function. If devices with VIVT ever
+ * need to run Android, we'll either need to go back
+ * to patching the translated fd from the sender side
+ * (using the non-standard kernel functions), or rework
+ * how the kernel uses the buffer to use page_to_virt()
+ * addresses instead of allocating in our own vm area.
+ *
+ * For now, we disable compilation if CONFIG_CPU_CACHE_VIVT.
+ */
+ *fdp = fd;
+ }
+ list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
+ if (fixup->file) {
+ fput(fixup->file);
+ } else if (ret) {
+ u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
+
+ ksys_close(*fdp);
+ }
+ list_del(&fixup->fixup_entry);
+ kfree(fixup);
+ }
+
+ return ret;
+}
+
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
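
Note: taken together, binder_translate_fd() (sender side) and binder_apply_fd_fixups() (receiver side) replace direct manipulation of the target's file table with a record-then-patch scheme. A userspace analogue of the receiver's patch step, with F_DUPFD_CLOEXEC standing in for get_unused_fd_flags()/fd_install() (all names illustrative):

    #include <fcntl.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <unistd.h>

    /* The "sender" records which buffer offsets hold fds; the
     * "receiver" later allocates fds in its own table and patches
     * the buffer, as binder_apply_fd_fixups() does above. */
    struct fixup {
        int src_fd;          /* stands in for the pinned struct file */
        size_t offset;       /* where the fd value lives in the buffer */
        struct fixup *next;
    };

    static int apply_fixups(const struct fixup *list, unsigned char *buf)
    {
        for (; list; list = list->next) {
            int fd = fcntl(list->src_fd, F_DUPFD_CLOEXEC, 0);

            if (fd < 0)
                return -1; /* the kernel unwinds here: fput + close */
            *(int *)(buf + list->offset) = fd; /* the fixup itself */
        }
        return 0;
    }

    int main(void)
    {
        unsigned char buf[sizeof(int)] = { 0 };
        struct fixup fx = { STDOUT_FILENO, 0, NULL };

        if (apply_fixups(&fx, buf) == 0)
            dprintf(*(int *)buf, "wrote via the fixed-up fd\n");
        return 0;
    }
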
@@ -4110,6 +4199,34 @@ retry:
tr.sender_pid = 0;
}
+ ret = binder_apply_fd_fixups(t);
+ if (ret) {
+ struct binder_buffer *buffer = t->buffer;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ int tid = t->debug_id;
+
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
+ buffer->transaction = NULL;
+ binder_cleanup_transaction(t, "fd fixups failed",
+ BR_FAILED_REPLY);
+ binder_free_buf(proc, buffer);
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
+ proc->pid, thread->pid,
+ oneway ? "async " :
+ (cmd == BR_REPLY ? "reply " : ""),
+ tid, BR_FAILED_REPLY, ret, __LINE__);
+ if (cmd == BR_REPLY) {
+ cmd = BR_FAILED_REPLY;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ binder_stat_br(proc, thread, cmd);
+ break;
+ }
+ continue;
+ }
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
tr.data.ptr.buffer = (binder_uintptr_t)
@@ -4544,6 +4661,42 @@ out:
return ret;
}
+static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
+ struct binder_node_info_for_ref *info)
+{
+ struct binder_node *node;
+ struct binder_context *context = proc->context;
+ __u32 handle = info->handle;
+
+ if (info->strong_count || info->weak_count || info->reserved1 ||
+ info->reserved2 || info->reserved3) {
+ binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
+ proc->pid);
+ return -EINVAL;
+ }
+
+ /* This ioctl may only be used by the context manager */
+ mutex_lock(&context->context_mgr_node_lock);
+ if (!context->binder_context_mgr_node ||
+ context->binder_context_mgr_node->proc != proc) {
+ mutex_unlock(&context->context_mgr_node_lock);
+ return -EPERM;
+ }
+ mutex_unlock(&context->context_mgr_node_lock);
+
+ node = binder_get_node_from_ref(proc, handle, true, NULL);
+ if (!node)
+ return -EINVAL;
+
+ info->strong_count = node->local_strong_refs +
+ node->internal_strong_refs;
+ info->weak_count = node->local_weak_refs;
+
+ binder_put_node(node);
+
+ return 0;
+}
+
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
struct binder_node_debug_info *info)
{
@@ -4638,6 +4791,25 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_INFO_FOR_REF: {
+ struct binder_node_info_for_ref info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_info_for_ref(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ break;
+ }
case BINDER_GET_NODE_DEBUG_INFO: {
struct binder_node_debug_info info;
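
Note: from userspace, the new BINDER_GET_NODE_INFO_FOR_REF ioctl is only valid for the context manager, and all fields other than handle must be zero on input. A hedged usage sketch (binder_fd and handle are assumed to be set up elsewhere):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    /* Query strong/weak counts for a node we hold a ref on. */
    static int query_node(int binder_fd, __u32 handle)
    {
        struct binder_node_info_for_ref info = { .handle = handle };

        if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) < 0)
            return -1;
        printf("strong=%u weak=%u\n", info.strong_count, info.weak_count);
        return 0;
    }
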
@@ -4693,7 +4865,6 @@ static void binder_vma_close(struct vm_area_struct *vma)
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
binder_alloc_vma_close(&proc->alloc);
- binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
@@ -4739,9 +4910,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
ret = binder_alloc_mmap_handler(&proc->alloc, vma);
if (ret)
return ret;
- mutex_lock(&proc->files_lock);
- proc->files = get_files_struct(current);
- mutex_unlock(&proc->files_lock);
return 0;
err_bad_arg:
@@ -4765,7 +4933,6 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
- mutex_init(&proc->files_lock);
INIT_LIST_HEAD(&proc->todo);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
@@ -4915,8 +5082,6 @@ static void binder_deferred_release(struct binder_proc *proc)
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->files);
-
mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
mutex_unlock(&binder_procs_lock);
@@ -4998,7 +5163,6 @@ static void binder_deferred_release(struct binder_proc *proc)
static void binder_deferred_func(struct work_struct *work)
{
struct binder_proc *proc;
- struct files_struct *files;
int defer;
@@ -5016,23 +5180,11 @@ static void binder_deferred_func(struct work_struct *work)
}
mutex_unlock(&binder_deferred_lock);
- files = NULL;
- if (defer & BINDER_DEFERRED_PUT_FILES) {
- mutex_lock(&proc->files_lock);
- files = proc->files;
- if (files)
- proc->files = NULL;
- mutex_unlock(&proc->files_lock);
- }
-
if (defer & BINDER_DEFERRED_FLUSH)
binder_deferred_flush(proc);
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
-
- if (files)
- put_files_struct(files);
} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
@@ -5667,12 +5819,11 @@ static int __init binder_init(void)
* Copy the module_parameter string, because we don't want to
* tokenize it in-place.
*/
- device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
+ device_names = kstrdup(binder_devices_param, GFP_KERNEL);
if (!device_names) {
ret = -ENOMEM;
goto err_alloc_device_names_failed;
}
- strcpy(device_names, binder_devices_param);
device_tmp = device_names;
while ((device_name = strsep(&device_tmp, ","))) {
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 588eb3ec3507..14de7ac57a34 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -223,22 +223,40 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
__entry->dest_ref_debug_id, __entry->dest_ref_desc)
);
-TRACE_EVENT(binder_transaction_fd,
- TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
- TP_ARGS(t, src_fd, dest_fd),
+TRACE_EVENT(binder_transaction_fd_send,
+ TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
+ TP_ARGS(t, fd, offset),
TP_STRUCT__entry(
__field(int, debug_id)
- __field(int, src_fd)
- __field(int, dest_fd)
+ __field(int, fd)
+ __field(size_t, offset)
+ ),
+ TP_fast_assign(
+ __entry->debug_id = t->debug_id;
+ __entry->fd = fd;
+ __entry->offset = offset;
+ ),
+ TP_printk("transaction=%d src_fd=%d offset=%zu",
+ __entry->debug_id, __entry->fd, __entry->offset)
+);
+
+TRACE_EVENT(binder_transaction_fd_recv,
+ TP_PROTO(struct binder_transaction *t, int fd, size_t offset),
+ TP_ARGS(t, fd, offset),
+
+ TP_STRUCT__entry(
+ __field(int, debug_id)
+ __field(int, fd)
+ __field(size_t, offset)
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->src_fd = src_fd;
- __entry->dest_fd = dest_fd;
+ __entry->fd = fd;
+ __entry->offset = offset;
),
- TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
- __entry->debug_id, __entry->src_fd, __entry->dest_fd)
+ TP_printk("transaction=%d dest_fd=%d offset=%zu",
+ __entry->debug_id, __entry->fd, __entry->offset)
);
DECLARE_EVENT_CLASS(binder_buffer_class,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index a9dd4ea7467d..6e594644cb1d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4553,6 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* These specific Samsung models/firmware-revs do not handle LPM well */
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index 9b6d7930d1c7..e0bcf9b2dab0 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -873,7 +873,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* like others but it will lock up the whole machine HARD if
* 65536 byte PRD entry is fed. Reduce maximum segment size.
*/
- rc = pci_set_dma_max_seg_size(pdev, 65536 - 512);
+ rc = dma_set_max_seg_size(&pdev->dev, 65536 - 512);
if (rc) {
dev_err(&pdev->dev, "failed to set the maximum segment size\n");
return rc;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 8946dfee4768..e8d676fad0c9 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -536,9 +536,9 @@ int component_bind_all(struct device *master_dev, void *data)
}
if (ret != 0) {
- for (; i--; )
- if (!master->match->compare[i].duplicate) {
- c = master->match->compare[i].component;
+ for (; i > 0; i--)
+ if (!master->match->compare[i - 1].duplicate) {
+ c = master->match->compare[i - 1].component;
component_unbind(c, master, data);
}
}
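
Note: the rewritten loop above unwinds already-bound components in reverse order while keeping the index in range after the loop exits. A standalone demonstration of the idiom:

    #include <stdio.h>

    /* Reverse-order unwind after a failure: undo entries i-1 .. 0 and
     * leave i at exactly 0, rather than letting "for (; i--; )" step
     * it one past the bottom. */
    int main(void)
    {
        int bound[3] = { 1, 2, 3 };
        unsigned int i = 3; /* failure occurred after binding 3 items */

        for (; i > 0; i--)
            printf("unbind %d\n", bound[i - 1]);
        return 0;
    }
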
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index f98a097e73f2..4aaf00d2098b 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -11,6 +11,8 @@
#include <linux/slab.h>
#include <linux/percpu.h>
+#include <asm/sections.h>
+
#include "base.h"
struct devres_node {
@@ -823,6 +825,28 @@ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kstrdup);
/**
+ * devm_kstrdup_const - resource managed conditional string duplication
+ * @dev: device for which to duplicate the string
+ * @s: the string to duplicate
+ * @gfp: the GFP mask used in the kmalloc() call when allocating memory
+ *
+ * Strings allocated by devm_kstrdup_const will be automatically freed when
+ * the associated device is detached.
+ *
+ * RETURNS:
+ * Source string if it is in the .rodata section, otherwise it falls back to
+ * devm_kstrdup.
+ */
+const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
+{
+ if (is_kernel_rodata((unsigned long)s))
+ return s;
+
+ return devm_kstrdup(dev, s, gfp);
+}
+EXPORT_SYMBOL_GPL(devm_kstrdup_const);
+
+/**
* devm_kvasprintf - Allocate resource managed space and format a string
* into that.
* @dev: Device to allocate memory for
@@ -885,11 +909,19 @@ EXPORT_SYMBOL_GPL(devm_kasprintf);
*
* Free memory allocated with devm_kmalloc().
*/
-void devm_kfree(struct device *dev, void *p)
+void devm_kfree(struct device *dev, const void *p)
{
int rc;
- rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
+ /*
+ * Special case: pointer to a string in .rodata returned by
+ * devm_kstrdup_const().
+ */
+ if (unlikely(is_kernel_rodata((unsigned long)p)))
+ return;
+
+ rc = devres_destroy(dev, devm_kmalloc_release,
+ devm_kmalloc_match, (void *)p);
WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);
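
Note: for driver authors, the net effect of devm_kstrdup_const() plus the devm_kfree() change is that string literals can be handed around without a heap copy. A sketch of the intended use in a hypothetical probe routine ("foo-default" and the function name are made up):

    /* Hypothetical driver code, sketching the intended usage. */
    static int foo_probe(struct platform_device *pdev)
    {
        const char *label;

        label = devm_kstrdup_const(&pdev->dev, "foo-default", GFP_KERNEL);
        if (!label)
            return -ENOMEM;

        dev_info(&pdev->dev, "using label %s\n", label);
        /* No explicit free needed; devm_kfree() would also be safe,
         * since it is now a no-op for .rodata pointers. */
        return 0;
    }
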
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index f7768077e817..b93fc862d365 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -252,7 +252,7 @@ static int dev_rmdir(const char *name)
static int delete_path(const char *nodepath)
{
- const char *path;
+ char *path;
int err = 0;
path = kstrdup(nodepath, GFP_KERNEL);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 60d6cc618f1c..f39a920496fb 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -321,11 +321,12 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* Returns an irqdomain for @nvec interrupts
*/
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data)
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data)
{
struct platform_msi_priv_data *data;
struct irq_domain *domain;
@@ -336,7 +337,8 @@ platform_msi_create_device_domain(struct device *dev,
return NULL;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0, nvec,
+ domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
goto free_priv;
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
index 7033a4beda66..254ee7d54e91 100644
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -45,7 +45,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
char cms[LO_NAME_SIZE]; /* cipher-mode string */
char *mode;
char *cmsp = cms; /* c-m string pointer */
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
/* encryption breaks for non sector aligned offsets */
@@ -80,13 +80,13 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
*cmsp++ = ')';
*cmsp = 0;
- tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
- err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
+ err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
+ info->lo_encrypt_key_size);
+
if (err != 0)
goto out_free_tfm;
@@ -94,7 +94,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
return 0;
out_free_tfm:
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
out:
return err;
@@ -109,8 +109,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
struct page *loop_page, unsigned loop_off,
int size, sector_t IV)
{
- struct crypto_skcipher *tfm = lo->key_data;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ struct crypto_sync_skcipher *tfm = lo->key_data;
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg_out;
struct scatterlist sg_in;
@@ -119,7 +119,7 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
unsigned in_offs, out_offs;
int err;
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
NULL, NULL);
@@ -175,9 +175,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
static int
cryptoloop_release(struct loop_device *lo)
{
- struct crypto_skcipher *tfm = lo->key_data;
+ struct crypto_sync_skcipher *tfm = lo->key_data;
if (tfm != NULL) {
- crypto_free_skcipher(tfm);
+ crypto_free_sync_skcipher(tfm);
lo->key_data = NULL;
return 0;
}
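
Note: the conversion above follows the general sync-skcipher pattern: the _sync_ allocator guarantees a synchronous algorithm, which is what makes the on-stack request legal. A hedged outline of that pattern, independent of cryptoloop (the cipher name is chosen arbitrarily and setkey is elided; see cryptoloop_init() above for that step):

    static int example_encrypt(struct scatterlist *src, struct scatterlist *dst,
                               unsigned int len, u8 *iv)
    {
        struct crypto_sync_skcipher *tfm;
        int err;

        tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
        if (IS_ERR(tfm))
            return PTR_ERR(tfm);

        {
            SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

            skcipher_request_set_sync_tfm(req, tfm);
            skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                          NULL, NULL);
            skcipher_request_set_crypt(req, src, dst, len, iv);
            err = crypto_skcipher_encrypt(req);
            skcipher_request_zero(req);
        }
        crypto_free_sync_skcipher(tfm);
        return err;
    }
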
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index 34e0030f0592..7685df43f1ef 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -87,7 +87,9 @@ struct nullb {
#ifdef CONFIG_BLK_DEV_ZONED
int null_zone_init(struct nullb_device *dev);
void null_zone_exit(struct nullb_device *dev);
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio);
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors);
void null_zone_reset(struct nullb_cmd *cmd, sector_t sector);
@@ -97,10 +99,11 @@ static inline int null_zone_init(struct nullb_device *dev)
return -EINVAL;
}
static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_zone_report(struct nullb *nullb,
- struct bio *bio)
+static inline int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones, gfp_t gfp_mask)
{
- return BLK_STS_NOTSUPP;
+ return -EOPNOTSUPP;
}
static inline void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
unsigned int nr_sectors)
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index e94591021682..09339203dfba 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1129,34 +1129,12 @@ static void null_restart_queue_async(struct nullb *nullb)
blk_mq_start_stopped_hw_queues(q, true);
}
-static bool cmd_report_zone(struct nullb *nullb, struct nullb_cmd *cmd)
-{
- struct nullb_device *dev = cmd->nq->dev;
-
- if (dev->queue_mode == NULL_Q_BIO) {
- if (bio_op(cmd->bio) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->bio);
- return true;
- }
- } else {
- if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) {
- cmd->error = null_zone_report(nullb, cmd->rq->bio);
- return true;
- }
- }
-
- return false;
-}
-
static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
{
struct nullb_device *dev = cmd->nq->dev;
struct nullb *nullb = dev->nullb;
int err = 0;
- if (cmd_report_zone(nullb, cmd))
- goto out;
-
if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
struct request *rq = cmd->rq;
@@ -1443,6 +1421,7 @@ static const struct block_device_operations null_fops = {
.owner = THIS_MODULE,
.open = null_open,
.release = null_release,
+ .report_zones = null_zone_report,
};
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
@@ -1549,6 +1528,13 @@ static int null_gendisk_register(struct nullb *nullb)
disk->queue = nullb->q;
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
+ if (nullb->dev->zoned) {
+ int ret = blk_revalidate_disk_zones(disk);
+
+ if (ret != 0)
+ return ret;
+ }
+
add_disk(disk);
return 0;
}
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 7c6b86d98700..c0b0e4a3fa8f 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -48,54 +48,27 @@ void null_zone_exit(struct nullb_device *dev)
kvfree(dev->zones);
}
-static void null_zone_fill_bio(struct nullb_device *dev, struct bio *bio,
- unsigned int zno, unsigned int nr_zones)
+int null_zone_report(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct blk_zone_report_hdr *hdr = NULL;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
- unsigned int zones_to_cpy;
-
- bio_for_each_segment(bvec, bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone);
-
- if (!hdr) {
- hdr = (struct blk_zone_report_hdr *)addr;
- hdr->nr_zones = nr_zones;
- zones_to_cpy--;
- addr += sizeof(struct blk_zone_report_hdr);
- }
-
- zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones);
-
- memcpy(addr, &dev->zones[zno],
- zones_to_cpy * sizeof(struct blk_zone));
-
- kunmap_atomic(addr);
+ struct nullb *nullb = disk->private_data;
+ struct nullb_device *dev = nullb->dev;
+ unsigned int zno, nrz = 0;
- nr_zones -= zones_to_cpy;
- zno += zones_to_cpy;
+ if (!dev->zoned)
+ /* Not a zoned null device */
+ return -EOPNOTSUPP;
- if (!nr_zones)
- break;
+ zno = null_zone_no(dev, sector);
+ if (zno < dev->nr_zones) {
+ nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
+ memcpy(zones, &dev->zones[zno], nrz * sizeof(struct blk_zone));
}
-}
-blk_status_t null_zone_report(struct nullb *nullb, struct bio *bio)
-{
- struct nullb_device *dev = nullb->dev;
- unsigned int zno = null_zone_no(dev, bio->bi_iter.bi_sector);
- unsigned int nr_zones = dev->nr_zones - zno;
- unsigned int max_zones;
+ *nr_zones = nrz;
- max_zones = (bio->bi_iter.bi_size / sizeof(struct blk_zone)) - 1;
- nr_zones = min_t(unsigned int, nr_zones, max_zones);
- null_zone_fill_bio(nullb->dev, bio, zno, nr_zones);
-
- return BLK_STS_OK;
+ return 0;
}
void null_zone_write(struct nullb_cmd *cmd, sector_t sector,
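
Note: the new interface inverts the data flow: instead of the driver filling a bio, the block layer passes a pre-sized zones[] array, and *nr_zones says how many slots it holds; the driver clamps, copies, and writes the actual count back. A self-contained model of that contract (types are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct zone { unsigned long start; };

    static int report_zones(const struct zone *all, unsigned int total,
                            unsigned int from, struct zone *out,
                            unsigned int *nr)
    {
        unsigned int n = 0;

        if (from < total) {
            n = total - from;
            if (n > *nr)
                n = *nr; /* clamp to the caller's buffer */
            memcpy(out, all + from, n * sizeof(*out));
        }
        *nr = n;
        return 0;
    }

    int main(void)
    {
        struct zone zones[4] = { { 0 }, { 256 }, { 512 }, { 768 } };
        struct zone buf[2];
        unsigned int nr = 2;

        report_zones(zones, 4, 1, buf, &nr);
        printf("got %u zones, first starts at %lu\n", nr, buf[0].start);
        return 0;
    }
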
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 639051502181..0cf4509d575c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -780,7 +780,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
goto failed_enable;
pci_set_master(dev);
- pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
+ dma_set_max_seg_size(&dev->dev, RSXX_HW_BLK_SIZE);
st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
if (st) {
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 7c5fc6942f32..2459dcc04b1c 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -3175,7 +3175,7 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
@@ -3364,7 +3364,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (rc)
- dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
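
Note: both hunks restore the canonical 64-then-32-bit DMA mask fallback; without the second assignment, a successful 32-bit retry left the stale 64-bit error in rc and the probe bailed out anyway. The corrected shape, in a hypothetical helper:

    /* Hypothetical helper showing the corrected fallback pattern. */
    static int example_dma_init(struct pci_dev *pdev)
    {
        int rc;

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
            rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return rc; /* non-zero only if neither mask is usable */
    }
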
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 9eea83ae01c6..56452cabce5b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2493,6 +2493,9 @@ static int blkfront_remove(struct xenbus_device *xbdev)
dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
+ if (!info)
+ return 0;
+
blkif_free(info, 0);
mutex_lock(&info->mutex);
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index 1106c076fa4b..600430685e28 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -191,8 +191,7 @@ static int z2_open(struct block_device *bdev, fmode_t mode)
vfree(vmalloc (size));
}
- vaddr = (unsigned long) __ioremap (paddr, size,
- _PAGE_WRITETHRU);
+ vaddr = (unsigned long)ioremap_wt(paddr, size);
#else
vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index 5d8266c6571f..f0404c6d1ff4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -127,6 +127,16 @@ static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int fsl_mc_dma_configure(struct device *dev)
+{
+ struct device *dma_dev = dev;
+
+ while (dev_is_fsl_mc(dma_dev))
+ dma_dev = dma_dev->parent;
+
+ return of_dma_configure(dev, dma_dev->of_node, 0);
+}
+
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -148,6 +158,7 @@ struct bus_type fsl_mc_bus_type = {
.name = "fsl-mc",
.match = fsl_mc_bus_match,
.uevent = fsl_mc_bus_uevent,
+ .dma_configure = fsl_mc_dma_configure,
.dev_groups = fsl_mc_dev_groups,
};
EXPORT_SYMBOL_GPL(fsl_mc_bus_type);
@@ -188,6 +199,10 @@ struct device_type fsl_mc_bus_dprtc_type = {
.name = "fsl_mc_bus_dprtc"
};
+struct device_type fsl_mc_bus_dpseci_type = {
+ .name = "fsl_mc_bus_dpseci"
+};
+
static struct device_type *fsl_mc_get_device_type(const char *type)
{
static const struct {
@@ -203,6 +218,7 @@ static struct device_type *fsl_mc_get_device_type(const char *type)
{ &fsl_mc_bus_dpmcp_type, "dpmcp" },
{ &fsl_mc_bus_dpmac_type, "dpmac" },
{ &fsl_mc_bus_dprtc_type, "dprtc" },
+ { &fsl_mc_bus_dpseci_type, "dpseci" },
{ NULL, NULL }
};
int i;
@@ -616,6 +632,7 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
mc_dev->icid = parent_mc_dev->icid;
mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK;
mc_dev->dev.dma_mask = &mc_dev->dma_mask;
+ mc_dev->dev.coherent_dma_mask = mc_dev->dma_mask;
dev_set_msi_domain(&mc_dev->dev,
dev_get_msi_domain(&parent_mc_dev->dev));
}
@@ -633,10 +650,6 @@ int fsl_mc_device_add(struct fsl_mc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
- /* Objects are coherent, unless 'no shareability' flag set. */
- if (!(obj_desc->flags & FSL_MC_OBJ_FLAG_NO_MEM_SHAREABILITY))
- arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
-
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -693,8 +706,8 @@ static int parse_mc_ranges(struct device *dev,
*ranges_start = of_get_property(mc_node, "ranges", &ranges_len);
if (!(*ranges_start) || !ranges_len) {
dev_warn(dev,
- "missing or empty ranges property for device tree node '%s'\n",
- mc_node->name);
+ "missing or empty ranges property for device tree node '%pOFn'\n",
+ mc_node);
return 0;
}
@@ -717,7 +730,7 @@ static int parse_mc_ranges(struct device *dev,
tuple_len = range_tuple_cell_count * sizeof(__be32);
if (ranges_len % tuple_len != 0) {
- dev_err(dev, "malformed ranges property '%s'\n", mc_node->name);
+ dev_err(dev, "malformed ranges property '%pOFn'\n", mc_node);
return -EINVAL;
}
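The node->name to %pOFn conversions in this and several later hunks use the printk extension that formats a struct device_node's name directly, so the messages keep working once the cached name pointer is removed from struct device_node. A hedged illustration of the two related specifiers (not part of the patch; the function is hypothetical):

#include <linux/of.h>
#include <linux/printk.h>

static void example_report(struct device_node *np)
{
	pr_warn("node name: %pOFn\n", np);	/* just the node name */
	pr_warn("full path: %pOF\n", np);	/* whole device-tree path */
}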
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index 70db4d5638a6..5b2a11a88951 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -1229,7 +1229,7 @@ mbus_parse_ranges(struct device_node *node,
tuple_len = (*cell_count) * sizeof(__be32);
if (ranges_len % tuple_len) {
- pr_warn("malformed ranges entry '%s'\n", node->name);
+ pr_warn("malformed ranges entry '%pOFn'\n", node);
return -EINVAL;
}
return 0;
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 757e85b81879..a5b8afe3609c 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -327,15 +327,15 @@ static int get_entry_track(int track)
static int gdrom_get_last_session(struct cdrom_device_info *cd_info,
struct cdrom_multisession *ms_info)
{
- int fentry, lentry, track, data, tocuse, err;
+ int fentry, lentry, track, data, err;
+
if (!gd.toc)
return -ENOMEM;
- tocuse = 1;
+
/* Check if GD-ROM */
err = gdrom_readtoc_cmd(gd.toc, 1);
/* Not a GD-ROM so check if standard CD-ROM */
if (err) {
- tocuse = 0;
err = gdrom_readtoc_cmd(gd.toc, 0);
if (err) {
pr_info("Could not get CD table of contents\n");
@@ -794,7 +794,7 @@ static int probe_gdrom(struct platform_device *devptr)
gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
if (IS_ERR(gd.gdrom_rq)) {
- rc = PTR_ERR(gd.gdrom_rq);
+ err = PTR_ERR(gd.gdrom_rq);
gd.gdrom_rq = NULL;
goto probe_fail_requestq;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index aaf9e5afaad4..95be7228f327 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -44,10 +44,10 @@ static unsigned short default_quality; /* = 0; default to "off" */
module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
- "current hwrng entropy estimation per mill");
+ "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
- "default entropy content of hwrng per mill");
+ "default entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c75b6cdf0053..2eb70e76ed35 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -433,9 +433,9 @@ static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS]);
+ __u8 out[CHACHA20_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used);
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -926,7 +926,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
unsigned long flags;
int i, num;
union {
- __u32 block[CHACHA20_BLOCK_WORDS];
+ __u8 block[CHACHA20_BLOCK_SIZE];
__u32 key[8];
} buf;
@@ -973,7 +973,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
static void _extract_crng(struct crng_state *crng,
- __u32 out[CHACHA20_BLOCK_WORDS])
+ __u8 out[CHACHA20_BLOCK_SIZE])
{
unsigned long v, flags;
@@ -990,7 +990,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
+static void extract_crng(__u8 out[CHACHA20_BLOCK_SIZE])
{
struct crng_state *crng = NULL;
@@ -1008,7 +1008,7 @@ static void extract_crng(__u32 out[CHACHA20_BLOCK_WORDS])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+ __u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
unsigned long flags;
__u32 *s, *d;
@@ -1020,14 +1020,14 @@ static void _crng_backtrack_protect(struct crng_state *crng,
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = &tmp[used / sizeof(__u32)];
+ s = (__u32 *) &tmp[used];
d = &crng->state[4];
for (i=0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
+static void crng_backtrack_protect(__u8 tmp[CHACHA20_BLOCK_SIZE], int used)
{
struct crng_state *crng = NULL;
@@ -1043,7 +1043,7 @@ static void crng_backtrack_protect(__u32 tmp[CHACHA20_BLOCK_WORDS], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA20_BLOCK_SIZE;
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1622,7 +1622,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u32 tmp[CHACHA20_BLOCK_WORDS];
+ __u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -2248,7 +2248,7 @@ u64 get_random_u64(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
- extract_crng((__u32 *)batch->entropy_u64);
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
@@ -2278,7 +2278,7 @@ u32 get_random_u32(void)
if (use_lock)
read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
- extract_crng(batch->entropy_u32);
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
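These hunks retype the ChaCha20 block buffers from __u32 words to plain bytes; the __aligned(4) annotations matter because consumers such as _crng_backtrack_protect() still walk the buffer as 32-bit words. A hedged illustration of why the alignment is kept (illustrative, not from the patch):

/* The buffer is byte-addressable, but callers may still read it as
 * 32-bit words; __aligned(4) keeps that cast well-defined. */
__u8 tmp[CHACHA20_BLOCK_SIZE] __aligned(4);
__u32 *words = (__u32 *)tmp;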
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index 18c81cbe4704..536e55d3919f 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -5,7 +5,7 @@
menuconfig TCG_TPM
tristate "TPM Hardware Support"
depends on HAS_IOMEM
- select SECURITYFS
+ imply SECURITYFS
select CRYPTO
select CRYPTO_HASH_INFO
---help---
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index e4a04b2d3c32..99b5133a9d05 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -17,11 +17,36 @@
* License.
*
*/
+#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/workqueue.h>
#include "tpm.h"
#include "tpm-dev.h"
+static struct workqueue_struct *tpm_dev_wq;
+static DEFINE_MUTEX(tpm_dev_wq_lock);
+
+static void tpm_async_work(struct work_struct *work)
+{
+ struct file_priv *priv =
+ container_of(work, struct file_priv, async_work);
+ ssize_t ret;
+
+ mutex_lock(&priv->buffer_mutex);
+ priv->command_enqueued = false;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ }
+ mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
+}
+
static void user_reader_timeout(struct timer_list *t)
{
struct file_priv *priv = from_timer(priv, t, user_read_timer);
@@ -29,27 +54,32 @@ static void user_reader_timeout(struct timer_list *t)
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
- schedule_work(&priv->work);
+ schedule_work(&priv->timeout_work);
}
-static void timeout_work(struct work_struct *work)
+static void tpm_timeout_work(struct work_struct *work)
{
- struct file_priv *priv = container_of(work, struct file_priv, work);
+ struct file_priv *priv = container_of(work, struct file_priv,
+ timeout_work);
mutex_lock(&priv->buffer_mutex);
priv->data_pending = 0;
memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
mutex_unlock(&priv->buffer_mutex);
+ wake_up_interruptible(&priv->async_wait);
}
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv)
+ struct file_priv *priv, struct tpm_space *space)
{
priv->chip = chip;
+ priv->space = space;
+
mutex_init(&priv->buffer_mutex);
timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
- INIT_WORK(&priv->work, timeout_work);
-
+ INIT_WORK(&priv->timeout_work, tpm_timeout_work);
+ INIT_WORK(&priv->async_work, tpm_async_work);
+ init_waitqueue_head(&priv->async_wait);
file->private_data = priv;
}
@@ -61,15 +91,17 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
int rc;
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
mutex_lock(&priv->buffer_mutex);
if (priv->data_pending) {
ret_size = min_t(ssize_t, size, priv->data_pending);
- rc = copy_to_user(buf, priv->data_buffer, ret_size);
- memset(priv->data_buffer, 0, priv->data_pending);
- if (rc)
- ret_size = -EFAULT;
+ if (ret_size > 0) {
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+ memset(priv->data_buffer, 0, priv->data_pending);
+ if (rc)
+ ret_size = -EFAULT;
+ }
priv->data_pending = 0;
}
@@ -79,13 +111,12 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
}
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space)
+ size_t size, loff_t *off)
{
struct file_priv *priv = file->private_data;
- size_t in_size = size;
- ssize_t out_size;
+ int ret = 0;
- if (in_size > TPM_BUFSIZE)
+ if (size > TPM_BUFSIZE)
return -E2BIG;
mutex_lock(&priv->buffer_mutex);
@@ -94,21 +125,20 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* tpm_read or a user_read_timer timeout. This also prevents split
* buffered writes from blocking here.
*/
- if (priv->data_pending != 0) {
- mutex_unlock(&priv->buffer_mutex);
- return -EBUSY;
+ if (priv->data_pending != 0 || priv->command_enqueued) {
+ ret = -EBUSY;
+ goto out;
}
- if (copy_from_user
- (priv->data_buffer, (void __user *) buf, in_size)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EFAULT;
+ if (copy_from_user(priv->data_buffer, buf, size)) {
+ ret = -EFAULT;
+ goto out;
}
- if (in_size < 6 ||
- in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) {
- mutex_unlock(&priv->buffer_mutex);
- return -EINVAL;
+ if (size < 6 ||
+ size < be32_to_cpu(*((__be32 *)(priv->data_buffer + 2)))) {
+ ret = -EINVAL;
+ goto out;
}
/* atomic tpm command send and result receive. We only hold the ops
@@ -116,25 +146,50 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
* the char dev is held open.
*/
if (tpm_try_get_ops(priv->chip)) {
- mutex_unlock(&priv->buffer_mutex);
- return -EPIPE;
+ ret = -EPIPE;
+ goto out;
}
- out_size = tpm_transmit(priv->chip, space, priv->data_buffer,
- sizeof(priv->data_buffer), 0);
- tpm_put_ops(priv->chip);
- if (out_size < 0) {
+ /*
+ * If in nonblocking mode schedule an async job to send
+ * the command and return the size.
+ * In case of error the err code will be returned in
+ * the subsequent read call.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ priv->command_enqueued = true;
+ queue_work(tpm_dev_wq, &priv->async_work);
mutex_unlock(&priv->buffer_mutex);
- return out_size;
+ return size;
}
- priv->data_pending = out_size;
+ ret = tpm_transmit(priv->chip, priv->space, priv->data_buffer,
+ sizeof(priv->data_buffer), 0);
+ tpm_put_ops(priv->chip);
+
+ if (ret > 0) {
+ priv->data_pending = ret;
+ mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ ret = size;
+ }
+out:
mutex_unlock(&priv->buffer_mutex);
+ return ret;
+}
+
+__poll_t tpm_common_poll(struct file *file, poll_table *wait)
+{
+ struct file_priv *priv = file->private_data;
+ __poll_t mask = 0;
+
+ poll_wait(file, &priv->async_wait, wait);
- /* Set a timeout by which the reader must come claim the result */
- mod_timer(&priv->user_read_timer, jiffies + (120 * HZ));
+ if (priv->data_pending)
+ mask = EPOLLIN | EPOLLRDNORM;
+ else
+ mask = EPOLLOUT | EPOLLWRNORM;
- return in_size;
+ return mask;
}
/*
@@ -142,8 +197,24 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
*/
void tpm_common_release(struct file *file, struct file_priv *priv)
{
+ flush_work(&priv->async_work);
del_singleshot_timer_sync(&priv->user_read_timer);
- flush_work(&priv->work);
+ flush_work(&priv->timeout_work);
file->private_data = NULL;
priv->data_pending = 0;
}
+
+int __init tpm_dev_common_init(void)
+{
+ tpm_dev_wq = alloc_workqueue("tpm_dev_wq", WQ_MEM_RECLAIM, 0);
+
+ return !tpm_dev_wq ? -ENOMEM : 0;
+}
+
+void __exit tpm_dev_common_exit(void)
+{
+ if (tpm_dev_wq) {
+ destroy_workqueue(tpm_dev_wq);
+ tpm_dev_wq = NULL;
+ }
+}
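With the async worker, wait queue, and poll hook above in place, a client can drive the character device without blocking in write(). A hypothetical userspace sketch (device path, error handling, and the helper name are illustrative, not part of the patch):

/* Hypothetical client for the nonblocking TPM path. */
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int tpm_send_nonblock(const unsigned char *cmd, size_t len,
			     unsigned char *resp, size_t resp_len)
{
	struct pollfd pfd;
	ssize_t n;
	int fd = open("/dev/tpm0", O_RDWR | O_NONBLOCK);

	if (fd < 0)
		return -1;
	if (write(fd, cmd, len) < 0) {	/* queues tpm_async_work */
		close(fd);
		return -1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;
	poll(&pfd, 1, -1);		/* woken via priv->async_wait */
	n = read(fd, resp, resp_len);	/* result, or the async error code */
	close(fd);
	return n < 0 ? -1 : (int)n;
}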
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index ebd74ab5abef..32f9738f1cb2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -39,7 +39,7 @@ static int tpm_open(struct inode *inode, struct file *file)
if (priv == NULL)
goto out;
- tpm_common_open(file, chip, priv);
+ tpm_common_open(file, chip, priv, NULL);
return 0;
@@ -48,12 +48,6 @@ static int tpm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
-static ssize_t tpm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- return tpm_common_write(file, buf, size, off, NULL);
-}
-
/*
* Called on file close
*/
@@ -73,6 +67,7 @@ const struct file_operations tpm_fops = {
.llseek = no_llseek,
.open = tpm_open,
.read = tpm_common_read,
- .write = tpm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpm_release,
};
diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
index b24cfb4d3ee1..a126b575cb8c 100644
--- a/drivers/char/tpm/tpm-dev.h
+++ b/drivers/char/tpm/tpm-dev.h
@@ -2,27 +2,33 @@
#ifndef _TPM_DEV_H
#define _TPM_DEV_H
+#include <linux/poll.h>
#include "tpm.h"
struct file_priv {
struct tpm_chip *chip;
+ struct tpm_space *space;
- /* Data passed to and from the tpm via the read/write calls */
- size_t data_pending;
+ /* Holds the amount of data passed or an error code from async op */
+ ssize_t data_pending;
struct mutex buffer_mutex;
struct timer_list user_read_timer; /* user needs to claim result */
- struct work_struct work;
+ struct work_struct timeout_work;
+ struct work_struct async_work;
+ wait_queue_head_t async_wait;
+ bool command_enqueued;
u8 data_buffer[TPM_BUFSIZE];
};
void tpm_common_open(struct file *file, struct tpm_chip *chip,
- struct file_priv *priv);
+ struct file_priv *priv, struct tpm_space *space);
ssize_t tpm_common_read(struct file *file, char __user *buf,
size_t size, loff_t *off);
ssize_t tpm_common_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off, struct tpm_space *space);
-void tpm_common_release(struct file *file, struct file_priv *priv);
+ size_t size, loff_t *off);
+__poll_t tpm_common_poll(struct file *file, poll_table *wait);
+void tpm_common_release(struct file *file, struct file_priv *priv);
#endif
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 1a803b0cf980..129f640424b7 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -663,7 +663,8 @@ ssize_t tpm_transmit_cmd(struct tpm_chip *chip, struct tpm_space *space,
return len;
err = be32_to_cpu(header->return_code);
- if (err != 0 && desc)
+ if (err != 0 && err != TPM_ERR_DISABLED && err != TPM_ERR_DEACTIVATED
+ && desc)
dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
if (err)
@@ -1321,7 +1322,8 @@ int tpm_get_random(struct tpm_chip *chip, u8 *out, size_t max)
}
rlength = be32_to_cpu(tpm_cmd.header.out.length);
- if (rlength < offsetof(struct tpm_getrandom_out, rng_data) +
+ if (rlength < TPM_HEADER_SIZE +
+ offsetof(struct tpm_getrandom_out, rng_data) +
recd) {
total = -EFAULT;
break;
@@ -1407,19 +1409,32 @@ static int __init tpm_init(void)
tpmrm_class = class_create(THIS_MODULE, "tpmrm");
if (IS_ERR(tpmrm_class)) {
pr_err("couldn't create tpmrm class\n");
- class_destroy(tpm_class);
- return PTR_ERR(tpmrm_class);
+ rc = PTR_ERR(tpmrm_class);
+ goto out_destroy_tpm_class;
}
rc = alloc_chrdev_region(&tpm_devt, 0, 2*TPM_NUM_DEVICES, "tpm");
if (rc < 0) {
pr_err("tpm: failed to allocate char dev region\n");
- class_destroy(tpmrm_class);
- class_destroy(tpm_class);
- return rc;
+ goto out_destroy_tpmrm_class;
+ }
+
+ rc = tpm_dev_common_init();
+ if (rc) {
+ pr_err("tpm: failed to allocate char dev region\n");
+ goto out_unreg_chrdev;
}
return 0;
+
+out_unreg_chrdev:
+ unregister_chrdev_region(tpm_devt, 2 * TPM_NUM_DEVICES);
+out_destroy_tpmrm_class:
+ class_destroy(tpmrm_class);
+out_destroy_tpm_class:
+ class_destroy(tpm_class);
+
+ return rc;
}
static void __exit tpm_exit(void)
@@ -1428,6 +1443,7 @@ static void __exit tpm_exit(void)
class_destroy(tpm_class);
class_destroy(tpmrm_class);
unregister_chrdev_region(tpm_devt, 2*TPM_NUM_DEVICES);
+ tpm_dev_common_exit();
}
subsys_initcall(tpm_init);
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index f3501d05264f..f20dc8ece348 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -604,4 +604,6 @@ int tpm2_commit_space(struct tpm_chip *chip, struct tpm_space *space,
int tpm_bios_log_setup(struct tpm_chip *chip);
void tpm_bios_log_teardown(struct tpm_chip *chip);
+int tpm_dev_common_init(void);
+void tpm_dev_common_exit(void);
#endif
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index c31b490bd41d..3acf4fd4e5a5 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -329,7 +329,9 @@ int tpm2_get_random(struct tpm_chip *chip, u8 *dest, size_t max)
&buf.data[TPM_HEADER_SIZE];
recd = min_t(u32, be16_to_cpu(out->size), num_bytes);
if (tpm_buf_length(&buf) <
- offsetof(struct tpm2_get_random_out, buffer) + recd) {
+ TPM_HEADER_SIZE +
+ offsetof(struct tpm2_get_random_out, buffer) +
+ recd) {
err = -EFAULT;
goto out;
}
diff --git a/drivers/char/tpm/tpmrm-dev.c b/drivers/char/tpm/tpmrm-dev.c
index 1a0e97a5da5a..0c751a79bbed 100644
--- a/drivers/char/tpm/tpmrm-dev.c
+++ b/drivers/char/tpm/tpmrm-dev.c
@@ -28,7 +28,7 @@ static int tpmrm_open(struct inode *inode, struct file *file)
return -ENOMEM;
}
- tpm_common_open(file, chip, &priv->priv);
+ tpm_common_open(file, chip, &priv->priv, &priv->space);
return 0;
}
@@ -45,21 +45,12 @@ static int tpmrm_release(struct inode *inode, struct file *file)
return 0;
}
-static ssize_t tpmrm_write(struct file *file, const char __user *buf,
- size_t size, loff_t *off)
-{
- struct file_priv *fpriv = file->private_data;
- struct tpmrm_priv *priv = container_of(fpriv, struct tpmrm_priv, priv);
-
- return tpm_common_write(file, buf, size, off, &priv->space);
-}
-
const struct file_operations tpmrm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.open = tpmrm_open,
.read = tpm_common_read,
- .write = tpmrm_write,
+ .write = tpm_common_write,
+ .poll = tpm_common_poll,
.release = tpmrm_release,
};
-
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index 911475d36800..b150f87f38f5 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -264,7 +264,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
return -ENOMEM;
}
- rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
+ rv = xenbus_grant_ring(dev, priv->shr, 1, &gref);
if (rv < 0)
return rv;
diff --git a/drivers/clk/mvebu/clk-cpu.c b/drivers/clk/mvebu/clk-cpu.c
index 072aa38374ce..3045067448fb 100644
--- a/drivers/clk/mvebu/clk-cpu.c
+++ b/drivers/clk/mvebu/clk-cpu.c
@@ -183,7 +183,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
__func__);
- for_each_node_by_type(dn, "cpu")
+ for_each_of_cpu_node(dn)
ncpus++;
cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
@@ -194,7 +194,7 @@ static void __init of_cpu_clk_setup(struct device_node *node)
if (WARN_ON(!clks))
goto clks_out;
- for_each_node_by_type(dn, "cpu") {
+ for_each_of_cpu_node(dn) {
struct clk_init_data init;
struct clk *clk;
char *clk_name = kzalloc(5, GFP_KERNEL);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index db51b2427e8a..e33b21d3f9d8 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -23,8 +23,8 @@ obj-$(CONFIG_FTTMR010_TIMER) += timer-fttmr010.o
obj-$(CONFIG_ROCKCHIP_TIMER) += rockchip_timer.o
obj-$(CONFIG_CLKSRC_NOMADIK_MTU) += nomadik-mtu.o
obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += clksrc-dbx500-prcmu.o
-obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
-obj-$(CONFIG_ORION_TIMER) += time-orion.o
+obj-$(CONFIG_ARMADA_370_XP_TIMER) += timer-armada-370-xp.o
+obj-$(CONFIG_ORION_TIMER) += timer-orion.o
obj-$(CONFIG_BCM2835_TIMER) += bcm2835_timer.o
obj-$(CONFIG_CLPS711X_TIMER) += clps711x-timer.o
obj-$(CONFIG_ATLAS7_TIMER) += timer-atlas7.o
@@ -36,25 +36,25 @@ obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
obj-$(CONFIG_SUN5I_HSTIMER) += timer-sun5i.o
obj-$(CONFIG_MESON6_TIMER) += meson6_timer.o
obj-$(CONFIG_TEGRA_TIMER) += tegra20_timer.o
-obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o
-obj-$(CONFIG_NSPIRE_TIMER) += zevio-timer.o
+obj-$(CONFIG_VT8500_TIMER) += timer-vt8500.o
+obj-$(CONFIG_NSPIRE_TIMER) += timer-zevio.o
obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o
-obj-$(CONFIG_CADENCE_TTC_TIMER) += cadence_ttc_timer.o
-obj-$(CONFIG_CLKSRC_EFM32) += time-efm32.o
+obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o
+obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o
obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o
obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o
-obj-$(CONFIG_CLKSRC_LPC32XX) += time-lpc32xx.o
+obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o
obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o
obj-$(CONFIG_CLKSRC_SAMSUNG_PWM) += samsung_pwm_timer.o
-obj-$(CONFIG_FSL_FTM_TIMER) += fsl_ftm_timer.o
-obj-$(CONFIG_VF_PIT_TIMER) += vf_pit_timer.o
-obj-$(CONFIG_CLKSRC_QCOM) += qcom-timer.o
+obj-$(CONFIG_FSL_FTM_TIMER) += timer-fsl-ftm.o
+obj-$(CONFIG_VF_PIT_TIMER) += timer-vf-pit.o
+obj-$(CONFIG_CLKSRC_QCOM) += timer-qcom.o
obj-$(CONFIG_MTK_TIMER) += timer-mediatek.o
-obj-$(CONFIG_CLKSRC_PISTACHIO) += time-pistachio.o
+obj-$(CONFIG_CLKSRC_PISTACHIO) += timer-pistachio.o
obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o
obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o
obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o
-obj-$(CONFIG_OWL_TIMER) += owl-timer.o
+obj-$(CONFIG_OWL_TIMER) += timer-owl.o
obj-$(CONFIG_SPRD_TIMER) += timer-sprd.o
obj-$(CONFIG_NPCM7XX_TIMER) += timer-npcm7xx.o
@@ -66,7 +66,7 @@ obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp804.o
obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
obj-$(CONFIG_KEYSTONE_TIMER) += timer-keystone.o
obj-$(CONFIG_INTEGRATOR_AP_TIMER) += timer-integrator-ap.o
-obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
+obj-$(CONFIG_CLKSRC_VERSATILE) += timer-versatile.o
obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
diff --git a/drivers/clocksource/asm9260_timer.c b/drivers/clocksource/asm9260_timer.c
index 38cd2feb87c4..fbaee04fd1d9 100644
--- a/drivers/clocksource/asm9260_timer.c
+++ b/drivers/clocksource/asm9260_timer.c
@@ -193,7 +193,7 @@ static int __init asm9260_timer_init(struct device_node *np)
priv.base = of_io_request_and_map(np, 0, np->name);
if (IS_ERR(priv.base)) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return PTR_ERR(priv.base);
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 69866cd8f4bb..db410acd8964 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -22,6 +22,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/clk.h>
+#include <linux/reset.h>
#include <linux/sched_clock.h>
static void __init timer_get_base_and_rate(struct device_node *np,
@@ -29,11 +30,22 @@ static void __init timer_get_base_and_rate(struct device_node *np,
{
struct clk *timer_clk;
struct clk *pclk;
+ struct reset_control *rstc;
*base = of_iomap(np, 0);
if (!*base)
- panic("Unable to map regs for %s", np->name);
+ panic("Unable to map regs for %pOFn", np);
+
+ /*
+ * Reset the timer if the reset control is available, wiping
+ * out the state the firmware may have left it in
+ */
+ rstc = of_reset_control_get(np, NULL);
+ if (!IS_ERR(rstc)) {
+ reset_control_assert(rstc);
+ reset_control_deassert(rstc);
+ }
/*
* Not all implementations use a peripheral clock, so don't panic
@@ -42,8 +54,8 @@ static void __init timer_get_base_and_rate(struct device_node *np,
pclk = of_clk_get_by_name(np, "pclk");
if (!IS_ERR(pclk))
if (clk_prepare_enable(pclk))
- pr_warn("pclk for %s is present, but could not be activated\n",
- np->name);
+ pr_warn("pclk for %pOFn is present, but could not be activated\n",
+ np);
timer_clk = of_clk_get_by_name(np, "timer");
if (IS_ERR(timer_clk))
@@ -57,7 +69,7 @@ static void __init timer_get_base_and_rate(struct device_node *np,
try_clock_freq:
if (of_property_read_u32(np, "clock-freq", rate) &&
of_property_read_u32(np, "clock-frequency", rate))
- panic("No clock nor clock-frequency property for %s", np->name);
+ panic("No clock nor clock-frequency property for %pOFn", np);
}
static void __init add_clockevent(struct device_node *event_timer)
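The reset handling added above asserts and immediately deasserts the line to wipe firmware state; since this runs from __init boot code the reference is never dropped. A hedged standalone sketch, with the reference drop a non-__init caller would also want (the helper name is illustrative):

#include <linux/reset.h>

static void example_pulse_reset(struct device_node *np)
{
	struct reset_control *rstc = of_reset_control_get(np, NULL);

	if (!IS_ERR(rstc)) {
		reset_control_assert(rstc);
		reset_control_deassert(rstc);
		reset_control_put(rstc);	/* drop the reference */
	}
}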
diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c
index 08cd6eaf3795..395837938301 100644
--- a/drivers/clocksource/pxa_timer.c
+++ b/drivers/clocksource/pxa_timer.c
@@ -191,13 +191,13 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_crit("%s: unable to get clk\n", np->name);
+ pr_crit("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -210,7 +210,7 @@ static int __init pxa_timer_dt_init(struct device_node *np)
/* we are only interested in OS-timer0 irq */
irq = irq_of_parse_and_map(np, 0);
if (irq <= 0) {
- pr_crit("%s: unable to parse OS-timer0 irq\n", np->name);
+ pr_crit("%pOFn: unable to parse OS-timer0 irq\n", np);
return -EINVAL;
}
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 6cffd7c6001a..61d5f3b539ce 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas Timer Support - OSTM
*
* Copyright (C) 2017 Renesas Electronics America, Inc.
* Copyright (C) 2017 Chris Brandt
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/of_address.h>
diff --git a/drivers/clocksource/riscv_timer.c b/drivers/clocksource/riscv_timer.c
index 4e8b347e43e2..084e97dc10ed 100644
--- a/drivers/clocksource/riscv_timer.c
+++ b/drivers/clocksource/riscv_timer.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <asm/smp.h>
#include <asm/sbi.h>
/*
@@ -84,13 +85,16 @@ void riscv_timer_interrupt(void)
static int __init riscv_timer_init_dt(struct device_node *n)
{
- int cpu_id = riscv_of_processor_hart(n), error;
+ int cpuid, hartid, error;
struct clocksource *cs;
- if (cpu_id != smp_processor_id())
+ hartid = riscv_of_processor_hartid(n);
+ cpuid = riscv_hartid_to_cpuid(hartid);
+
+ if (cpuid != smp_processor_id())
return 0;
- cs = per_cpu_ptr(&riscv_clocksource, cpu_id);
+ cs = per_cpu_ptr(&riscv_clocksource, cpuid);
clocksource_register_hz(cs, riscv_timebase);
error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
@@ -98,7 +102,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
riscv_timer_starting_cpu, riscv_timer_dying_cpu);
if (error)
pr_err("RISCV timer register failed [%d] for cpu = [%d]\n",
- error, cpu_id);
+ error, cpuid);
return error;
}
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index bbbf37c471a3..55d3e03f2cd4 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - CMT
*
* Copyright (C) 2008 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
@@ -78,18 +70,17 @@ struct sh_cmt_info {
unsigned int channels_mask;
unsigned long width; /* 16 or 32 bit version of hardware block */
- unsigned long overflow_bit;
- unsigned long clear_bits;
+ u32 overflow_bit;
+ u32 clear_bits;
/* callbacks for CMSTR and CMCSR access */
- unsigned long (*read_control)(void __iomem *base, unsigned long offs);
+ u32 (*read_control)(void __iomem *base, unsigned long offs);
void (*write_control)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 value);
/* callbacks for CMCNT and CMCOR access */
- unsigned long (*read_count)(void __iomem *base, unsigned long offs);
- void (*write_count)(void __iomem *base, unsigned long offs,
- unsigned long value);
+ u32 (*read_count)(void __iomem *base, unsigned long offs);
+ void (*write_count)(void __iomem *base, unsigned long offs, u32 value);
};
struct sh_cmt_channel {
@@ -103,13 +94,13 @@ struct sh_cmt_channel {
unsigned int timer_bit;
unsigned long flags;
- unsigned long match_value;
- unsigned long next_match_value;
- unsigned long max_match_value;
+ u32 match_value;
+ u32 next_match_value;
+ u32 max_match_value;
raw_spinlock_t lock;
struct clock_event_device ced;
struct clocksource cs;
- unsigned long total_cycles;
+ u64 total_cycles;
bool cs_enabled;
};
@@ -160,24 +151,22 @@ struct sh_cmt_device {
#define SH_CMT32_CMCSR_CKS_RCLK1 (7 << 0)
#define SH_CMT32_CMCSR_CKS_MASK (7 << 0)
-static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read16(void __iomem *base, unsigned long offs)
{
return ioread16(base + (offs << 1));
}
-static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
+static u32 sh_cmt_read32(void __iomem *base, unsigned long offs)
{
return ioread32(base + (offs << 2));
}
-static void sh_cmt_write16(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write16(void __iomem *base, unsigned long offs, u32 value)
{
iowrite16(value, base + (offs << 1));
}
-static void sh_cmt_write32(void __iomem *base, unsigned long offs,
- unsigned long value)
+static void sh_cmt_write32(void __iomem *base, unsigned long offs, u32 value)
{
iowrite32(value, base + (offs << 2));
}
@@ -242,7 +231,7 @@ static const struct sh_cmt_info sh_cmt_info[] = {
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
-static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
{
if (ch->iostart)
return ch->cmt->info->read_control(ch->iostart, 0);
@@ -250,8 +239,7 @@ static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_channel *ch)
return ch->cmt->info->read_control(ch->cmt->mapbase, 0);
}
-static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch, u32 value)
{
if (ch->iostart)
ch->cmt->info->write_control(ch->iostart, 0, value);
@@ -259,39 +247,35 @@ static inline void sh_cmt_write_cmstr(struct sh_cmt_channel *ch,
ch->cmt->info->write_control(ch->cmt->mapbase, 0, value);
}
-static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcsr(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_control(ch->ioctrl, CMCSR);
}
-static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_control(ch->ioctrl, CMCSR, value);
}
-static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
+static inline u32 sh_cmt_read_cmcnt(struct sh_cmt_channel *ch)
{
return ch->cmt->info->read_count(ch->ioctrl, CMCNT);
}
-static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCNT, value);
}
-static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch,
- unsigned long value)
+static inline void sh_cmt_write_cmcor(struct sh_cmt_channel *ch, u32 value)
{
ch->cmt->info->write_count(ch->ioctrl, CMCOR, value);
}
-static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
- int *has_wrapped)
+static u32 sh_cmt_get_counter(struct sh_cmt_channel *ch, u32 *has_wrapped)
{
- unsigned long v1, v2, v3;
- int o1, o2;
+ u32 v1, v2, v3;
+ u32 o1, o2;
o1 = sh_cmt_read_cmcsr(ch) & ch->cmt->info->overflow_bit;
@@ -311,7 +295,8 @@ static unsigned long sh_cmt_get_counter(struct sh_cmt_channel *ch,
static void sh_cmt_start_stop_ch(struct sh_cmt_channel *ch, int start)
{
- unsigned long flags, value;
+ unsigned long flags;
+ u32 value;
/* start stop register shared by multiple timer channels */
raw_spin_lock_irqsave(&ch->cmt->lock, flags);
@@ -418,11 +403,11 @@ static void sh_cmt_disable(struct sh_cmt_channel *ch)
static void sh_cmt_clock_event_program_verify(struct sh_cmt_channel *ch,
int absolute)
{
- unsigned long new_match;
- unsigned long value = ch->next_match_value;
- unsigned long delay = 0;
- unsigned long now = 0;
- int has_wrapped;
+ u32 value = ch->next_match_value;
+ u32 new_match;
+ u32 delay = 0;
+ u32 now = 0;
+ u32 has_wrapped;
now = sh_cmt_get_counter(ch, &has_wrapped);
ch->flags |= FLAG_REPROGRAM; /* force reprogram */
@@ -619,9 +604,10 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
- unsigned long flags, raw;
- unsigned long value;
- int has_wrapped;
+ unsigned long flags;
+ u32 has_wrapped;
+ u64 value;
+ u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
@@ -694,7 +680,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
- cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
+ cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",
@@ -941,8 +927,22 @@ static const struct of_device_id sh_cmt_of_table[] __maybe_unused = {
.compatible = "renesas,cmt-48-gen2",
.data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
},
- { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] },
- { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] },
+ {
+ .compatible = "renesas,rcar-gen2-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen2-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt0",
+ .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2]
+ },
+ {
+ .compatible = "renesas,rcar-gen3-cmt1",
+ .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2]
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sh_cmt_of_table);
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 6812e099b6a3..354b27d14a19 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - MTU2
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index c74a6c543ca2..49f1c805fc95 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -1,16 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SuperH Timer Support - TMU
*
* Copyright (C) 2009 Magnus Damm
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/clk.h>
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index edf1a46269f1..edf1a46269f1 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/timer-cadence-ttc.c
index 29d51755e18b..b33402980b6f 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/timer-cadence-ttc.c
@@ -535,7 +535,7 @@ static int __init ttc_timer_init(struct device_node *timer)
if (ret)
return ret;
- pr_info("%s #0 at %p, irq=%d\n", timer->name, timer_baseaddr, irq);
+ pr_info("%pOFn #0 at %p, irq=%d\n", timer, timer_baseaddr, irq);
return 0;
}
diff --git a/drivers/clocksource/time-efm32.c b/drivers/clocksource/timer-efm32.c
index 257e810ec1ad..257e810ec1ad 100644
--- a/drivers/clocksource/time-efm32.c
+++ b/drivers/clocksource/timer-efm32.c
diff --git a/drivers/clocksource/fsl_ftm_timer.c b/drivers/clocksource/timer-fsl-ftm.c
index 846d18daf893..846d18daf893 100644
--- a/drivers/clocksource/fsl_ftm_timer.c
+++ b/drivers/clocksource/timer-fsl-ftm.c
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 62d24690ba02..76e526f58620 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -190,7 +190,7 @@ static int __init integrator_ap_timer_init_of(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk)) {
- pr_err("No clock for %s\n", node->name);
+ pr_err("No clock for %pOFn\n", node);
return PTR_ERR(clk);
}
clk_prepare_enable(clk);
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/timer-lpc32xx.c
index d51a62a79ef7..d51a62a79ef7 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/timer-lpc32xx.c
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/timer-orion.c
index 12202067fe4b..7d487107e3cd 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/timer-orion.c
@@ -129,13 +129,13 @@ static int __init orion_timer_init(struct device_node *np)
/* timer registers are shared with watchdog timer */
timer_base = of_iomap(np, 0);
if (!timer_base) {
- pr_err("%s: unable to map resource\n", np->name);
+ pr_err("%pOFn: unable to map resource\n", np);
return -ENXIO;
}
clk = of_clk_get(np, 0);
if (IS_ERR(clk)) {
- pr_err("%s: unable to get clk\n", np->name);
+ pr_err("%pOFn: unable to get clk\n", np);
return PTR_ERR(clk);
}
@@ -148,7 +148,7 @@ static int __init orion_timer_init(struct device_node *np)
/* we are only interested in timer1 irq */
irq = irq_of_parse_and_map(np, 1);
if (irq <= 0) {
- pr_err("%s: unable to parse timer1 irq\n", np->name);
+ pr_err("%pOFn: unable to parse timer1 irq\n", np);
return -EINVAL;
}
@@ -174,7 +174,7 @@ static int __init orion_timer_init(struct device_node *np)
/* setup timer1 as clockevent timer */
ret = setup_irq(irq, &orion_clkevt_irq);
if (ret) {
- pr_err("%s: unable to setup irq\n", np->name);
+ pr_err("%pOFn: unable to setup irq\n", np);
return ret;
}
diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/timer-owl.c
index ea00a5e8f95d..ea00a5e8f95d 100644
--- a/drivers/clocksource/owl-timer.c
+++ b/drivers/clocksource/timer-owl.c
diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/timer-pistachio.c
index a2dd85d0c1d7..a2dd85d0c1d7 100644
--- a/drivers/clocksource/time-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/timer-qcom.c
index 89816f89ff3f..89816f89ff3f 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/timer-qcom.c
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index e01222ea888f..052b230ca312 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -249,7 +249,7 @@ static int __init sp804_of_init(struct device_node *np)
if (of_clk_get_parent_count(np) == 3) {
clk2 = of_clk_get(np, 1);
if (IS_ERR(clk2)) {
- pr_err("sp804: %s clock not found: %d\n", np->name,
+ pr_err("sp804: %pOFn clock not found: %d\n", np,
(int)PTR_ERR(clk2));
clk2 = NULL;
}
diff --git a/drivers/clocksource/versatile.c b/drivers/clocksource/timer-versatile.c
index 39725d38aede..39725d38aede 100644
--- a/drivers/clocksource/versatile.c
+++ b/drivers/clocksource/timer-versatile.c
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/timer-vf-pit.c
index 0f92089ec08c..0f92089ec08c 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/timer-vf-pit.c
diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/timer-vt8500.c
index e0f7489cfc8e..e0f7489cfc8e 100644
--- a/drivers/clocksource/vt8500_timer.c
+++ b/drivers/clocksource/timer-vt8500.c
diff --git a/drivers/clocksource/zevio-timer.c b/drivers/clocksource/timer-zevio.c
index f74689334f7c..6127e8062a71 100644
--- a/drivers/clocksource/zevio-timer.c
+++ b/drivers/clocksource/timer-zevio.c
@@ -148,12 +148,12 @@ static int __init zevio_timer_add(struct device_node *node)
of_address_to_resource(node, 0, &res);
scnprintf(timer->clocksource_name, sizeof(timer->clocksource_name),
- "%llx.%s_clocksource",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clocksource",
+ (unsigned long long)res.start, node);
scnprintf(timer->clockevent_name, sizeof(timer->clockevent_name),
- "%llx.%s_clockevent",
- (unsigned long long)res.start, node->name);
+ "%llx.%pOFn_clockevent",
+ (unsigned long long)res.start, node);
if (timer->interrupt_regs && irqnr) {
timer->clkevt.name = timer->clockevent_name;
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index c23396f32c8a..8e7e225d2446 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/
obj-$(CONFIG_CRYPTO_DEV_CPT) += cavium/cpt/
obj-$(CONFIG_CRYPTO_DEV_NITROX) += cavium/nitrox/
obj-$(CONFIG_CRYPTO_DEV_EXYNOS_RNG) += exynos-rng.o
-obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += caam/
obj-$(CONFIG_CRYPTO_DEV_GEODE) += geode-aes.o
obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o
obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 801aeab5ab1e..2b7af44c7b85 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c driver.
*/
diff --git a/drivers/crypto/atmel-authenc.h b/drivers/crypto/atmel-authenc.h
index 2a60d1224143..cbd37a2edada 100644
--- a/drivers/crypto/atmel-authenc.h
+++ b/drivers/crypto/atmel-authenc.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* API for Atmel Secure Protocol Layers Improved Performances (SPLIP)
*
@@ -5,18 +6,6 @@
*
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
diff --git a/drivers/crypto/atmel-ecc.c b/drivers/crypto/atmel-ecc.c
index 74f083f45e97..ba00e4563ca0 100644
--- a/drivers/crypto/atmel-ecc.c
+++ b/drivers/crypto/atmel-ecc.c
@@ -1,18 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Microchip / Atmel ECC (I2C) driver.
*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/bitrev.h>
diff --git a/drivers/crypto/atmel-ecc.h b/drivers/crypto/atmel-ecc.h
index 25232c8abcc2..643a3b947338 100644
--- a/drivers/crypto/atmel-ecc.h
+++ b/drivers/crypto/atmel-ecc.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2017, Microchip Technology Inc.
* Author: Tudor Ambarus <tudor.ambarus@microchip.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- *
*/
#ifndef __ATMEL_ECC_H__
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 8a19df2fba6a..ab0cfe748931 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-sham.c drivers.
*/
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 97b0423efa7f..438e1ffb2ec0 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
@@ -6,10 +7,6 @@
* Copyright (c) 2012 Eukréa Electromatique - ATMEL
* Author: Nicolas Royer <nicolas@eukrea.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- *
* Some ideas are from omap-aes.c drivers.
*/
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c
index 7f07a5085e9b..f3442c2bdbdc 100644
--- a/drivers/crypto/axis/artpec6_crypto.c
+++ b/drivers/crypto/axis/artpec6_crypto.c
@@ -330,7 +330,7 @@ struct artpec6_cryptotfm_context {
size_t key_length;
u32 key_md;
int crypto_type;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct artpec6_crypto_aead_hw_ctx {
@@ -1199,15 +1199,15 @@ artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt)
pr_debug("counter %x will overflow (nblks %u), falling back\n",
counter, counter + nblks);
- ret = crypto_skcipher_setkey(ctx->fallback, ctx->aes_key,
- ctx->key_length);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key,
+ ctx->key_length);
if (ret)
return ret;
{
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -1561,10 +1561,9 @@ static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base),
- 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback =
+ crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback))
return PTR_ERR(ctx->fallback);
@@ -1605,7 +1604,7 @@ static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm)
{
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
artpec6_crypto_aes_exit(tfm);
}
@@ -3174,7 +3173,6 @@ static struct platform_driver artpec6_crypto_driver = {
.remove = artpec6_crypto_remove,
.driver = {
.name = "artpec6-crypto",
- .owner = THIS_MODULE,
.of_match_table = artpec6_crypto_of_match,
},
};
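The artpec6 conversion above is an instance of the crypto_sync_skcipher pattern: a sync tfm is guaranteed to have no asynchronous completion, so its request can safely live on the stack via SYNC_SKCIPHER_REQUEST_ON_STACK. A hedged sketch of the pattern in isolation (the function name is illustrative):

#include <crypto/skcipher.h>

static int example_fallback_encrypt(struct crypto_sync_skcipher *fallback,
				    struct skcipher_request *req)
{
	int ret;

	SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, fallback);

	skcipher_request_set_sync_tfm(subreq, fallback);
	skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(subreq, req->src, req->dst,
				   req->cryptlen, req->iv);
	ret = crypto_skcipher_encrypt(subreq);
	skcipher_request_zero(subreq);	/* wipe the on-stack request */
	return ret;
}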
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index 1eb852765469..c4b1cade55c1 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,7 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+config CRYPTO_DEV_FSL_CAAM_COMMON
+ tristate
+
config CRYPTO_DEV_FSL_CAAM
- tristate "Freescale CAAM-Multicore driver backend"
+ tristate "Freescale CAAM-Multicore platform driver backend"
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
select SOC_BUS
+ select CRYPTO_DEV_FSL_CAAM_COMMON
help
Enables the driver module for Freescale's Cryptographic Accelerator
and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -12,9 +17,16 @@ config CRYPTO_DEV_FSL_CAAM
To compile this driver as a module, choose M here: the module
will be called caam.
+if CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_CAAM_DEBUG
+ bool "Enable debug output in CAAM driver"
+ help
+ Selecting this will enable printing of various debug
+ information in the CAAM driver.
+
config CRYPTO_DEV_FSL_CAAM_JR
tristate "Freescale CAAM Job Ring driver backend"
- depends on CRYPTO_DEV_FSL_CAAM
default y
help
Enables the driver module for Job Rings which are part of
@@ -25,9 +37,10 @@ config CRYPTO_DEV_FSL_CAAM_JR
To compile this driver as a module, choose M here: the module
will be called caam_jr.
+if CRYPTO_DEV_FSL_CAAM_JR
+
config CRYPTO_DEV_FSL_CAAM_RINGSIZE
int "Job Ring size"
- depends on CRYPTO_DEV_FSL_CAAM_JR
range 2 9
default "9"
help
@@ -45,7 +58,6 @@ config CRYPTO_DEV_FSL_CAAM_RINGSIZE
config CRYPTO_DEV_FSL_CAAM_INTC
bool "Job Ring interrupt coalescing"
- depends on CRYPTO_DEV_FSL_CAAM_JR
help
Enable the Job Ring's interrupt coalescing feature.
@@ -75,7 +87,6 @@ config CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
tristate "Register algorithm implementations with the Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_AEAD
select CRYPTO_AUTHENC
@@ -90,7 +101,7 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
tristate "Queue Interface as Crypto API backend"
- depends on CRYPTO_DEV_FSL_CAAM_JR && FSL_DPAA && NET
+ depends on FSL_DPAA && NET
default y
select CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
@@ -107,7 +118,6 @@ config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI
config CRYPTO_DEV_FSL_CAAM_AHASH_API
tristate "Register hash algorithm implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_HASH
help
@@ -119,7 +129,6 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API
config CRYPTO_DEV_FSL_CAAM_PKC_API
tristate "Register public key cryptography implementations with Crypto API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RSA
help
@@ -131,7 +140,6 @@ config CRYPTO_DEV_FSL_CAAM_PKC_API
config CRYPTO_DEV_FSL_CAAM_RNG_API
tristate "Register caam device for hwrng API"
- depends on CRYPTO_DEV_FSL_CAAM_JR
default y
select CRYPTO_RNG
select HW_RANDOM
@@ -142,13 +150,32 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
To compile this as a module, choose M here: the module
will be called caamrng.
-config CRYPTO_DEV_FSL_CAAM_DEBUG
- bool "Enable debug output in CAAM driver"
- depends on CRYPTO_DEV_FSL_CAAM
+endif # CRYPTO_DEV_FSL_CAAM_JR
+
+endif # CRYPTO_DEV_FSL_CAAM
+
+config CRYPTO_DEV_FSL_DPAA2_CAAM
+ tristate "QorIQ DPAA2 CAAM (DPSECI) driver"
+ depends on FSL_MC_DPIO
+ depends on NETDEVICES
+ select CRYPTO_DEV_FSL_CAAM_COMMON
+ select CRYPTO_BLKCIPHER
+ select CRYPTO_AUTHENC
+ select CRYPTO_AEAD
+ select CRYPTO_HASH
help
- Selecting this will enable printing of various debug
- information in the CAAM driver.
+ CAAM driver for QorIQ Data Path Acceleration Architecture 2.
+ It handles DPSECI DPAA2 objects that sit on the Management Complex
+ (MC) fsl-mc bus.
+
+ To compile this as a module, choose M here: the module
+ will be called dpaa2_caam.
config CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC
def_tristate (CRYPTO_DEV_FSL_CAAM_CRYPTO_API || \
- CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI)
+ CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
+
+config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
+ def_tristate (CRYPTO_DEV_FSL_CAAM_AHASH_API || \
+ CRYPTO_DEV_FSL_DPAA2_CAAM)
diff --git a/drivers/crypto/caam/Makefile b/drivers/crypto/caam/Makefile
index cb652ee7dfc8..7bbfd06a11ff 100644
--- a/drivers/crypto/caam/Makefile
+++ b/drivers/crypto/caam/Makefile
@@ -6,19 +6,27 @@ ifeq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_DEBUG), y)
ccflags-y := -DDEBUG
endif
+ccflags-y += -DVERSION=\"\"
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_COMMON) += error.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_JR) += caam_jr.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API) += caamalg.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI) += caamalg_qi.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC) += caamalg_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API) += caamhash.o
+obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC) += caamhash_desc.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API) += caamrng.o
obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API) += caam_pkc.o
caam-objs := ctrl.o
-caam_jr-objs := jr.o key_gen.o error.o
+caam_jr-objs := jr.o key_gen.o
caam_pkc-y := caampkc.o pkc_desc.o
ifneq ($(CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI),)
ccflags-y += -DCONFIG_CAAM_QI
caam-objs += qi.o
endif
+
+obj-$(CONFIG_CRYPTO_DEV_FSL_DPAA2_CAAM) += dpaa2_caam.o
+
+dpaa2_caam-y := caamalg_qi2.o dpseci.o
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index ec40f991e6c6..869f092432de 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -1,8 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for crypto API
*
* Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*
* Based on talitos crypto API driver.
*
@@ -81,8 +82,6 @@
#define debug(format, arg...)
#endif
-static struct list_head alg_list;
-
struct caam_alg_entry {
int class1_alg_type;
int class2_alg_type;
@@ -96,17 +95,21 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
struct caam_ctx {
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t sh_desc_enc_dma;
dma_addr_t sh_desc_dec_dma;
- dma_addr_t sh_desc_givenc_dma;
dma_addr_t key_dma;
enum dma_data_direction dir;
struct device *jrdev;
@@ -648,20 +651,20 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return rfc4543_set_sh_desc(aead);
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc;
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode &&
- (strstr(alg_name, "rfc3686") != NULL));
+ const bool is_rfc3686 = alg->caam.rfc3686;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "key in @"__stringify(__LINE__)": ",
@@ -689,40 +692,32 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher_encrypt shared descriptor */
+ /* skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_ablkcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_decrypt shared descriptor */
+ /* skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_ablkcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
- /* ablkcipher_givencrypt shared descriptor */
- desc = ctx->sh_desc_givenc;
- cnstr_shdsc_ablkcipher_givencap(desc, &ctx->cdata, ivsize, is_rfc3686,
- ctx1_iv_off);
- dma_sync_single_for_device(jrdev, ctx->sh_desc_givenc_dma,
- desc_bytes(desc), ctx->dir);
-
return 0;
}
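The rfc3686 detection above no longer string-matches the transform name; the flag is recorded in the algorithm template and recovered with container_of(). A minimal standalone sketch of that pattern, with hypothetical my_* names:

	struct my_skcipher_alg {
		struct skcipher_alg skcipher;	/* must be embedded by value */
		bool rfc3686;			/* driver-private template flag */
	};

	static bool my_is_rfc3686(struct crypto_skcipher *tfm)
	{
		struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
		struct my_skcipher_alg *my_alg =
			container_of(alg, struct my_skcipher_alg, skcipher);

		return my_alg->rfc3686;
	}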
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
- crypto_ablkcipher_set_flags(ablkcipher,
- CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
dev_err(jrdev, "key size mismatch\n");
return -EINVAL;
}
@@ -731,15 +726,15 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts_ablkcipher_encrypt shared descriptor */
+ /* xts_skcipher_encrypt shared descriptor */
desc = ctx->sh_desc_enc;
- cnstr_shdsc_xts_ablkcipher_encap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
desc_bytes(desc), ctx->dir);
- /* xts_ablkcipher_decrypt shared descriptor */
+ /* xts_skcipher_decrypt shared descriptor */
desc = ctx->sh_desc_dec;
- cnstr_shdsc_xts_ablkcipher_decap(desc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
desc_bytes(desc), ctx->dir);
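Since xts(aes) concatenates two AES keys, the length check above accepts exactly 32 or 64 bytes. A hedged caller-side sketch (set_xts_demo_key() and the all-zero demo key are hypothetical; error handling elided):

	#include <crypto/aes.h>
	#include <crypto/skcipher.h>

	static int set_xts_demo_key(struct crypto_skcipher *tfm)
	{
		static const u8 key[2 * AES_MAX_KEY_SIZE];	/* 64-byte demo key */

		return crypto_skcipher_setkey(tfm, key, sizeof(key));
	}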
@@ -765,22 +760,20 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input s/w scatterlist
* @dst_nents: number of segments in output s/w scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
- * @iv_dir: DMA mapping direction for IV
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* and IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
- enum dma_data_direction iv_dir;
int sec4_sg_bytes;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
@@ -790,8 +783,7 @@ struct ablkcipher_edesc {
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents,
- dma_addr_t iv_dma, int ivsize,
- enum dma_data_direction iv_dir, dma_addr_t sec4_sg_dma,
+ dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma,
int sec4_sg_bytes)
{
if (dst != src) {
@@ -803,7 +795,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize, iv_dir);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (sec4_sg_bytes)
dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes,
DMA_TO_DEVICE);
@@ -814,20 +806,19 @@ static void aead_unmap(struct device *dev,
struct aead_request *req)
{
caam_unmap(dev, req->src, req->dst,
- edesc->src_nents, edesc->dst_nents, 0, 0, DMA_NONE,
+ edesc->src_nents, edesc->dst_nents, 0, 0,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst,
edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->iv_dir,
+ edesc->iv_dma, ivsize,
edesc->sec4_sg_dma, edesc->sec4_sg_bytes);
}
@@ -881,87 +872,74 @@ static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
aead_request_complete(req, err);
}
-static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
ivsize, 0);
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->iv_dir == DMA_FROM_DEVICE) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->hw_desc + desc_bytes(edesc->hw_desc) +
- edesc->sec4_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
-
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
-static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
- void *context)
+static void skcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err,
+ void *context)
{
- struct ablkcipher_request *req = context;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_request *req = context;
+ struct skcipher_edesc *edesc;
#ifdef DEBUG
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
#endif
- edesc = container_of(desc, struct ablkcipher_edesc, hw_desc[0]);
+ edesc = container_of(desc, struct skcipher_edesc, hw_desc[0]);
if (err)
caam_jr_strstatus(jrdev, err);
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
#endif
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
- ablkcipher_request_complete(req, err);
+ skcipher_request_complete(req, err);
}
/*
@@ -1103,34 +1081,38 @@ static void init_authenc_job(struct aead_request *req,
}
/*
- * Fill in ablkcipher job descriptor
+ * Fill in skcipher job descriptor
*/
-static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void init_skcipher_job(struct skcipher_request *req,
+ struct skcipher_edesc *edesc,
+ const bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
u32 *desc = edesc->hw_desc;
+ u32 *sh_desc;
u32 out_options = 0;
- dma_addr_t dst_dma;
+ dma_addr_t dst_dma, ptr;
int len;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "presciv@"__stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
- pr_err("asked=%d, nbytes%d\n",
- (int)edesc->src_nents > 1 ? 100 : req->nbytes, req->nbytes);
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1);
+ pr_err("asked=%d, cryptlen%d\n",
+ (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen);
#endif
caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->src_nents > 1 ? 100 : req->cryptlen, 1);
+
+ sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec;
+ ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma;
len = desc_len(sh_desc);
init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
- append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->nbytes + ivsize,
+ append_seq_in_ptr(desc, edesc->sec4_sg_dma, req->cryptlen + ivsize,
LDST_SGF);
if (likely(req->src == req->dst)) {
@@ -1145,48 +1127,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
out_options = LDST_SGF;
}
}
- append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
-}
-
-/*
- * Fill in ablkcipher givencrypt job descriptor
- */
-static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
-{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- u32 *desc = edesc->hw_desc;
- u32 in_options;
- dma_addr_t dst_dma, src_dma;
- int len, sec4_sg_index = 0;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
- ivsize, 1);
-#endif
- caam_dump_sg(KERN_ERR, "src @" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->src,
- edesc->src_nents > 1 ? 100 : req->nbytes, 1);
-
- len = desc_len(sh_desc);
- init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
-
- if (edesc->src_nents == 1) {
- src_dma = sg_dma_address(req->src);
- in_options = 0;
- } else {
- src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += edesc->src_nents;
- in_options = LDST_SGF;
- }
- append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
-
- dst_dma = edesc->sec4_sg_dma + sec4_sg_index *
- sizeof(struct sec4_sg_entry);
- append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, LDST_SGF);
+ append_seq_out_ptr(desc, dst_dma, req->cryptlen, out_options);
}
/*
@@ -1275,7 +1216,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
GFP_DMA | flags);
if (!edesc) {
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1476,35 +1417,35 @@ static int aead_decrypt(struct aead_request *req)
}
/*
- * allocate and map the ablkcipher extended descriptor for ablkcipher
+ * allocate and map the skcipher extended descriptor for skcipher
*/
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, int desc_bytes)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ int desc_bytes)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (req->dst != req->src) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
}
@@ -1546,7 +1487,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (!edesc) {
dev_err(jrdev, "could not allocate extended descriptor\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1555,17 +1496,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->sec4_sg_bytes = sec4_sg_bytes;
edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
desc_bytes);
- edesc->iv_dir = DMA_TO_DEVICE;
/* Make sure IV is located in a DMAable area */
iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(jrdev, iv_dma)) {
dev_err(jrdev, "unable to map IV\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
+ 0, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1583,7 +1523,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
dev_err(jrdev, "unable to map S/G table\n");
caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
+ iv_dma, ivsize, 0, 0);
kfree(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1591,7 +1531,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher sec4_sg@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher sec4_sg@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
sec4_sg_bytes, 1);
#endif
@@ -1599,362 +1539,187 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
return edesc;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/* Create and submit job descriptor */
- init_ablkcipher_job(ctx->sh_desc_enc, ctx->sh_desc_enc_dma, edesc, req);
+ init_skcipher_job(req, edesc, true);
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_encrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
struct device *jrdev = ctx->jrdev;
u32 *desc;
int ret = 0;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
+ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize,
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
ivsize, 0);
/* Create and submit job descriptor */
- init_ablkcipher_job(ctx->sh_desc_dec, ctx->sh_desc_dec_dma, edesc, req);
+ init_skcipher_job(req, edesc, false);
desc = edesc->hw_desc;
#ifdef DEBUG
- print_hex_dump(KERN_ERR, "ablkcipher jobdesc@"__stringify(__LINE__)": ",
+ print_hex_dump(KERN_ERR, "skcipher jobdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
desc_bytes(edesc->hw_desc), 1);
#endif
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req);
+ ret = caam_jr_enqueue(jrdev, desc, skcipher_decrypt_done, req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(jrdev, edesc, req);
+ skcipher_unmap(jrdev, edesc, req);
kfree(edesc);
}
return ret;
}
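Note the asymmetry between the two paths: encryption copies the IV out of req->dst in the completion callback, after the engine has written the ciphertext, while decryption must snapshot it from req->src before enqueueing, since an in-place request (src == dst) would otherwise overwrite the last ciphertext block. Condensed sketch of both sides of the contract:

	/* encrypt: in skcipher_encrypt_done(), after the job completed */
	scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
				 ivsize, 0);

	/* decrypt: in skcipher_decrypt(), before caam_jr_enqueue() */
	scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
				 ivsize, 0);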
-/*
- * allocate and map the ablkcipher extended descriptor
- * for ablkcipher givencrypt
- */
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *greq,
- int desc_bytes)
-{
- struct ablkcipher_request *req = &greq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (likely(req->src == req->dst)) {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- } else {
- mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(jrdev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(jrdev, "unable to map destination\n");
- dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- }
-
- sec4_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = sec4_sg_ents;
- sec4_sg_ents += 1 + mapped_dst_nents;
-
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->sec4_sg_bytes = sec4_sg_bytes;
- edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
- desc_bytes);
- edesc->iv_dir = DMA_FROM_DEVICE;
-
- /* Make sure IV is located in a DMAable area */
- iv = (u8 *)edesc->hw_desc + desc_bytes + sec4_sg_bytes;
- iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(jrdev, iv_dma)) {
- dev_err(jrdev, "unable to map IV\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, DMA_NONE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- if (mapped_src_nents > 1)
- sg_to_sec4_sg_last(req->src, mapped_src_nents, edesc->sec4_sg,
- 0);
-
- dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_sec4_sg_last(req->dst, mapped_dst_nents, edesc->sec4_sg +
- dst_sg_idx + 1, 0);
-
- edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
- sec4_sg_bytes, DMA_TO_DEVICE);
- if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
- dev_err(jrdev, "unable to map S/G table\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, DMA_FROM_DEVICE, 0, 0);
- kfree(edesc);
- return ERR_PTR(-ENOMEM);
- }
- edesc->iv_dma = iv_dma;
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
- sec4_sg_bytes, 1);
-#endif
-
- return edesc;
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *jrdev = ctx->jrdev;
- u32 *desc;
- int ret = 0;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN * CAAM_CMD_SZ);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- /* Create and submit job descriptor*/
- init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
- edesc, req);
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
- DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
- desc_bytes(edesc->hw_desc), 1);
-#endif
- desc = edesc->hw_desc;
- ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
-
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(jrdev, edesc, req);
- kfree(edesc);
- }
-
- return ret;
-}
-
-#define template_aead template_u.aead
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
- },
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
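For context, a hedged caller-side sketch of exercising one of the algorithms in this table through the generic skcipher API (my_key, my_src, my_dst, my_len, my_iv and my_done_cb are hypothetical; error handling trimmed):

	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	struct skcipher_request *req;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	crypto_skcipher_setkey(tfm, my_key, AES_MAX_KEY_SIZE);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      my_done_cb, NULL);
	skcipher_request_set_crypt(req, my_src, my_dst, my_len, my_iv);
	ret = crypto_skcipher_encrypt(req);	/* typically -EINPROGRESS here */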
@@ -3239,12 +3004,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct crypto_alg crypto_alg;
- struct list_head entry;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -3276,8 +3035,6 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
ctx->sh_desc_enc_dma = dma_addr;
ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx,
sh_desc_dec);
- ctx->sh_desc_givenc_dma = dma_addr + offsetof(struct caam_ctx,
- sh_desc_givenc);
ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key);
/* copy descriptor header template value */
@@ -3287,14 +3044,14 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg =
- container_of(alg, struct caam_crypto_alg, crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -3316,9 +3073,9 @@ static void caam_exit_common(struct caam_ctx *ctx)
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -3328,8 +3085,6 @@ static void caam_aead_exit(struct crypto_aead *tfm)
static void __exit caam_algapi_exit(void)
{
-
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -3339,57 +3094,25 @@ static void __exit caam_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
+ struct skcipher_alg *alg = &t_alg->skcipher;
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg) {
- pr_err("failed to allocate t_alg\n");
- return ERR_PTR(-ENOMEM);
- }
-
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -3441,8 +3164,6 @@ static int __init caam_algapi_init(void)
return -ENODEV;
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -3458,9 +3179,8 @@ static int __init caam_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -3477,26 +3197,20 @@ static int __init caam_algapi_init(void)
* on LP devices.
*/
if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP)
- if ((alg->class1_alg_type & OP_ALG_AAI_MASK) ==
+ if ((t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_XTS)
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- pr_warn("%s alg allocation failed\n", alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
pr_warn("%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
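Taken together, the init/exit changes replace the kzalloc'd caam_crypto_alg list with the static driver_algs[] table; a condensed sketch of the resulting lifecycle (capability filtering elided):

	/* module init */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		caam_skcipher_alg_init(&driver_algs[i]);
		if (!crypto_register_skcipher(&driver_algs[i].skcipher))
			driver_algs[i].registered = true;
	}

	/* module exit */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++)
		if (driver_algs[i].registered)
			crypto_unregister_skcipher(&driver_algs[i].skcipher);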
diff --git a/drivers/crypto/caam/caamalg_desc.c b/drivers/crypto/caam/caamalg_desc.c
index a408edd84f34..1a6f0da14106 100644
--- a/drivers/crypto/caam/caamalg_desc.c
+++ b/drivers/crypto/caam/caamalg_desc.c
@@ -1,7 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
- * Copyright 2016 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -1212,11 +1213,8 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
}
EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap);
-/*
- * For ablkcipher encrypt and decrypt, read from req->src and
- * write to req->dst
- */
-static inline void ablkcipher_append_src_dst(u32 *desc)
+/* For skcipher encrypt and decrypt, read from req->src and write to req->dst */
+static inline void skcipher_append_src_dst(u32 *desc)
{
append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
@@ -1226,7 +1224,7 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
}
/**
- * cnstr_shdsc_ablkcipher_encap - ablkcipher encapsulation shared descriptor
+ * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1235,9 +1233,9 @@ static inline void ablkcipher_append_src_dst(u32 *desc)
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1280,18 +1278,18 @@ void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher enc shdesc@" __stringify(__LINE__)": ",
+ "skcipher enc shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap);
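A hedged call-site sketch for the renamed helper, mirroring how skcipher_setkey() drives it for plain CBC (no rfc3686 wrapping, IV at context offset 0):

	cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata,
				   AES_BLOCK_SIZE,	/* ivsize */
				   false,		/* is_rfc3686 */
				   0);			/* ctx1_iv_off */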
/**
- * cnstr_shdsc_ablkcipher_decap - ablkcipher decapsulation shared descriptor
+ * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
@@ -1300,9 +1298,9 @@ EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_encap);
* @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
* @ctx1_iv_off: IV offset in CONTEXT1 register
*/
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off)
{
u32 *key_jump_cmd;
@@ -1348,105 +1346,23 @@ void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
-
-#ifdef DEBUG
- print_hex_dump(KERN_ERR,
- "ablkcipher dec shdesc@" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
-#endif
-}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_decap);
-
-/**
- * cnstr_shdsc_ablkcipher_givencap - ablkcipher encapsulation shared descriptor
- * with HW-generated initialization vector.
- * @desc: pointer to buffer used for descriptor construction
- * @cdata: pointer to block cipher transform definitions
- * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed
- * with OP_ALG_AAI_CBC.
- * @ivsize: initialization vector size
- * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template
- * @ctx1_iv_off: IV offset in CONTEXT1 register
- */
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off)
-{
- u32 *key_jump_cmd, geniv;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
- /* Skip if already shared */
- key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- /* Load class1 key only */
- append_key_as_imm(desc, cdata->key_virt, cdata->keylen,
- cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
-
- /* Load Nonce into CONTEXT1 reg */
- if (is_rfc3686) {
- const u8 *nonce = cdata->key_virt + cdata->keylen;
-
- append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE,
- LDST_CLASS_IND_CCB |
- LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO |
- MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) |
- (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
- }
- set_jump_tgt_here(desc, key_jump_cmd);
-
- /* Generate IV */
- geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
- NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND |
- (ivsize << NFIFOENTRY_DLEN_SHIFT);
- append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
- LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
- append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
- append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO |
- MOVE_DEST_CLASS1CTX | (ivsize << MOVE_LEN_SHIFT) |
- (ctx1_iv_off << MOVE_OFFSET_SHIFT));
- append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
-
- /* Copy generated IV to memory */
- append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT |
- LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT));
-
- /* Load Counter into CONTEXT1 reg */
- if (is_rfc3686)
- append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB |
- LDST_SRCDST_BYTE_CONTEXT |
- ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
- LDST_OFFSET_SHIFT));
-
- if (ctx1_iv_off)
- append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
- (1 << JUMP_OFFSET_SHIFT));
-
- /* Load operation */
- append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL |
- OP_ALG_ENCRYPT);
-
- /* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+ "skcipher dec shdesc@" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_ablkcipher_givencap);
+EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap);
/**
- * cnstr_shdsc_xts_ablkcipher_encap - xts ablkcipher encapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1481,24 +1397,23 @@ void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata)
OP_ALG_ENCRYPT);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher enc shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher enc shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_encap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap);
/**
- * cnstr_shdsc_xts_ablkcipher_decap - xts ablkcipher decapsulation shared
- * descriptor
+ * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor
* @desc: pointer to buffer used for descriptor construction
* @cdata: pointer to block cipher transform definitions
* Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS.
*/
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata)
{
__be64 sector_size = cpu_to_be64(512);
u32 *key_jump_cmd;
@@ -1532,15 +1447,15 @@ void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata)
append_dec_op1(desc, cdata->algtype);
/* Perform operation */
- ablkcipher_append_src_dst(desc);
+ skcipher_append_src_dst(desc);
#ifdef DEBUG
print_hex_dump(KERN_ERR,
- "xts ablkcipher dec shdesc@" __stringify(__LINE__) ": ",
+ "xts skcipher dec shdesc@" __stringify(__LINE__) ": ",
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1);
#endif
}
-EXPORT_SYMBOL(cnstr_shdsc_xts_ablkcipher_decap);
+EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM descriptor support");
diff --git a/drivers/crypto/caam/caamalg_desc.h b/drivers/crypto/caam/caamalg_desc.h
index a917af5776ce..1315c8f6f951 100644
--- a/drivers/crypto/caam/caamalg_desc.h
+++ b/drivers/crypto/caam/caamalg_desc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Shared descriptors for aead, ablkcipher algorithms
+ * Shared descriptors for aead, skcipher algorithms
*
* Copyright 2016 NXP
*/
@@ -42,10 +42,10 @@
#define DESC_QI_RFC4543_ENC_LEN (DESC_RFC4543_ENC_LEN + 4 * CAAM_CMD_SZ)
#define DESC_QI_RFC4543_DEC_LEN (DESC_RFC4543_DEC_LEN + 4 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_BASE (3 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_ENC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_BASE (3 * CAAM_CMD_SZ)
+#define DESC_SKCIPHER_ENC_LEN (DESC_SKCIPHER_BASE + \
20 * CAAM_CMD_SZ)
-#define DESC_ABLKCIPHER_DEC_LEN (DESC_ABLKCIPHER_BASE + \
+#define DESC_SKCIPHER_DEC_LEN (DESC_SKCIPHER_BASE + \
15 * CAAM_CMD_SZ)
void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata,
@@ -96,20 +96,16 @@ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata,
unsigned int ivsize, unsigned int icvsize,
const bool is_qi);
-void cnstr_shdsc_ablkcipher_encap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_decap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata,
+ unsigned int ivsize, const bool is_rfc3686,
+ const u32 ctx1_iv_off);
-void cnstr_shdsc_ablkcipher_givencap(u32 * const desc, struct alginfo *cdata,
- unsigned int ivsize, const bool is_rfc3686,
- const u32 ctx1_iv_off);
+void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata);
-void cnstr_shdsc_xts_ablkcipher_encap(u32 * const desc, struct alginfo *cdata);
-
-void cnstr_shdsc_xts_ablkcipher_decap(u32 * const desc, struct alginfo *cdata);
+void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata);
#endif /* _CAAMALG_DESC_H_ */
diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
index d7aa7d7ff102..23c9fc4975f8 100644
--- a/drivers/crypto/caam/caamalg_qi.c
+++ b/drivers/crypto/caam/caamalg_qi.c
@@ -1,9 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale FSL CAAM support for crypto API over QI backend.
* Based on caamalg.c
*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
- * Copyright 2016-2017 NXP
+ * Copyright 2016-2018 NXP
*/
#include "compat.h"
@@ -43,6 +44,12 @@ struct caam_aead_alg {
bool registered;
};
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
/*
* per-session context
*/
@@ -50,7 +57,6 @@ struct caam_ctx {
struct device *jrdev;
u32 sh_desc_enc[DESC_MAX_USED_LEN];
u32 sh_desc_dec[DESC_MAX_USED_LEN];
- u32 sh_desc_givenc[DESC_MAX_USED_LEN];
u8 key[CAAM_MAX_KEY_SIZE];
dma_addr_t key_dma;
enum dma_data_direction dir;
@@ -589,18 +595,19 @@ static int rfc4543_setkey(struct crypto_aead *aead,
return 0;
}
-static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher);
- const char *alg_name = crypto_tfm_alg_name(tfm);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher), typeof(*alg),
+ skcipher);
struct device *jrdev = ctx->jrdev;
- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
u32 ctx1_iv_off = 0;
const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
OP_ALG_AAI_CTR_MOD128);
- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686"));
+ const bool is_rfc3686 = alg->caam.rfc3686;
int ret = 0;
#ifdef DEBUG
@@ -629,13 +636,11 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */
- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
- is_rfc3686, ctx1_iv_off);
- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata,
- ivsize, is_rfc3686, ctx1_iv_off);
+ /* skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
+ cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize,
+ is_rfc3686, ctx1_iv_off);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -656,25 +661,16 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
}
}
- if (ctx->drv_ctx[GIVENCRYPT]) {
- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT],
- ctx->sh_desc_givenc);
- if (ret) {
- dev_err(jrdev, "driver givenc context update failed\n");
- goto badkey;
- }
- }
-
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
-static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
- const u8 *key, unsigned int keylen)
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
{
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *jrdev = ctx->jrdev;
int ret = 0;
@@ -687,9 +683,9 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
ctx->cdata.key_virt = key;
ctx->cdata.key_inline = true;
- /* xts ablkcipher encrypt, decrypt shared descriptors */
- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
+ /* xts skcipher encrypt, decrypt shared descriptors */
+ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata);
+ cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata);
/* Now update the driver contexts with the new shared descriptor */
if (ctx->drv_ctx[ENCRYPT]) {
@@ -712,7 +708,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
return ret;
badkey:
- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
return -EINVAL;
}
@@ -741,7 +737,7 @@ struct aead_edesc {
};
/*
- * ablkcipher_edesc - s/w-extended ablkcipher descriptor
+ * skcipher_edesc - s/w-extended skcipher descriptor
* @src_nents: number of segments in input scatterlist
* @dst_nents: number of segments in output scatterlist
* @iv_dma: dma address of iv for checking continuity and link table
@@ -750,7 +746,7 @@ struct aead_edesc {
* @drv_req: driver-specific request structure
* @sgt: the h/w link table, followed by IV
*/
-struct ablkcipher_edesc {
+struct skcipher_edesc {
int src_nents;
int dst_nents;
dma_addr_t iv_dma;
@@ -781,10 +777,8 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
if (type == ENCRYPT)
desc = ctx->sh_desc_enc;
- else if (type == DECRYPT)
+ else /* (type == DECRYPT) */
desc = ctx->sh_desc_dec;
- else /* (type == GIVENCRYPT) */
- desc = ctx->sh_desc_givenc;
cpu = smp_processor_id();
drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc);
@@ -803,8 +797,7 @@ static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx,
static void caam_unmap(struct device *dev, struct scatterlist *src,
struct scatterlist *dst, int src_nents,
int dst_nents, dma_addr_t iv_dma, int ivsize,
- enum optype op_type, dma_addr_t qm_sg_dma,
- int qm_sg_bytes)
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
{
if (dst != src) {
if (src_nents)
@@ -815,9 +808,7 @@ static void caam_unmap(struct device *dev, struct scatterlist *src,
}
if (iv_dma)
- dma_unmap_single(dev, iv_dma, ivsize,
- op_type == GIVENCRYPT ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE);
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
if (qm_sg_bytes)
dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
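With givencrypt gone, the IV only ever moves to the device, so the direction argument drops out of this caam_unmap() just as in caamalg.c. Minimal sketch of the now-uniform map/unmap pairing (iv and iv_dma as set up in the edesc allocation path):

	/* allocation path */
	iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	/* teardown path, always the same direction */
	dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);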
@@ -830,21 +821,18 @@ static void aead_unmap(struct device *dev,
int ivsize = crypto_aead_ivsize(aead);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
}
-static void ablkcipher_unmap(struct device *dev,
- struct ablkcipher_edesc *edesc,
- struct ablkcipher_request *req)
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type,
- edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
}
static void aead_done(struct caam_drv_req *drv_req, u32 status)
@@ -902,9 +890,8 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
int in_len, out_len;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
return (struct aead_edesc *)drv_ctx;
@@ -994,7 +981,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1009,7 +996,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents,
- dst_nents, 0, 0, 0, 0, 0);
+ dst_nents, 0, 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1028,7 +1015,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (dma_mapping_error(qidev, edesc->assoclen_dma)) {
dev_err(qidev, "unable to map assoclen\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1051,7 +1038,7 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
dev_err(qidev, "unable to map S/G table\n");
dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1138,14 +1125,14 @@ static int ipsec_gcm_decrypt(struct aead_request *req)
return aead_crypt(req, false);
}
-static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
+static void skcipher_done(struct caam_drv_req *drv_req, u32 status)
{
- struct ablkcipher_edesc *edesc;
- struct ablkcipher_request *req = drv_req->app_ctx;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *caam_ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct skcipher_request *req = drv_req->app_ctx;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *caam_ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = caam_ctx->qidev;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
#ifdef DEBUG
dev_err(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status);
@@ -1158,72 +1145,60 @@ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status)
#ifdef DEBUG
print_hex_dump(KERN_ERR, "dstiv @" __stringify(__LINE__)": ",
- DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
edesc->src_nents > 1 ? 100 : ivsize, 1);
caam_dump_sg(KERN_ERR, "dst @" __stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
- edesc->dst_nents > 1 ? 100 : req->nbytes, 1);
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
#endif
- ablkcipher_unmap(qidev, edesc, req);
-
- /* In case initial IV was generated, copy it in GIVCIPHER request */
- if (edesc->drv_req.drv_ctx->op_type == GIVENCRYPT) {
- u8 *iv;
- struct skcipher_givcrypt_request *greq;
-
- greq = container_of(req, struct skcipher_givcrypt_request,
- creq);
- iv = (u8 *)edesc->sgt + edesc->qm_sg_bytes;
- memcpy(greq->giv, iv, ivsize);
- }
+ skcipher_unmap(qidev, edesc, req);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block. This is used e.g. by the CTS mode.
*/
- if (edesc->drv_req.drv_ctx->op_type != DECRYPT)
- scatterwalk_map_and_copy(req->info, req->dst, req->nbytes -
+ if (edesc->drv_req.drv_ctx->op_type == ENCRYPT)
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen -
ivsize, ivsize, 0);
qi_cache_free(edesc);
- ablkcipher_request_complete(req, status);
+ skcipher_request_complete(req, status);
}
-static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
- *req, bool encrypt)
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
+ bool encrypt)
{
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
struct device *qidev = ctx->qidev;
gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
GFP_KERNEL : GFP_ATOMIC;
int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
- struct ablkcipher_edesc *edesc;
+ struct skcipher_edesc *edesc;
dma_addr_t iv_dma;
u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
struct qm_sg_entry *sg_table, *fd_sgt;
struct caam_drv_ctx *drv_ctx;
- enum optype op_type = encrypt ? ENCRYPT : DECRYPT;
- drv_ctx = get_drv_ctx(ctx, op_type);
+ drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT);
if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
+ return (struct skcipher_edesc *)drv_ctx;
- src_nents = sg_nents_for_len(req->src, req->nbytes);
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(src_nents);
}
if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
if (unlikely(dst_nents < 0)) {
dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
+ req->cryptlen);
return ERR_PTR(dst_nents);
}
@@ -1255,12 +1230,12 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
ivsize > CAAM_QI_MEMCACHE_SIZE)) {
dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
qm_sg_ents, ivsize);
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
@@ -1269,20 +1244,20 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (unlikely(!edesc)) {
dev_err(qidev, "could not allocate extended descriptor\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
return ERR_PTR(-ENOMEM);
}
/* Make sure IV is located in a DMAable area */
sg_table = &edesc->sgt[0];
iv = (u8 *)(sg_table + qm_sg_ents);
- memcpy(iv, req->info, ivsize);
+ memcpy(iv, req->iv, ivsize);
iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE);
if (dma_mapping_error(qidev, iv_dma)) {
dev_err(qidev, "unable to map IV\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
+ 0, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1292,7 +1267,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
edesc->iv_dma = iv_dma;
edesc->qm_sg_bytes = qm_sg_bytes;
edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
+ edesc->drv_req.cbk = skcipher_done;
edesc->drv_req.drv_ctx = drv_ctx;
dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
@@ -1307,7 +1282,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
dev_err(qidev, "unable to map S/G table\n");
caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, op_type, 0, 0);
+ iv_dma, ivsize, 0, 0);
qi_cache_free(edesc);
return ERR_PTR(-ENOMEM);
}
@@ -1315,348 +1290,172 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
fd_sgt = &edesc->drv_req.fd_sgt[0];
dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma,
- ivsize + req->nbytes, 0);
+ ivsize + req->cryptlen, 0);
if (req->src == req->dst) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma +
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else if (mapped_dst_nents > 1) {
dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), req->nbytes, 0);
+ sizeof(*sg_table), req->cryptlen, 0);
} else {
dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst),
- req->nbytes, 0);
- }
-
- return edesc;
-}
-
-static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
- struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- struct device *qidev = ctx->qidev;
- gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
- GFP_KERNEL : GFP_ATOMIC;
- int src_nents, mapped_src_nents, dst_nents, mapped_dst_nents;
- struct ablkcipher_edesc *edesc;
- dma_addr_t iv_dma;
- u8 *iv;
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
- struct qm_sg_entry *sg_table, *fd_sgt;
- int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
- struct caam_drv_ctx *drv_ctx;
-
- drv_ctx = get_drv_ctx(ctx, GIVENCRYPT);
- if (unlikely(IS_ERR_OR_NULL(drv_ctx)))
- return (struct ablkcipher_edesc *)drv_ctx;
-
- src_nents = sg_nents_for_len(req->src, req->nbytes);
- if (unlikely(src_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in src S/G\n",
- req->nbytes);
- return ERR_PTR(src_nents);
- }
-
- if (unlikely(req->src != req->dst)) {
- dst_nents = sg_nents_for_len(req->dst, req->nbytes);
- if (unlikely(dst_nents < 0)) {
- dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n",
- req->nbytes);
- return ERR_PTR(dst_nents);
- }
-
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_TO_DEVICE);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents,
- DMA_FROM_DEVICE);
- if (unlikely(!mapped_dst_nents)) {
- dev_err(qidev, "unable to map destination\n");
- dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE);
- return ERR_PTR(-ENOMEM);
- }
- } else {
- mapped_src_nents = dma_map_sg(qidev, req->src, src_nents,
- DMA_BIDIRECTIONAL);
- if (unlikely(!mapped_src_nents)) {
- dev_err(qidev, "unable to map source\n");
- return ERR_PTR(-ENOMEM);
- }
-
- dst_nents = src_nents;
- mapped_dst_nents = src_nents;
- }
-
- qm_sg_ents = mapped_src_nents > 1 ? mapped_src_nents : 0;
- dst_sg_idx = qm_sg_ents;
-
- qm_sg_ents += 1 + mapped_dst_nents;
- qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry);
- if (unlikely(offsetof(struct ablkcipher_edesc, sgt) + qm_sg_bytes +
- ivsize > CAAM_QI_MEMCACHE_SIZE)) {
- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n",
- qm_sg_ents, ivsize);
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* allocate space for base edesc, link tables and IV */
- edesc = qi_cache_alloc(GFP_DMA | flags);
- if (!edesc) {
- dev_err(qidev, "could not allocate extended descriptor\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- return ERR_PTR(-ENOMEM);
- }
-
- /* Make sure IV is located in a DMAable area */
- sg_table = &edesc->sgt[0];
- iv = (u8 *)(sg_table + qm_sg_ents);
- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_FROM_DEVICE);
- if (dma_mapping_error(qidev, iv_dma)) {
- dev_err(qidev, "unable to map IV\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
- }
-
- edesc->src_nents = src_nents;
- edesc->dst_nents = dst_nents;
- edesc->iv_dma = iv_dma;
- edesc->qm_sg_bytes = qm_sg_bytes;
- edesc->drv_req.app_ctx = req;
- edesc->drv_req.cbk = ablkcipher_done;
- edesc->drv_req.drv_ctx = drv_ctx;
-
- if (mapped_src_nents > 1)
- sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table, 0);
-
- dma_to_qm_sg_one(sg_table + dst_sg_idx, iv_dma, ivsize, 0);
- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + dst_sg_idx + 1,
- 0);
-
- edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes,
- DMA_TO_DEVICE);
- if (dma_mapping_error(qidev, edesc->qm_sg_dma)) {
- dev_err(qidev, "unable to map S/G table\n");
- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents,
- iv_dma, ivsize, GIVENCRYPT, 0, 0);
- qi_cache_free(edesc);
- return ERR_PTR(-ENOMEM);
+ req->cryptlen, 0);
}
- fd_sgt = &edesc->drv_req.fd_sgt[0];
-
- if (mapped_src_nents > 1)
- dma_to_qm_sg_one_ext(&fd_sgt[1], edesc->qm_sg_dma, req->nbytes,
- 0);
- else
- dma_to_qm_sg_one(&fd_sgt[1], sg_dma_address(req->src),
- req->nbytes, 0);
-
- dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx *
- sizeof(*sg_table), ivsize + req->nbytes, 0);
-
return edesc;
}
-static inline int ablkcipher_crypt(struct ablkcipher_request *req, bool encrypt)
+static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt)
{
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
int ret;
if (unlikely(caam_congested))
return -EAGAIN;
/* allocate extended descriptor */
- edesc = ablkcipher_edesc_alloc(req, encrypt);
+ edesc = skcipher_edesc_alloc(req, encrypt);
if (IS_ERR(edesc))
return PTR_ERR(edesc);
/*
- * The crypto API expects us to set the IV (req->info) to the last
+ * The crypto API expects us to set the IV (req->iv) to the last
* ciphertext block.
*/
if (!encrypt)
- scatterwalk_map_and_copy(req->info, req->src, req->nbytes -
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen -
ivsize, ivsize, 0);
ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
if (!ret) {
ret = -EINPROGRESS;
} else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
+ skcipher_unmap(ctx->qidev, edesc, req);
qi_cache_free(edesc);
}
return ret;
}
-static int ablkcipher_encrypt(struct ablkcipher_request *req)
+static int skcipher_encrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, true);
+ return skcipher_crypt(req, true);
}
-static int ablkcipher_decrypt(struct ablkcipher_request *req)
+static int skcipher_decrypt(struct skcipher_request *req)
{
- return ablkcipher_crypt(req, false);
-}
-
-static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
-{
- struct ablkcipher_request *req = &creq->creq;
- struct ablkcipher_edesc *edesc;
- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
- int ret;
-
- if (unlikely(caam_congested))
- return -EAGAIN;
-
- /* allocate extended descriptor */
- edesc = ablkcipher_giv_edesc_alloc(creq);
- if (IS_ERR(edesc))
- return PTR_ERR(edesc);
-
- ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req);
- if (!ret) {
- ret = -EINPROGRESS;
- } else {
- ablkcipher_unmap(ctx->qidev, edesc, req);
- qi_cache_free(edesc);
- }
-
- return ret;
+ return skcipher_crypt(req, false);
}
-#define template_ablkcipher template_u.ablkcipher
-struct caam_alg_template {
- char name[CRYPTO_MAX_ALG_NAME];
- char driver_name[CRYPTO_MAX_ALG_NAME];
- unsigned int blocksize;
- u32 type;
- union {
- struct ablkcipher_alg ablkcipher;
- } template_u;
- u32 class1_alg_type;
- u32 class2_alg_type;
-};
-
-static struct caam_alg_template driver_algs[] = {
- /* ablkcipher descriptor */
+static struct caam_skcipher_alg driver_algs[] = {
{
- .name = "cbc(aes)",
- .driver_name = "cbc-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des3_ede)",
- .driver_name = "cbc-3des-caam-qi",
- .blocksize = DES3_EDE_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES3_EDE_KEY_SIZE,
.max_keysize = DES3_EDE_KEY_SIZE,
.ivsize = DES3_EDE_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
},
{
- .name = "cbc(des)",
- .driver_name = "cbc-des-caam-qi",
- .blocksize = DES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = DES_KEY_SIZE,
.max_keysize = DES_KEY_SIZE,
.ivsize = DES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
},
{
- .name = "ctr(aes)",
- .driver_name = "ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "chainiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "rfc3686(ctr(aes))",
- .driver_name = "rfc3686-ctr-aes-caam-qi",
- .blocksize = 1,
- .type = CRYPTO_ALG_TYPE_GIVCIPHER,
- .template_ablkcipher = {
- .setkey = ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .givencrypt = ablkcipher_givencrypt,
- .geniv = "<built-in>",
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = AES_MIN_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.max_keysize = AES_MAX_KEY_SIZE +
CTR_RFC3686_NONCE_SIZE,
.ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128,
},
{
- .name = "xts(aes)",
- .driver_name = "xts-aes-caam-qi",
- .blocksize = AES_BLOCK_SIZE,
- .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
- .template_ablkcipher = {
- .setkey = xts_ablkcipher_setkey,
- .encrypt = ablkcipher_encrypt,
- .decrypt = ablkcipher_decrypt,
- .geniv = "eseqiv",
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
},
- .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
},
};
@@ -2528,12 +2327,6 @@ static struct caam_aead_alg driver_aeads[] = {
},
};
-struct caam_crypto_alg {
- struct list_head entry;
- struct crypto_alg crypto_alg;
- struct caam_alg_entry caam;
-};
-
static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
bool uses_dkp)
{
@@ -2572,19 +2365,18 @@ static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
spin_lock_init(&ctx->lock);
ctx->drv_ctx[ENCRYPT] = NULL;
ctx->drv_ctx[DECRYPT] = NULL;
- ctx->drv_ctx[GIVENCRYPT] = NULL;
return 0;
}
-static int caam_cra_init(struct crypto_tfm *tfm)
+static int caam_cra_init(struct crypto_skcipher *tfm)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct caam_crypto_alg *caam_alg = container_of(alg, typeof(*caam_alg),
- crypto_alg);
- struct caam_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
- return caam_init_common(ctx, &caam_alg->caam, false);
+ return caam_init_common(crypto_skcipher_ctx(tfm), &caam_alg->caam,
+ false);
}
static int caam_aead_init(struct crypto_aead *tfm)
@@ -2602,16 +2394,15 @@ static void caam_exit_common(struct caam_ctx *ctx)
{
caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]);
caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]);
- caam_drv_ctx_rel(ctx->drv_ctx[GIVENCRYPT]);
dma_unmap_single(ctx->jrdev, ctx->key_dma, sizeof(ctx->key), ctx->dir);
caam_jr_free(ctx->jrdev);
}
-static void caam_cra_exit(struct crypto_tfm *tfm)
+static void caam_cra_exit(struct crypto_skcipher *tfm)
{
- caam_exit_common(crypto_tfm_ctx(tfm));
+ caam_exit_common(crypto_skcipher_ctx(tfm));
}
static void caam_aead_exit(struct crypto_aead *tfm)
@@ -2619,10 +2410,8 @@ static void caam_aead_exit(struct crypto_aead *tfm)
caam_exit_common(crypto_aead_ctx(tfm));
}
-static struct list_head alg_list;
static void __exit caam_qi_algapi_exit(void)
{
- struct caam_crypto_alg *t_alg, *n;
int i;
for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
@@ -2632,55 +2421,25 @@ static void __exit caam_qi_algapi_exit(void)
crypto_unregister_aead(&t_alg->aead);
}
- if (!alg_list.next)
- return;
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
- list_for_each_entry_safe(t_alg, n, &alg_list, entry) {
- crypto_unregister_alg(&t_alg->crypto_alg);
- list_del(&t_alg->entry);
- kfree(t_alg);
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
}
}
-static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
- *template)
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
- struct caam_crypto_alg *t_alg;
- struct crypto_alg *alg;
-
- t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
- if (!t_alg)
- return ERR_PTR(-ENOMEM);
+ struct skcipher_alg *alg = &t_alg->skcipher;
- alg = &t_alg->crypto_alg;
-
- snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name);
- snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
- template->driver_name);
- alg->cra_module = THIS_MODULE;
- alg->cra_init = caam_cra_init;
- alg->cra_exit = caam_cra_exit;
- alg->cra_priority = CAAM_CRA_PRIORITY;
- alg->cra_blocksize = template->blocksize;
- alg->cra_alignmask = 0;
- alg->cra_ctxsize = sizeof(struct caam_ctx);
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
- template->type;
- switch (template->type) {
- case CRYPTO_ALG_TYPE_GIVCIPHER:
- alg->cra_type = &crypto_givcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- case CRYPTO_ALG_TYPE_ABLKCIPHER:
- alg->cra_type = &crypto_ablkcipher_type;
- alg->cra_ablkcipher = template->template_ablkcipher;
- break;
- }
-
- t_alg->caam.class1_alg_type = template->class1_alg_type;
- t_alg->caam.class2_alg_type = template->class2_alg_type;
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
- return t_alg;
+ alg->init = caam_cra_init;
+ alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
@@ -2734,8 +2493,6 @@ static int __init caam_qi_algapi_init(void)
return -ENODEV;
}
- INIT_LIST_HEAD(&alg_list);
-
/*
* Register crypto algorithms the device supports.
* First, detect presence and attributes of DES, AES, and MD blocks.
@@ -2751,9 +2508,8 @@ static int __init caam_qi_algapi_init(void)
md_limit = SHA256_DIGEST_SIZE;
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
- struct caam_crypto_alg *t_alg;
- struct caam_alg_template *alg = driver_algs + i;
- u32 alg_sel = alg->class1_alg_type & OP_ALG_ALGSEL_MASK;
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
/* Skip DES algorithms if not supported by device */
if (!des_inst &&
@@ -2765,23 +2521,16 @@ static int __init caam_qi_algapi_init(void)
if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES))
continue;
- t_alg = caam_alg_alloc(alg);
- if (IS_ERR(t_alg)) {
- err = PTR_ERR(t_alg);
- dev_warn(priv->qidev, "%s alg allocation failed\n",
- alg->driver_name);
- continue;
- }
+ caam_skcipher_alg_init(t_alg);
- err = crypto_register_alg(&t_alg->crypto_alg);
+ err = crypto_register_skcipher(&t_alg->skcipher);
if (err) {
dev_warn(priv->qidev, "%s alg registration failed\n",
- t_alg->crypto_alg.cra_driver_name);
- kfree(t_alg);
+ t_alg->skcipher.base.cra_driver_name);
continue;
}
- list_add_tail(&t_alg->entry, &alg_list);
+ t_alg->registered = true;
registered = true;
}
diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c
new file mode 100644
index 000000000000..7d8ac0222fa3
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.c
@@ -0,0 +1,5165 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include "compat.h"
+#include "regs.h"
+#include "caamalg_qi2.h"
+#include "dpseci_cmd.h"
+#include "desc_constr.h"
+#include "error.h"
+#include "sg_sw_sec4.h"
+#include "sg_sw_qm2.h"
+#include "key_gen.h"
+#include "caamalg_desc.h"
+#include "caamhash_desc.h"
+#include <linux/fsl/mc.h>
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+
+#define CAAM_CRA_PRIORITY 2000
+
+/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
+#define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
+ SHA512_DIGEST_SIZE * 2)
+
+#if !IS_ENABLED(CONFIG_CRYPTO_DEV_FSL_CAAM)
+bool caam_little_end;
+EXPORT_SYMBOL(caam_little_end);
+bool caam_imx;
+EXPORT_SYMBOL(caam_imx);
+#endif
+
+/*
+ * This is a cache of buffers, from which users of the CAAM QI driver
+ * can allocate short buffers. It's faster than doing kmalloc on the hotpath.
+ * NOTE: A more elegant solution would be to have some headroom in the frames
+ * being processed. This could be added by the dpaa2-eth driver, but it would
+ * pose a problem for userspace applications, which cannot know of this
+ * limitation. So for now, this will do.
+ * NOTE: The memcache is SMP-safe. No need to handle spinlocks in here.
+ */
+static struct kmem_cache *qi_cache;
+
+struct caam_alg_entry {
+ struct device *dev;
+ int class1_alg_type;
+ int class2_alg_type;
+ bool rfc3686;
+ bool geniv;
+};
+
+struct caam_aead_alg {
+ struct aead_alg aead;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+struct caam_skcipher_alg {
+ struct skcipher_alg skcipher;
+ struct caam_alg_entry caam;
+ bool registered;
+};
+
+/**
+ * caam_ctx - per-session context
+ * @flc: Flow Contexts array
+ * @key: [authentication key], encryption key
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @key_dma: I/O virtual address of the key
+ * @dir: DMA direction for mapping key and Flow Contexts
+ * @dev: dpseci device
+ * @adata: authentication algorithm details
+ * @cdata: encryption algorithm details
+ * @authsize: authentication tag (a.k.a. ICV / MAC) size
+ */
+struct caam_ctx {
+ struct caam_flc flc[NUM_OP];
+ u8 key[CAAM_MAX_KEY_SIZE];
+ dma_addr_t flc_dma[NUM_OP];
+ dma_addr_t key_dma;
+ enum dma_data_direction dir;
+ struct device *dev;
+ struct alginfo adata;
+ struct alginfo cdata;
+ unsigned int authsize;
+};
+
+static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
+ dma_addr_t iova_addr)
+{
+ phys_addr_t phys_addr;
+
+ phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
+ iova_addr;
+
+ return phys_to_virt(phys_addr);
+}
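+
+/*
+ * Illustrative use (hypothetical caller): addresses carried in hardware
+ * frame descriptors are I/O virtual addresses when an IOMMU is present,
+ * so they must be translated before the CPU touches them, e.g.
+ *
+ *	struct aead_edesc *edesc;
+ *
+ *	edesc = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ *
+ * dpaa2_fd_get_addr() is provided by soc/fsl/dpaa2-fd.h.
+ */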
+
+/*
+ * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
+ *
+ * Allocate data on the hotpath. Instead of using kzalloc, one can use the
+ * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
+ * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
+ * hosting 16 SG entries.
+ *
+ * @flags - flags that would be used for the equivalent kmalloc(..) call
+ *
+ * Returns a pointer to a retrieved buffer on success or NULL on failure.
+ */
+static inline void *qi_cache_zalloc(gfp_t flags)
+{
+ return kmem_cache_zalloc(qi_cache, flags);
+}
+
+/*
+ * qi_cache_free - Frees buffers allocated from CAAM-QI cache
+ *
+ * @obj - buffer previously allocated by qi_cache_zalloc
+ *
+ * No checking is done; the call is a passthrough to
+ * kmem_cache_free(...)
+ */
+static inline void qi_cache_free(void *obj)
+{
+ kmem_cache_free(qi_cache, obj);
+}
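+
+/*
+ * Typical round trip through the cache (illustrative sketch; edesc and
+ * flags are hypothetical caller-side names):
+ *
+ *	struct skcipher_edesc *edesc;
+ *
+ *	edesc = qi_cache_zalloc(GFP_DMA | flags);
+ *	if (unlikely(!edesc))
+ *		return ERR_PTR(-ENOMEM);
+ *	...
+ *	qi_cache_free(edesc);
+ *
+ * Every buffer is CAAM_QI_MEMCACHE_SIZE bytes, so callers check that
+ * their S/G table and IV fit before allocating.
+ */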
+
+static struct caam_request *to_caam_req(struct crypto_async_request *areq)
+{
+ switch (crypto_tfm_alg_type(areq->tfm)) {
+ case CRYPTO_ALG_TYPE_SKCIPHER:
+ return skcipher_request_ctx(skcipher_request_cast(areq));
+ case CRYPTO_ALG_TYPE_AEAD:
+ return aead_request_ctx(container_of(areq, struct aead_request,
+ base));
+ case CRYPTO_ALG_TYPE_AHASH:
+ return ahash_request_ctx(ahash_request_cast(areq));
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void caam_unmap(struct device *dev, struct scatterlist *src,
+ struct scatterlist *dst, int src_nents,
+ int dst_nents, dma_addr_t iv_dma, int ivsize,
+ dma_addr_t qm_sg_dma, int qm_sg_bytes)
+{
+ if (dst != src) {
+ if (src_nents)
+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
+ } else {
+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
+ }
+
+ if (iv_dma)
+ dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
+
+ if (qm_sg_bytes)
+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
+}
+
+static int aead_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct device *dev = ctx->dev;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ struct caam_flc *flc;
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ u32 *nonce = NULL;
+ unsigned int data_len[2];
+ u32 inl_mask;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
+ ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
+ }
+
+ data_len[0] = ctx->adata.keylen_pad;
+ data_len[1] = ctx->cdata.keylen;
+
+ /* aead_encrypt shared descriptor */
+ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
+ DESC_QI_AEAD_ENC_LEN) +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+
+ if (alg->caam.geniv)
+ cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686,
+ nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ else
+ cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, is_rfc3686, nonce,
+ ctx1_iv_off, true, priv->sec_attr.era);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* aead_decrypt shared descriptor */
+ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
+ (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
+ DESC_JOB_IO_LEN, data_len, &inl_mask,
+ ARRAY_SIZE(data_len)) < 0)
+ return -EINVAL;
+
+ if (inl_mask & 1)
+ ctx->adata.key_virt = ctx->key;
+ else
+ ctx->adata.key_dma = ctx->key_dma;
+
+ if (inl_mask & 2)
+ ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
+ else
+ ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;
+
+ ctx->adata.key_inline = !!(inl_mask & 1);
+ ctx->cdata.key_inline = !!(inl_mask & 2);
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
+ ivsize, ctx->authsize, alg->caam.geniv,
+ is_rfc3686, nonce, ctx1_iv_off, true,
+ priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
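+
+/*
+ * Decoding the desc_inline_query() result used above (illustrative):
+ * bit 0 of inl_mask is set when the (split) authentication key fits
+ * inline in the shared descriptor, bit 1 when the encryption key does:
+ *
+ *	if (inl_mask & 1)
+ *		ctx->adata.key_virt = ctx->key;		(embed in descriptor)
+ *	else
+ *		ctx->adata.key_dma = ctx->key_dma;	(fetch via DMA)
+ *
+ * Keys too large to inline are referenced by I/O virtual address instead.
+ */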
+
+static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ aead_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int aead_setkey(struct crypto_aead *aead, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ struct crypto_authenc_keys keys;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
+ goto badkey;
+
+ dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
+ keys.authkeylen + keys.enckeylen, keys.enckeylen,
+ keys.authkeylen);
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ ctx->adata.keylen = keys.authkeylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+
+ if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
+ goto badkey;
+
+ memcpy(ctx->key, keys.authkey, keys.authkeylen);
+ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
+ keys.enckeylen, ctx->dir);
+ print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
+ ctx->adata.keylen_pad + keys.enckeylen, 1);
+
+ ctx->cdata.keylen = keys.enckeylen;
+
+ memzero_explicit(&keys, sizeof(keys));
+ return aead_set_sh_desc(aead);
+badkey:
+ crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ memzero_explicit(&keys, sizeof(keys));
+ return -EINVAL;
+}
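+
+/*
+ * Resulting ctx->key layout after aead_setkey() (illustrative; the
+ * authentication key region is padded out to ctx->adata.keylen_pad):
+ *
+ *	ctx->key[0 ...]          : authentication key (keys.authkeylen bytes)
+ *	ctx->key[keylen_pad ...] : encryption key (keys.enckeylen bytes)
+ */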
+
+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
+ bool encrypt)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_request *req_ctx = aead_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
+ typeof(*alg), aead);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct aead_edesc *edesc;
+ dma_addr_t qm_sg_dma, iv_dma = 0;
+ int ivsize = 0;
+ unsigned int authsize = ctx->authsize;
+ int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
+ int in_len, out_len;
+ struct dpaa2_sg_entry *sg_table;
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen);
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ dst_nents = sg_nents_for_len(req->dst, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize :
+ (-authsize)));
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : (-authsize)));
+ qi_cache_free(edesc);
+ return ERR_PTR(dst_nents);
+ }
+
+ if (src_nents) {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = 0;
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ src_nents = sg_nents_for_len(req->src, req->assoclen +
+ req->cryptlen +
+ (encrypt ? authsize : 0));
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->assoclen + req->cryptlen +
+ (encrypt ? authsize : 0));
+ qi_cache_free(edesc);
+ return ERR_PTR(src_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
+ ivsize = crypto_aead_ivsize(aead);
+
+ /*
+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
+ * Input is not contiguous.
+ */
+ qm_sg_nents = 1 + !!ivsize + mapped_src_nents +
+ (mapped_dst_nents > 1 ? mapped_dst_nents : 0);
+ sg_table = &edesc->sgt[0];
+ qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
+ CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_nents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (ivsize) {
+ u8 *iv = (u8 *)(sg_table + qm_sg_nents);
+
+ /* Make sure IV is located in a DMAable area */
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+
+ edesc->assoclen = cpu_to_caam32(req->assoclen);
+ edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->assoclen_dma)) {
+ dev_err(dev, "unable to map assoclen\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
+ qm_sg_index++;
+ if (ivsize) {
+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
+ qm_sg_index++;
+ }
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0);
+ qm_sg_index += mapped_src_nents;
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ qm_sg_index, 0);
+
+ qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->qm_sg_dma = qm_sg_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ out_len = req->assoclen + req->cryptlen +
+ (encrypt ? ctx->authsize : (-ctx->authsize));
+ in_len = 4 + ivsize + req->assoclen + req->cryptlen;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, in_len);
+
+ if (req->dst == req->src) {
+ if (mapped_src_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma +
+ (1 + !!ivsize) * sizeof(*sg_table));
+ }
+ } else if (mapped_dst_nents == 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
+ sizeof(*sg_table));
+ }
+
+ dpaa2_fl_set_len(out_fle, out_len);
+
+ return edesc;
+}
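+
+/*
+ * Input S/G table layout built above (illustrative), matching the
+ * "req->assoclen, [IV,] req->src [, req->dst]" comment:
+ *
+ *	sg_table[0]           : assoclen (4 bytes, DMA-mapped)
+ *	sg_table[1]           : IV, present only when ivsize != 0
+ *	next mapped_src_nents : entries for req->src
+ *	trailing entries      : req->dst, only when dst != src and
+ *	                        mapped_dst_nents > 1
+ */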
+
+static int gcm_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ /*
+ * AES GCM encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ ctx->cdata.key_virt = ctx->key;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ gcm_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int gcm_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
+ ctx->cdata.keylen = keylen;
+
+ return gcm_set_sh_desc(aead);
+}
+
+static int rfc4106_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4106 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4106_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4106_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4106_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4106_set_sh_desc(aead);
+}
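+
+/*
+ * Key blob layout expected by rfc4106_setkey() (illustrative): per
+ * RFC 4106, the caller appends the 4-byte salt to the AES key:
+ *
+ *	key[0 .. keylen - 5]          : AES key (becomes ctx->cdata.keylen)
+ *	key[keylen - 4 .. keylen - 1] : salt, used in the nonce
+ *
+ * hence the keylen < 4 rejection and the keylen - 4 adjustment above.
+ */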
+
+static int rfc4543_set_sh_desc(struct crypto_aead *aead)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+ unsigned int ivsize = crypto_aead_ivsize(aead);
+ struct caam_flc *flc;
+ u32 *desc;
+ int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
+ ctx->cdata.keylen;
+
+ if (!ctx->cdata.keylen || !ctx->authsize)
+ return 0;
+
+ ctx->cdata.key_virt = ctx->key;
+
+ /*
+ * RFC4543 encrypt shared descriptor
+ * Job Descriptor and Shared Descriptor
+ * must fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /*
+ * Job Descriptor and Shared Descriptors
+ * must all fit into the 64-word Descriptor h/w Buffer
+ */
+ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
+ ctx->cdata.key_inline = true;
+ } else {
+ ctx->cdata.key_inline = false;
+ ctx->cdata.key_dma = ctx->key_dma;
+ }
+
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
+ true);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
+
+static int rfc4543_setauthsize(struct crypto_aead *authenc,
+ unsigned int authsize)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(authenc);
+
+ ctx->authsize = authsize;
+ rfc4543_set_sh_desc(authenc);
+
+ return 0;
+}
+
+static int rfc4543_setkey(struct crypto_aead *aead,
+ const u8 *key, unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct device *dev = ctx->dev;
+
+ if (keylen < 4)
+ return -EINVAL;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ memcpy(ctx->key, key, keylen);
+ /*
+ * The last four bytes of the key material are used as the salt value
+ * in the nonce. Update the AES key length.
+ */
+ ctx->cdata.keylen = keylen - 4;
+ dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
+ ctx->dir);
+
+ return rfc4543_set_sh_desc(aead);
+}
+
+static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_skcipher_alg *alg =
+ container_of(crypto_skcipher_alg(skcipher),
+ struct caam_skcipher_alg, skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
+ u32 *desc;
+ u32 ctx1_iv_off = 0;
+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
+ OP_ALG_AAI_CTR_MOD128);
+ const bool is_rfc3686 = alg->caam.rfc3686;
+
+ print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
+
+ /*
+ * AES-CTR needs to load IV in CONTEXT1 reg
+ * at an offset of 128 bits (16 bytes)
+ * CONTEXT1[255:128] = IV
+ */
+ if (ctr_mode)
+ ctx1_iv_off = 16;
+
+ /*
+ * RFC3686 specific:
+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER}
+ * | *key = {KEY, NONCE}
+ */
+ if (is_rfc3686) {
+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
+ keylen -= CTR_RFC3686_NONCE_SIZE;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
+ ctx1_iv_off);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
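+
+/*
+ * For rfc3686(ctr(aes)), the blob passed to setkey is {KEY, NONCE}
+ * (illustrative):
+ *
+ *	keylen as passed in = AES key size + CTR_RFC3686_NONCE_SIZE
+ *	ctx->cdata.keylen   = keylen - CTR_RFC3686_NONCE_SIZE
+ *
+ * The descriptor then assembles CONTEXT1[255:128] as {NONCE, IV, COUNTER},
+ * which is why ctx1_iv_off is 16 + CTR_RFC3686_NONCE_SIZE above.
+ */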
+
+static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ struct caam_flc *flc;
+ u32 *desc;
+
+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+ dev_err(dev, "key size mismatch\n");
+ crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ ctx->cdata.keylen = keylen;
+ ctx->cdata.key_virt = key;
+ ctx->cdata.key_inline = true;
+
+ /* xts_skcipher_encrypt shared descriptor */
+ flc = &ctx->flc[ENCRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ /* xts_skcipher_decrypt shared descriptor */
+ flc = &ctx->flc[DECRYPT];
+ desc = flc->sh_desc;
+ cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
+ sizeof(flc->flc) + desc_bytes(desc),
+ ctx->dir);
+
+ return 0;
+}
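+
+/*
+ * XTS takes two equal-size AES keys concatenated by the caller, so the
+ * only blob lengths accepted above are 32 and 64 bytes (illustrative):
+ *
+ *	crypto_skcipher_setkey(tfm, key, 2 * AES_MIN_KEY_SIZE);   AES-128-XTS
+ *	crypto_skcipher_setkey(tfm, key, 2 * AES_MAX_KEY_SIZE);   AES-256-XTS
+ *
+ * Anything else fails the "key size mismatch" check above.
+ */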
+
+static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_request *req_ctx = skcipher_request_ctx(req);
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct device *dev = ctx->dev;
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct skcipher_edesc *edesc;
+ dma_addr_t iv_dma;
+ u8 *iv;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src, req->cryptlen);
+ if (unlikely(src_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
+ req->cryptlen);
+ return ERR_PTR(src_nents);
+ }
+
+ if (unlikely(req->dst != req->src)) {
+ dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
+ if (unlikely(dst_nents < 0)) {
+ dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
+ req->cryptlen);
+ return ERR_PTR(dst_nents);
+ }
+
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
+ DMA_FROM_DEVICE);
+ if (unlikely(!mapped_dst_nents)) {
+ dev_err(dev, "unable to map destination\n");
+ dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ERR_PTR(-ENOMEM);
+ }
+ } else {
+ mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
+ DMA_BIDIRECTIONAL);
+ if (unlikely(!mapped_src_nents)) {
+ dev_err(dev, "unable to map source\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ qm_sg_ents = 1 + mapped_src_nents;
+ dst_sg_idx = qm_sg_ents;
+
+ qm_sg_ents += mapped_dst_nents > 1 ? mapped_dst_nents : 0;
+ qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
+ if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
+ ivsize > CAAM_QI_MEMCACHE_SIZE)) {
+ dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
+ qm_sg_ents, ivsize);
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* allocate space for base edesc, link tables and IV */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (unlikely(!edesc)) {
+ dev_err(dev, "could not allocate extended descriptor\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Make sure IV is located in a DMAable area */
+ sg_table = &edesc->sgt[0];
+ iv = (u8 *)(sg_table + qm_sg_ents);
+ memcpy(iv, req->iv, ivsize);
+
+ iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, iv_dma)) {
+ dev_err(dev, "unable to map IV\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
+ 0, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ edesc->src_nents = src_nents;
+ edesc->dst_nents = dst_nents;
+ edesc->iv_dma = iv_dma;
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
+ sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + 1, 0);
+
+ if (mapped_dst_nents > 1)
+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table +
+ dst_sg_idx, 0);
+
+ edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
+ dev_err(dev, "unable to map S/G table\n");
+ caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
+ iv_dma, ivsize, 0, 0);
+ qi_cache_free(edesc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
+ dpaa2_fl_set_len(out_fle, req->cryptlen);
+
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+
+ if (req->src == req->dst) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
+ sizeof(*sg_table));
+ } else if (mapped_dst_nents > 1) {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
+ sizeof(*sg_table));
+ } else {
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
+ }
+
+ return edesc;
+}
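+
+/*
+ * Frame list produced above (illustrative): fd_flt[1] is the input entry
+ * and fd_flt[0] the output entry, as consumed by the DPSECI hardware:
+ *
+ *	in_fle  : S/G -> [IV][src...]             len = cryptlen + ivsize
+ *	out_fle : S/G or single -> dst            len = cryptlen
+ */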
+
+static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
+ struct aead_request *req)
+{
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ivsize = crypto_aead_ivsize(aead);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
+}
+
+static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
+ edesc->iv_dma, ivsize, edesc->qm_sg_dma, edesc->qm_sg_bytes);
+}
+
+static void aead_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static void aead_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct aead_request *req = container_of(areq, struct aead_request,
+ base);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct aead_edesc *edesc = req_ctx->edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ /*
+ * Verify that the hw ICV check passed; otherwise return -EBADMSG.
+ */
+ if ((status & JRSTA_CCBERR_ERRID_MASK) ==
+ JRSTA_CCBERR_ERRID_ICVCHK)
+ ecode = -EBADMSG;
+ else
+ ecode = -EIO;
+ }
+
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ aead_request_complete(req, ecode);
+}
+
+static int aead_encrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, true);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = aead_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
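+ /*
+ * On -EINPROGRESS (or -EBUSY when the request may be backlogged),
+ * ownership of the descriptor passes to the completion callback;
+ * any other outcome is cleaned up here.
+ */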
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int aead_decrypt(struct aead_request *req)
+{
+ struct aead_edesc *edesc;
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ struct caam_ctx *ctx = crypto_aead_ctx(aead);
+ struct caam_request *caam_req = aead_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = aead_edesc_alloc(req, false);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = aead_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ aead_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
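+/*
+ * The rfc4106/rfc4543 templates carry the 8-byte IV at the tail of the
+ * associated data, so assoclen must cover at least those 8 bytes.
+ */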
+static int ipsec_gcm_encrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_encrypt(req);
+}
+
+static int ipsec_gcm_decrypt(struct aead_request *req)
+{
+ if (req->assoclen < 8)
+ return -EINVAL;
+
+ return aead_decrypt(req);
+}
+
+static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block. This is used e.g. by the CTS mode.
+ */
+ scatterwalk_map_and_copy(req->iv, req->dst, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct skcipher_request *req = skcipher_request_cast(areq);
+ struct caam_request *req_ctx = to_caam_req(areq);
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct skcipher_edesc *edesc = req_ctx->edesc;
+ int ecode = 0;
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
+ edesc->src_nents > 1 ? 100 : ivsize, 1);
+ caam_dump_sg(KERN_DEBUG, "dst @" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
+ edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
+
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ skcipher_request_complete(req, ecode);
+}
+
+static int skcipher_encrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ caam_req->flc = &ctx->flc[ENCRYPT];
+ caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
+ caam_req->cbk = skcipher_encrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int skcipher_decrypt(struct skcipher_request *req)
+{
+ struct skcipher_edesc *edesc;
+ struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_skcipher_ctx(skcipher);
+ struct caam_request *caam_req = skcipher_request_ctx(req);
+ int ivsize = crypto_skcipher_ivsize(skcipher);
+ int ret;
+
+ /* allocate extended descriptor */
+ edesc = skcipher_edesc_alloc(req);
+ if (IS_ERR(edesc))
+ return PTR_ERR(edesc);
+
+ /*
+ * The crypto API expects us to set the IV (req->iv) to the last
+ * ciphertext block.
+ */
+ scatterwalk_map_and_copy(req->iv, req->src, req->cryptlen - ivsize,
+ ivsize, 0);
+
+ caam_req->flc = &ctx->flc[DECRYPT];
+ caam_req->flc_dma = ctx->flc_dma[DECRYPT];
+ caam_req->cbk = skcipher_decrypt_done;
+ caam_req->ctx = &req->base;
+ caam_req->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ skcipher_unmap(ctx->dev, edesc, req);
+ qi_cache_free(edesc);
+ }
+
+ return ret;
+}
+
+static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
+ bool uses_dkp)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ /* copy descriptor header template value */
+ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;
+
+ ctx->dev = caam->dev;
+ ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
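+ /*
+ * The flow contexts and the key are laid out back to back at the
+ * start of caam_ctx, so a single mapping spanning everything up to
+ * the flc_dma member covers both; per-operation DMA handles are then
+ * derived from that one mapping.
+ */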
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
+ offsetof(struct caam_ctx, flc_dma),
+ ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map key, shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+ ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);
+
+ return 0;
+}
+
+static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
+{
+ struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
+ struct caam_skcipher_alg *caam_alg =
+ container_of(alg, typeof(*caam_alg), skcipher);
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_skcipher_ctx(tfm), &caam_alg->caam, false);
+}
+
+static int caam_cra_init_aead(struct crypto_aead *tfm)
+{
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
+ aead);
+
+ crypto_aead_set_reqsize(tfm, sizeof(struct caam_request));
+ return caam_cra_init(crypto_aead_ctx(tfm), &caam_alg->caam,
+ alg->setkey == aead_setkey);
+}
+
+static void caam_exit_common(struct caam_ctx *ctx)
+{
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
+ offsetof(struct caam_ctx, flc_dma), ctx->dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static void caam_cra_exit(struct crypto_skcipher *tfm)
+{
+ caam_exit_common(crypto_skcipher_ctx(tfm));
+}
+
+static void caam_cra_exit_aead(struct crypto_aead *tfm)
+{
+ caam_exit_common(crypto_aead_ctx(tfm));
+}
+
+static struct caam_skcipher_alg driver_algs[] = {
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des3_ede)",
+ .cra_driver_name = "cbc-3des-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES3_EDE_KEY_SIZE,
+ .max_keysize = DES3_EDE_KEY_SIZE,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "cbc(des)",
+ .cra_driver_name = "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = DES_KEY_SIZE,
+ .max_keysize = DES_KEY_SIZE,
+ .ivsize = DES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "rfc3686(ctr(aes))",
+ .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = AES_MIN_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE +
+ CTR_RFC3686_NONCE_SIZE,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .chunksize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .skcipher = {
+ .base = {
+ .cra_name = "xts(aes)",
+ .cra_driver_name = "xts-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = xts_skcipher_setkey,
+ .encrypt = skcipher_encrypt,
+ .decrypt = skcipher_decrypt,
+ .min_keysize = 2 * AES_MIN_KEY_SIZE,
+ .max_keysize = 2 * AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
+ }
+};
+
+static struct caam_aead_alg driver_aeads[] = {
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4106(gcm(aes))",
+ .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4106_setkey,
+ .setauthsize = rfc4106_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "rfc4543(gcm(aes))",
+ .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = rfc4543_setkey,
+ .setauthsize = rfc4543_setauthsize,
+ .encrypt = ipsec_gcm_encrypt,
+ .decrypt = ipsec_gcm_decrypt,
+ .ivsize = 8,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ },
+ },
+ /* Galois Counter Mode */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "gcm(aes)",
+ .cra_driver_name = "gcm-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = gcm_setkey,
+ .setauthsize = gcm_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = 12,
+ .maxauthsize = AES_BLOCK_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
+ }
+ },
+ /* single-pass ipsec_esp descriptor */
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(aes))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-aes-caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(aes)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-aes-"
+ "caam-qi2",
+ .cra_blocksize = AES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "cbc(des3_ede))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des3_ede)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-"
+ "cbc-des3_ede-caam-qi2",
+ .cra_blocksize = DES3_EDE_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),cbc(des))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(md5),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-hmac-md5-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha1),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha1-cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha224),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha224-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha256),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha256-cbc-desi-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha384),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha384-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),cbc(des))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "cbc-des-caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "echainiv(authenc(hmac(sha512),"
+ "cbc(des)))",
+ .cra_driver_name = "echainiv-authenc-"
+ "hmac-sha512-cbc-des-"
+ "caam-qi2",
+ .cra_blocksize = DES_BLOCK_SIZE,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .geniv = true,
+ }
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(md5),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(md5),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-md5-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha1),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha1),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha1-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA1_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha224),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc("
+ "hmac(sha224),rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha224-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA224_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha256),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha256),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha256-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA256_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha384),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha384),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha384-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA384_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "authenc(hmac(sha512),"
+ "rfc3686(ctr(aes)))",
+ .cra_driver_name = "authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ },
+ },
+ {
+ .aead = {
+ .base = {
+ .cra_name = "seqiv(authenc(hmac(sha512),"
+ "rfc3686(ctr(aes))))",
+ .cra_driver_name = "seqiv-authenc-hmac-sha512-"
+ "rfc3686-ctr-aes-caam-qi2",
+ .cra_blocksize = 1,
+ },
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .ivsize = CTR_RFC3686_IV_SIZE,
+ .maxauthsize = SHA512_DIGEST_SIZE,
+ },
+ .caam = {
+ .class1_alg_type = OP_ALG_ALGSEL_AES |
+ OP_ALG_AAI_CTR_MOD128,
+ .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
+ OP_ALG_AAI_HMAC_PRECOMP,
+ .rfc3686 = true,
+ .geniv = true,
+ },
+ },
+};
+
+static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
+{
+ struct skcipher_alg *alg = &t_alg->skcipher;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_skcipher;
+ alg->exit = caam_cra_exit;
+}
+
+static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
+{
+ struct aead_alg *alg = &t_alg->aead;
+
+ alg->base.cra_module = THIS_MODULE;
+ alg->base.cra_priority = CAAM_CRA_PRIORITY;
+ alg->base.cra_ctxsize = sizeof(struct caam_ctx);
+ alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
+
+ alg->init = caam_cra_init_aead;
+ alg->exit = caam_cra_exit_aead;
+}
+
+/* max hash key is max split key size */
+#define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
+
+#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
+
+/* caam context sizes for hashes: running digest + 8-byte message length */
+#define HASH_MSG_LEN 8
+#define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
+
+enum hash_optype {
+ UPDATE = 0,
+ UPDATE_FIRST,
+ FINALIZE,
+ DIGEST,
+ HASH_NUM_OP
+};
+
+/**
+ * caam_hash_ctx - ahash per-session context
+ * @flc: Flow Contexts array
+ * @flc_dma: I/O virtual addresses of the Flow Contexts
+ * @dev: dpseci device
+ * @ctx_len: size of Context Register
+ * @adata: hashing algorithm details
+ */
+struct caam_hash_ctx {
+ struct caam_flc flc[HASH_NUM_OP];
+ dma_addr_t flc_dma[HASH_NUM_OP];
+ struct device *dev;
+ int ctx_len;
+ struct alginfo adata;
+};
+
+/* ahash state */
+struct caam_hash_state {
+ struct caam_request caam_req;
+ dma_addr_t buf_dma;
+ dma_addr_t ctx_dma;
+ u8 buf_0[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_0;
+ u8 buf_1[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
+ int buflen_1;
+ u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+ int current_buf;
+};
+
+struct caam_export_state {
+ u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
+ u8 caam_ctx[MAX_CTX_LEN];
+ int buflen;
+ int (*update)(struct ahash_request *req);
+ int (*final)(struct ahash_request *req);
+ int (*finup)(struct ahash_request *req);
+};
+
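+/*
+ * Partial blocks are bounced through two buffers used in ping-pong
+ * fashion: the current buffer holds data carried over from the previous
+ * update and is hashed together with the new input, while the alternate
+ * buffer collects the new trailing partial block for the next call.
+ */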
+static inline void switch_buf(struct caam_hash_state *state)
+{
+ state->current_buf ^= 1;
+}
+
+static inline u8 *current_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_1 : state->buf_0;
+}
+
+static inline u8 *alt_buf(struct caam_hash_state *state)
+{
+ return state->current_buf ? state->buf_0 : state->buf_1;
+}
+
+static inline int *current_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_1 : &state->buflen_0;
+}
+
+static inline int *alt_buflen(struct caam_hash_state *state)
+{
+ return state->current_buf ? &state->buflen_0 : &state->buflen_1;
+}
+
+/* Map the current state buffer (if non-empty) and add it to the link table */
+static inline int buf_map_to_qm_sg(struct device *dev,
+ struct dpaa2_sg_entry *qm_sg,
+ struct caam_hash_state *state)
+{
+ int buflen = *current_buflen(state);
+
+ if (!buflen)
+ return 0;
+
+ state->buf_dma = dma_map_single(dev, current_buf(state), buflen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, state->buf_dma)) {
+ dev_err(dev, "unable to map buf\n");
+ state->buf_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);
+
+ return 0;
+}
+
+/* Map state->caam_ctx and add it to the link table */
+static inline int ctx_map_to_qm_sg(struct device *dev,
+ struct caam_hash_state *state, int ctx_len,
+ struct dpaa2_sg_entry *qm_sg, u32 flag)
+{
+ state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
+ if (dma_mapping_error(dev, state->ctx_dma)) {
+ dev_err(dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ return -ENOMEM;
+ }
+
+ dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);
+
+ return 0;
+}
+
+static int ahash_set_sh_desc(struct crypto_ahash *ahash)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
+ struct caam_flc *flc;
+ u32 *desc;
+
+ /* ahash_update shared descriptor */
+ flc = &ctx->flc[UPDATE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_update_first shared descriptor */
+ flc = &ctx->flc[UPDATE_FIRST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_final shared descriptor */
+ flc = &ctx->flc[FINALIZE];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ /* ahash_digest shared descriptor */
+ flc = &ctx->flc[DIGEST];
+ desc = flc->sh_desc;
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, priv->sec_attr.era);
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
+ desc_bytes(desc), DMA_BIDIRECTIONAL);
+ print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ return 0;
+}
+
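+/* Completion bookkeeping for the synchronous key-digest job below */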
+struct split_key_sh_result {
+ struct completion completion;
+ int err;
+ struct device *dev;
+};
+
+static void split_key_sh_done(void *cbk_ctx, u32 err)
+{
+ struct split_key_sh_result *res = cbk_ctx;
+
+ dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);
+
+ if (err)
+ caam_qi2_strstatus(res->dev, err);
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+/* Digest the key when it is too long, reducing it to digestsize bytes */
+static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
+ u32 *keylen, u8 *key_out, u32 digestsize)
+{
+ struct caam_request *req_ctx;
+ u32 *desc;
+ struct split_key_sh_result result;
+ dma_addr_t src_dma, dst_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ int ret = -ENOMEM;
+ struct dpaa2_fl_entry *in_fle, *out_fle;
+
+ req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
+ if (!req_ctx)
+ return -ENOMEM;
+
+ in_fle = &req_ctx->fd_flt[1];
+ out_fle = &req_ctx->fd_flt[0];
+
+ flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
+ if (!flc)
+ goto err_flc;
+
+ src_dma = dma_map_single(ctx->dev, (void *)key_in, *keylen,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, src_dma)) {
+ dev_err(ctx->dev, "unable to map key input memory\n");
+ goto err_src_dma;
+ }
+ dst_dma = dma_map_single(ctx->dev, (void *)key_out, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, dst_dma)) {
+ dev_err(ctx->dev, "unable to map key output memory\n");
+ goto err_dst_dma;
+ }
+
+ desc = flc->sh_desc;
+
+ init_sh_desc(desc, 0);
+
+ /* descriptor to perform unkeyed hash on key_in */
+ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
+ OP_ALG_AS_INITFINAL);
+ append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
+ FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
+ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
+ desc_bytes(desc), DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, flc_dma)) {
+ dev_err(ctx->dev, "unable to map shared descriptor\n");
+ goto err_flc_dma;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, src_dma);
+ dpaa2_fl_set_len(in_fle, *keylen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in, *keylen, 1);
+ print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+ 1);
+
+ result.err = 0;
+ init_completion(&result.completion);
+ result.dev = ctx->dev;
+
+ req_ctx->flc = flc;
+ req_ctx->flc_dma = flc_dma;
+ req_ctx->cbk = split_key_sh_done;
+ req_ctx->ctx = &result;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS) {
+ /* wait for the hardware to complete the job */
+ wait_for_completion(&result.completion);
+ ret = result.err;
+ print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, key_in,
+ digestsize, 1);
+ }
+
+ dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
+ DMA_TO_DEVICE);
+err_flc_dma:
+ dma_unmap_single(ctx->dev, dst_dma, digestsize, DMA_FROM_DEVICE);
+err_dst_dma:
+ dma_unmap_single(ctx->dev, src_dma, *keylen, DMA_TO_DEVICE);
+err_src_dma:
+ kfree(flc);
+err_flc:
+ kfree(req_ctx);
+
+ *keylen = digestsize;
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
+ unsigned int keylen)
+{
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
+ unsigned int digestsize = crypto_ahash_digestsize(ahash);
+ int ret;
+ u8 *hashed_key = NULL;
+
+ dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
+
+ if (keylen > blocksize) {
+ hashed_key = kmalloc_array(digestsize, sizeof(*hashed_key),
+ GFP_KERNEL | GFP_DMA);
+ if (!hashed_key)
+ return -ENOMEM;
+ ret = hash_digest_key(ctx, key, &keylen, hashed_key,
+ digestsize);
+ if (ret)
+ goto bad_free_key;
+ key = hashed_key;
+ }
+
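+	/*
+	 * The split key consists of the precomputed HMAC ipad/opad hash
+	 * states, so its padded length depends only on the selected
+	 * algorithm; it must fit the space reserved for hash keys.
+	 */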
+ ctx->adata.keylen = keylen;
+ ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
+ OP_ALG_ALGSEL_MASK);
+ if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
+ goto bad_free_key;
+
+ ctx->adata.key_virt = key;
+ ctx->adata.key_inline = true;
+
+ ret = ahash_set_sh_desc(ahash);
+ kfree(hashed_key);
+ return ret;
+bad_free_key:
+ kfree(hashed_key);
+ crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (edesc->src_nents)
+ dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);
+ if (edesc->dst_dma)
+ dma_unmap_single(dev, edesc->dst_dma, dst_len, DMA_FROM_DEVICE);
+
+ if (edesc->qm_sg_bytes)
+ dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
+ DMA_TO_DEVICE);
+
+ if (state->buf_dma) {
+ dma_unmap_single(dev, state->buf_dma, *current_buflen(state),
+ DMA_TO_DEVICE);
+ state->buf_dma = 0;
+ }
+}
+
+static inline void ahash_unmap_ctx(struct device *dev,
+ struct ahash_edesc *edesc,
+ struct ahash_request *req, int dst_len,
+ u32 flag)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ if (state->ctx_dma) {
+ dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag);
+ state->ctx_dma = 0;
+ }
+ ahash_unmap(dev, edesc, req, dst_len);
+}
+
+static void ahash_done(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_bi(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ digestsize, 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
+{
+ struct crypto_async_request *areq = cbk_ctx;
+ struct ahash_request *req = ahash_request_cast(areq);
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct ahash_edesc *edesc = state->caam_req.edesc;
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ int ecode = 0;
+
+ dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
+
+ if (unlikely(status)) {
+ caam_qi2_strstatus(ctx->dev, status);
+ ecode = -EIO;
+ }
+
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
+ switch_buf(state);
+ qi_cache_free(edesc);
+
+ print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
+ ctx->ctx_len, 1);
+ if (req->result)
+ print_hex_dump_debug("result@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, req->result,
+ crypto_ahash_digestsize(ahash), 1);
+
+ req->base.complete(&req->base, ecode);
+}
+
+static int ahash_update_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state), last_buflen;
+ int in_len = *buflen + req->nbytes, to_hash;
+ int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
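+ /*
+ * Hash only whole blocks this round: the remainder of in_len modulo
+ * the block size is copied into the alternate buffer and prepended to
+ * the data of the next update call.
+ */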
+ last_buflen = *next_buflen;
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (*buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) *
+ sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ if (mapped_nents) {
+ sg_to_qm_sg_last(req->src, mapped_nents,
+ sg_table + qm_sg_src_index, 0);
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+ } else {
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
+ true);
+ }
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE];
+ req_ctx->cbk = ahash_done_bi;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = last_buflen;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return -ENOMEM;
+
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = qm_sg_src_index * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, qm_sg_src_index;
+ int src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_src_index = 1 + (buflen ? 1 : 0);
+ qm_sg_bytes = (qm_sg_src_index + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto unmap_ctx;
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + qm_sg_src_index, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[FINALIZE];
+ req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
+ req_ctx->cbk = ahash_done_ctx_src;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, digestsize, DMA_FROM_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_digest(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ state->buf_dma = 0;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return ret;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return ret;
+ }
+
+ edesc->src_nents = src_nents;
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
+
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_final_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int buflen = *current_buflen(state);
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ int ret = -ENOMEM;
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc)
+ return ret;
+
+ state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->buf_dma)) {
+ dev_err(ctx->dev, "unable to map src\n");
+ goto unmap;
+ }
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, state->buf_dma);
+ dpaa2_fl_set_len(in_fle, buflen);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret == -EINPROGRESS ||
+ (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ return ret;
+
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_update_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *buf = current_buf(state);
+ int *buflen = current_buflen(state);
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int in_len = *buflen + req->nbytes, to_hash;
+ int qm_sg_bytes, src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+ to_hash = in_len - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - *next_buflen);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (1 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap_ctx;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src,
+ to_hash - *buflen,
+ *next_buflen, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes, DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, to_hash);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY &&
+ req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
+ req->nbytes, 0);
+ *buflen = *next_buflen;
+ *next_buflen = 0;
+ }
+
+ print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1);
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_no_ctx(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int buflen = *current_buflen(state);
+ int qm_sg_bytes, src_nents, mapped_nents;
+ int digestsize = crypto_ahash_digestsize(ahash);
+ struct ahash_edesc *edesc;
+ struct dpaa2_sg_entry *sg_table;
+ int ret;
+
+ src_nents = sg_nents_for_len(req->src, req->nbytes);
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to DMA map source\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ qm_sg_bytes = (2 + mapped_nents) * sizeof(*sg_table);
+ sg_table = &edesc->sgt[0];
+
+ ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
+ if (ret)
+ goto unmap;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table + 1, 0);
+
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+
+ edesc->dst_dma = dma_map_single(ctx->dev, req->result, digestsize,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->dst_dma)) {
+ dev_err(ctx->dev, "unable to map dst\n");
+ edesc->dst_dma = 0;
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, edesc->dst_dma);
+ dpaa2_fl_set_len(out_fle, digestsize);
+
+ req_ctx->flc = &ctx->flc[DIGEST];
+ req_ctx->flc_dma = ctx->flc_dma[DIGEST];
+ req_ctx->cbk = ahash_done;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap;
+
+ return ret;
+unmap:
+ ahash_unmap(ctx->dev, edesc, req, digestsize);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_update_first(struct ahash_request *req)
+{
+ struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+ struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_request *req_ctx = &state->caam_req;
+ struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
+ struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
+ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ u8 *next_buf = alt_buf(state);
+ int *next_buflen = alt_buflen(state);
+ int to_hash;
+ int src_nents, mapped_nents;
+ struct ahash_edesc *edesc;
+ int ret = 0;
+
+ *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
+ 1);
+ to_hash = req->nbytes - *next_buflen;
+
+ if (to_hash) {
+ struct dpaa2_sg_entry *sg_table;
+
+ src_nents = sg_nents_for_len(req->src,
+ req->nbytes - (*next_buflen));
+ if (src_nents < 0) {
+ dev_err(ctx->dev, "Invalid number of src SG.\n");
+ return src_nents;
+ }
+
+ if (src_nents) {
+ mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ if (!mapped_nents) {
+ dev_err(ctx->dev, "unable to map source for DMA\n");
+ return -ENOMEM;
+ }
+ } else {
+ mapped_nents = 0;
+ }
+
+ /* allocate space for base edesc and link tables */
+ edesc = qi_cache_zalloc(GFP_DMA | flags);
+ if (!edesc) {
+ dma_unmap_sg(ctx->dev, req->src, src_nents,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ edesc->src_nents = src_nents;
+ sg_table = &edesc->sgt[0];
+
+ memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
+ dpaa2_fl_set_final(in_fle, true);
+ dpaa2_fl_set_len(in_fle, to_hash);
+
+ if (mapped_nents > 1) {
+ int qm_sg_bytes;
+
+ sg_to_qm_sg_last(req->src, mapped_nents, sg_table, 0);
+ qm_sg_bytes = mapped_nents * sizeof(*sg_table);
+ edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
+ qm_sg_bytes,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
+ dev_err(ctx->dev, "unable to map S/G table\n");
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+ edesc->qm_sg_bytes = qm_sg_bytes;
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
+ dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
+ } else {
+ dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
+ }
+
+ if (*next_buflen)
+ scatterwalk_map_and_copy(next_buf, req->src, to_hash,
+ *next_buflen, 0);
+
+ state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
+ ctx->ctx_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
+ dev_err(ctx->dev, "unable to map ctx\n");
+ state->ctx_dma = 0;
+ ret = -ENOMEM;
+ goto unmap_ctx;
+ }
+
+ dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
+ dpaa2_fl_set_addr(out_fle, state->ctx_dma);
+ dpaa2_fl_set_len(out_fle, ctx->ctx_len);
+
+ req_ctx->flc = &ctx->flc[UPDATE_FIRST];
+ req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
+ req_ctx->cbk = ahash_done_ctx_dst;
+ req_ctx->ctx = &req->base;
+ req_ctx->edesc = edesc;
+
+ ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
+ if (ret != -EINPROGRESS &&
+ !(ret == -EBUSY && req->base.flags &
+ CRYPTO_TFM_REQ_MAY_BACKLOG))
+ goto unmap_ctx;
+
+ state->update = ahash_update_ctx;
+ state->finup = ahash_finup_ctx;
+ state->final = ahash_final_ctx;
+ } else if (*next_buflen) {
+ state->update = ahash_update_no_ctx;
+ state->finup = ahash_finup_no_ctx;
+ state->final = ahash_final_no_ctx;
+ scatterwalk_map_and_copy(next_buf, req->src, 0,
+ req->nbytes, 0);
+ switch_buf(state);
+ }
+
+ print_hex_dump_debug("next buf@" __stringify(__LINE__)": ",
+ DUMP_PREFIX_ADDRESS, 16, 4, next_buf, *next_buflen,
+ 1);
+
+ return ret;
+unmap_ctx:
+ ahash_unmap_ctx(ctx->dev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+ qi_cache_free(edesc);
+ return ret;
+}
+
+static int ahash_finup_first(struct ahash_request *req)
+{
+ return ahash_digest(req);
+}
+
+static int ahash_init(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ state->update = ahash_update_first;
+ state->finup = ahash_finup_first;
+ state->final = ahash_final_no_ctx;
+
+ state->ctx_dma = 0;
+ state->current_buf = 0;
+ state->buf_dma = 0;
+ state->buflen_0 = 0;
+ state->buflen_1 = 0;
+
+ return 0;
+}
+
+static int ahash_update(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->update(req);
+}
+
+static int ahash_finup(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->finup(req);
+}
+
+static int ahash_final(struct ahash_request *req)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+
+ return state->final(req);
+}
+
+static int ahash_export(struct ahash_request *req, void *out)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ struct caam_export_state *export = out;
+ int len;
+ u8 *buf;
+
+ if (state->current_buf) {
+ buf = state->buf_1;
+ len = state->buflen_1;
+ } else {
+ buf = state->buf_0;
+ len = state->buflen_0;
+ }
+
+ memcpy(export->buf, buf, len);
+ memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
+ export->buflen = len;
+ export->update = state->update;
+ export->final = state->final;
+ export->finup = state->finup;
+
+ return 0;
+}
+
+static int ahash_import(struct ahash_request *req, const void *in)
+{
+ struct caam_hash_state *state = ahash_request_ctx(req);
+ const struct caam_export_state *export = in;
+
+ memset(state, 0, sizeof(*state));
+ memcpy(state->buf_0, export->buf, export->buflen);
+ memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
+ state->buflen_0 = export->buflen;
+ state->update = export->update;
+ state->final = export->final;
+ state->finup = export->finup;
+
+ return 0;
+}
+
+struct caam_hash_template {
+ char name[CRYPTO_MAX_ALG_NAME];
+ char driver_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_name[CRYPTO_MAX_ALG_NAME];
+ char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
+ unsigned int blocksize;
+ struct ahash_alg template_ahash;
+ u32 alg_type;
+};
+
+/* ahash descriptors */
+static struct caam_hash_template driver_hash[] = {
+ {
+ .name = "sha1",
+ .driver_name = "sha1-caam-qi2",
+ .hmac_name = "hmac(sha1)",
+ .hmac_driver_name = "hmac-sha1-caam-qi2",
+ .blocksize = SHA1_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA1,
+ }, {
+ .name = "sha224",
+ .driver_name = "sha224-caam-qi2",
+ .hmac_name = "hmac(sha224)",
+ .hmac_driver_name = "hmac-sha224-caam-qi2",
+ .blocksize = SHA224_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA224_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA224,
+ }, {
+ .name = "sha256",
+ .driver_name = "sha256-caam-qi2",
+ .hmac_name = "hmac(sha256)",
+ .hmac_driver_name = "hmac-sha256-caam-qi2",
+ .blocksize = SHA256_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA256,
+ }, {
+ .name = "sha384",
+ .driver_name = "sha384-caam-qi2",
+ .hmac_name = "hmac(sha384)",
+ .hmac_driver_name = "hmac-sha384-caam-qi2",
+ .blocksize = SHA384_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA384_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA384,
+ }, {
+ .name = "sha512",
+ .driver_name = "sha512-caam-qi2",
+ .hmac_name = "hmac(sha512)",
+ .hmac_driver_name = "hmac-sha512-caam-qi2",
+ .blocksize = SHA512_BLOCK_SIZE,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = SHA512_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_SHA512,
+ }, {
+ .name = "md5",
+ .driver_name = "md5-caam-qi2",
+ .hmac_name = "hmac(md5)",
+ .hmac_driver_name = "hmac-md5-caam-qi2",
+ .blocksize = MD5_BLOCK_WORDS * 4,
+ .template_ahash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .export = ahash_export,
+ .import = ahash_import,
+ .setkey = ahash_setkey,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
+ .statesize = sizeof(struct caam_export_state),
+ },
+ },
+ .alg_type = OP_ALG_ALGSEL_MD5,
+ }
+};
+
+struct caam_hash_alg {
+ struct list_head entry;
+ struct device *dev;
+ int alg_type;
+ struct ahash_alg ahash_alg;
+};
+
+static int caam_hash_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+ struct crypto_alg *base = tfm->__crt_alg;
+ struct hash_alg_common *halg =
+ container_of(base, struct hash_alg_common, base);
+ struct ahash_alg *alg =
+ container_of(halg, struct ahash_alg, halg);
+ struct caam_hash_alg *caam_hash =
+ container_of(alg, struct caam_hash_alg, ahash_alg);
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+ /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
+ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
+ HASH_MSG_LEN + SHA1_DIGEST_SIZE,
+ HASH_MSG_LEN + 32,
+ HASH_MSG_LEN + SHA256_DIGEST_SIZE,
+ HASH_MSG_LEN + 64,
+ HASH_MSG_LEN + SHA512_DIGEST_SIZE };
+ dma_addr_t dma_addr;
+ int i;
+
+ ctx->dev = caam_hash->dev;
+
+ dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(ctx->dev, dma_addr)) {
+ dev_err(ctx->dev, "unable to map shared descriptors\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HASH_NUM_OP; i++)
+ ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
+
+ /* copy descriptor header template value */
+ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+
+ ctx->ctx_len = runninglen[(ctx->adata.algtype &
+ OP_ALG_ALGSEL_SUBMASK) >>
+ OP_ALG_ALGSEL_SHIFT];
+
+ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+ sizeof(struct caam_hash_state));
+
+ return ahash_set_sh_desc(ahash);
+}
+
+static void caam_hash_cra_exit(struct crypto_tfm *tfm)
+{
+ struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
+ DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
+ struct caam_hash_template *template, bool keyed)
+{
+ struct caam_hash_alg *t_alg;
+ struct ahash_alg *halg;
+ struct crypto_alg *alg;
+
+ t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
+ if (!t_alg)
+ return ERR_PTR(-ENOMEM);
+
+ t_alg->ahash_alg = template->template_ahash;
+ halg = &t_alg->ahash_alg;
+ alg = &halg->halg.base;
+
+ if (keyed) {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->hmac_driver_name);
+ } else {
+ snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->name);
+ snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+ template->driver_name);
+ t_alg->ahash_alg.setkey = NULL;
+ }
+ alg->cra_module = THIS_MODULE;
+ alg->cra_init = caam_hash_cra_init;
+ alg->cra_exit = caam_hash_cra_exit;
+ alg->cra_ctxsize = sizeof(struct caam_hash_ctx);
+ alg->cra_priority = CAAM_CRA_PRIORITY;
+ alg->cra_blocksize = template->blocksize;
+ alg->cra_alignmask = 0;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
+
+ t_alg->alg_type = template->alg_type;
+ t_alg->dev = dev;
+
+ return t_alg;
+}
+
+static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+
+ ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
+ napi_schedule_irqoff(&ppriv->napi);
+}
+
+static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_io_notification_ctx *nctx;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->priv = priv;
+ nctx = &ppriv->nctx;
+ nctx->is_cdan = 0;
+ nctx->id = ppriv->rsp_fqid;
+ nctx->desired_cpu = cpu;
+ nctx->cb = dpaa2_caam_fqdan_cb;
+
+ /* Register notification callbacks */
+ err = dpaa2_io_service_register(NULL, nctx);
+ if (unlikely(err)) {
+ dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
+ nctx->cb = NULL;
+ /*
+ * If no affine DPIO for this core, there's probably
+ * none available for next cores either. Signal we want
+ * to retry later, in case the DPIO devices weren't
+ * probed yet.
+ */
+ err = -EPROBE_DEFER;
+ goto err;
+ }
+
+ ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
+ dev);
+ if (unlikely(!ppriv->store)) {
+ dev_err(dev, "dpaa2_io_store_create() failed\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err:
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->nctx.cb)
+ break;
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ }
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ if (!ppriv->store)
+ break;
+ dpaa2_io_store_destroy(ppriv->store);
+ }
+
+ return err;
+}
+
+static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i = 0, cpu;
+
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ dpaa2_io_service_deregister(NULL, &ppriv->nctx);
+ dpaa2_io_store_destroy(ppriv->store);
+
+ if (++i == priv->num_pairs)
+ return;
+ }
+}
+
+static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
+{
+ struct dpseci_rx_queue_cfg rx_queue_cfg;
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err = 0, i = 0, cpu;
+
+ /* Configure Rx queues */
+ for_each_online_cpu(cpu) {
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+
+ rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
+ DPSECI_QUEUE_OPT_USER_CTX;
+ rx_queue_cfg.order_preservation_en = 0;
+ rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
+ rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
+ /*
+ * Rx priority (WQ) doesn't really matter, since we use
+ * pull mode, i.e. volatile dequeues from specific FQs
+ */
+ rx_queue_cfg.dest_cfg.priority = 0;
+ rx_queue_cfg.user_ctx = ppriv->nctx.qman64;
+
+ err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &rx_queue_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
+ err);
+ return err;
+ }
+
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return err;
+}
+
+static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+
+ if (!priv->cscn_mem)
+ return;
+
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ kfree(priv->cscn_mem);
+}
+
+static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+
+ dpaa2_dpseci_congestion_free(priv);
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
+ const struct dpaa2_fd *fd)
+{
+ struct caam_request *req;
+ u32 fd_err;
+
+ if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
+ dev_err(priv->dev, "Only Frame List FD format is supported!\n");
+ return;
+ }
+
+ fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
+ if (unlikely(fd_err))
+ dev_err(priv->dev, "FD error: %08x\n", fd_err);
+
+ /*
+ * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
+ * in FD[ERR] or FD[FRC].
+ */
+ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
+ dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
+}
+
+static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ int err;
+
+ /* Retry while portal is busy */
+ do {
+ err = dpaa2_io_service_pull_fq(NULL, ppriv->rsp_fqid,
+ ppriv->store);
+ } while (err == -EBUSY);
+
+ if (unlikely(err))
+ dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n", err);
+
+ return err;
+}
+
+static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
+{
+ struct dpaa2_dq *dq;
+ int cleaned = 0, is_last;
+
+ do {
+ dq = dpaa2_io_store_next(ppriv->store, &is_last);
+ if (unlikely(!dq)) {
+ if (unlikely(!is_last)) {
+ dev_dbg(ppriv->priv->dev,
+ "FQ %d returned no valid frames\n",
+ ppriv->rsp_fqid);
+ /*
+ * MUST retry until we get some sort of
+ * valid response token (be it "empty dequeue"
+ * or a valid frame).
+ */
+ continue;
+ }
+ break;
+ }
+
+ /* Process FD */
+ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
+ cleaned++;
+ } while (!is_last);
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
+{
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct dpaa2_caam_priv *priv;
+ int err, cleaned = 0, store_cleaned;
+
+ ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
+ priv = ppriv->priv;
+
+ if (unlikely(dpaa2_caam_pull_fq(ppriv)))
+ return 0;
+
+ do {
+ store_cleaned = dpaa2_caam_store_consume(ppriv);
+ cleaned += store_cleaned;
+
+ if (store_cleaned == 0 ||
+ cleaned > budget - DPAA2_CAAM_STORE_SIZE)
+ break;
+
+ /* Try to dequeue some more */
+ err = dpaa2_caam_pull_fq(ppriv);
+ if (unlikely(err))
+ break;
+ } while (1);
+
+ if (cleaned < budget) {
+ napi_complete_done(napi, cleaned);
+ err = dpaa2_io_service_rearm(NULL, &ppriv->nctx);
+ if (unlikely(err))
+ dev_err(priv->dev, "Notification rearm failed: %d\n",
+ err);
+ }
+
+ return cleaned;
+}
+
+static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
+ u16 token)
+{
+ struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
+ struct device *dev = priv->dev;
+ int err;
+
+ /*
+ * Congestion group feature supported starting with DPSECI API v5.1
+ * and only when object has been created with this capability.
+ */
+ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
+ !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
+ return 0;
+
+ priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->cscn_mem)
+ return -ENOMEM;
+
+ priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
+ priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
+ DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, priv->cscn_dma)) {
+ dev_err(dev, "Error mapping CSCN memory area\n");
+ err = -ENOMEM;
+ goto err_dma_map;
+ }
+
+ cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
+ cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
+ cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
+ cong_notif_cfg.message_ctx = (uintptr_t)priv;
+ cong_notif_cfg.message_iova = priv->cscn_dma;
+ cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
+ DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
+ DPSECI_CGN_MODE_COHERENT_WRITE;
+
+ err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
+ &cong_notif_cfg);
+ if (err) {
+ dev_err(dev, "dpseci_set_congestion_notification failed\n");
+ goto err_set_cong;
+ }
+
+ return 0;
+
+err_set_cong:
+ dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
+err_dma_map:
+ kfree(priv->cscn_mem);
+
+ return err;
+}
+
+static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev = &ls_dev->dev;
+ struct dpaa2_caam_priv *priv;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int err, cpu;
+ u8 i;
+
+ priv = dev_get_drvdata(dev);
+
+ priv->dev = dev;
+ priv->dpsec_id = ls_dev->obj_desc.id;
+
+ /* Get a handle for the DPSECI this interface is associated with */
+ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_open() failed: %d\n", err);
+ goto err_open;
+ }
+
+ err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
+ &priv->minor_ver);
+ if (err) {
+ dev_err(dev, "dpseci_get_api_version() failed\n");
+ goto err_get_vers;
+ }
+
+ dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);
+
+ err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->dpseci_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_attributes() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
+ &priv->sec_attr);
+ if (err) {
+ dev_err(dev, "dpseci_get_sec_attr() failed\n");
+ goto err_get_vers;
+ }
+
+ err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "setup_congestion() failed\n");
+ goto err_get_vers;
+ }
+
+ priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
+ priv->dpseci_attr.num_tx_queues);
+ if (priv->num_pairs > num_online_cpus()) {
+ dev_warn(dev, "%d queues won't be used\n",
+ priv->num_pairs - num_online_cpus());
+ priv->num_pairs = num_online_cpus();
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
+ err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->rx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_rx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
+ err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
+ &priv->tx_queue_attr[i]);
+ if (err) {
+ dev_err(dev, "dpseci_get_tx_queue() failed\n");
+ goto err_get_rx_queue;
+ }
+ }
+
+ i = 0;
+ for_each_online_cpu(cpu) {
+ dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", i,
+ priv->rx_queue_attr[i].fqid,
+ priv->tx_queue_attr[i].fqid);
+
+ ppriv = per_cpu_ptr(priv->ppriv, cpu);
+ ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
+ ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
+ ppriv->prio = i;
+
+ ppriv->net_dev.dev = *dev;
+ INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
+ netif_napi_add(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll,
+ DPAA2_CAAM_NAPI_WEIGHT);
+ if (++i == priv->num_pairs)
+ break;
+ }
+
+ return 0;
+
+err_get_rx_queue:
+ dpaa2_dpseci_congestion_free(priv);
+err_get_vers:
+ dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
+err_open:
+ return err;
+}
+
+static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ int i;
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_enable(&ppriv->napi);
+ }
+
+ return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
+}
+
+static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct dpaa2_caam_priv_per_cpu *ppriv;
+ struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
+ int i, err = 0, enabled;
+
+ err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
+ if (err) {
+ dev_err(dev, "dpseci_disable() failed\n");
+ return err;
+ }
+
+ err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
+ if (err) {
+ dev_err(dev, "dpseci_is_enabled() failed\n");
+ return err;
+ }
+
+ dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");
+
+ for (i = 0; i < priv->num_pairs; i++) {
+ ppriv = per_cpu_ptr(priv->ppriv, i);
+ napi_disable(&ppriv->napi);
+ netif_napi_del(&ppriv->napi);
+ }
+
+ return 0;
+}
+
+static struct list_head hash_list;
+
+static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i, err = 0;
+ bool registered = false;
+
+ /*
+ * There is no way to get CAAM endianness - there is no direct register
+ * space access and MC f/w does not provide this attribute.
+ * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
+ * property.
+ */
+ caam_little_end = true;
+
+ caam_imx = false;
+
+ dev = &dpseci_dev->dev;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ priv->domain = iommu_get_domain_for_dev(dev);
+
+ qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
+ 0, SLAB_CACHE_DMA, NULL);
+ if (!qi_cache) {
+ dev_err(dev, "Can't allocate SEC cache\n");
+ return -ENOMEM;
+ }
+
+ err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
+ if (err) {
+ dev_err(dev, "dma_set_mask_and_coherent() failed\n");
+ goto err_dma_mask;
+ }
+
+ /* Obtain a MC portal */
+ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "MC portal allocation failed\n");
+
+ goto err_dma_mask;
+ }
+
+ priv->ppriv = alloc_percpu(*priv->ppriv);
+ if (!priv->ppriv) {
+ dev_err(dev, "alloc_percpu() failed\n");
+ err = -ENOMEM;
+ goto err_alloc_ppriv;
+ }
+
+ /* DPSECI initialization */
+ err = dpaa2_dpseci_setup(dpseci_dev);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_setup() failed\n");
+ goto err_dpseci_setup;
+ }
+
+ /* DPIO */
+ err = dpaa2_dpseci_dpio_setup(priv);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "dpaa2_dpseci_dpio_setup() failed\n");
+ goto err_dpio_setup;
+ }
+
+ /* DPSECI binding to DPIO */
+ err = dpaa2_dpseci_bind(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_bind() failed\n");
+ goto err_bind;
+ }
+
+ /* DPSECI enable */
+ err = dpaa2_dpseci_enable(priv);
+ if (err) {
+ dev_err(dev, "dpaa2_dpseci_enable() failed\n");
+ goto err_bind;
+ }
+
+ /* register crypto algorithms the device supports */
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+ u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (alg_sel == OP_ALG_ALGSEL_3DES ||
+ alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_skcipher_alg_init(t_alg);
+
+ err = crypto_register_skcipher(&t_alg->skcipher);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->skcipher.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+ u32 c1_alg_sel = t_alg->caam.class1_alg_type &
+ OP_ALG_ALGSEL_MASK;
+ u32 c2_alg_sel = t_alg->caam.class2_alg_type &
+ OP_ALG_ALGSEL_MASK;
+
+ /* Skip DES algorithms if not supported by device */
+ if (!priv->sec_attr.des_acc_num &&
+ (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
+ c1_alg_sel == OP_ALG_ALGSEL_DES))
+ continue;
+
+ /* Skip AES algorithms if not supported by device */
+ if (!priv->sec_attr.aes_acc_num &&
+ c1_alg_sel == OP_ALG_ALGSEL_AES)
+ continue;
+
+ /*
+ * Skip algorithms requiring message digests
+ * if MD not supported by device.
+ */
+ if (!priv->sec_attr.md_acc_num && c2_alg_sel)
+ continue;
+
+ t_alg->caam.dev = dev;
+ caam_aead_alg_init(t_alg);
+
+ err = crypto_register_aead(&t_alg->aead);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->aead.base.cra_driver_name, err);
+ continue;
+ }
+
+ t_alg->registered = true;
+ registered = true;
+ }
+ if (registered)
+ dev_info(dev, "algorithms registered in /proc/crypto\n");
+
+ /* register hash algorithms the device supports */
+ INIT_LIST_HEAD(&hash_list);
+
+ /*
+ * Skip registration of any hashing algorithms if MD block
+ * is not present.
+ */
+ if (!priv->sec_attr.md_acc_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
+ struct caam_hash_alg *t_alg;
+ struct caam_hash_template *alg = driver_hash + i;
+
+ /* register hmac version */
+ t_alg = caam_hash_alloc(dev, alg, true);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s hash alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+
+ /* register unkeyed version */
+ t_alg = caam_hash_alloc(dev, alg, false);
+ if (IS_ERR(t_alg)) {
+ err = PTR_ERR(t_alg);
+ dev_warn(dev, "%s alg allocation failed: %d\n",
+ alg->driver_name, err);
+ continue;
+ }
+
+ err = crypto_register_ahash(&t_alg->ahash_alg);
+ if (err) {
+ dev_warn(dev, "%s alg registration failed: %d\n",
+ t_alg->ahash_alg.halg.base.cra_driver_name,
+ err);
+ kfree(t_alg);
+ } else {
+ list_add_tail(&t_alg->entry, &hash_list);
+ }
+ }
+ if (!list_empty(&hash_list))
+ dev_info(dev, "hash algorithms registered in /proc/crypto\n");
+
+ return err;
+
+err_bind:
+ dpaa2_dpseci_dpio_free(priv);
+err_dpio_setup:
+ dpaa2_dpseci_free(priv);
+err_dpseci_setup:
+ free_percpu(priv->ppriv);
+err_alloc_ppriv:
+ fsl_mc_portal_free(priv->mc_io);
+err_dma_mask:
+ kmem_cache_destroy(qi_cache);
+
+ return err;
+}
+
+static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
+{
+ struct device *dev;
+ struct dpaa2_caam_priv *priv;
+ int i;
+
+ dev = &ls_dev->dev;
+ priv = dev_get_drvdata(dev);
+
+ for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
+ struct caam_aead_alg *t_alg = driver_aeads + i;
+
+ if (t_alg->registered)
+ crypto_unregister_aead(&t_alg->aead);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
+ struct caam_skcipher_alg *t_alg = driver_algs + i;
+
+ if (t_alg->registered)
+ crypto_unregister_skcipher(&t_alg->skcipher);
+ }
+
+ if (hash_list.next) {
+ struct caam_hash_alg *t_hash_alg, *p;
+
+ list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
+ crypto_unregister_ahash(&t_hash_alg->ahash_alg);
+ list_del(&t_hash_alg->entry);
+ kfree(t_hash_alg);
+ }
+ }
+
+ dpaa2_dpseci_disable(priv);
+ dpaa2_dpseci_dpio_free(priv);
+ dpaa2_dpseci_free(priv);
+ free_percpu(priv->ppriv);
+ fsl_mc_portal_free(priv->mc_io);
+ kmem_cache_destroy(qi_cache);
+
+ return 0;
+}
+
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
+{
+ struct dpaa2_fd fd;
+ struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
+ int err = 0, i, id;
+
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+
+ if (priv->cscn_mem) {
+ dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
+ DPAA2_CSCN_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
+ dev_dbg_ratelimited(dev, "Dropping request\n");
+ return -EBUSY;
+ }
+ }
+
+ dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);
+
+ req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, req->fd_flt_dma)) {
+ dev_err(dev, "DMA mapping error for QI enqueue request\n");
+ return -EIO;
+ }
+
+ memset(&fd, 0, sizeof(fd));
+ dpaa2_fd_set_format(&fd, dpaa2_fd_list);
+ dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
+ dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
+ dpaa2_fd_set_flc(&fd, req->flc_dma);
+
+ /*
+ * There is no guarantee that preemption is disabled here,
+ * thus take action.
+ */
+ preempt_disable();
+ id = smp_processor_id() % priv->dpseci_attr.num_tx_queues;
+ for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
+ err = dpaa2_io_service_enqueue_fq(NULL,
+ priv->tx_queue_attr[id].fqid,
+ &fd);
+ if (err != -EBUSY)
+ break;
+ }
+ preempt_enable();
+
+ if (unlikely(err)) {
+ dev_err(dev, "Error enqueuing frame: %d\n", err);
+ goto err_out;
+ }
+
+ return -EINPROGRESS;
+
+err_out:
+ dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
+ DMA_BIDIRECTIONAL);
+ return -EIO;
+}
+EXPORT_SYMBOL(dpaa2_caam_enqueue);
+
+static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpseci",
+ },
+ { .vendor = 0x0 }
+};
+
+static struct fsl_mc_driver dpaa2_caam_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_caam_probe,
+ .remove = dpaa2_caam_remove,
+ .match_id_table = dpaa2_caam_match_id_table
+};
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Freescale Semiconductor, Inc");
+MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");
+
+module_fsl_mc_driver(dpaa2_caam_driver);
diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h
new file mode 100644
index 000000000000..9823bdefd029
--- /dev/null
+++ b/drivers/crypto/caam/caamalg_qi2.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _CAAMALG_QI2_H_
+#define _CAAMALG_QI2_H_
+
+#include <soc/fsl/dpaa2-io.h>
+#include <soc/fsl/dpaa2-fd.h>
+#include <linux/threads.h>
+#include "dpseci.h"
+#include "desc_constr.h"
+
+#define DPAA2_CAAM_STORE_SIZE 16
+/* NAPI weight *must* be a multiple of the store size. */
+#define DPAA2_CAAM_NAPI_WEIGHT 64
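+
+/*
+ * Illustrative arithmetic: with a store size of 16 and a NAPI weight of 64,
+ * the poll routine can drain at most 64 / 16 = 4 full stores per NAPI cycle,
+ * since it stops pulling frames once cleaned > budget - DPAA2_CAAM_STORE_SIZE.
+ */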
+
+/* The congestion entrance threshold was chosen so that on LS2088
+ * we support the maximum throughput for the available memory
+ */
+#define DPAA2_SEC_CONG_ENTRY_THRESH (128 * 1024 * 1024)
+#define DPAA2_SEC_CONG_EXIT_THRESH (DPAA2_SEC_CONG_ENTRY_THRESH * 9 / 10)
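+
+/*
+ * Worked example for the thresholds above: congestion is entered once about
+ * 128 MiB (134217728 bytes) of frames are in flight, and exited when the
+ * backlog drops below 9/10 of that, i.e. ~115 MiB (120795955 bytes).
+ */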
+
+/**
+ * dpaa2_caam_priv - driver private data
+ * @dpsec_id: DPSECI object unique ID
+ * @major_ver: DPSECI major version
+ * @minor_ver: DPSECI minor version
+ * @dpseci_attr: DPSECI attributes
+ * @sec_attr: SEC engine attributes
+ * @rx_queue_attr: array of Rx queue attributes
+ * @tx_queue_attr: array of Tx queue attributes
+ * @cscn_mem: pointer to memory region containing the congestion SCN;
+ *	its size is larger than DPAA2_CSCN_SIZE, to accommodate the alignment
+ * @cscn_mem_aligned: pointer to congestion SCN; it is computed as
+ * PTR_ALIGN(cscn_mem, DPAA2_CSCN_ALIGN)
+ * @cscn_dma: dma address used by the QMAN to write CSCN messages
+ * @dev: device associated with the DPSECI object
+ * @mc_io: pointer to MC portal's I/O object
+ * @domain: IOMMU domain
+ * @ppriv: per CPU pointers to private data
+ */
+struct dpaa2_caam_priv {
+ int dpsec_id;
+
+ u16 major_ver;
+ u16 minor_ver;
+
+ struct dpseci_attr dpseci_attr;
+ struct dpseci_sec_attr sec_attr;
+ struct dpseci_rx_queue_attr rx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ struct dpseci_tx_queue_attr tx_queue_attr[DPSECI_MAX_QUEUE_NUM];
+ int num_pairs;
+
+ /* congestion */
+ void *cscn_mem;
+ void *cscn_mem_aligned;
+ dma_addr_t cscn_dma;
+
+ struct device *dev;
+ struct fsl_mc_io *mc_io;
+ struct iommu_domain *domain;
+
+ struct dpaa2_caam_priv_per_cpu __percpu *ppriv;
+};
+
+/**
+ * dpaa2_caam_priv_per_cpu - per CPU private data
+ * @napi: napi structure
+ * @net_dev: netdev used by napi
+ * @req_fqid: (virtual) request (Tx / enqueue) FQID
+ * @rsp_fqid: (virtual) response (Rx / dequeue) FQID
+ * @prio: internal queue number - index for dpaa2_caam_priv.*_queue_attr
+ * @nctx: notification context of response FQ
+ * @store: where dequeued frames are stored
+ * @priv: backpointer to dpaa2_caam_priv
+ */
+struct dpaa2_caam_priv_per_cpu {
+ struct napi_struct napi;
+ struct net_device net_dev;
+ int req_fqid;
+ int rsp_fqid;
+ int prio;
+ struct dpaa2_io_notification_ctx nctx;
+ struct dpaa2_io_store *store;
+ struct dpaa2_caam_priv *priv;
+};
+
+/*
+ * The CAAM QI hardware constructs a job descriptor which points
+ * to shared descriptor (as pointed by context_a of FQ to CAAM).
+ * When the job descriptor is executed by deco, the whole job
+ * descriptor together with the shared descriptor gets loaded into
+ * the deco buffer, which is 64 words long (each word 32 bits wide).
+ *
+ * The job descriptor constructed by the QI hardware has the layout:
+ *
+ * HEADER (1 word)
+ * Shdesc ptr (1 or 2 words)
+ * SEQ_OUT_PTR (1 word)
+ * Out ptr (1 or 2 words)
+ * Out length (1 word)
+ * SEQ_IN_PTR (1 word)
+ * In ptr (1 or 2 words)
+ * In length (1 word)
+ *
+ * The shdesc ptr is used to fetch the shared descriptor contents
+ * into the deco buffer.
+ *
+ * Apart from the shdesc contents, the total number of words that
+ * get loaded in the deco buffer is 8 or 11. The remaining words
+ * in the deco buffer can be used for storing the shared descriptor.
+ */
+#define MAX_SDLEN ((CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN) / CAAM_CMD_SZ)
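+
+/*
+ * Worked example, assuming 32-bit commands (CAAM_CMD_SZ = 4, hence
+ * CAAM_DESC_BYTES_MAX = 64 * 4 = 256) and 64-bit pointers (so the job
+ * descriptor takes the full 11 words, i.e. DESC_JOB_IO_LEN = 44 bytes):
+ * MAX_SDLEN = (256 - 44) / 4 = 53 words, which matches the 64 - 11 words
+ * of the deco buffer left over for the shared descriptor.
+ */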
+
+/* Length of a single buffer in the QI driver memory cache */
+#define CAAM_QI_MEMCACHE_SIZE 512
+
+/*
+ * aead_edesc - s/w-extended aead descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped h/w link table
+ * @qm_sg_dma: bus physical mapped address of h/w link table
+ * @assoclen: associated data length, in CAAM endianness
+ * @assoclen_dma: bus physical mapped address of req->assoclen
+ * @sgt: the h/w link table, followed by IV
+ */
+struct aead_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ unsigned int assoclen;
+ dma_addr_t assoclen_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * skcipher_edesc - s/w-extended skcipher descriptor
+ * @src_nents: number of segments in input scatterlist
+ * @dst_nents: number of segments in output scatterlist
+ * @iv_dma: dma address of iv for checking continuity and link table
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @sgt: the h/w link table, followed by IV
+ */
+struct skcipher_edesc {
+ int src_nents;
+ int dst_nents;
+ dma_addr_t iv_dma;
+ int qm_sg_bytes;
+ dma_addr_t qm_sg_dma;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/*
+ * ahash_edesc - s/w-extended ahash descriptor
+ * @dst_dma: I/O virtual address of req->result
+ * @qm_sg_dma: I/O virtual address of h/w link table
+ * @src_nents: number of segments in input scatterlist
+ * @qm_sg_bytes: length of dma mapped qm_sg space
+ * @sgt: the h/w link table
+ */
+struct ahash_edesc {
+ dma_addr_t dst_dma;
+ dma_addr_t qm_sg_dma;
+ int src_nents;
+ int qm_sg_bytes;
+ struct dpaa2_sg_entry sgt[0];
+};
+
+/**
+ * caam_flc - Flow Context (FLC)
+ * @flc: Flow Context options
+ * @sh_desc: Shared Descriptor
+ */
+struct caam_flc {
+ u32 flc[16];
+ u32 sh_desc[MAX_SDLEN];
+} ____cacheline_aligned;
+
+enum optype {
+ ENCRYPT = 0,
+ DECRYPT,
+ NUM_OP
+};
+
+/**
+ * caam_request - the request structure the driver application should fill
+ *                when submitting a job to the driver
+ * @fd_flt: Frame list table defining input and output
+ * fd_flt[0] - FLE pointing to output buffer
+ * fd_flt[1] - FLE pointing to input buffer
+ * @fd_flt_dma: DMA address for the frame list table
+ * @flc: Flow Context
+ * @flc_dma: I/O virtual address of Flow Context
+ * @cbk: Callback function to invoke when job is completed
+ * @ctx: arbitrary context attached to the request by the application
+ * @edesc: extended descriptor; points to one of {skcipher,aead}_edesc
+ */
+struct caam_request {
+ struct dpaa2_fl_entry fd_flt[2];
+ dma_addr_t fd_flt_dma;
+ struct caam_flc *flc;
+ dma_addr_t flc_dma;
+ void (*cbk)(void *ctx, u32 err);
+ void *ctx;
+ void *edesc;
+};
+
+/**
+ * dpaa2_caam_enqueue() - enqueue a crypto request
+ * @dev: device associated with the DPSECI object
+ * @req: pointer to caam_request
+ */
+int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req);
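A hedged sketch of the submission flow implied by these declarations; example_done and example_submit are hypothetical names, and preparing the two frame-list entries plus their DMA mappings is assumed to happen elsewhere:

static void example_done(void *ctx, u32 err)
{
	/* invoked from the response-FQ handling path with the CAAM status */
}

static int example_submit(struct device *dev, struct caam_request *req,
			  struct caam_flc *flc, dma_addr_t flc_dma)
{
	/* req->fd_flt[0]/[1] already describe the output/input buffers */
	req->flc = flc;
	req->flc_dma = flc_dma;
	req->cbk = example_done;
	req->ctx = NULL;	/* arbitrary per-request context */

	return dpaa2_caam_enqueue(dev, req);
}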
+
+#endif /* _CAAMALG_QI2_H_ */
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 43975ab5f09c..46924affa0bd 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for ahash functions of crypto API
*
@@ -62,6 +63,7 @@
#include "error.h"
#include "sg_sw_sec4.h"
#include "key_gen.h"
+#include "caamhash_desc.h"
#define CAAM_CRA_PRIORITY 3000
@@ -71,14 +73,6 @@
#define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
#define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE
-/* length of descriptors text */
-#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
-#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_FINUP_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
-#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
-
#define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \
CAAM_MAX_HASH_KEY_SIZE)
#define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ)
@@ -235,60 +229,6 @@ static inline int ctx_map_to_sec4_sg(struct device *jrdev,
return 0;
}
-/*
- * For ahash update, final and finup (import_ctx = true)
- * import context, read and write to seqout
- * For ahash firsts and digest (import_ctx = false)
- * read and write to seqout
- */
-static inline void ahash_gen_sh_desc(u32 *desc, u32 state, int digestsize,
- struct caam_hash_ctx *ctx, bool import_ctx,
- int era)
-{
- u32 op = ctx->adata.algtype;
- u32 *skip_key_load;
-
- init_sh_desc(desc, HDR_SHARE_SERIAL);
-
- /* Append key if it has been set; ahash update excluded */
- if ((state != OP_ALG_AS_UPDATE) && (ctx->adata.keylen)) {
- /* Skip key loading if already shared */
- skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
- JUMP_COND_SHRD);
-
- if (era < 6)
- append_key_as_imm(desc, ctx->key, ctx->adata.keylen_pad,
- ctx->adata.keylen, CLASS_2 |
- KEY_DEST_MDHA_SPLIT | KEY_ENC);
- else
- append_proto_dkp(desc, &ctx->adata);
-
- set_jump_tgt_here(desc, skip_key_load);
-
- op |= OP_ALG_AAI_HMAC_PRECOMP;
- }
-
- /* If needed, import context from software */
- if (import_ctx)
- append_seq_load(desc, ctx->ctx_len, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-
- /* Class 2 operation */
- append_operation(desc, op | state | OP_ALG_ENCRYPT);
-
- /*
- * Load from buf and/or src and write to req->result or state->context
- * Calculate remaining bytes to read
- */
- append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
- /* Read remaining bytes */
- append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
- FIFOLD_TYPE_MSG | KEY_VLF);
- /* Store class2 context bytes */
- append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
- LDST_SRCDST_BYTE_CONTEXT);
-}
-
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
@@ -301,8 +241,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update shared descriptor */
desc = ctx->sh_desc_update;
- ahash_gen_sh_desc(desc, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -313,8 +253,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_update_first shared descriptor */
desc = ctx->sh_desc_update_first;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INIT, ctx->ctx_len, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -325,8 +265,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_final shared descriptor */
desc = ctx->sh_desc_fin;
- ahash_gen_sh_desc(desc, OP_ALG_AS_FINALIZE, digestsize, ctx, true,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+ ctx->ctx_len, true, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
@@ -337,8 +277,8 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
/* ahash_digest shared descriptor */
desc = ctx->sh_desc_digest;
- ahash_gen_sh_desc(desc, OP_ALG_AS_INITFINAL, digestsize, ctx, false,
- ctrlpriv->era);
+ cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+ ctx->ctx_len, false, ctrlpriv->era);
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
desc_bytes(desc), ctx->dir);
#ifdef DEBUG
diff --git a/drivers/crypto/caam/caamhash_desc.c b/drivers/crypto/caam/caamhash_desc.c
new file mode 100644
index 000000000000..a12f7959a2c3
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#include "compat.h"
+#include "desc_constr.h"
+#include "caamhash_desc.h"
+
+/**
+ * cnstr_shdsc_ahash - ahash shared descriptor
+ * @desc: pointer to buffer used for descriptor construction
+ * @adata: pointer to authentication transform definitions.
+ * A split key is required for SEC Era < 6; the size of the split key
+ * is specified in this case.
+ * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224,
+ * SHA256, SHA384, SHA512}.
+ * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
+ * @digestsize: algorithm's digest size
+ * @ctx_len: size of Context Register
+ * @import_ctx: true if previous Context Register needs to be restored
+ * must be true for ahash update and final
+ * must be false for ahash first and digest
+ * @era: SEC Era
+ */
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era)
+{
+ u32 op = adata->algtype;
+
+ init_sh_desc(desc, HDR_SHARE_SERIAL);
+
+ /* Append key if it has been set; ahash update excluded */
+ if (state != OP_ALG_AS_UPDATE && adata->keylen) {
+ u32 *skip_key_load;
+
+ /* Skip key loading if already shared */
+ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+ JUMP_COND_SHRD);
+
+ if (era < 6)
+ append_key_as_imm(desc, adata->key_virt,
+ adata->keylen_pad,
+ adata->keylen, CLASS_2 |
+ KEY_DEST_MDHA_SPLIT | KEY_ENC);
+ else
+ append_proto_dkp(desc, adata);
+
+ set_jump_tgt_here(desc, skip_key_load);
+
+ op |= OP_ALG_AAI_HMAC_PRECOMP;
+ }
+
+ /* If needed, import context from software */
+ if (import_ctx)
+ append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+
+ /* Class 2 operation */
+ append_operation(desc, op | state | OP_ALG_ENCRYPT);
+
+ /*
+ * Load from buf and/or src and write to req->result or state->context
+ * Calculate remaining bytes to read
+ */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
+ /* Read remaining bytes */
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 |
+ FIFOLD_TYPE_MSG | KEY_VLF);
+ /* Store class2 context bytes */
+ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+ LDST_SRCDST_BYTE_CONTEXT);
+}
+EXPORT_SYMBOL(cnstr_shdsc_ahash);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
+MODULE_AUTHOR("NXP Semiconductors");
diff --git a/drivers/crypto/caam/caamhash_desc.h b/drivers/crypto/caam/caamhash_desc.h
new file mode 100644
index 000000000000..631fc1ac312c
--- /dev/null
+++ b/drivers/crypto/caam/caamhash_desc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Shared descriptors for ahash algorithms
+ *
+ * Copyright 2017 NXP
+ */
+
+#ifndef _CAAMHASH_DESC_H_
+#define _CAAMHASH_DESC_H_
+
+/* length of descriptors text */
+#define DESC_AHASH_BASE (3 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_LEN (6 * CAAM_CMD_SZ)
+#define DESC_AHASH_UPDATE_FIRST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+#define DESC_AHASH_FINAL_LEN (DESC_AHASH_BASE + 5 * CAAM_CMD_SZ)
+#define DESC_AHASH_DIGEST_LEN (DESC_AHASH_BASE + 4 * CAAM_CMD_SZ)
+
+void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
+ int digestsize, int ctx_len, bool import_ctx, int era);
+
+#endif /* _CAAMHASH_DESC_H_ */
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index f26d62e5533a..4fc209cbbeab 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* caam - Freescale FSL CAAM support for Public Key Cryptography
*
diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index fde07d4ff019..4318b0aa6fb9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* caam - Freescale FSL CAAM support for hw_random
*
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index 1c71e0cd5098..9604ff7a335e 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
+#include <linux/iommu.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/in.h>
@@ -39,6 +40,7 @@
#include <crypto/authenc.h>
#include <crypto/akcipher.h>
#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/rsa.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 538c01f428c1..3fc793193821 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/* * CAAM control-plane driver backend
* Controller-level driver, kernel property detection, initialization
*
diff --git a/drivers/crypto/caam/dpseci.c b/drivers/crypto/caam/dpseci.c
new file mode 100644
index 000000000000..8a68531ded0b
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.c
@@ -0,0 +1,426 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#include <linux/fsl/mc.h>
+#include "dpseci.h"
+#include "dpseci_cmd.h"
+
+/**
+ * dpseci_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpseci_id: DPSECI unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an already created
+ * object; an object may have been declared statically in the DPL
+ * or created dynamically.
+ * This function returns a unique authentication token, associated with the
+ * specific object ID and the specific MC portal; this token must be used in all
+ * subsequent commands for this specific object.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_open *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpseci_cmd_open *)cmd.params;
+ cmd_params->dpseci_id = cpu_to_le32(dpseci_id);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpseci_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * After this function is called, no further operations are allowed on the
+ * object without opening a new control session.
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
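open() and close() bracket every other DPSECI command; a minimal sketch of the session lifecycle (example_query is a hypothetical helper, and cmd_flags of 0 is assumed adequate here):

static int example_query(struct fsl_mc_io *mc_io, int dpseci_id)
{
	struct dpseci_attr attr;
	u16 token;
	int err;

	err = dpseci_open(mc_io, 0, dpseci_id, &token);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);

	dpseci_close(mc_io, 0, token);

	return err;
}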
+
+/**
+ * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE,
+ cmd_flags,
+ token);
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_is_enabled() - Check if the DPSECI is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_is_enabled *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params;
+ *en = dpseci_get_field(rsp_params->is_enabled, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_attributes() - Retrieve DPSECI attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_attributes *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->num_tx_queues = rsp_params->num_tx_queues;
+ attr->num_rx_queues = rsp_params->num_rx_queues;
+ attr->options = le32_to_cpu(rsp_params->options);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_rx_queue() - Set Rx queue configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation; use DPSECI_ALL_QUEUES to configure all
+ * Rx queues identically.
+ * @cfg: Rx queue configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ cmd_params->queue = queue;
+ dpseci_set_field(cmd_params->dest_type, DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
+ cmd_params->options = cpu_to_le32(cfg->options);
+ dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION,
+ cfg->order_preservation_en);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_rx_queue() - Retrieve Rx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Rx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
+ attr->dest_cfg.priority = cmd_params->priority;
+ attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type,
+ DEST_TYPE);
+ attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
+ attr->fqid = le32_to_cpu(cmd_params->fqid);
+ attr->order_preservation_en =
+ dpseci_get_field(cmd_params->order_preservation_en,
+ ORDER_PRESERVATION);
+
+ return 0;
+}
+
+/**
+ * dpseci_get_tx_queue() - Retrieve Tx queue attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @queue: Select the queue relative to number of priorities configured at
+ * DPSECI creation
+ * @attr: Returned Tx queue attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_queue *cmd_params;
+ struct dpseci_rsp_get_tx_queue *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_queue *)cmd.params;
+ cmd_params->queue = queue;
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params;
+ attr->fqid = le32_to_cpu(rsp_params->fqid);
+ attr->priority = rsp_params->priority;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @attr: Returned SEC attributes
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_sec_attr *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params;
+ attr->ip_id = le16_to_cpu(rsp_params->ip_id);
+ attr->major_rev = rsp_params->major_rev;
+ attr->minor_rev = rsp_params->minor_rev;
+ attr->era = rsp_params->era;
+ attr->deco_num = rsp_params->deco_num;
+ attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num;
+ attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num;
+ attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num;
+ attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num;
+ attr->crc_acc_num = rsp_params->crc_acc_num;
+ attr->pk_acc_num = rsp_params->pk_acc_num;
+ attr->kasumi_acc_num = rsp_params->kasumi_acc_num;
+ attr->rng_acc_num = rsp_params->rng_acc_num;
+ attr->md_acc_num = rsp_params->md_acc_num;
+ attr->arc4_acc_num = rsp_params->arc4_acc_num;
+ attr->des_acc_num = rsp_params->des_acc_num;
+ attr->aes_acc_num = rsp_params->aes_acc_num;
+ attr->ccha_acc_num = rsp_params->ccha_acc_num;
+ attr->ptha_acc_num = rsp_params->ptha_acc_num;
+
+ return 0;
+}
+
+/**
+ * dpseci_get_api_version() - Get Data Path SEC Interface API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path sec API
+ * @minor_ver: Minor version of data path sec API
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION,
+ cmd_flags, 0);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
+
+/**
+ * dpseci_set_congestion_notification() - Set congestion group
+ * notification configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *cmd_params;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_SET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
+ cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
+ cmd_params->priority = cfg->dest_cfg.priority;
+ dpseci_set_field(cmd_params->options, CGN_DEST_TYPE,
+ cfg->dest_cfg.dest_type);
+ dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units);
+ cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
+ cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
+ cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
+ cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);
+
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpseci_get_congestion_notification() - Get congestion group notification
+ * configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPSECI object
+ * @cfg: congestion notification configuration
+ *
+ * Return: '0' on success, error code otherwise
+ */
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpseci_cmd_congestion_notification *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(
+ DPSECI_CMDID_GET_CONGESTION_NOTIFICATION,
+ cmd_flags,
+ token);
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params;
+ cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id);
+ cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode);
+ cfg->dest_cfg.priority = rsp_params->priority;
+ cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options,
+ CGN_DEST_TYPE);
+ cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS);
+ cfg->message_iova = le64_to_cpu(rsp_params->message_iova);
+ cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx);
+ cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry);
+ cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit);
+
+ return 0;
+}
diff --git a/drivers/crypto/caam/dpseci.h b/drivers/crypto/caam/dpseci.h
new file mode 100644
index 000000000000..4550e134d166
--- /dev/null
+++ b/drivers/crypto/caam/dpseci.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+#ifndef _DPSECI_H_
+#define _DPSECI_H_
+
+/*
+ * Data Path SEC Interface API
+ * Contains initialization APIs and runtime control APIs for DPSECI
+ */
+
+struct fsl_mc_io;
+
+/**
+ * General DPSECI macros
+ */
+
+/**
+ * Maximum number of Tx/Rx queues per DPSECI object
+ */
+#define DPSECI_MAX_QUEUE_NUM 16
+
+/**
+ * All queues considered; see dpseci_set_rx_queue()
+ */
+#define DPSECI_ALL_QUEUES (u8)(-1)
+
+int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id,
+ u16 *token);
+
+int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+/**
+ * Enable the Congestion Group support
+ */
+#define DPSECI_OPT_HAS_CG 0x000020
+
+/**
+ * struct dpseci_cfg - Structure representing DPSECI configuration
+ * @options: Any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @priorities: Priorities for the SEC hardware processing;
+ * each place in the array is the priority of the Tx queue
+ * towards the SEC;
+ * valid priorities are 1-8
+ */
+struct dpseci_cfg {
+ u32 options;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 priorities[DPSECI_MAX_QUEUE_NUM];
+};
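For illustration only (DPSECI objects are normally created through the DPL or management tools rather than by this driver), a configuration matching these field semantics could look like:

static const struct dpseci_cfg example_cfg = {
	.options	= DPSECI_OPT_HAS_CG,
	.num_tx_queues	= 2,
	.num_rx_queues	= 2,
	.priorities	= { 1, 2 },	/* valid SEC priorities are 1-8 */
};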
+
+int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
+
+int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ int *en);
+
+/**
+ * struct dpseci_attr - Structure representing DPSECI attributes
+ * @id: DPSECI object ID
+ * @num_tx_queues: number of queues towards the SEC
+ * @num_rx_queues: number of queues back from the SEC
+ * @options: any combination of the following flags:
+ * DPSECI_OPT_HAS_CG
+ */
+struct dpseci_attr {
+ int id;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u32 options;
+};
+
+int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_attr *attr);
+
+/**
+ * enum dpseci_dest - DPSECI destination types
+ * @DPSECI_DEST_NONE: Unassigned destination; The queue is set in parked mode
+ * and does not generate FQDAN notifications; user is expected to dequeue
+ * from the queue based on polling or other user-defined method
+ * @DPSECI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN
+ * notifications to the specified DPIO; user is expected to dequeue from
+ * the queue only after notification is received
+ * @DPSECI_DEST_DPCON: The queue is set in schedule mode and does not generate
+ * FQDAN notifications, but is connected to the specified DPCON object;
+ * user is expected to dequeue from the DPCON channel
+ */
+enum dpseci_dest {
+ DPSECI_DEST_NONE = 0,
+ DPSECI_DEST_DPIO,
+ DPSECI_DEST_DPCON
+};
+
+/**
+ * struct dpseci_dest_cfg - Structure representing DPSECI destination parameters
+ * @dest_type: Destination type
+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
+ * @priority: Priority selection within the DPIO or DPCON channel; valid values
+ * are 0-1 or 0-7, depending on the number of priorities in that channel;
+ * not relevant for 'DPSECI_DEST_NONE' option
+ */
+struct dpseci_dest_cfg {
+ enum dpseci_dest dest_type;
+ int dest_id;
+ u8 priority;
+};
+
+/**
+ * DPSECI queue modification options
+ */
+
+/**
+ * Select to modify the user's context associated with the queue
+ */
+#define DPSECI_QUEUE_OPT_USER_CTX 0x00000001
+
+/**
+ * Select to modify the queue's destination
+ */
+#define DPSECI_QUEUE_OPT_DEST 0x00000002
+
+/**
+ * Select to modify the queue's order preservation
+ */
+#define DPSECI_QUEUE_OPT_ORDER_PRESERVATION 0x00000004
+
+/**
+ * struct dpseci_rx_queue_cfg - DPSECI RX queue configuration
+ * @options: Flags representing the suggested modifications to the queue;
+ * Use any combination of 'DPSECI_QUEUE_OPT_<X>' flags
+ * @order_preservation_en: order preservation configuration for the Rx queue;
+ * valid only if 'DPSECI_QUEUE_OPT_ORDER_PRESERVATION' is contained in 'options'
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame; valid only if 'DPSECI_QUEUE_OPT_USER_CTX' is contained
+ * in 'options'
+ * @dest_cfg: Queue destination parameters; valid only if
+ * 'DPSECI_QUEUE_OPT_DEST' is contained in 'options'
+ */
+struct dpseci_rx_queue_cfg {
+ u32 options;
+ int order_preservation_en;
+ u64 user_ctx;
+ struct dpseci_dest_cfg dest_cfg;
+};
+
+int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, const struct dpseci_rx_queue_cfg *cfg);
+
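A sketch of steering all Rx queues' notifications to a DPIO via this call; example_steer_rx is hypothetical and the DPIO object is assumed to exist already:

static int example_steer_rx(struct fsl_mc_io *mc_io, u16 token, int dpio_id)
{
	struct dpseci_rx_queue_cfg cfg = {
		.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX,
		.user_ctx = 0x1234,	/* echoed back in each dequeued FD */
		.dest_cfg = {
			.dest_type = DPSECI_DEST_DPIO,
			.dest_id = dpio_id,
			.priority = 0,
		},
	};

	return dpseci_set_rx_queue(mc_io, 0, token, DPSECI_ALL_QUEUES, &cfg);
}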
+/**
+ * struct dpseci_rx_queue_attr - Structure representing attributes of Rx queues
+ * @user_ctx: User context value provided in the frame descriptor of each
+ * dequeued frame
+ * @order_preservation_en: Status of the order preservation configuration on the
+ * queue
+ * @dest_cfg: Queue destination configuration
+ * @fqid: Virtual FQID value to be used for dequeue operations
+ */
+struct dpseci_rx_queue_attr {
+ u64 user_ctx;
+ int order_preservation_en;
+ struct dpseci_dest_cfg dest_cfg;
+ u32 fqid;
+};
+
+int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_rx_queue_attr *attr);
+
+/**
+ * struct dpseci_tx_queue_attr - Structure representing attributes of Tx queues
+ * @fqid: Virtual FQID to be used for sending frames to SEC hardware
+ * @priority: SEC hardware processing priority for the queue
+ */
+struct dpseci_tx_queue_attr {
+ u32 fqid;
+ u8 priority;
+};
+
+int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ u8 queue, struct dpseci_tx_queue_attr *attr);
+
+/**
+ * struct dpseci_sec_attr - Structure representing attributes of the SEC
+ * hardware accelerator
+ * @ip_id: ID for SEC
+ * @major_rev: Major revision number for SEC
+ * @minor_rev: Minor revision number for SEC
+ * @era: SEC Era
+ * @deco_num: The number of copies of the DECO that are implemented in this
+ * version of SEC
+ * @zuc_auth_acc_num: The number of copies of ZUCA that are implemented in this
+ * version of SEC
+ * @zuc_enc_acc_num: The number of copies of ZUCE that are implemented in this
+ * version of SEC
+ * @snow_f8_acc_num: The number of copies of the SNOW-f8 module that are
+ * implemented in this version of SEC
+ * @snow_f9_acc_num: The number of copies of the SNOW-f9 module that are
+ * implemented in this version of SEC
+ * @crc_acc_num: The number of copies of the CRC module that are implemented in
+ * this version of SEC
+ * @pk_acc_num: The number of copies of the Public Key module that are
+ * implemented in this version of SEC
+ * @kasumi_acc_num: The number of copies of the Kasumi module that are
+ * implemented in this version of SEC
+ * @rng_acc_num: The number of copies of the Random Number Generator that are
+ * implemented in this version of SEC
+ * @md_acc_num: The number of copies of the MDHA (Hashing module) that are
+ * implemented in this version of SEC
+ * @arc4_acc_num: The number of copies of the ARC4 module that are implemented
+ * in this version of SEC
+ * @des_acc_num: The number of copies of the DES module that are implemented in
+ * this version of SEC
+ * @aes_acc_num: The number of copies of the AES module that are implemented in
+ * this version of SEC
+ * @ccha_acc_num: The number of copies of the ChaCha20 module that are
+ * implemented in this version of SEC.
+ * @ptha_acc_num: The number of copies of the Poly1305 module that are
+ * implemented in this version of SEC.
+ **/
+struct dpseci_sec_attr {
+ u16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
+ struct dpseci_sec_attr *attr);
+
+int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 *major_ver, u16 *minor_ver);
+
+/**
+ * enum dpseci_congestion_unit - DPSECI congestion units
+ * @DPSECI_CONGESTION_UNIT_BYTES: bytes units
+ * @DPSECI_CONGESTION_UNIT_FRAMES: frames units
+ */
+enum dpseci_congestion_unit {
+ DPSECI_CONGESTION_UNIT_BYTES = 0,
+ DPSECI_CONGESTION_UNIT_FRAMES
+};
+
+/**
+ * CSCN message is written to message_iova once entering a
+ * congestion state (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER 0x00000001
+
+/**
+ * CSCN message is written to message_iova once exiting a
+ * congestion state (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT 0x00000002
+
+/**
+ * CSCN write will attempt to allocate into a cache (coherent write);
+ * valid only if 'DPSECI_CGN_MODE_WRITE_MEM_<X>' is selected
+ */
+#define DPSECI_CGN_MODE_COHERENT_WRITE 0x00000004
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once entering a congestion state
+ * (see 'threshold_entry')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_ENTER 0x00000008
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' CSCN message is sent to
+ * DPIO/DPCON's WQ channel once exiting a congestion state
+ * (see 'threshold_exit')
+ */
+#define DPSECI_CGN_MODE_NOTIFY_DEST_ON_EXIT 0x00000010
+
+/**
+ * if 'dpseci_dest_cfg.dest_type != DPSECI_DEST_NONE' when the CSCN is written
+ * to the sw-portal's DQRR, the DQRI interrupt is asserted immediately
+ * (if enabled)
+ */
+#define DPSECI_CGN_MODE_INTR_COALESCING_DISABLED 0x00000020
+
+/**
+ * struct dpseci_congestion_notification_cfg - congestion notification
+ * configuration
+ * @units: units type
+ * @threshold_entry: above this threshold we enter a congestion state;
+ * set it to '0' to disable it
+ * @threshold_exit: below this threshold we exit the congestion state.
+ * @message_ctx: The context that will be part of the CSCN message
+ * @message_iova: I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned;
+ * @dest_cfg: CSCN can be sent to either DPIO or DPCON WQ channel
+ * @notification_mode: Mask of available options; use 'DPSECI_CGN_MODE_<X>'
+ * values
+ */
+struct dpseci_congestion_notification_cfg {
+ enum dpseci_congestion_unit units;
+ u32 threshold_entry;
+ u32 threshold_exit;
+ u64 message_ctx;
+ u64 message_iova;
+ struct dpseci_dest_cfg dest_cfg;
+ u16 notification_mode;
+};
+
+int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, const struct dpseci_congestion_notification_cfg *cfg);
+
+int dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags,
+ u16 token, struct dpseci_congestion_notification_cfg *cfg);
+
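A sketch tying the mode flags and structure together (hypothetical helper; the CSCN buffer is assumed to be 16-byte aligned and already DMA-mapped):

static int example_set_cgn(struct fsl_mc_io *mc_io, u16 token,
			   dma_addr_t cscn_iova)
{
	struct dpseci_congestion_notification_cfg cfg = {
		.units = DPSECI_CONGESTION_UNIT_FRAMES,
		.threshold_entry = 1024,	/* frames */
		.threshold_exit = 512,
		.message_iova = cscn_iova,
		.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
				     DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT,
	};

	return dpseci_set_congestion_notification(mc_io, 0, token, &cfg);
}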
+#endif /* _DPSECI_H_ */
diff --git a/drivers/crypto/caam/dpseci_cmd.h b/drivers/crypto/caam/dpseci_cmd.h
new file mode 100644
index 000000000000..6ab77ead6e3d
--- /dev/null
+++ b/drivers/crypto/caam/dpseci_cmd.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2013-2016 Freescale Semiconductor Inc.
+ * Copyright 2017-2018 NXP
+ */
+
+#ifndef _DPSECI_CMD_H_
+#define _DPSECI_CMD_H_
+
+/* DPSECI Version */
+#define DPSECI_VER_MAJOR 5
+#define DPSECI_VER_MINOR 3
+
+#define DPSECI_VER(maj, min) (((maj) << 16) | (min))
+#define DPSECI_VERSION DPSECI_VER(DPSECI_VER_MAJOR, DPSECI_VER_MINOR)
+
+/* Command versioning */
+#define DPSECI_CMD_BASE_VERSION 1
+#define DPSECI_CMD_BASE_VERSION_V2 2
+#define DPSECI_CMD_ID_OFFSET 4
+
+#define DPSECI_CMD_V1(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION)
+
+#define DPSECI_CMD_V2(id) (((id) << DPSECI_CMD_ID_OFFSET) | \
+ DPSECI_CMD_BASE_VERSION_V2)
+
+/* Command IDs */
+#define DPSECI_CMDID_CLOSE DPSECI_CMD_V1(0x800)
+#define DPSECI_CMDID_OPEN DPSECI_CMD_V1(0x809)
+#define DPSECI_CMDID_GET_API_VERSION DPSECI_CMD_V1(0xa09)
+
+#define DPSECI_CMDID_ENABLE DPSECI_CMD_V1(0x002)
+#define DPSECI_CMDID_DISABLE DPSECI_CMD_V1(0x003)
+#define DPSECI_CMDID_GET_ATTR DPSECI_CMD_V1(0x004)
+#define DPSECI_CMDID_IS_ENABLED DPSECI_CMD_V1(0x006)
+
+#define DPSECI_CMDID_SET_RX_QUEUE DPSECI_CMD_V1(0x194)
+#define DPSECI_CMDID_GET_RX_QUEUE DPSECI_CMD_V1(0x196)
+#define DPSECI_CMDID_GET_TX_QUEUE DPSECI_CMD_V1(0x197)
+#define DPSECI_CMDID_GET_SEC_ATTR DPSECI_CMD_V2(0x198)
+#define DPSECI_CMDID_SET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x170)
+#define DPSECI_CMDID_GET_CONGESTION_NOTIFICATION DPSECI_CMD_V1(0x171)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPSECI_MASK(field) \
+ GENMASK(DPSECI_##field##_SHIFT + DPSECI_##field##_SIZE - 1, \
+ DPSECI_##field##_SHIFT)
+
+#define dpseci_set_field(var, field, val) \
+ ((var) |= (((val) << DPSECI_##field##_SHIFT) & DPSECI_MASK(field)))
+
+#define dpseci_get_field(var, field) \
+ (((var) & DPSECI_MASK(field)) >> DPSECI_##field##_SHIFT)
+
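A worked expansion with the DEST_TYPE field defined below (shift 0, size 4, so DPSECI_MASK(DEST_TYPE) is GENMASK(3, 0), i.e. 0xf):

/*
 *   dpseci_set_field(p->dest_type, DEST_TYPE, DPSECI_DEST_DPIO)
 *	expands to: p->dest_type |= (DPSECI_DEST_DPIO << 0) & 0xf;
 *
 *   dpseci_get_field(p->dest_type, DEST_TYPE)
 *	expands to: (p->dest_type & 0xf) >> 0
 */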
+struct dpseci_cmd_open {
+ __le32 dpseci_id;
+};
+
+#define DPSECI_ENABLE_SHIFT 0
+#define DPSECI_ENABLE_SIZE 1
+
+struct dpseci_rsp_is_enabled {
+ u8 is_enabled;
+};
+
+struct dpseci_rsp_get_attributes {
+ __le32 id;
+ __le32 pad0;
+ u8 num_tx_queues;
+ u8 num_rx_queues;
+ u8 pad1[6];
+ __le32 options;
+};
+
+#define DPSECI_DEST_TYPE_SHIFT 0
+#define DPSECI_DEST_TYPE_SIZE 4
+
+#define DPSECI_ORDER_PRESERVATION_SHIFT 0
+#define DPSECI_ORDER_PRESERVATION_SIZE 1
+
+struct dpseci_cmd_queue {
+ __le32 dest_id;
+ u8 priority;
+ u8 queue;
+ u8 dest_type;
+ u8 pad;
+ __le64 user_ctx;
+ union {
+ __le32 options;
+ __le32 fqid;
+ };
+ u8 order_preservation_en;
+};
+
+struct dpseci_rsp_get_tx_queue {
+ __le32 pad;
+ __le32 fqid;
+ u8 priority;
+};
+
+struct dpseci_rsp_get_sec_attr {
+ __le16 ip_id;
+ u8 major_rev;
+ u8 minor_rev;
+ u8 era;
+ u8 pad0[3];
+ u8 deco_num;
+ u8 zuc_auth_acc_num;
+ u8 zuc_enc_acc_num;
+ u8 pad1;
+ u8 snow_f8_acc_num;
+ u8 snow_f9_acc_num;
+ u8 crc_acc_num;
+ u8 pad2;
+ u8 pk_acc_num;
+ u8 kasumi_acc_num;
+ u8 rng_acc_num;
+ u8 pad3;
+ u8 md_acc_num;
+ u8 arc4_acc_num;
+ u8 des_acc_num;
+ u8 aes_acc_num;
+ u8 ccha_acc_num;
+ u8 ptha_acc_num;
+};
+
+struct dpseci_rsp_get_api_version {
+ __le16 major;
+ __le16 minor;
+};
+
+#define DPSECI_CGN_DEST_TYPE_SHIFT 0
+#define DPSECI_CGN_DEST_TYPE_SIZE 4
+#define DPSECI_CGN_UNITS_SHIFT 4
+#define DPSECI_CGN_UNITS_SIZE 2
+
+struct dpseci_cmd_congestion_notification {
+ __le32 dest_id;
+ __le16 notification_mode;
+ u8 priority;
+ u8 options;
+ __le64 message_iova;
+ __le64 message_ctx;
+ __le32 threshold_entry;
+ __le32 threshold_exit;
+};
+
+#endif /* _DPSECI_CMD_H_ */
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 8da88beb1abb..7e8d690f2827 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -108,6 +108,54 @@ static const struct {
{ 0xF1, "3GPP HFN matches or exceeds the Threshold" },
};
+static const struct {
+ u8 value;
+ const char *error_text;
+} qi_error_list[] = {
+ { 0x1F, "Job terminated by FQ or ICID flush" },
+ { 0x20, "FD format error"},
+ { 0x21, "FD command format error"},
+ { 0x23, "FL format error"},
+ { 0x25, "CRJD specified in FD, but not enabled in FLC"},
+ { 0x30, "Max. buffer size too small"},
+ { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"},
+ { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"},
+ { 0x33, "Size over/underflow (allocate mode)"},
+ { 0x34, "Size over/underflow (reuse mode)"},
+ { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"},
+ { 0x36, "Memory footprint exceeds max. value (allocate mode, S/G/ format)"},
+ { 0x41, "SBC frame format not supported (allocate mode)"},
+ { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"},
+ { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"},
+ { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"},
+ { 0x45, "DHR correction underflow (reuse mode, single buffer format)"},
+ { 0x46, "Annotation length exceeds offset (reuse mode)"},
+ { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"},
+ { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"},
+ { 0x4B, "Annotation output enabled but ASA cannote be expanded (frame list)"},
+ { 0x51, "Unsupported IF reuse mode"},
+ { 0x52, "Unsupported FL use mode"},
+ { 0x53, "Unsupported RJD use mode"},
+ { 0x54, "Unsupported inline descriptor use mode"},
+ { 0xC0, "Table buffer pool 0 depletion"},
+ { 0xC1, "Table buffer pool 1 depletion"},
+ { 0xC2, "Data buffer pool 0 depletion, no OF allocated"},
+ { 0xC3, "Data buffer pool 1 depletion, no OF allocated"},
+ { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"},
+ { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"},
+ { 0xD0, "FLC read error"},
+ { 0xD1, "FL read error"},
+ { 0xD2, "FL write error"},
+ { 0xD3, "OF SGT write error"},
+ { 0xD4, "PTA read error"},
+ { 0xD5, "PTA write error"},
+ { 0xD6, "OF SGT F-bit write error"},
+ { 0xD7, "ASA write error"},
+ { 0xE1, "FLC[ICR]=0 ICID error"},
+ { 0xE2, "FLC[ICR]=1 ICID error"},
+ { 0xE4, "source of ICID flush not trusted (BDI = 0)"},
+};
+
static const char * const cha_id_list[] = {
"",
"AES",
@@ -236,6 +284,27 @@ static void report_deco_status(struct device *jrdev, const u32 status,
status, error, idx_str, idx, err_str, err_err_code);
}
+static void report_qi_status(struct device *qidev, const u32 status,
+ const char *error)
+{
+ u8 err_id = status & JRSTA_QIERR_ERROR_MASK;
+ const char *err_str = "unidentified error value 0x";
+ char err_err_code[3] = { 0 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qi_error_list); i++)
+ if (qi_error_list[i].value == err_id)
+ break;
+
+ if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text)
+ err_str = qi_error_list[i].error_text;
+ else
+ snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
+
+ dev_err(qidev, "%08x: %s: %s%s\n",
+ status, error, err_str, err_err_code);
+}
+
static void report_jr_status(struct device *jrdev, const u32 status,
const char *error)
{
@@ -250,7 +319,7 @@ static void report_cond_code_status(struct device *jrdev, const u32 status,
status, error, __func__);
}
-void caam_jr_strstatus(struct device *jrdev, u32 status)
+void caam_strstatus(struct device *jrdev, u32 status, bool qi_v2)
{
static const struct stat_src {
void (*report_ssed)(struct device *jrdev, const u32 status,
@@ -262,7 +331,7 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
{ report_ccb_status, "CCB" },
{ report_jump_status, "Jump" },
{ report_deco_status, "DECO" },
- { NULL, "Queue Manager Interface" },
+ { report_qi_status, "Queue Manager Interface" },
{ report_jr_status, "Job Ring" },
{ report_cond_code_status, "Condition Code" },
{ NULL, NULL },
@@ -288,4 +357,8 @@ void caam_jr_strstatus(struct device *jrdev, u32 status)
else
dev_err(jrdev, "%d: unknown error source\n", ssrc);
}
-EXPORT_SYMBOL(caam_jr_strstatus);
+EXPORT_SYMBOL(caam_strstatus);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("FSL CAAM error reporting");
+MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/crypto/caam/error.h b/drivers/crypto/caam/error.h
index 5aa332bac4b0..67ea94079837 100644
--- a/drivers/crypto/caam/error.h
+++ b/drivers/crypto/caam/error.h
@@ -8,7 +8,11 @@
#ifndef CAAM_ERROR_H
#define CAAM_ERROR_H
#define CAAM_ERROR_STR_MAX 302
-void caam_jr_strstatus(struct device *jrdev, u32 status);
+
+void caam_strstatus(struct device *dev, u32 status, bool qi_v2);
+
+#define caam_jr_strstatus(jrdev, status) caam_strstatus(jrdev, status, false)
+#define caam_qi2_strstatus(qidev, status) caam_strstatus(qidev, status, true)
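With these wrappers, a QI v2 completion path can reuse the same table-driven decoder; an illustrative fragment:

static void example_check_status(struct device *qidev, u32 status)
{
	if (unlikely(status))
		caam_qi2_strstatus(qidev, status);	/* logs via dev_err() */
}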
void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize, struct scatterlist *sg,
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index acdd72016ffe..d50085a03597 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* CAAM/SEC 4.x transport/backend driver
* JobR backend functionality
diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c
index 67f7f8c42c93..b84e6c8b1e13 100644
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -84,13 +84,6 @@ static u64 times_congested;
#endif
/*
- * CPU from where the module initialised. This is required because QMan driver
- * requires CGRs to be removed from same CPU from where they were originally
- * allocated.
- */
-static int mod_init_cpu;
-
-/*
 * This is a cache of buffers, from which the users of CAAM QI driver
* can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than
* doing malloc on the hotpath.
@@ -492,12 +485,11 @@ void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx)
}
EXPORT_SYMBOL(caam_drv_ctx_rel);
-int caam_qi_shutdown(struct device *qidev)
+void caam_qi_shutdown(struct device *qidev)
{
- int i, ret;
+ int i;
struct caam_qi_priv *priv = dev_get_drvdata(qidev);
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
for_each_cpu(i, cpus) {
struct napi_struct *irqtask;
@@ -510,26 +502,12 @@ int caam_qi_shutdown(struct device *qidev)
dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
}
- /*
- * QMan driver requires CGRs to be deleted from same CPU from where they
- * were instantiated. Hence we get the module removal execute from the
- * same CPU from where it was originally inserted.
- */
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
- ret = qman_delete_cgr(&priv->cgr);
- if (ret)
- dev_err(qidev, "Deletion of CGR failed: %d\n", ret);
- else
- qman_release_cgrid(priv->cgr.cgrid);
+ qman_delete_cgr_safe(&priv->cgr);
+ qman_release_cgrid(priv->cgr.cgrid);
kmem_cache_destroy(qi_cache);
- /* Now that we're done with the CGRs, restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
-
platform_device_unregister(priv->qi_pdev);
- return ret;
}
static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
@@ -718,22 +696,11 @@ int caam_qi_init(struct platform_device *caam_pdev)
struct device *ctrldev = &caam_pdev->dev, *qidev;
struct caam_drv_private *ctrlpriv;
const cpumask_t *cpus = qman_affine_cpus();
- struct cpumask old_cpumask = current->cpus_allowed;
static struct platform_device_info qi_pdev_info = {
.name = "caam_qi",
.id = PLATFORM_DEVID_NONE
};
- /*
- * QMAN requires CGRs to be removed from same CPU+portal from where it
- * was originally allocated. Hence we need to note down the
- * initialisation CPU and use the same CPU for module exit.
- * We select the first CPU to from the list of portal owning CPUs.
- * Then we pin module init to this CPU.
- */
- mod_init_cpu = cpumask_first(cpus);
- set_cpus_allowed_ptr(current, get_cpu_mask(mod_init_cpu));
-
qi_pdev_info.parent = ctrldev;
qi_pdev_info.dma_mask = dma_get_mask(ctrldev);
qi_pdev = platform_device_register_full(&qi_pdev_info);
@@ -795,8 +762,6 @@ int caam_qi_init(struct platform_device *caam_pdev)
return -ENOMEM;
}
- /* Done with the CGRs; restore the cpus allowed mask */
- set_cpus_allowed_ptr(current, &old_cpumask);
#ifdef CONFIG_DEBUG_FS
debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl,
&times_congested, &caam_fops_u64_ro);
diff --git a/drivers/crypto/caam/qi.h b/drivers/crypto/caam/qi.h
index 357b69f57072..f93c9c7ed430 100644
--- a/drivers/crypto/caam/qi.h
+++ b/drivers/crypto/caam/qi.h
@@ -62,7 +62,6 @@ typedef void (*caam_qi_cbk)(struct caam_drv_req *drv_req, u32 status);
enum optype {
ENCRYPT,
DECRYPT,
- GIVENCRYPT,
NUM_OP
};
@@ -174,7 +173,7 @@ int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc);
void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx);
int caam_qi_init(struct platform_device *pdev);
-int caam_qi_shutdown(struct device *dev);
+void caam_qi_shutdown(struct device *dev);
/**
* qi_cache_alloc - Allocate buffers from CAAM-QI cache
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 4fb91ba39c36..457815f965c0 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -70,22 +70,22 @@
extern bool caam_little_end;
extern bool caam_imx;
-#define caam_to_cpu(len) \
-static inline u##len caam##len ## _to_cpu(u##len val) \
-{ \
- if (caam_little_end) \
- return le##len ## _to_cpu(val); \
- else \
- return be##len ## _to_cpu(val); \
+#define caam_to_cpu(len) \
+static inline u##len caam##len ## _to_cpu(u##len val) \
+{ \
+ if (caam_little_end) \
+ return le##len ## _to_cpu((__force __le##len)val); \
+ else \
+ return be##len ## _to_cpu((__force __be##len)val); \
}
-#define cpu_to_caam(len) \
-static inline u##len cpu_to_caam##len(u##len val) \
-{ \
- if (caam_little_end) \
- return cpu_to_le##len(val); \
- else \
- return cpu_to_be##len(val); \
+#define cpu_to_caam(len) \
+static inline u##len cpu_to_caam##len(u##len val) \
+{ \
+ if (caam_little_end) \
+ return (__force u##len)cpu_to_le##len(val); \
+ else \
+ return (__force u##len)cpu_to_be##len(val); \
}
caam_to_cpu(16)
@@ -633,6 +633,8 @@ struct caam_job_ring {
#define JRSTA_DECOERR_INVSIGN 0x86
#define JRSTA_DECOERR_DSASIGN 0x87
+#define JRSTA_QIERR_ERROR_MASK 0x00ff
+
#define JRSTA_CCBERR_JUMP 0x08000000
#define JRSTA_CCBERR_INDEX_MASK 0xff00
#define JRSTA_CCBERR_INDEX_SHIFT 8
diff --git a/drivers/crypto/caam/sg_sw_qm.h b/drivers/crypto/caam/sg_sw_qm.h
index d000b4df745f..b3e1aaaeffea 100644
--- a/drivers/crypto/caam/sg_sw_qm.h
+++ b/drivers/crypto/caam/sg_sw_qm.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor, Inc.
* Copyright 2016-2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __SG_SW_QM_H
diff --git a/drivers/crypto/caam/sg_sw_qm2.h b/drivers/crypto/caam/sg_sw_qm2.h
index b5b4c12179df..c9378402a5f8 100644
--- a/drivers/crypto/caam/sg_sw_qm2.h
+++ b/drivers/crypto/caam/sg_sw_qm2.h
@@ -1,35 +1,7 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2015-2016 Freescale Semiconductor, Inc.
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the names of the above-listed copyright holders nor the
- * names of any contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _SG_SW_QM2_H_
diff --git a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
index b0ba4331944b..ca549c5dc08e 100644
--- a/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
+++ b/drivers/crypto/cavium/cpt/cptvf_reqmanager.c
@@ -308,21 +308,11 @@ void do_request_cleanup(struct cpt_vf *cptvf,
}
}
- if (info->scatter_components)
- kzfree(info->scatter_components);
-
- if (info->gather_components)
- kzfree(info->gather_components);
-
- if (info->out_buffer)
- kzfree(info->out_buffer);
-
- if (info->in_buffer)
- kzfree(info->in_buffer);
-
- if (info->completion_addr)
- kzfree((void *)info->completion_addr);
-
+ kzfree(info->scatter_components);
+ kzfree(info->gather_components);
+ kzfree(info->out_buffer);
+ kzfree(info->in_buffer);
+ kzfree((void *)info->completion_addr);
kzfree(info);
}
diff --git a/drivers/crypto/cavium/nitrox/Makefile b/drivers/crypto/cavium/nitrox/Makefile
index 45b7379e8e30..e12954791673 100644
--- a/drivers/crypto/cavium/nitrox/Makefile
+++ b/drivers/crypto/cavium/nitrox/Makefile
@@ -7,3 +7,6 @@ n5pf-objs := nitrox_main.o \
nitrox_hal.o \
nitrox_reqmgr.o \
nitrox_algs.o
+
+n5pf-$(CONFIG_PCI_IOV) += nitrox_sriov.o
+n5pf-$(CONFIG_DEBUG_FS) += nitrox_debugfs.o
diff --git a/drivers/crypto/cavium/nitrox/nitrox_common.h b/drivers/crypto/cavium/nitrox/nitrox_common.h
index 312f72801af6..863143a8336b 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_common.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_common.h
@@ -12,32 +12,15 @@ void crypto_free_context(void *ctx);
struct nitrox_device *nitrox_get_first_device(void);
void nitrox_put_device(struct nitrox_device *ndev);
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev);
-int nitrox_pf_init_isr(struct nitrox_device *ndev);
-
int nitrox_common_sw_init(struct nitrox_device *ndev);
void nitrox_common_sw_cleanup(struct nitrox_device *ndev);
-void pkt_slc_resp_handler(unsigned long data);
+void pkt_slc_resp_tasklet(unsigned long data);
int nitrox_process_se_request(struct nitrox_device *ndev,
struct se_crypto_request *req,
completion_t cb,
struct skcipher_request *skreq);
void backlog_qflush_work(struct work_struct *work);
-void nitrox_config_emu_unit(struct nitrox_device *ndev);
-void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
-void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
-void nitrox_config_vfmode(struct nitrox_device *ndev, int mode);
-void nitrox_config_nps_unit(struct nitrox_device *ndev);
-void nitrox_config_pom_unit(struct nitrox_device *ndev);
-void nitrox_config_rand_unit(struct nitrox_device *ndev);
-void nitrox_config_efl_unit(struct nitrox_device *ndev);
-void nitrox_config_bmi_unit(struct nitrox_device *ndev);
-void nitrox_config_bmo_unit(struct nitrox_device *ndev);
-void nitrox_config_lbc_unit(struct nitrox_device *ndev);
-void invalidate_lbc(struct nitrox_device *ndev);
-void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
-void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
#endif /* __NITROX_COMMON_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_csr.h b/drivers/crypto/cavium/nitrox/nitrox_csr.h
index 9dcb7fdbe0a7..1ad27b1a87c5 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_csr.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_csr.h
@@ -7,9 +7,16 @@
/* EMU clusters */
#define NR_CLUSTERS 4
+/* Maximum cores per cluster;
+ * the usable count varies by partname
+ */
#define AE_CORES_PER_CLUSTER 20
#define SE_CORES_PER_CLUSTER 16
+#define AE_MAX_CORES (AE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define SE_MAX_CORES (SE_CORES_PER_CLUSTER * NR_CLUSTERS)
+#define ZIP_MAX_CORES 5
+
/* BIST registers */
#define EMU_BIST_STATUSX(_i) (0x1402700 + ((_i) * 0x40000))
#define UCD_BIST_STATUS 0x12C0070
@@ -111,6 +118,9 @@
#define LBC_ELM_VF65_128_INT 0x120C000
#define LBC_ELM_VF65_128_INT_ENA_W1S 0x120F000
+#define RST_BOOT 0x10C1600
+#define FUS_DAT1 0x10C1408
+
/* PEM registers */
#define PEM0_INT 0x1080428
@@ -1082,4 +1092,105 @@ union lbc_inval_status {
} s;
};
+/**
+ * union rst_boot - RST Boot Register
+ * @jtcsrdis: when set, internal CSR access via JTAG TAP controller
+ * is disabled
+ * @jt_tst_mode: JTAG test mode
+ * @io_supply: I/O power supply setting based on IO_VDD_SELECT pin:
+ * 0x1 = 1.8V
+ * 0x2 = 2.5V
+ * 0x4 = 3.3V
+ * All other values are reserved
+ * @pnr_mul: clock multiplier
+ * @lboot: last boot cause mask, resets only with PLL_DC_OK
+ * @rboot: determines whether core 0 remains in reset after
+ * chip cold or warm or soft reset
+ * @rboot_pin: read only access to REMOTE_BOOT pin
+ */
+union rst_boot {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_63 : 1;
+ u64 jtcsrdis : 1;
+ u64 raz_59_61 : 3;
+ u64 jt_tst_mode : 1;
+ u64 raz_40_57 : 18;
+ u64 io_supply : 3;
+ u64 raz_30_36 : 7;
+ u64 pnr_mul : 6;
+ u64 raz_12_23 : 12;
+ u64 lboot : 10;
+ u64 rboot : 1;
+ u64 rboot_pin : 1;
+#else
+ u64 rboot_pin : 1;
+ u64 rboot : 1;
+ u64 lboot : 10;
+ u64 raz_12_23 : 12;
+ u64 pnr_mul : 6;
+ u64 raz_30_36 : 7;
+ u64 io_supply : 3;
+ u64 raz_40_57 : 18;
+ u64 jt_tst_mode : 1;
+ u64 raz_59_61 : 3;
+ u64 jtcsrdis : 1;
+ u64 raz_63 : 1;
+#endif
+ };
+};
+
+/**
+ * union fus_dat1 - Fuse Data 1 Register
+ * @pll_mul: main clock PLL multiplier hardware limit
+ * @pll_half_dis: main clock PLL control
+ * @efus_lck: efuse lockdown
+ * @zip_info: ZIP information
+ * @bar2_sz_conf: when zero, BAR2 size conforms to
+ * PCIe specification
+ * @efus_ign: efuse ignore
+ * @nozip: ZIP disable
+ * @pll_alt_matrix: select alternate PLL matrix
+ * @pll_bwadj_denom: select CLKF denominator for
+ * BWADJ value
+ * @chip_id: chip ID
+ */
+union fus_dat1 {
+ u64 value;
+ struct {
+#if (defined(__BIG_ENDIAN_BITFIELD))
+ u64 raz_57_63 : 7;
+ u64 pll_mul : 3;
+ u64 pll_half_dis : 1;
+ u64 raz_43_52 : 10;
+ u64 efus_lck : 3;
+ u64 raz_26_39 : 14;
+ u64 zip_info : 5;
+ u64 bar2_sz_conf : 1;
+ u64 efus_ign : 1;
+ u64 nozip : 1;
+ u64 raz_11_17 : 7;
+ u64 pll_alt_matrix : 1;
+ u64 pll_bwadj_denom : 2;
+ u64 chip_id : 8;
+#else
+ u64 chip_id : 8;
+ u64 pll_bwadj_denom : 2;
+ u64 pll_alt_matrix : 1;
+ u64 raz_11_17 : 7;
+ u64 nozip : 1;
+ u64 efus_ign : 1;
+ u64 bar2_sz_conf : 1;
+ u64 zip_info : 5;
+ u64 raz_26_39 : 14;
+ u64 efus_lck : 3;
+ u64 raz_43_52 : 10;
+ u64 pll_half_dis : 1;
+ u64 pll_mul : 3;
+ u64 raz_57_63 : 7;
+#endif
+ };
+};
+
#endif /* __NITROX_CSR_H */
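
As a quick check on the bitfield layout above, the core clock encoded in RST_BOOT can be recovered with a one-liner; this mirrors the computation nitrox_get_hwinfo() performs later in this series, and the 50 MHz PLL reference is an assumption taken from PLL_REF_CLK in nitrox_hal.c below:

#define PLL_REF_CLK_MHZ 50	/* matches PLL_REF_CLK in nitrox_hal.c */

/* pnr_mul = 14 decodes to (14 + 3) * 50 = 850 MHz */
static inline unsigned int rst_boot_to_mhz(u64 raw)
{
	union rst_boot rst;	/* union layout as defined above */

	rst.value = raw;
	return (rst.pnr_mul + 3) * PLL_REF_CLK_MHZ;
}
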
diff --git a/drivers/crypto/cavium/nitrox/nitrox_debugfs.c b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
new file mode 100644
index 000000000000..5f3cd5fafe04
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_debugfs.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+#include "nitrox_csr.h"
+#include "nitrox_dev.h"
+
+static int firmware_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
+ return 0;
+}
+
+static int firmware_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, firmware_show, inode->i_private);
+}
+
+static const struct file_operations firmware_fops = {
+ .owner = THIS_MODULE,
+ .open = firmware_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int device_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d]\n", ndev->idx);
+ seq_printf(s, " Part Name: %s\n", ndev->hw.partname);
+ seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq);
+ seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id);
+ seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
+ seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n",
+ ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores);
+
+ return 0;
+}
+
+static int nitrox_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, device_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int stats_show(struct seq_file *s, void *v)
+{
+ struct nitrox_device *ndev = s->private;
+
+ seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx);
+ seq_printf(s, " Posted: %llu\n",
+ (u64)atomic64_read(&ndev->stats.posted));
+ seq_printf(s, " Completed: %llu\n",
+ (u64)atomic64_read(&ndev->stats.completed));
+ seq_printf(s, " Dropped: %llu\n",
+ (u64)atomic64_read(&ndev->stats.dropped));
+
+ return 0;
+}
+
+static int nitrox_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, stats_show, inode->i_private);
+}
+
+static const struct file_operations nitrox_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = nitrox_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{
+ debugfs_remove_recursive(ndev->debugfs_dir);
+ ndev->debugfs_dir = NULL;
+}
+
+int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ struct dentry *dir, *f;
+
+ dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ ndev->debugfs_dir = dir;
+ f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("device", 0400, dir, ndev, &nitrox_fops);
+ if (!f)
+ goto err;
+ f = debugfs_create_file("stats", 0400, dir, ndev, &nitrox_stats_fops);
+ if (!f)
+ goto err;
+
+ return 0;
+
+err:
+ nitrox_debugfs_exit(ndev);
+ return -ENODEV;
+}
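
Once loaded, the driver exposes these nodes under /sys/kernel/debug/n5pf/ (KBUILD_MODNAME from the Makefile above); reading them returns the seq_file output assembled here. For instance, the stats node would print something like the following, with hypothetical counter values:

NITROX [0] Request Statistics
 Posted: 1024
 Completed: 1020
 Dropped: 0
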
diff --git a/drivers/crypto/cavium/nitrox/nitrox_dev.h b/drivers/crypto/cavium/nitrox/nitrox_dev.h
index af596455b420..283e252385fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_dev.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_dev.h
@@ -5,92 +5,123 @@
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/if.h>
#define VERSION_LEN 32
+/**
+ * struct nitrox_cmdq - NITROX command queue
+ * @cmd_qlock: command queue lock
+ * @resp_qlock: response queue lock
+ * @backlog_qlock: backlog queue lock
+ * @ndev: NITROX device
+ * @response_head: submitted request list
+ * @backlog_head: backlog queue
+ * @dbell_csr_addr: doorbell register address for this queue
+ * @compl_cnt_csr_addr: completion count register address of the slc port
+ * @base: command queue base address
+ * @dma: dma address of the base
+ * @pending_count: requests pending at the device
+ * @backlog_count: backlog request count
+ * @write_idx: next write index for the command
+ * @instr_size: command size
+ * @qno: command queue number
+ * @qsize: command queue size
+ * @unalign_base: unaligned base address
+ * @unalign_dma: unaligned dma address
+ */
struct nitrox_cmdq {
- /* command queue lock */
- spinlock_t cmdq_lock;
- /* response list lock */
- spinlock_t response_lock;
- /* backlog list lock */
- spinlock_t backlog_lock;
-
- /* request submitted to chip, in progress */
+ spinlock_t cmd_qlock;
+ spinlock_t resp_qlock;
+ spinlock_t backlog_qlock;
+
+ struct nitrox_device *ndev;
struct list_head response_head;
- /* hw queue full, hold in backlog list */
struct list_head backlog_head;
- /* doorbell address */
u8 __iomem *dbell_csr_addr;
- /* base address of the queue */
- u8 *head;
+ u8 __iomem *compl_cnt_csr_addr;
+ u8 *base;
+ dma_addr_t dma;
- struct nitrox_device *ndev;
- /* flush pending backlog commands */
struct work_struct backlog_qflush;
- /* requests posted waiting for completion */
atomic_t pending_count;
- /* requests in backlog queues */
atomic_t backlog_count;
int write_idx;
- /* command size 32B/64B */
u8 instr_size;
u8 qno;
u32 qsize;
- /* unaligned addresses */
- u8 *head_unaligned;
- dma_addr_t dma_unaligned;
- /* dma address of the base */
- dma_addr_t dma;
+ u8 *unalign_base;
+ dma_addr_t unalign_dma;
};
+/**
+ * struct nitrox_hw - NITROX hardware information
+ * @partname: part name, e.g. CNN55xxx-xxx
+ * @fw_name: firmware version
+ * @freq: NITROX frequency
+ * @vendor_id: vendor ID
+ * @device_id: device ID
+ * @revision_id: revision ID
+ * @se_cores: number of symmetric cores
+ * @ae_cores: number of asymmetric cores
+ * @zip_cores: number of zip cores
+ */
struct nitrox_hw {
- /* firmware version */
+ char partname[IFNAMSIZ * 2];
char fw_name[VERSION_LEN];
+ int freq;
u16 vendor_id;
u16 device_id;
u8 revision_id;
- /* CNN55XX cores */
u8 se_cores;
u8 ae_cores;
u8 zip_cores;
};
-#define MAX_MSIX_VECTOR_NAME 20
-/**
- * vectors for queues (64 AE, 64 SE and 64 ZIP) and
- * error condition/mailbox.
- */
-#define MAX_MSIX_VECTORS 192
-
-struct nitrox_msix {
- struct msix_entry *entries;
- char **names;
- DECLARE_BITMAP(irqs, MAX_MSIX_VECTORS);
- u32 nr_entries;
+struct nitrox_stats {
+ atomic64_t posted;
+ atomic64_t completed;
+ atomic64_t dropped;
};
-struct bh_data {
- /* slc port completion count address */
- u8 __iomem *completion_cnt_csr_addr;
+#define IRQ_NAMESZ 32
+
+struct nitrox_q_vector {
+ char name[IRQ_NAMESZ];
+ bool valid;
+ int ring;
+ struct tasklet_struct resp_tasklet;
+ union {
+ struct nitrox_cmdq *cmdq;
+ struct nitrox_device *ndev;
+ };
+};
- struct nitrox_cmdq *cmdq;
- struct tasklet_struct resp_handler;
+/*
+ * NITROX Device states
+ */
+enum ndev_state {
+ __NDEV_NOT_READY,
+ __NDEV_READY,
+ __NDEV_IN_RESET,
};
-struct nitrox_bh {
- struct bh_data *slc;
+/* NITROX support modes for VF(s) */
+enum vf_mode {
+ __NDEV_MODE_PF,
+ __NDEV_MODE_VF16,
+ __NDEV_MODE_VF32,
+ __NDEV_MODE_VF64,
+ __NDEV_MODE_VF128,
};
-/* NITROX-V driver state */
-#define NITROX_UCODE_LOADED 0
-#define NITROX_READY 1
+#define __NDEV_SRIOV_BIT 0
/* command queue size */
#define DEFAULT_CMD_QLEN 2048
@@ -98,7 +129,6 @@ struct nitrox_bh {
#define CMD_TIMEOUT 2000
#define DEV(ndev) ((struct device *)(&(ndev)->pdev->dev))
-#define PF_MODE 0
#define NITROX_CSR_ADDR(ndev, offset) \
((ndev)->bar_addr + (offset))
@@ -108,17 +138,18 @@ struct nitrox_bh {
* @list: pointer to linked list of devices
* @bar_addr: iomap address
* @pdev: PCI device information
- * @status: NITROX status
+ * @state: NITROX device state
+ * @flags: flags indicating device features
* @timeout: Request timeout in jiffies
* @refcnt: Device usage count
* @idx: device index (0..N)
* @node: NUMA node id attached
* @qlen: Command queue length
* @nr_queues: Number of command queues
+ * @mode: Device mode PF/VF
* @ctx_pool: DMA pool for crypto context
- * @pkt_cmdqs: SE Command queues
- * @msix: MSI-X information
- * @bh: post processing work
+ * @pkt_inq: Packet input rings
+ * @qvec: MSI-X queue vectors information
* @hw: hardware information
* @debugfs_dir: debugfs directory
*/
@@ -128,7 +159,8 @@ struct nitrox_device {
u8 __iomem *bar_addr;
struct pci_dev *pdev;
- unsigned long status;
+ atomic_t state;
+ unsigned long flags;
unsigned long timeout;
refcount_t refcnt;
@@ -136,13 +168,16 @@ struct nitrox_device {
int node;
u16 qlen;
u16 nr_queues;
+ int num_vfs;
+ enum vf_mode mode;
struct dma_pool *ctx_pool;
- struct nitrox_cmdq *pkt_cmdqs;
+ struct nitrox_cmdq *pkt_inq;
- struct nitrox_msix msix;
- struct nitrox_bh bh;
+ struct nitrox_q_vector *qvec;
+ int num_vecs;
+ struct nitrox_stats stats;
struct nitrox_hw hw;
#if IS_ENABLED(CONFIG_DEBUG_FS)
struct dentry *debugfs_dir;
@@ -173,9 +208,22 @@ static inline void nitrox_write_csr(struct nitrox_device *ndev, u64 offset,
writeq(value, (ndev->bar_addr + offset));
}
-static inline int nitrox_ready(struct nitrox_device *ndev)
+static inline bool nitrox_ready(struct nitrox_device *ndev)
{
- return test_bit(NITROX_READY, &ndev->status);
+ return atomic_read(&ndev->state) == __NDEV_READY;
}
+#ifdef CONFIG_DEBUG_FS
+int nitrox_debugfs_init(struct nitrox_device *ndev);
+void nitrox_debugfs_exit(struct nitrox_device *ndev);
+#else
+static inline int nitrox_debugfs_init(struct nitrox_device *ndev)
+{
+ return 0;
+}
+
+static inline void nitrox_debugfs_exit(struct nitrox_device *ndev)
+{ }
+#endif
+
#endif /* __NITROX_DEV_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.c b/drivers/crypto/cavium/nitrox/nitrox_hal.c
index ab4ccf2f9e77..a9b82387cf53 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_hal.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.c
@@ -4,6 +4,8 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
+#define PLL_REF_CLK 50
+
/**
* emu_enable_cores - Enable EMU cluster cores.
* @ndev: N5 device
@@ -117,7 +119,7 @@ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
union nps_pkt_in_instr_rsize pkt_in_rsize;
u64 offset;
@@ -256,7 +258,7 @@ void nitrox_config_nps_unit(struct nitrox_device *ndev)
/* disable ILK interface */
core_gbl_vfcfg.value = 0;
core_gbl_vfcfg.s.ilk_disable = 1;
- core_gbl_vfcfg.s.cfg = PF_MODE;
+ core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
/* config input and solicit ports */
nitrox_config_pkt_input_rings(ndev);
@@ -400,3 +402,68 @@ void nitrox_config_lbc_unit(struct nitrox_device *ndev)
offset = LBC_ELM_VF65_128_INT_ENA_W1S;
nitrox_write_csr(ndev, offset, (~0ULL));
}
+
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
+{
+ union nps_core_gbl_vfcfg vfcfg;
+
+ vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
+ vfcfg.s.cfg = mode & 0x7;
+
+ nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
+}
+
+void nitrox_get_hwinfo(struct nitrox_device *ndev)
+{
+ union emu_fuse_map emu_fuse;
+ union rst_boot rst_boot;
+ union fus_dat1 fus_dat1;
+ unsigned char name[IFNAMSIZ * 2] = {};
+ int i, dead_cores;
+ u64 offset;
+
+ /* get core frequency */
+ offset = RST_BOOT;
+ rst_boot.value = nitrox_read_csr(ndev, offset);
+ ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;
+
+ for (i = 0; i < NR_CLUSTERS; i++) {
+ offset = EMU_FUSE_MAPX(i);
+ emu_fuse.value = nitrox_read_csr(ndev, offset);
+ if (emu_fuse.s.valid) {
+ dead_cores = hweight32(emu_fuse.s.ae_fuse);
+ ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
+ dead_cores = hweight16(emu_fuse.s.se_fuse);
+ ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
+ }
+ }
+ /* find zip hardware availability */
+ offset = FUS_DAT1;
+ fus_dat1.value = nitrox_read_csr(ndev, offset);
+ if (!fus_dat1.nozip) {
+ dead_cores = hweight8(fus_dat1.zip_info);
+ ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
+ }
+
+ /* determine the partname: CNN55<cores>-<freq><pincount>-<rev> */
+ if (ndev->hw.ae_cores == AE_MAX_CORES) {
+ switch (ndev->hw.se_cores) {
+ case SE_MAX_CORES:
+ i = snprintf(name, sizeof(name), "CNN5560");
+ break;
+ case 40:
+ i = snprintf(name, sizeof(name), "CNN5560s");
+ break;
+ }
+ } else if (ndev->hw.ae_cores == (AE_MAX_CORES / 2)) {
+ i = snprintf(name, sizeof(name), "CNN5530");
+ } else {
+ i = snprintf(name, sizeof(name), "CNN5560i");
+ }
+
+ snprintf(name + i, sizeof(name) - i, "-%3dBG676-1.%u",
+ ndev->hw.freq, ndev->hw.revision_id);
+
+ /* copy partname */
+ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
+}
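
For concreteness, a fully-fused part (AE_MAX_CORES = 80 AE and SE_MAX_CORES = 64 SE cores) with pnr_mul = 14 and revision_id = 1 would be reported as follows; the values are illustrative:

/*
 * freq     = (14 + 3) * 50 MHz = 850
 * name     = "CNN5560" + "-850BG676-1.1"
 * partname = "CNN5560-850BG676-1.1"
 */
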
diff --git a/drivers/crypto/cavium/nitrox/nitrox_hal.h b/drivers/crypto/cavium/nitrox/nitrox_hal.h
new file mode 100644
index 000000000000..489ee64c119e
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_hal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_HAL_H
+#define __NITROX_HAL_H
+
+#include "nitrox_dev.h"
+
+void nitrox_config_emu_unit(struct nitrox_device *ndev);
+void nitrox_config_pkt_input_rings(struct nitrox_device *ndev);
+void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev);
+void nitrox_config_nps_unit(struct nitrox_device *ndev);
+void nitrox_config_pom_unit(struct nitrox_device *ndev);
+void nitrox_config_rand_unit(struct nitrox_device *ndev);
+void nitrox_config_efl_unit(struct nitrox_device *ndev);
+void nitrox_config_bmi_unit(struct nitrox_device *ndev);
+void nitrox_config_bmo_unit(struct nitrox_device *ndev);
+void nitrox_config_lbc_unit(struct nitrox_device *ndev);
+void invalidate_lbc(struct nitrox_device *ndev);
+void enable_pkt_input_ring(struct nitrox_device *ndev, int ring);
+void enable_pkt_solicit_port(struct nitrox_device *ndev, int port);
+void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode);
+void nitrox_get_hwinfo(struct nitrox_device *ndev);
+
+#endif /* __NITROX_HAL_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.c b/drivers/crypto/cavium/nitrox/nitrox_isr.c
index ee0d70ba25d5..88a77b8fb3fb 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_isr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.c
@@ -6,9 +6,16 @@
#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
+#include "nitrox_hal.h"
+/*
+ * One vector for each type of ring:
+ * NPS packet ring, AQMQ ring and ZQMQ ring
+ */
#define NR_RING_VECTORS 3
-#define NPS_CORE_INT_ACTIVE_ENTRY 192
+/* base entry for packet ring/port */
+#define PKT_RING_MSIX_BASE 0
+#define NON_RING_MSIX_BASE 192
/**
* nps_pkt_slc_isr - IRQ handler for NPS solicit port
@@ -17,13 +24,14 @@
*/
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
- struct bh_data *slc = data;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = data;
+ union nps_pkt_slc_cnts slc_cnts;
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
- pkt_slc_cnts.value = readq(slc->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* New packet on SLC output port */
- if (pkt_slc_cnts.s.slc_int)
- tasklet_hi_schedule(&slc->resp_handler);
+ if (slc_cnts.s.slc_int)
+ tasklet_hi_schedule(&qvec->resp_tasklet);
return IRQ_HANDLED;
}
@@ -190,165 +198,92 @@ static void clear_bmi_err_intr(struct nitrox_device *ndev)
dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}
+static void nps_core_int_tasklet(unsigned long data)
+{
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_device *ndev = qvec->ndev;
+
+ /* if PF mode, do queue recovery */
+ if (ndev->mode == __NDEV_MODE_PF) {
+ } else {
+ /*
+ * if VF(s) are enabled, communicate the error
+ * information to the VF(s)
+ */
+ }
+}
+
/**
- * clear_nps_core_int_active - clear NPS_CORE_INT_ACTIVE interrupts
- * @ndev: NITROX device
+ * nps_core_int_isr - interrupt handler for NITROX errors and
+ * mailbox communication
*/
-static void clear_nps_core_int_active(struct nitrox_device *ndev)
+static irqreturn_t nps_core_int_isr(int irq, void *data)
{
- union nps_core_int_active core_int_active;
+ struct nitrox_device *ndev = data;
+ union nps_core_int_active core_int;
- core_int_active.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
+ core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);
- if (core_int_active.s.nps_core)
+ if (core_int.s.nps_core)
clear_nps_core_err_intr(ndev);
- if (core_int_active.s.nps_pkt)
+ if (core_int.s.nps_pkt)
clear_nps_pkt_err_intr(ndev);
- if (core_int_active.s.pom)
+ if (core_int.s.pom)
clear_pom_err_intr(ndev);
- if (core_int_active.s.pem)
+ if (core_int.s.pem)
clear_pem_err_intr(ndev);
- if (core_int_active.s.lbc)
+ if (core_int.s.lbc)
clear_lbc_err_intr(ndev);
- if (core_int_active.s.efl)
+ if (core_int.s.efl)
clear_efl_err_intr(ndev);
- if (core_int_active.s.bmi)
+ if (core_int.s.bmi)
clear_bmi_err_intr(ndev);
/* If more work remains, set resend so the ISR is called back */
- core_int_active.s.resend = 1;
- nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int_active.value);
-}
-
-static irqreturn_t nps_core_int_isr(int irq, void *data)
-{
- struct nitrox_device *ndev = data;
-
- clear_nps_core_int_active(ndev);
+ core_int.s.resend = 1;
+ nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);
return IRQ_HANDLED;
}
-static int nitrox_enable_msix(struct nitrox_device *ndev)
+void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
- struct msix_entry *entries;
- char **names;
- int i, nr_entries, ret;
-
- /*
- * PF MSI-X vectors
- *
- * Entry 0: NPS PKT ring 0
- * Entry 1: AQMQ ring 0
- * Entry 2: ZQM ring 0
- * Entry 3: NPS PKT ring 1
- * Entry 4: AQMQ ring 1
- * Entry 5: ZQM ring 1
- * ....
- * Entry 192: NPS_CORE_INT_ACTIVE
- */
- nr_entries = (ndev->nr_queues * NR_RING_VECTORS) + 1;
- entries = kcalloc_node(nr_entries, sizeof(struct msix_entry),
- GFP_KERNEL, ndev->node);
- if (!entries)
- return -ENOMEM;
-
- names = kcalloc(nr_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
-
- /* fill entires */
- for (i = 0; i < (nr_entries - 1); i++)
- entries[i].entry = i;
-
- entries[i].entry = NPS_CORE_INT_ACTIVE_ENTRY;
-
- for (i = 0; i < nr_entries; i++) {
- *(names + i) = kzalloc(MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i))) {
- ret = -ENOMEM;
- goto msix_fail;
- }
- }
- ndev->msix.entries = entries;
- ndev->msix.names = names;
- ndev->msix.nr_entries = nr_entries;
-
- ret = pci_enable_msix_exact(ndev->pdev, ndev->msix.entries,
- ndev->msix.nr_entries);
- if (ret) {
- dev_err(&ndev->pdev->dev, "Failed to enable MSI-X IRQ(s) %d\n",
- ret);
- goto msix_fail;
- }
- return 0;
-
-msix_fail:
- for (i = 0; i < nr_entries; i++)
- kfree(*(names + i));
-
- kfree(entries);
- kfree(names);
- return ret;
-}
-
-static void nitrox_cleanup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- int i;
-
- if (!ndev->bh.slc)
- return;
-
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
-
- tasklet_disable(&bh->resp_handler);
- tasklet_kill(&bh->resp_handler);
- }
- kfree(ndev->bh.slc);
- ndev->bh.slc = NULL;
-}
-
-static int nitrox_setup_pkt_slc_bh(struct nitrox_device *ndev)
-{
- u32 size;
+ struct pci_dev *pdev = ndev->pdev;
int i;
- size = ndev->nr_queues * sizeof(struct bh_data);
- ndev->bh.slc = kzalloc(size, GFP_KERNEL);
- if (!ndev->bh.slc)
- return -ENOMEM;
+ for (i = 0; i < ndev->num_vecs; i++) {
+ struct nitrox_q_vector *qvec;
+ int vec;
- for (i = 0; i < ndev->nr_queues; i++) {
- struct bh_data *bh = &ndev->bh.slc[i];
- u64 offset;
+ qvec = ndev->qvec + i;
+ if (!qvec->valid)
+ continue;
- offset = NPS_PKT_SLC_CNTSX(i);
- /* pre calculate completion count address */
- bh->completion_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- bh->cmdq = &ndev->pkt_cmdqs[i];
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ irq_set_affinity_hint(vec, NULL);
+ free_irq(vec, qvec);
- tasklet_init(&bh->resp_handler, pkt_slc_resp_handler,
- (unsigned long)bh);
+ tasklet_disable(&qvec->resp_tasklet);
+ tasklet_kill(&qvec->resp_tasklet);
+ qvec->valid = false;
}
-
- return 0;
+ kfree(ndev->qvec);
+ pci_free_irq_vectors(pdev);
}
-static int nitrox_request_irqs(struct nitrox_device *ndev)
+int nitrox_register_interrupts(struct nitrox_device *ndev)
{
struct pci_dev *pdev = ndev->pdev;
- struct msix_entry *msix_ent = ndev->msix.entries;
- int nr_ring_vectors, i = 0, ring, cpu, ret;
- char *name;
+ struct nitrox_q_vector *qvec;
+ int nr_vecs, vec, cpu;
+ int ret, i;
/*
* PF MSI-X vectors
@@ -357,112 +292,76 @@ static int nitrox_request_irqs(struct nitrox_device *ndev)
* Entry 1: AQMQ ring 0
* Entry 2: ZQM ring 0
* Entry 3: NPS PKT ring 1
+ * Entry 4: AQMQ ring 1
+ * Entry 5: ZQM ring 1
* ....
* Entry 192: NPS_CORE_INT_ACTIVE
*/
- nr_ring_vectors = ndev->nr_queues * NR_RING_VECTORS;
-
- /* request irq for pkt ring/ports only */
- while (i < nr_ring_vectors) {
- name = *(ndev->msix.names + i);
- ring = (i / NR_RING_VECTORS);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-slc-ring%d",
- ndev->idx, ring);
+ nr_vecs = pci_msix_vec_count(pdev);
- ret = request_irq(msix_ent[i].vector, nps_pkt_slc_isr, 0,
- name, &ndev->bh.slc[ring]);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
- return ret;
- }
- cpu = ring % num_online_cpus();
- irq_set_affinity_hint(msix_ent[i].vector, get_cpu_mask(cpu));
-
- set_bit(i, ndev->msix.irqs);
- i += NR_RING_VECTORS;
- }
-
- /* Request IRQ for NPS_CORE_INT_ACTIVE */
- name = *(ndev->msix.names + i);
- snprintf(name, MAX_MSIX_VECTOR_NAME, "n5(%d)-nps-core-int", ndev->idx);
- ret = request_irq(msix_ent[i].vector, nps_core_int_isr, 0, name, ndev);
- if (ret) {
- dev_err(&pdev->dev, "failed to get irq %d for %s\n",
- msix_ent[i].vector, name);
+ /* Enable MSI-X */
+ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
return ret;
}
- set_bit(i, ndev->msix.irqs);
+ ndev->num_vecs = nr_vecs;
- return 0;
-}
-
-static void nitrox_disable_msix(struct nitrox_device *ndev)
-{
- struct msix_entry *msix_ent = ndev->msix.entries;
- char **names = ndev->msix.names;
- int i = 0, ring, nr_ring_vectors;
-
- nr_ring_vectors = ndev->msix.nr_entries - 1;
-
- /* clear pkt ring irqs */
- while (i < nr_ring_vectors) {
- if (test_and_clear_bit(i, ndev->msix.irqs)) {
- ring = (i / NR_RING_VECTORS);
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, &ndev->bh.slc[ring]);
- }
- i += NR_RING_VECTORS;
+ ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
+ if (!ndev->qvec) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
}
- irq_set_affinity_hint(msix_ent[i].vector, NULL);
- free_irq(msix_ent[i].vector, ndev);
- clear_bit(i, ndev->msix.irqs);
- kfree(ndev->msix.entries);
- for (i = 0; i < ndev->msix.nr_entries; i++)
- kfree(*(names + i));
+ /* request irqs for packet rings/ports */
+ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
+ qvec = &ndev->qvec[i];
- kfree(names);
- pci_disable_msix(ndev->pdev);
-}
-
-/**
- * nitrox_pf_cleanup_isr: Cleanup PF MSI-X and IRQ
- * @ndev: NITROX device
- */
-void nitrox_pf_cleanup_isr(struct nitrox_device *ndev)
-{
- nitrox_disable_msix(ndev);
- nitrox_cleanup_pkt_slc_bh(ndev);
-}
+ qvec->ring = i / NR_RING_VECTORS;
+ if (qvec->ring >= ndev->nr_queues)
+ break;
-/**
- * nitrox_init_isr - Initialize PF MSI-X vectors and IRQ
- * @ndev: NITROX device
- *
- * Return: 0 on success, a negative value on failure.
- */
-int nitrox_pf_init_isr(struct nitrox_device *ndev)
-{
- int err;
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
+ qvec->ring);
+ goto irq_fail;
+ }
+ cpu = qvec->ring % num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
- err = nitrox_setup_pkt_slc_bh(ndev);
- if (err)
- return err;
+ tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
+ (unsigned long)qvec);
+ qvec->cmdq = &ndev->pkt_inq[qvec->ring];
+ qvec->valid = true;
+ }
- err = nitrox_enable_msix(ndev);
- if (err)
- goto msix_fail;
+ /* request irqs for non ring vectors */
+ i = NON_RING_MSIX_BASE;
+ qvec = &ndev->qvec[i];
- err = nitrox_request_irqs(ndev);
- if (err)
+ snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
+ /* get the vector number */
+ vec = pci_irq_vector(pdev, i);
+ ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
+ if (ret) {
+ dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
goto irq_fail;
+ }
+ cpu = num_online_cpus();
+ irq_set_affinity_hint(vec, get_cpu_mask(cpu));
+
+ tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
+ (unsigned long)qvec);
+ qvec->ndev = ndev;
+ qvec->valid = true;
return 0;
irq_fail:
- nitrox_disable_msix(ndev);
-msix_fail:
- nitrox_cleanup_pkt_slc_bh(ndev);
- return err;
+ nitrox_unregister_interrupts(ndev);
+ return ret;
}
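
The arithmetic behind the request loop above: with NR_RING_VECTORS == 3 and the packet vector first in each triple, the packet-ring vector index is a simple affine map, and the lone non-ring vector sits at entry 192. A sketch (the helper name is mine, not the driver's):

/* MSI-X entry used for packet ring 'ring'; AQMQ/ZQMQ follow at +1/+2 */
static inline int pkt_ring_to_msix_entry(int ring)
{
	return PKT_RING_MSIX_BASE + ring * NR_RING_VECTORS;
}
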
diff --git a/drivers/crypto/cavium/nitrox/nitrox_isr.h b/drivers/crypto/cavium/nitrox/nitrox_isr.h
new file mode 100644
index 000000000000..63418a6cc52c
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_isr.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NITROX_ISR_H
+#define __NITROX_ISR_H
+
+#include "nitrox_dev.h"
+
+int nitrox_register_interrupts(struct nitrox_device *ndev);
+void nitrox_unregister_interrupts(struct nitrox_device *ndev);
+
+#endif /* __NITROX_ISR_H */
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 4d31df07777f..2260efa42308 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -17,30 +17,27 @@
#define CRYPTO_CTX_SIZE 256
-/* command queue alignments */
-#define PKT_IN_ALIGN 16
+/* packet input ring alignments */
+#define PKTIN_Q_ALIGN_BYTES 16
-static int cmdq_common_init(struct nitrox_cmdq *cmdq)
+static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
{
struct nitrox_device *ndev = cmdq->ndev;
- u32 qsize;
-
- qsize = (ndev->qlen) * cmdq->instr_size;
- cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
- (qsize + PKT_IN_ALIGN),
- &cmdq->dma_unaligned,
- GFP_KERNEL);
- if (!cmdq->head_unaligned)
+
+ cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
+ cmdq->unalign_base = dma_zalloc_coherent(DEV(ndev), cmdq->qsize,
+ &cmdq->unalign_dma,
+ GFP_KERNEL);
+ if (!cmdq->unalign_base)
return -ENOMEM;
- cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
- cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
- cmdq->qsize = (qsize + PKT_IN_ALIGN);
+ cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
+ cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
cmdq->write_idx = 0;
- spin_lock_init(&cmdq->response_lock);
- spin_lock_init(&cmdq->cmdq_lock);
- spin_lock_init(&cmdq->backlog_lock);
+ spin_lock_init(&cmdq->cmd_qlock);
+ spin_lock_init(&cmdq->resp_qlock);
+ spin_lock_init(&cmdq->backlog_qlock);
INIT_LIST_HEAD(&cmdq->response_head);
INIT_LIST_HEAD(&cmdq->backlog_head);
@@ -51,68 +48,83 @@ static int cmdq_common_init(struct nitrox_cmdq *cmdq)
return 0;
}
-static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
+static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
+{
+ cmdq->write_idx = 0;
+ atomic_set(&cmdq->pending_count, 0);
+ atomic_set(&cmdq->backlog_count, 0);
+}
+
+static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
{
struct nitrox_device *ndev = cmdq->ndev;
+ if (!cmdq->unalign_base)
+ return;
+
cancel_work_sync(&cmdq->backlog_qflush);
dma_free_coherent(DEV(ndev), cmdq->qsize,
- cmdq->head_unaligned, cmdq->dma_unaligned);
-
- atomic_set(&cmdq->pending_count, 0);
- atomic_set(&cmdq->backlog_count, 0);
+ cmdq->unalign_base, cmdq->unalign_dma);
+ nitrox_cmdq_reset(cmdq);
cmdq->dbell_csr_addr = NULL;
- cmdq->head = NULL;
+ cmdq->compl_cnt_csr_addr = NULL;
+ cmdq->unalign_base = NULL;
+ cmdq->base = NULL;
+ cmdq->unalign_dma = 0;
cmdq->dma = 0;
cmdq->qsize = 0;
cmdq->instr_size = 0;
}
-static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
+static void nitrox_free_pktin_queues(struct nitrox_device *ndev)
{
int i;
for (i = 0; i < ndev->nr_queues; i++) {
- struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];
+ struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
- cmdq_common_cleanup(cmdq);
+ nitrox_cmdq_cleanup(cmdq);
}
- kfree(ndev->pkt_cmdqs);
- ndev->pkt_cmdqs = NULL;
+ kfree(ndev->pkt_inq);
+ ndev->pkt_inq = NULL;
}
-static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
+static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev)
{
- int i, err, size;
+ int i, err;
- size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
- ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
- if (!ndev->pkt_cmdqs)
+ ndev->pkt_inq = kcalloc_node(ndev->nr_queues,
+ sizeof(struct nitrox_cmdq),
+ GFP_KERNEL, ndev->node);
+ if (!ndev->pkt_inq)
return -ENOMEM;
for (i = 0; i < ndev->nr_queues; i++) {
struct nitrox_cmdq *cmdq;
u64 offset;
- cmdq = &ndev->pkt_cmdqs[i];
+ cmdq = &ndev->pkt_inq[i];
cmdq->ndev = ndev;
cmdq->qno = i;
cmdq->instr_size = sizeof(struct nps_pkt_instr);
+ /* packet input ring doorbell address */
offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
- /* SE ring doorbell address for this queue */
cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
+ /* packet solicit port completion count address */
+ offset = NPS_PKT_SLC_CNTSX(i);
+ cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
- err = cmdq_common_init(cmdq);
+ err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
if (err)
- goto pkt_cmdq_fail;
+ goto pktq_fail;
}
return 0;
-pkt_cmdq_fail:
- nitrox_cleanup_pkt_cmdqs(ndev);
+pktq_fail:
+ nitrox_free_pktin_queues(ndev);
return err;
}
@@ -122,7 +134,7 @@ static int create_crypto_dma_pool(struct nitrox_device *ndev)
/* Crypto context pool, 16 byte aligned */
size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
- ndev->ctx_pool = dma_pool_create("crypto-context",
+ ndev->ctx_pool = dma_pool_create("nitrox-context",
DEV(ndev), size, 16, 0);
if (!ndev->ctx_pool)
return -ENOMEM;
@@ -149,7 +161,7 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
void *vaddr;
dma_addr_t dma;
- vaddr = dma_pool_alloc(ndev->ctx_pool, (GFP_KERNEL | __GFP_ZERO), &dma);
+ vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
if (!vaddr)
return NULL;
@@ -194,7 +206,7 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_init_pkt_cmdqs(ndev);
+ err = nitrox_alloc_pktin_queues(ndev);
if (err)
destroy_crypto_dma_pool(ndev);
@@ -207,6 +219,6 @@ int nitrox_common_sw_init(struct nitrox_device *ndev)
*/
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_cleanup_pkt_cmdqs(ndev);
+ nitrox_free_pktin_queues(ndev);
destroy_crypto_dma_pool(ndev);
}
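
The alignment fix-up in nitrox_cmdq_init() keeps the CPU and DMA views in lock-step: the DMA handle is rounded up to the 16-byte boundary and the CPU pointer is advanced by the identical delta. A worked example, assuming dma_zalloc_coherent() returned an 8-byte-aligned handle:

/*
 * unalign_dma = 0x1008
 * dma         = PTR_ALIGN(0x1008, 16) = 0x1010
 * base        = unalign_base + (0x1010 - 0x1008)
 *             = unalign_base + 8
 */
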
diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
index fee7cb2ce747..6595c95af9f1 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
@@ -11,13 +11,15 @@
#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
+#include "nitrox_hal.h"
+#include "nitrox_isr.h"
#define CNN55XX_DEV_ID 0x12
#define MAX_PF_QUEUES 64
#define UCODE_HLEN 48
#define SE_GROUP 0
-#define DRIVER_VERSION "1.0"
+#define DRIVER_VERSION "1.1"
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
@@ -42,6 +44,15 @@ static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");
+#ifdef CONFIG_PCI_IOV
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
+#else
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ return 0;
+}
+#endif
+
/**
* struct ucode - Firmware Header
* @id: microcode ID
@@ -136,9 +147,6 @@ static int nitrox_load_fw(struct nitrox_device *ndev, const char *fw_name)
write_to_ucd_unit(ndev, ucode);
release_firmware(fw);
- set_bit(NITROX_UCODE_LOADED, &ndev->status);
- /* barrier to sync with other cpus */
- smp_mb__after_atomic();
return 0;
}
@@ -210,7 +218,7 @@ void nitrox_put_device(struct nitrox_device *ndev)
smp_mb__after_atomic();
}
-static int nitrox_reset_device(struct pci_dev *pdev)
+static int nitrox_device_flr(struct pci_dev *pdev)
{
int pos = 0;
@@ -220,15 +228,10 @@ static int nitrox_reset_device(struct pci_dev *pdev)
return -ENOMEM;
}
- pos = pci_pcie_cap(pdev);
- if (!pos)
- return -ENOTTY;
+ /* perform FLR if the device supports it */
+ if (pcie_has_flr(pdev))
+ pcie_flr(pdev);
- if (!pci_wait_for_pending_transaction(pdev))
- dev_err(&pdev->dev, "waiting for pending transaction\n");
-
- pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
- msleep(100);
pci_restore_state(pdev);
return 0;
@@ -242,7 +245,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
if (err)
return err;
- err = nitrox_pf_init_isr(ndev);
+ err = nitrox_register_interrupts(ndev);
if (err)
nitrox_common_sw_cleanup(ndev);
@@ -251,7 +254,7 @@ static int nitrox_pf_sw_init(struct nitrox_device *ndev)
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
- nitrox_pf_cleanup_isr(ndev);
+ nitrox_unregister_interrupts(ndev);
nitrox_common_sw_cleanup(ndev);
}
@@ -284,26 +287,6 @@ static int nitrox_bist_check(struct nitrox_device *ndev)
return 0;
}
-static void nitrox_get_hwinfo(struct nitrox_device *ndev)
-{
- union emu_fuse_map emu_fuse;
- u64 offset;
- int i;
-
- for (i = 0; i < NR_CLUSTERS; i++) {
- u8 dead_cores;
-
- offset = EMU_FUSE_MAPX(i);
- emu_fuse.value = nitrox_read_csr(ndev, offset);
- if (emu_fuse.s.valid) {
- dead_cores = hweight32(emu_fuse.s.ae_fuse);
- ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
- dead_cores = hweight16(emu_fuse.s.se_fuse);
- ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
- }
- }
-}
-
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
int err;
@@ -336,135 +319,6 @@ static int nitrox_pf_hw_init(struct nitrox_device *ndev)
return 0;
}
-#if IS_ENABLED(CONFIG_DEBUG_FS)
-static int registers_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
- u64 offset;
-
- /* NPS DMA stats */
- offset = NPS_STATS_PKT_DMA_RD_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_RD_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = NPS_STATS_PKT_DMA_WR_CNT;
- seq_printf(s, "NPS_STATS_PKT_DMA_WR_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- /* BMI/BMO stats */
- offset = BMI_NPS_PKT_CNT;
- seq_printf(s, "BMI_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
- offset = BMO_NPS_SLC_PKT_CNT;
- seq_printf(s, "BMO_NPS_PKT_CNT 0x%016llx\n",
- nitrox_read_csr(ndev, offset));
-
- return 0;
-}
-
-static int registers_open(struct inode *inode, struct file *file)
-{
- return single_open(file, registers_show, inode->i_private);
-}
-
-static const struct file_operations register_fops = {
- .owner = THIS_MODULE,
- .open = registers_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int firmware_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "Version: %s\n", ndev->hw.fw_name);
- return 0;
-}
-
-static int firmware_open(struct inode *inode, struct file *file)
-{
- return single_open(file, firmware_show, inode->i_private);
-}
-
-static const struct file_operations firmware_fops = {
- .owner = THIS_MODULE,
- .open = firmware_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int nitrox_show(struct seq_file *s, void *v)
-{
- struct nitrox_device *ndev = s->private;
-
- seq_printf(s, "NITROX-5 [idx: %d]\n", ndev->idx);
- seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id);
- seq_printf(s, " Cores [AE: %u SE: %u]\n",
- ndev->hw.ae_cores, ndev->hw.se_cores);
- seq_printf(s, " Number of Queues: %u\n", ndev->nr_queues);
- seq_printf(s, " Queue length: %u\n", ndev->qlen);
- seq_printf(s, " Node: %u\n", ndev->node);
-
- return 0;
-}
-
-static int nitrox_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nitrox_show, inode->i_private);
-}
-
-static const struct file_operations nitrox_fops = {
- .owner = THIS_MODULE,
- .open = nitrox_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
- debugfs_remove_recursive(ndev->debugfs_dir);
- ndev->debugfs_dir = NULL;
-}
-
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- struct dentry *dir, *f;
-
- dir = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!dir)
- return -ENOMEM;
-
- ndev->debugfs_dir = dir;
- f = debugfs_create_file("counters", 0400, dir, ndev, &register_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops);
- if (!f)
- goto err;
- f = debugfs_create_file("nitrox", 0400, dir, ndev, &nitrox_fops);
- if (!f)
- goto err;
-
- return 0;
-
-err:
- nitrox_debugfs_exit(ndev);
- return -ENODEV;
-}
-#else
-static int nitrox_debugfs_init(struct nitrox_device *ndev)
-{
- return 0;
-}
-
-static void nitrox_debugfs_exit(struct nitrox_device *ndev)
-{
-}
-#endif
-
/**
* nitrox_probe - NITROX Initialization function.
* @pdev: PCI device information struct
@@ -487,7 +341,7 @@ static int nitrox_probe(struct pci_dev *pdev,
return err;
/* do FLR */
- err = nitrox_reset_device(pdev);
+ err = nitrox_device_flr(pdev);
if (err) {
dev_err(&pdev->dev, "FLR failed\n");
pci_disable_device(pdev);
@@ -555,7 +409,12 @@ static int nitrox_probe(struct pci_dev *pdev,
if (err)
goto pf_hw_fail;
- set_bit(NITROX_READY, &ndev->status);
+ /* clear the statistics */
+ atomic64_set(&ndev->stats.posted, 0);
+ atomic64_set(&ndev->stats.completed, 0);
+ atomic64_set(&ndev->stats.dropped, 0);
+
+ atomic_set(&ndev->state, __NDEV_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
@@ -567,7 +426,7 @@ static int nitrox_probe(struct pci_dev *pdev,
crypto_fail:
nitrox_debugfs_exit(ndev);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
pf_hw_fail:
@@ -602,11 +461,16 @@ static void nitrox_remove(struct pci_dev *pdev)
dev_info(DEV(ndev), "Removing Device %x:%x\n",
ndev->hw.vendor_id, ndev->hw.device_id);
- clear_bit(NITROX_READY, &ndev->status);
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
/* barrier to sync with other cpus */
smp_mb__after_atomic();
nitrox_remove_from_devlist(ndev);
+
+#ifdef CONFIG_PCI_IOV
+ /* disable SR-IOV */
+ nitrox_sriov_configure(pdev, 0);
+#endif
nitrox_crypto_unregister();
nitrox_debugfs_exit(ndev);
nitrox_pf_sw_cleanup(ndev);
@@ -632,6 +496,9 @@ static struct pci_driver nitrox_driver = {
.probe = nitrox_probe,
.remove = nitrox_remove,
.shutdown = nitrox_shutdown,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = nitrox_sriov_configure,
+#endif
};
module_pci_driver(nitrox_driver);
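
The probe path publishes readiness with an atomic state write followed by a memory barrier, and consumers gate submissions on nitrox_ready(); a condensed sketch of the pairing, with function names mine:

static void publish_ready(struct nitrox_device *ndev)
{
	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus, as in nitrox_probe() */
	smp_mb__after_atomic();
}

static bool can_submit(struct nitrox_device *ndev)
{
	return nitrox_ready(ndev);	/* state == __NDEV_READY */
}
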
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
index 4a362fc22f62..3987cd84c033 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
@@ -382,11 +382,11 @@ static inline void backlog_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->backlog);
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_add_tail(&sr->backlog, &cmdq->backlog_head);
atomic_inc(&cmdq->backlog_count);
atomic_set(&sr->status, REQ_BACKLOG);
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
}
static inline void response_list_add(struct nitrox_softreq *sr,
@@ -394,17 +394,17 @@ static inline void response_list_add(struct nitrox_softreq *sr,
{
INIT_LIST_HEAD(&sr->response);
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_add_tail(&sr->response, &cmdq->response_head);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static inline void response_list_del(struct nitrox_softreq *sr,
struct nitrox_cmdq *cmdq)
{
- spin_lock_bh(&cmdq->response_lock);
+ spin_lock_bh(&cmdq->resp_qlock);
list_del(&sr->response);
- spin_unlock_bh(&cmdq->response_lock);
+ spin_unlock_bh(&cmdq->resp_qlock);
}
static struct nitrox_softreq *
@@ -439,11 +439,11 @@ static void post_se_instr(struct nitrox_softreq *sr,
int idx;
u8 *ent;
- spin_lock_bh(&cmdq->cmdq_lock);
+ spin_lock_bh(&cmdq->cmd_qlock);
idx = cmdq->write_idx;
/* copy the instruction */
- ent = cmdq->head + (idx * cmdq->instr_size);
+ ent = cmdq->base + (idx * cmdq->instr_size);
memcpy(ent, &sr->instr, cmdq->instr_size);
atomic_set(&sr->status, REQ_POSTED);
@@ -459,7 +459,10 @@ static void post_se_instr(struct nitrox_softreq *sr,
cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
- spin_unlock_bh(&cmdq->cmdq_lock);
+ spin_unlock_bh(&cmdq->cmd_qlock);
+
+ /* increment the posted command count */
+ atomic64_inc(&ndev->stats.posted);
}
static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
@@ -471,7 +474,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
if (!atomic_read(&cmdq->backlog_count))
return 0;
- spin_lock_bh(&cmdq->backlog_lock);
+ spin_lock_bh(&cmdq->backlog_qlock);
list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
struct skcipher_request *skreq;
@@ -494,7 +497,7 @@ static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
/* backlog requests are posted, wakeup with -EINPROGRESS */
skcipher_request_complete(skreq, -EINPROGRESS);
}
- spin_unlock_bh(&cmdq->backlog_lock);
+ spin_unlock_bh(&cmdq->backlog_qlock);
return ret;
}
@@ -508,8 +511,11 @@ static int nitrox_enqueue_request(struct nitrox_softreq *sr)
post_backlog_cmds(cmdq);
if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
- if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+ if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+ /* increment drop count */
+ atomic64_inc(&ndev->stats.dropped);
return -ENOSPC;
+ }
/* add to backlog list */
backlog_list_add(sr, cmdq);
return -EBUSY;
@@ -572,7 +578,7 @@ int nitrox_process_se_request(struct nitrox_device *ndev,
/* select the queue */
qno = smp_processor_id() % ndev->nr_queues;
- sr->cmdq = &ndev->pkt_cmdqs[qno];
+ sr->cmdq = &ndev->pkt_inq[qno];
/*
* 64-Byte Instruction Format
@@ -694,6 +700,7 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
READ_ONCE(sr->resp.orh));
}
atomic_dec(&cmdq->pending_count);
+ atomic64_inc(&ndev->stats.completed);
/* sync with other cpus */
smp_mb__after_atomic();
/* remove from response list */
@@ -714,18 +721,18 @@ static void process_response_list(struct nitrox_cmdq *cmdq)
}
/**
- * pkt_slc_resp_handler - post processing of SE responses
+ * pkt_slc_resp_tasklet - post processing of SE responses
*/
-void pkt_slc_resp_handler(unsigned long data)
+void pkt_slc_resp_tasklet(unsigned long data)
{
- struct bh_data *bh = (void *)(uintptr_t)(data);
- struct nitrox_cmdq *cmdq = bh->cmdq;
- union nps_pkt_slc_cnts pkt_slc_cnts;
+ struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
+ struct nitrox_cmdq *cmdq = qvec->cmdq;
+ union nps_pkt_slc_cnts slc_cnts;
/* read completion count */
- pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
+ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
/* resend the interrupt if more work to do */
- pkt_slc_cnts.s.resend = 1;
+ slc_cnts.s.resend = 1;
process_response_list(cmdq);
@@ -733,7 +740,7 @@ void pkt_slc_resp_handler(unsigned long data)
* clear the interrupt with resend bit enabled,
* MSI-X interrupt generates if Completion count > Threshold
*/
- writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
+ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
/* order the writes */
mmiowb();
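
The tasklet above uses a read/modify/write-back idiom on the SLC counters register: the write-back both clears the counted completions and, with s.resend set, asks the hardware to raise a fresh MSI-X if the completion count is still above threshold. A condensed sketch (helper name mine):

static void slc_irq_rearm(struct nitrox_cmdq *cmdq)
{
	union nps_pkt_slc_cnts cnts;

	cnts.value = readq(cmdq->compl_cnt_csr_addr);	/* snapshot count */
	cnts.s.resend = 1;	/* re-raise MSI-X if work remains */
	writeq(cnts.value, cmdq->compl_cnt_csr_addr);	/* clear count */
}
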
diff --git a/drivers/crypto/cavium/nitrox/nitrox_sriov.c b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
new file mode 100644
index 000000000000..30c0aa874583
--- /dev/null
+++ b/drivers/crypto/cavium/nitrox/nitrox_sriov.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "nitrox_dev.h"
+#include "nitrox_hal.h"
+#include "nitrox_common.h"
+#include "nitrox_isr.h"
+
+static inline bool num_vfs_valid(int num_vfs)
+{
+ bool valid = false;
+
+ switch (num_vfs) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ valid = true;
+ break;
+ }
+
+ return valid;
+}
+
+static inline enum vf_mode num_vfs_to_mode(int num_vfs)
+{
+ enum vf_mode mode = 0;
+
+ switch (num_vfs) {
+ case 0:
+ mode = __NDEV_MODE_PF;
+ break;
+ case 16:
+ mode = __NDEV_MODE_VF16;
+ break;
+ case 32:
+ mode = __NDEV_MODE_VF32;
+ break;
+ case 64:
+ mode = __NDEV_MODE_VF64;
+ break;
+ case 128:
+ mode = __NDEV_MODE_VF128;
+ break;
+ }
+
+ return mode;
+}
+
+static void pf_sriov_cleanup(struct nitrox_device *ndev)
+{
+ /* PF has no queues in SR-IOV mode */
+ atomic_set(&ndev->state, __NDEV_NOT_READY);
+ /* unregister crypto algorithms */
+ nitrox_crypto_unregister();
+
+ /* cleanup PF resources */
+ nitrox_unregister_interrupts(ndev);
+ nitrox_common_sw_cleanup(ndev);
+}
+
+static int pf_sriov_init(struct nitrox_device *ndev)
+{
+ int err;
+
+ /* allocate resources for PF */
+ err = nitrox_common_sw_init(ndev);
+ if (err)
+ return err;
+
+ err = nitrox_register_interrupts(ndev);
+ if (err) {
+ nitrox_common_sw_cleanup(ndev);
+ return err;
+ }
+
+ /* configure the packet queues */
+ nitrox_config_pkt_input_rings(ndev);
+ nitrox_config_pkt_solicit_ports(ndev);
+
+ /* set device to ready state */
+ atomic_set(&ndev->state, __NDEV_READY);
+
+ /* register crypto algorithms */
+ return nitrox_crypto_register();
+}
+
+static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+ int err;
+
+ if (!num_vfs_valid(num_vfs)) {
+ dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs);
+ return -EINVAL;
+ }
+
+ if (pci_num_vf(pdev) == num_vfs)
+ return num_vfs;
+
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err);
+ return err;
+ }
+ dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs);
+
+ ndev->num_vfs = num_vfs;
+ ndev->mode = num_vfs_to_mode(num_vfs);
+ /* set bit in flags */
+ set_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ /* cleanup PF resources */
+ pf_sriov_cleanup(ndev);
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return num_vfs;
+}
+
+static int nitrox_sriov_disable(struct pci_dev *pdev)
+{
+ struct nitrox_device *ndev = pci_get_drvdata(pdev);
+
+ if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags))
+ return 0;
+
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n");
+ return -EPERM;
+ }
+ pci_disable_sriov(pdev);
+ /* clear bit in flags */
+ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags);
+
+ ndev->num_vfs = 0;
+ ndev->mode = __NDEV_MODE_PF;
+
+ config_nps_core_vfcfg_mode(ndev, ndev->mode);
+
+ return pf_sriov_init(ndev);
+}
+
+int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ if (!num_vfs)
+ return nitrox_sriov_disable(pdev);
+
+ return nitrox_sriov_enable(pdev, num_vfs);
+}
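
In operation this hook is driven by the standard PCI sysfs knob, e.g. writing 16, 32, 64 or 128 to /sys/bus/pci/devices/<BDF>/sriov_numvfs, and the enum ordering in nitrox_dev.h gives the cfg value written to NPS_CORE_GBL_VFCFG directly:

/*
 * num_vfs   mode                cfg (mode & 0x7)
 *   0       __NDEV_MODE_PF      0
 *  16       __NDEV_MODE_VF16    1
 *  32       __NDEV_MODE_VF32    2
 *  64       __NDEV_MODE_VF64    3
 * 128       __NDEV_MODE_VF128   4
 */
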
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
index 94b5bcf5b628..ca4630b8395f 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
@@ -102,7 +102,7 @@ static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
ctx->u.aes.key_len = key_len / 2;
sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);
- return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
+ return crypto_sync_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
@@ -151,12 +151,13 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
(ctx->u.aes.key_len != AES_KEYSIZE_256))
fallback = 1;
if (fallback) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq,
+ ctx->u.aes.tfm_skcipher);
/* Use the fallback to process the request for any
* unsupported unit sizes or key sizes
*/
- skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->u.aes.tfm_skcipher);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -203,12 +204,12 @@ static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *fallback_tfm;
+ struct crypto_sync_skcipher *fallback_tfm;
ctx->complete = ccp_aes_xts_complete;
ctx->u.aes.key_len = 0;
- fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
+ fallback_tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
CRYPTO_ALG_ASYNC |
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback_tfm)) {
@@ -226,7 +227,7 @@ static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
+ crypto_free_sync_skcipher(ctx->u.aes.tfm_skcipher);
}
static int ccp_register_aes_xts_alg(struct list_head *head,
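
The conversion above is the generic pattern for on-stack fallback requests after the sync-skcipher split: allocate a crypto_sync_skcipher, then use SYNC_SKCIPHER_REQUEST_ON_STACK() plus skcipher_request_set_sync_tfm(). A condensed, self-contained sketch of that pattern (error handling trimmed; function name mine):

#include <crypto/skcipher.h>

static int xts_fallback_encrypt(struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int len, u8 *iv,
				const u8 *key, unsigned int keylen)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("xts(aes)", 0,
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		/* request lives on the stack; safe only for sync tfms */
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tfm);

		skcipher_request_set_sync_tfm(subreq, tfm);
		skcipher_request_set_callback(subreq, 0, NULL, NULL);
		skcipher_request_set_crypt(subreq, src, dst, len, iv);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}
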
diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h
index b9fd090c46c2..28819e11db96 100644
--- a/drivers/crypto/ccp/ccp-crypto.h
+++ b/drivers/crypto/ccp/ccp-crypto.h
@@ -88,7 +88,7 @@ static inline struct ccp_crypto_ahash_alg *
/***** AES related defines *****/
struct ccp_aes_ctx {
/* Fallback cipher for XTS with unsupported unit sizes */
- struct crypto_skcipher *tfm_skcipher;
+ struct crypto_sync_skcipher *tfm_skcipher;
/* Cipher used to generate CMAC K1/K2 keys */
struct crypto_cipher *tfm_cipher;
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 72790d88236d..d64a78ccc03e 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -31,8 +31,9 @@
((psp_master->api_major) >= _maj && \
(psp_master->api_minor) >= _min)
-#define DEVICE_NAME "sev"
-#define SEV_FW_FILE "amd/sev.fw"
+#define DEVICE_NAME "sev"
+#define SEV_FW_FILE "amd/sev.fw"
+#define SEV_FW_NAME_SIZE 64
static DEFINE_MUTEX(sev_cmd_mutex);
static struct sev_misc_dev *misc_dev;
@@ -423,7 +424,7 @@ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
static int sev_get_api_version(void)
{
struct sev_user_data_status *status;
- int error, ret;
+ int error = 0, ret;
status = &psp_master->status_cmd_buf;
ret = sev_platform_status(status, &error);
@@ -440,6 +441,41 @@ static int sev_get_api_version(void)
return 0;
}
+static int sev_get_firmware(struct device *dev,
+ const struct firmware **firmware)
+{
+ char fw_name_specific[SEV_FW_NAME_SIZE];
+ char fw_name_subset[SEV_FW_NAME_SIZE];
+
+ snprintf(fw_name_specific, sizeof(fw_name_specific),
+ "amd/amd_sev_fam%.2xh_model%.2xh.sbin",
+ boot_cpu_data.x86, boot_cpu_data.x86_model);
+
+ snprintf(fw_name_subset, sizeof(fw_name_subset),
+ "amd/amd_sev_fam%.2xh_model%.1xxh.sbin",
+ boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4);
+
+ /* Check for SEV FW for a particular model.
+ * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h
+ *
+ * or
+ *
+ * Check for SEV FW common to a subset of models.
+ * Ex. amd_sev_fam17h_model0xh.sbin for
+ * Family 17h Model 00h -- Family 17h Model 0Fh
+ *
+ * or
+ *
+ * Fall back to the generic name: sev.fw
+ */
+ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) ||
+ (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0))
+ return 0;
+
+ return -ENOENT;
+}
+
/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
static int sev_update_firmware(struct device *dev)
{
@@ -449,9 +485,10 @@ static int sev_update_firmware(struct device *dev)
struct page *p;
u64 data_size;
- ret = request_firmware(&firmware, SEV_FW_FILE, dev);
- if (ret < 0)
+ if (sev_get_firmware(dev, &firmware) == -ENOENT) {
+ dev_dbg(dev, "No SEV firmware file present\n");
return -1;
+ }
/*
* SEV FW expects the physical address given to it to be 32
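
A worked example of the naming scheme above: on a Family 17h Model 01h part (boot_cpu_data.x86 = 0x17, x86_model = 0x01) the three candidates tried, in order, are:

/*
 * fw_name_specific = "amd/amd_sev_fam17h_model01h.sbin"
 * fw_name_subset   = "amd/amd_sev_fam17h_model0xh.sbin"  ((model & 0xf0) >> 4)
 * fallback         = "amd/sev.fw"
 */
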
diff --git a/drivers/crypto/ccp/sp-platform.c b/drivers/crypto/ccp/sp-platform.c
index 71734f254fd1..b75dc7db2d4a 100644
--- a/drivers/crypto/ccp/sp-platform.c
+++ b/drivers/crypto/ccp/sp-platform.c
@@ -33,8 +33,31 @@ struct sp_platform {
unsigned int irq_count;
};
-static const struct acpi_device_id sp_acpi_match[];
-static const struct of_device_id sp_of_match[];
+static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 0,
+#ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv3_platform,
+#endif
+ },
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id sp_acpi_match[] = {
+ { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id sp_of_match[] = {
+ { .compatible = "amd,ccp-seattle-v1a",
+ .data = (const void *)&dev_vdata[0] },
+ { },
+};
+MODULE_DEVICE_TABLE(of, sp_of_match);
+#endif
static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev)
{
@@ -201,32 +224,6 @@ static int sp_platform_resume(struct platform_device *pdev)
}
#endif
-static const struct sp_dev_vdata dev_vdata[] = {
- {
- .bar = 0,
-#ifdef CONFIG_CRYPTO_DEV_SP_CCP
- .ccp_vdata = &ccpv3_platform,
-#endif
- },
-};
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id sp_acpi_match[] = {
- { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, sp_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id sp_of_match[] = {
- { .compatible = "amd,ccp-seattle-v1a",
- .data = (const void *)&dev_vdata[0] },
- { },
-};
-MODULE_DEVICE_TABLE(of, sp_of_match);
-#endif
-
static struct platform_driver sp_platform_driver = {
.driver = {
.name = "ccp",
diff --git a/drivers/crypto/ccree/cc_hw_queue_defs.h b/drivers/crypto/ccree/cc_hw_queue_defs.h
index a091ae57f902..45985b955d2c 100644
--- a/drivers/crypto/ccree/cc_hw_queue_defs.h
+++ b/drivers/crypto/ccree/cc_hw_queue_defs.h
@@ -449,8 +449,7 @@ static inline void set_flow_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
- enum drv_cipher_mode mode)
+static inline void set_cipher_mode(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_MODE, mode);
}
@@ -461,8 +460,7 @@ static inline void set_cipher_mode(struct cc_hw_desc *pdesc,
* @pdesc: pointer to HW descriptor struct
* @mode: Any one of the modes defined in [CC7x-DESC]
*/
-static inline void set_cipher_config0(struct cc_hw_desc *pdesc,
- enum drv_crypto_direction mode)
+static inline void set_cipher_config0(struct cc_hw_desc *pdesc, int mode)
{
pdesc->word[4] |= FIELD_PREP(WORD4_CIPHER_CONF0, mode);
}
diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
index 010bbf607797..db203f8be429 100644
--- a/drivers/crypto/chelsio/chcr_algo.c
+++ b/drivers/crypto/chelsio/chcr_algo.c
@@ -673,7 +673,7 @@ static int chcr_sg_ent_in_wr(struct scatterlist *src,
return min(srclen, dstlen);
}
-static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
+static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
u32 flags,
struct scatterlist *src,
struct scatterlist *dst,
@@ -683,9 +683,9 @@ static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
{
int err;
- SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
- skcipher_request_set_tfm(subreq, cipher);
+ skcipher_request_set_sync_tfm(subreq, cipher);
skcipher_request_set_callback(subreq, flags, NULL, NULL);
skcipher_request_set_crypt(subreq, src, dst,
nbytes, iv);
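
This is the generic shape of the crypto_sync_skcipher conversion repeated throughout this series: the fallback is allocated without CRYPTO_ALG_ASYNC (a sync tfm is synchronous by construction), and one-shot requests live on the stack. A minimal kernel-side sketch of the pattern, with key setup and error paths trimmed ("cbc(aes)" is an arbitrary choice here):

#include <crypto/skcipher.h>
#include <linux/err.h>

static int sync_fallback_encrypt(struct scatterlist *src,
				 struct scatterlist *dst,
				 unsigned int nbytes, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0,
					 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* crypto_sync_skcipher_setkey() omitted for brevity */
	{
		SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, tfm);

		skcipher_request_set_sync_tfm(subreq, tfm);
		skcipher_request_set_callback(subreq, 0, NULL, NULL);
		skcipher_request_set_crypt(subreq, src, dst, nbytes, iv);
		err = crypto_skcipher_encrypt(subreq);
		skcipher_request_zero(subreq);
	}

	crypto_free_sync_skcipher(tfm);
	return err;
}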
@@ -856,13 +856,14 @@ static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
int err = 0;
- crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
+ crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
+ CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
+ cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+ err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ablkctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
return err;
}
@@ -1337,8 +1338,7 @@ static int chcr_device_init(struct chcr_context *ctx)
}
ctx->dev = u_ctx->dev;
adap = padap(ctx->dev);
- ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
- adap->vres.ncrypto_fc);
+ ntxq = u_ctx->lldi.ntxq;
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
txq_perchan = ntxq / u_ctx->lldi.nchan;
spin_lock(&ctx->dev->lock_chcr_dev);
@@ -1369,8 +1369,8 @@ static int chcr_cra_init(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1399,8 +1399,8 @@ static int chcr_rfc3686_init(struct crypto_tfm *tfm)
/*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes))
* cannot be used as fallback in chcr_handle_cipher_response
*/
- ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ablkctx->sw_cipher)) {
pr_err("failed to allocate fallback for %s\n", alg->cra_name);
return PTR_ERR(ablkctx->sw_cipher);
@@ -1415,7 +1415,7 @@ static void chcr_cra_exit(struct crypto_tfm *tfm)
struct chcr_context *ctx = crypto_tfm_ctx(tfm);
struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
- crypto_free_skcipher(ablkctx->sw_cipher);
+ crypto_free_sync_skcipher(ablkctx->sw_cipher);
if (ablkctx->aes_generic)
crypto_free_cipher(ablkctx->aes_generic);
}
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 62249d4ed373..2c472e3c6aeb 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -43,7 +43,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
static struct cxgb4_uld_info chcr_uld_info = {
.name = DRV_MODULE_NAME,
.nrxq = MAX_ULD_QSETS,
- .ntxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from the FW config file */
.rxq_size = 1024,
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index 0d2c70c344f3..d37ef41f9ebe 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -170,7 +170,7 @@ static inline struct chcr_context *h_ctx(struct crypto_ahash *tfm)
}
struct ablk_ctx {
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
struct crypto_cipher *aes_generic;
__be32 key_ctx_hdr;
unsigned int enckey_len;
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index 0997e166ea57..20209e29f814 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -234,8 +234,7 @@ static void chtls_send_reset(struct sock *sk, int mode, struct sk_buff *skb)
return;
out:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
}
static void release_tcp_port(struct sock *sk)
@@ -406,12 +405,10 @@ static int wait_for_states(struct sock *sk, unsigned int states)
int chtls_disconnect(struct sock *sk, int flags)
{
- struct chtls_sock *csk;
struct tcp_sock *tp;
int err;
tp = tcp_sk(sk);
- csk = rcu_dereference_sk_user_data(sk);
chtls_purge_recv_queue(sk);
chtls_purge_receive_queue(sk);
chtls_purge_write_queue(sk);
@@ -1014,7 +1011,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
- const struct tcphdr *tcph;
struct inet_sock *newinet;
const struct iphdr *iph;
struct net_device *ndev;
@@ -1036,7 +1032,6 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (!dst)
goto free_sk;
- tcph = (struct tcphdr *)(iph + 1);
n = dst_neigh_lookup(dst, &iph->saddr);
if (!n)
goto free_sk;
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/crypto/chelsio/chtls/chtls_main.c
index f59b044ebd25..f472c51abe56 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/crypto/chelsio/chtls/chtls_main.c
@@ -272,8 +272,7 @@ static void chtls_free_uld(struct chtls_dev *cdev)
for (i = 0; i < (1 << RSPQ_HASH_BITS); i++)
kfree_skb(cdev->rspq_skb_cache[i]);
kfree(cdev->lldi);
- if (cdev->askb)
- kfree_skb(cdev->askb);
+ kfree_skb(cdev->askb);
kfree(cdev);
}
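
This hunk, like the chtls_cm.c one above, relies on kfree_skb() returning immediately when passed NULL, which makes the explicit guard redundant:

#include <linux/skbuff.h>

/* Before: a NULL check that kfree_skb() already performs internally. */
static void drop_skb_guarded(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}

/* After: equivalent behavior, one line shorter. */
static void drop_skb(struct sk_buff *skb)
{
	kfree_skb(skb);
}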
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 56bd28174f52..4e6ff32f8a7e 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -28,9 +28,24 @@
#define DCP_MAX_CHANS 4
#define DCP_BUF_SZ PAGE_SIZE
+#define DCP_SHA_PAY_SZ 64
#define DCP_ALIGNMENT 64
+/*
+ * Null hashes to align with hardware behavior on imx6sl and imx6ull;
+ * these are flipped for consistency with the hardware output.
+ */
+static const uint8_t sha1_null_hash[] =
+ "\x09\x07\xd8\xaf\x90\x18\x60\x95\xef\xbf"
+ "\x55\x32\x0d\x4b\x6b\x5e\xee\xa3\x39\xda";
+
+static const uint8_t sha256_null_hash[] =
+ "\x55\xb8\x52\x78\x1b\x99\x95\xa4"
+ "\x4c\x93\x9b\x64\xe4\x41\xae\x27"
+ "\x24\xb9\x6f\x99\xc8\xf4\xfb\x9a"
+ "\x14\x1c\xfc\x98\x42\xc4\xb0\xe3";
+
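
Reversed byte-for-byte, these constants are exactly the standard empty-message digests (SHA-1("") = da39a3ee...afd80709, SHA-256("") = e3b0c442...7852b855), which is the same flip the result-copy loop performs further down. A quick stand-alone check for the SHA-1 case:

#include <stdio.h>

static const unsigned char sha1_null_hash[20] = {
	0x09, 0x07, 0xd8, 0xaf, 0x90, 0x18, 0x60, 0x95, 0xef, 0xbf,
	0x55, 0x32, 0x0d, 0x4b, 0x6b, 0x5e, 0xee, 0xa3, 0x39, 0xda,
};

int main(void)
{
	int i;

	/* Prints da39a3ee5e6b4b0d3255bfef95601890afd80709, i.e. SHA-1("") */
	for (i = 19; i >= 0; i--)
		printf("%02x", sha1_null_hash[i]);
	printf("\n");
	return 0;
}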
/* DCP DMA descriptor. */
struct dcp_dma_desc {
uint32_t next_cmd_addr;
@@ -48,6 +63,7 @@ struct dcp_coherent_block {
uint8_t aes_in_buf[DCP_BUF_SZ];
uint8_t aes_out_buf[DCP_BUF_SZ];
uint8_t sha_in_buf[DCP_BUF_SZ];
+ uint8_t sha_out_buf[DCP_SHA_PAY_SZ];
uint8_t aes_key[2 * AES_KEYSIZE_128];
@@ -84,7 +100,7 @@ struct dcp_async_ctx {
unsigned int hot:1;
/* Crypto-specific context */
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
unsigned int key_len;
uint8_t key[AES_KEYSIZE_128];
};
@@ -99,6 +115,11 @@ struct dcp_sha_req_ctx {
unsigned int fini:1;
};
+struct dcp_export_state {
+ struct dcp_sha_req_ctx req_ctx;
+ struct dcp_async_ctx async_ctx;
+};
+
/*
* There can be only one instance of the MXS DCP due to the
* design of the Linux Crypto API.
@@ -209,6 +230,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
+ if (actx->fill % AES_BLOCK_SIZE) {
+ dev_err(sdcp->dev, "Invalid block size!\n");
+ ret = -EINVAL;
+ goto aes_done_run;
+ }
+
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -238,6 +265,7 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
+aes_done_run:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
@@ -264,13 +292,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
uint8_t *out_tmp, *src_buf, *dst_buf = NULL;
uint32_t dst_off = 0;
+ uint32_t last_out_len = 0;
uint8_t *key = sdcp->coh->aes_key;
int ret = 0;
int split = 0;
- unsigned int i, len, clen, rem = 0;
+ unsigned int i, len, clen, rem = 0, tlen = 0;
int init = 0;
+ bool limit_hit = false;
actx->fill = 0;
@@ -289,6 +319,11 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
for_each_sg(req->src, src, nents, i) {
src_buf = sg_virt(src);
len = sg_dma_len(src);
+ tlen += len;
+ limit_hit = tlen > req->nbytes;
+
+ if (limit_hit)
+ len = req->nbytes - (tlen - len);
do {
if (actx->fill + len > out_off)
@@ -305,13 +340,15 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
* If we filled the buffer or this is the last SG,
* submit the buffer.
*/
- if (actx->fill == out_off || sg_is_last(src)) {
+ if (actx->fill == out_off || sg_is_last(src) ||
+ limit_hit) {
ret = mxs_dcp_run_aes(actx, req, init);
if (ret)
return ret;
init = 0;
out_tmp = out_buf;
+ last_out_len = actx->fill;
while (dst && actx->fill) {
if (!split) {
dst_buf = sg_virt(dst);
@@ -334,6 +371,19 @@ static int mxs_dcp_aes_block_crypt(struct crypto_async_request *arq)
}
}
} while (len);
+
+ if (limit_hit)
+ break;
+ }
+
+ /* Copy out the last block as the IV for CBC chaining */
+ if (!rctx->ecb) {
+ if (rctx->enc)
+ memcpy(req->info, out_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
+ else
+ memcpy(req->info, in_buf+(last_out_len-AES_BLOCK_SIZE),
+ AES_BLOCK_SIZE);
}
return ret;
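
The copy above is standard CBC chaining across requests: when encrypting, the next IV is the last ciphertext block (found in the output buffer); when decrypting, it is the last ciphertext block of the input. The rule in isolation, as a sketch (buffers assumed block-aligned):

#include <string.h>

#define AES_BLOCK_SIZE	16

/* Pick the chaining IV for the next CBC request. */
static void cbc_next_iv(unsigned char *iv, int encrypt,
			const unsigned char *in_buf,
			const unsigned char *out_buf, size_t len)
{
	/* Ciphertext is in out_buf when encrypting, in in_buf otherwise. */
	const unsigned char *ct = encrypt ? out_buf : in_buf;

	memcpy(iv, ct + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
}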
@@ -380,10 +430,10 @@ static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct dcp_async_ctx *ctx = crypto_ablkcipher_ctx(tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
int ret;
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -464,16 +514,16 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
* but is supported by in-kernel software implementation, we use
* software fallback.
*/
- crypto_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(actx->fallback,
+ crypto_sync_skcipher_clear_flags(actx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(actx->fallback,
tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(actx->fallback, key, len);
+ ret = crypto_sync_skcipher_setkey(actx->fallback, key, len);
if (!ret)
return 0;
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(actx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(actx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
@@ -482,11 +532,10 @@ static int mxs_dcp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
static int mxs_dcp_aes_fallback_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const uint32_t flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -499,7 +548,7 @@ static void mxs_dcp_aes_fallback_exit(struct crypto_tfm *tfm)
{
struct dcp_async_ctx *actx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(actx->fallback);
+ crypto_free_sync_skcipher(actx->fallback);
}
/*
@@ -513,8 +562,6 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
- struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
-
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t digest_phys = 0;
@@ -536,10 +583,23 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
desc->payload = 0;
desc->status = 0;
+ /*
+ * Align the driver with hardware behavior when generating null hashes.
+ */
+ if (rctx->init && rctx->fini && desc->size == 0) {
+ struct hash_alg_common *halg = crypto_hash_alg_common(tfm);
+ const uint8_t *sha_buf =
+ (actx->alg == MXS_DCP_CONTROL1_HASH_SELECT_SHA1) ?
+ sha1_null_hash : sha256_null_hash;
+ memcpy(sdcp->coh->sha_out_buf, sha_buf, halg->digestsize);
+ ret = 0;
+ goto done_run;
+ }
+
/* Set HASH_TERM bit for last transfer block. */
if (rctx->fini) {
- digest_phys = dma_map_single(sdcp->dev, req->result,
- halg->digestsize, DMA_FROM_DEVICE);
+ digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
+ DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}
@@ -547,9 +607,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
ret = mxs_dcp_start_dma(actx);
if (rctx->fini)
- dma_unmap_single(sdcp->dev, digest_phys, halg->digestsize,
+ dma_unmap_single(sdcp->dev, digest_phys, DCP_SHA_PAY_SZ,
DMA_FROM_DEVICE);
+done_run:
dma_unmap_single(sdcp->dev, buf_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
return ret;
@@ -567,6 +628,7 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
const int nents = sg_nents(req->src);
uint8_t *in_buf = sdcp->coh->sha_in_buf;
+ uint8_t *out_buf = sdcp->coh->sha_out_buf;
uint8_t *src_buf;
@@ -621,11 +683,9 @@ static int dcp_sha_req_to_buf(struct crypto_async_request *arq)
actx->fill = 0;
- /* For some reason, the result is flipped. */
- for (i = 0; i < halg->digestsize / 2; i++) {
- swap(req->result[i],
- req->result[halg->digestsize - i - 1]);
- }
+ /* For some reason, the result is flipped. */
+ for (i = 0; i < halg->digestsize; i++)
+ req->result[i] = out_buf[halg->digestsize - i - 1];
}
return 0;
@@ -766,14 +826,32 @@ static int dcp_sha_digest(struct ahash_request *req)
return dcp_sha_finup(req);
}
-static int dcp_sha_noimport(struct ahash_request *req, const void *in)
+static int dcp_sha_import(struct ahash_request *req, const void *in)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx = crypto_ahash_ctx(tfm);
+ const struct dcp_export_state *export = in;
+
+ memset(rctx, 0, sizeof(struct dcp_sha_req_ctx));
+ memset(actx, 0, sizeof(struct dcp_async_ctx));
+ memcpy(rctx, &export->req_ctx, sizeof(struct dcp_sha_req_ctx));
+ memcpy(actx, &export->async_ctx, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
-static int dcp_sha_noexport(struct ahash_request *req, void *out)
+static int dcp_sha_export(struct ahash_request *req, void *out)
{
- return -ENOSYS;
+ struct dcp_sha_req_ctx *rctx_state = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct dcp_async_ctx *actx_state = crypto_ahash_ctx(tfm);
+ struct dcp_export_state *export = out;
+
+ memcpy(&export->req_ctx, rctx_state, sizeof(struct dcp_sha_req_ctx));
+ memcpy(&export->async_ctx, actx_state, sizeof(struct dcp_async_ctx));
+
+ return 0;
}
static int dcp_sha_cra_init(struct crypto_tfm *tfm)
@@ -846,10 +924,11 @@ static struct ahash_alg dcp_sha1_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA1_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha1",
.cra_driver_name = "sha1-dcp",
@@ -872,10 +951,11 @@ static struct ahash_alg dcp_sha256_alg = {
.final = dcp_sha_final,
.finup = dcp_sha_finup,
.digest = dcp_sha_digest,
- .import = dcp_sha_noimport,
- .export = dcp_sha_noexport,
+ .import = dcp_sha_import,
+ .export = dcp_sha_export,
.halg = {
.digestsize = SHA256_DIGEST_SIZE,
+ .statesize = sizeof(struct dcp_export_state),
.base = {
.cra_name = "sha256",
.cra_driver_name = "sha256-dcp",
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 9019f6b67986..a553ffddb11b 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -522,9 +522,9 @@ static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
!!(mode & FLAGS_CBC));
if (req->nbytes < aes_fallback_sz) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags, NULL,
NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -564,11 +564,11 @@ static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
memcpy(ctx->key, key, keylen);
ctx->keylen = keylen;
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
return 0;
@@ -613,11 +613,10 @@ static int omap_aes_crypt_req(struct crypto_engine *engine,
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
const char *name = crypto_tfm_alg_name(tfm);
- const u32 flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_skcipher *blk;
+ struct crypto_sync_skcipher *blk;
- blk = crypto_alloc_skcipher(name, 0, flags);
+ blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(blk))
return PTR_ERR(blk);
@@ -667,7 +666,7 @@ static void omap_aes_cra_exit(struct crypto_tfm *tfm)
struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback)
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
diff --git a/drivers/crypto/omap-aes.h b/drivers/crypto/omap-aes.h
index fc3b46a85809..7e02920ef6f8 100644
--- a/drivers/crypto/omap-aes.h
+++ b/drivers/crypto/omap-aes.h
@@ -101,7 +101,7 @@ struct omap_aes_ctx {
int keylen;
u32 key[AES_KEYSIZE_256 / sizeof(u32)];
u8 nonce[4];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct crypto_skcipher *ctr;
};
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index 321d5e2ac833..a28f1d18fe01 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -171,7 +171,7 @@ struct spacc_ablk_ctx {
* The fallback cipher. If the operation can't be done in hardware,
* fallback to a software version.
*/
- struct crypto_skcipher *sw_cipher;
+ struct crypto_sync_skcipher *sw_cipher;
};
/* AEAD cipher context. */
@@ -799,17 +799,17 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* Set the fallback transform to use the same request flags as
* the hardware transform.
*/
- crypto_skcipher_clear_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->sw_cipher,
+ crypto_sync_skcipher_set_flags(ctx->sw_cipher,
cipher->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- err = crypto_skcipher_setkey(ctx->sw_cipher, key, len);
+ err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);
tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
tfm->crt_flags |=
- crypto_skcipher_get_flags(ctx->sw_cipher) &
+ crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
CRYPTO_TFM_RES_MASK;
if (err)
@@ -914,7 +914,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
struct crypto_tfm *old_tfm =
crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
int err;
/*
@@ -922,7 +922,7 @@ static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
* the ciphering has completed, put the old transform back into the
* request.
*/
- skcipher_request_set_tfm(subreq, ctx->sw_cipher);
+ skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
req->nbytes, req->info);
@@ -1020,9 +1020,8 @@ static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
ctx->generic.flags = spacc_alg->type;
ctx->generic.engine = engine;
if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
- ctx->sw_cipher = crypto_alloc_skcipher(
- alg->cra_name, 0, CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->sw_cipher = crypto_alloc_sync_skcipher(
+ alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->sw_cipher)) {
dev_warn(engine->dev, "failed to allocate fallback for %s\n",
alg->cra_name);
@@ -1041,7 +1040,7 @@ static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->sw_cipher);
+ crypto_free_sync_skcipher(ctx->sw_cipher);
}
static int spacc_ablk_encrypt(struct ablkcipher_request *req)
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 9225d060e18f..f5e960d23a7a 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -198,7 +198,6 @@ static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
pr_err("QAT: Can't find acceleration device\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 1138e41d6805..d2698299896f 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -113,6 +113,13 @@ struct qat_alg_aead_ctx {
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
struct qat_crypto_instance *inst;
+ union {
+ struct sha1_state sha1;
+ struct sha256_state sha256;
+ struct sha512_state sha512;
+ };
+ char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
+ char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
@@ -148,37 +155,32 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
unsigned int auth_keylen)
{
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
- struct sha1_state sha1;
- struct sha256_state sha256;
- struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- char ipad[block_size];
- char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memset(ipad, 0, block_size);
- memset(opad, 0, block_size);
+ memset(ctx->ipad, 0, block_size);
+ memset(ctx->opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, ipad);
+ auth_keylen, ctx->ipad);
if (ret)
return ret;
- memcpy(opad, ipad, digest_size);
+ memcpy(ctx->opad, ctx->ipad, digest_size);
} else {
- memcpy(ipad, auth_key, auth_keylen);
- memcpy(opad, auth_key, auth_keylen);
+ memcpy(ctx->ipad, auth_key, auth_keylen);
+ memcpy(ctx->opad, auth_key, auth_keylen);
}
for (i = 0; i < block_size; i++) {
- char *ipad_ptr = ipad + i;
- char *opad_ptr = opad + i;
+ char *ipad_ptr = ctx->ipad + i;
+ char *opad_ptr = ctx->opad + i;
*ipad_ptr ^= HMAC_IPAD_VALUE;
*opad_ptr ^= HMAC_OPAD_VALUE;
}
@@ -186,7 +188,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, ipad, block_size))
+ if (crypto_shash_update(shash, ctx->ipad, block_size))
return -EFAULT;
hash_state_out = (__be32 *)hash->sha.state1;
@@ -194,22 +196,22 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
@@ -218,7 +220,7 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
if (crypto_shash_init(shash))
return -EFAULT;
- if (crypto_shash_update(shash, opad, block_size))
+ if (crypto_shash_update(shash, ctx->opad, block_size))
return -EFAULT;
offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
@@ -227,28 +229,28 @@ static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
switch (ctx->qat_hash_alg) {
case ICP_QAT_HW_AUTH_ALGO_SHA1:
- if (crypto_shash_export(shash, &sha1))
+ if (crypto_shash_export(shash, &ctx->sha1))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha1.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA256:
- if (crypto_shash_export(shash, &sha256))
+ if (crypto_shash_export(shash, &ctx->sha256))
return -EFAULT;
for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
- *hash_state_out = cpu_to_be32(*(sha256.state + i));
+ *hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
break;
case ICP_QAT_HW_AUTH_ALGO_SHA512:
- if (crypto_shash_export(shash, &sha512))
+ if (crypto_shash_export(shash, &ctx->sha512))
return -EFAULT;
for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
- *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+ *hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
break;
default:
return -EFAULT;
}
- memzero_explicit(ipad, block_size);
- memzero_explicit(opad, block_size);
+ memzero_explicit(ctx->ipad, block_size);
+ memzero_explicit(ctx->opad, block_size);
return 0;
}
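
The ipad/opad buffers that this hunk moves into the ctx (eliminating the variable-length stack arrays) hold the standard RFC 2104 HMAC key pads: the key, zero-extended to the block size, is XORed with 0x36 for the inner hash and 0x5c for the outer one. That step in isolation, as a stand-alone sketch:

#include <string.h>

#define HMAC_IPAD_VALUE	0x36
#define HMAC_OPAD_VALUE	0x5c

/* Derive the HMAC inner/outer pads from a key already <= block_size. */
static void hmac_pads(const unsigned char *key, size_t keylen,
		      unsigned char *ipad, unsigned char *opad,
		      size_t block_size)
{
	size_t i;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	memcpy(ipad, key, keylen);
	memcpy(opad, key, keylen);

	for (i = 0; i < block_size; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}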
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
index ea4d96bf47e8..585e1cab9ae3 100644
--- a/drivers/crypto/qce/ablkcipher.c
+++ b/drivers/crypto/qce/ablkcipher.c
@@ -189,7 +189,7 @@ static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
memcpy(ctx->enc_key, key, keylen);
return 0;
fallback:
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
if (!ret)
ctx->enc_keylen = keylen;
return ret;
@@ -212,9 +212,9 @@ static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
ctx->enc_keylen != AES_KEYSIZE_256) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -245,9 +245,8 @@ static int qce_ablkcipher_init(struct crypto_tfm *tfm)
memset(ctx, 0, sizeof(*ctx));
tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
- ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(tfm), 0,
- CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK);
+ ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(tfm),
+ 0, CRYPTO_ALG_NEED_FALLBACK);
return PTR_ERR_OR_ZERO(ctx->fallback);
}
@@ -255,7 +254,7 @@ static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
{
struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
struct qce_ablkcipher_def {
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
index 2b0278bb6e92..ee055bfe98a0 100644
--- a/drivers/crypto/qce/cipher.h
+++ b/drivers/crypto/qce/cipher.h
@@ -22,7 +22,7 @@
struct qce_cipher_ctx {
u8 enc_key[QCE_MAX_KEY_SIZE];
unsigned int enc_keylen;
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
/**
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index faa282074e5a..0064be0e3941 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -249,8 +249,8 @@ struct s5p_aes_reqctx {
struct s5p_aes_ctx {
struct s5p_aes_dev *dev;
- uint8_t aes_key[AES_MAX_KEY_SIZE];
- uint8_t nonce[CTR_RFC3686_NONCE_SIZE];
+ u8 aes_key[AES_MAX_KEY_SIZE];
+ u8 nonce[CTR_RFC3686_NONCE_SIZE];
int keylen;
};
@@ -475,9 +475,9 @@ static void s5p_sg_done(struct s5p_aes_dev *dev)
}
/* Calls the completion. Cannot be called with dev->lock hold. */
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_aes_complete(struct ablkcipher_request *req, int err)
{
- dev->req->base.complete(&dev->req->base, err);
+ req->base.complete(&req->base, err);
}
static void s5p_unset_outdata(struct s5p_aes_dev *dev)
@@ -491,7 +491,7 @@ static void s5p_unset_indata(struct s5p_aes_dev *dev)
}
static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
- struct scatterlist **dst)
+ struct scatterlist **dst)
{
void *pages;
int len;
@@ -518,46 +518,28 @@ static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
+ return -ENOMEM;
dev->sg_dst = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
- int err;
-
- if (!sg->length) {
- err = -EINVAL;
- goto exit;
- }
+ if (!sg->length)
+ return -EINVAL;
- err = dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE);
- if (!err) {
- err = -ENOMEM;
- goto exit;
- }
+ if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
+ return -ENOMEM;
dev->sg_src = sg;
- err = 0;
-exit:
- return err;
+ return 0;
}
/*
@@ -655,14 +637,14 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
+ struct ablkcipher_request *req;
int err_dma_tx = 0;
int err_dma_rx = 0;
int err_dma_hx = 0;
bool tx_end = false;
bool hx_end = false;
unsigned long flags;
- uint32_t status;
- u32 st_bits;
+ u32 status, st_bits;
int err;
spin_lock_irqsave(&dev->lock, flags);
@@ -727,7 +709,7 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, 0);
+ s5p_aes_complete(dev->req, 0);
/* Device is still busy */
tasklet_schedule(&dev->tasklet);
} else {
@@ -752,11 +734,12 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
error:
s5p_sg_done(dev);
dev->busy = false;
+ req = dev->req;
if (err_dma_hx == 1)
s5p_set_dma_hashdata(dev, dev->hash_sg_iter);
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
hash_irq_end:
/*
@@ -1830,7 +1813,7 @@ static struct ahash_alg algs_sha1_md5_sha256[] = {
};
static void s5p_set_aes(struct s5p_aes_dev *dev,
- const uint8_t *key, const uint8_t *iv,
+ const u8 *key, const u8 *iv, const u8 *ctr,
unsigned int keylen)
{
void __iomem *keystart;
@@ -1838,6 +1821,9 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
if (iv)
memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+ if (ctr)
+ memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr, 0x10);
+
if (keylen == AES_KEYSIZE_256)
keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
else if (keylen == AES_KEYSIZE_192)
@@ -1887,7 +1873,7 @@ static int s5p_set_indata_start(struct s5p_aes_dev *dev,
}
static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
- struct ablkcipher_request *req)
+ struct ablkcipher_request *req)
{
struct scatterlist *sg;
int err;
@@ -1916,11 +1902,12 @@ static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
struct ablkcipher_request *req = dev->req;
- uint32_t aes_control;
+ u32 aes_control;
unsigned long flags;
int err;
- u8 *iv;
+ u8 *iv, *ctr;
+ /* This sets bits [13:12] to 00, which selects the 128-bit counter */
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
@@ -1928,11 +1915,14 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
iv = req->info;
+ ctr = NULL;
} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
- iv = req->info;
+ iv = NULL;
+ ctr = req->info;
} else {
iv = NULL; /* AES_ECB */
+ ctr = NULL;
}
if (dev->ctx->keylen == AES_KEYSIZE_192)
@@ -1964,7 +1954,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
@@ -1983,7 +1973,7 @@ indata_error:
s5p_sg_done(dev);
dev->busy = false;
spin_unlock_irqrestore(&dev->lock, flags);
- s5p_aes_complete(dev, err);
+ s5p_aes_complete(req, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -2024,7 +2014,7 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
err = ablkcipher_enqueue_request(&dev->queue, req);
if (dev->busy) {
spin_unlock_irqrestore(&dev->lock, flags);
- goto exit;
+ return err;
}
dev->busy = true;
@@ -2032,7 +2022,6 @@ static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
tasklet_schedule(&dev->tasklet);
-exit:
return err;
}
@@ -2043,7 +2032,8 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
struct s5p_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct s5p_aes_dev *dev = ctx->dev;
- if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
+ if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE) &&
+ ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
return -EINVAL;
}
@@ -2054,7 +2044,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
}
static int s5p_aes_setkey(struct crypto_ablkcipher *cipher,
- const uint8_t *key, unsigned int keylen)
+ const u8 *key, unsigned int keylen)
{
struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2090,6 +2080,11 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}
+static int s5p_aes_ctr_crypt(struct ablkcipher_request *req)
+{
+ return s5p_aes_crypt(req, FLAGS_AES_CTR);
+}
+
static int s5p_aes_cra_init(struct crypto_tfm *tfm)
{
struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
@@ -2144,6 +2139,28 @@ static struct crypto_alg algs[] = {
.decrypt = s5p_aes_cbc_decrypt,
}
},
+ {
+ .cra_name = "ctr(aes)",
+ .cra_driver_name = "ctr-aes-s5p",
+ .cra_priority = 100,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+ CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct s5p_aes_ctx),
+ .cra_alignmask = 0x0f,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = s5p_aes_cra_init,
+ .cra_u.ablkcipher = {
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ .setkey = s5p_aes_setkey,
+ .encrypt = s5p_aes_ctr_crypt,
+ .decrypt = s5p_aes_ctr_crypt,
+ }
+ },
};
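
Note that the new entry wires both .encrypt and .decrypt to s5p_aes_ctr_crypt: CTR mode turns the block cipher into a keystream generator, and XORing with the same keystream twice restores the input, so encryption and decryption are the same operation. In sketch form:

#include <stddef.h>

/*
 * CTR transform: out = in XOR E_K(counter). Applying it twice with the
 * same key/counter restores the input, so encrypt == decrypt.
 */
static void ctr_xor(unsigned char *out, const unsigned char *in,
		    const unsigned char *keystream, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		out[i] = in[i] ^ keystream[i];
}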
static int s5p_aes_probe(struct platform_device *pdev)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index e7540a5b8197..bbf166a97ad3 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -149,7 +149,7 @@ struct sahara_ctx {
/* AES-specific context */
int keylen;
u8 key[AES_KEYSIZE_128];
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
};
struct sahara_aes_reqctx {
@@ -621,14 +621,14 @@ static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
/*
* The requested key size is not supported by HW, do a fallback.
*/
- crypto_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
- crypto_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
+ crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
+ crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
CRYPTO_TFM_REQ_MASK);
- ret = crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
tfm->base.crt_flags &= ~CRYPTO_TFM_RES_MASK;
- tfm->base.crt_flags |= crypto_skcipher_get_flags(ctx->fallback) &
+ tfm->base.crt_flags |= crypto_sync_skcipher_get_flags(ctx->fallback) &
CRYPTO_TFM_RES_MASK;
return ret;
}
@@ -666,9 +666,9 @@ static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -688,9 +688,9 @@ static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -710,9 +710,9 @@ static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -732,9 +732,9 @@ static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
int err;
if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
- SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);
- skcipher_request_set_tfm(subreq, ctx->fallback);
+ skcipher_request_set_sync_tfm(subreq, ctx->fallback);
skcipher_request_set_callback(subreq, req->base.flags,
NULL, NULL);
skcipher_request_set_crypt(subreq, req->src, req->dst,
@@ -752,8 +752,7 @@ static int sahara_aes_cra_init(struct crypto_tfm *tfm)
const char *name = crypto_tfm_alg_name(tfm);
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- ctx->fallback = crypto_alloc_skcipher(name, 0,
- CRYPTO_ALG_ASYNC |
+ ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("Error allocating fallback algo %s\n", name);
@@ -769,7 +768,7 @@ static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
}
static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
index b71895871be3..c5c5ff82b52e 100644
--- a/drivers/crypto/vmx/aes_cbc.c
+++ b/drivers/crypto/vmx/aes_cbc.c
@@ -32,7 +32,7 @@
#include "aesp8-ppc.h"
struct p8_aes_cbc_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
};
@@ -40,11 +40,11 @@ struct p8_aes_cbc_ctx {
static int p8_aes_cbc_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
@@ -53,7 +53,7 @@ static int p8_aes_cbc_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -66,7 +66,7 @@ static void p8_aes_cbc_exit(struct crypto_tfm *tfm)
struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -86,7 +86,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -100,8 +100,8 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
@@ -139,8 +139,8 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_decrypt(req);
diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c
index cd777c75291d..8a2fe092cb8e 100644
--- a/drivers/crypto/vmx/aes_ctr.c
+++ b/drivers/crypto/vmx/aes_ctr.c
@@ -32,18 +32,18 @@
#include "aesp8-ppc.h"
struct p8_aes_ctr_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
};
static int p8_aes_ctr_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -51,7 +51,7 @@ static int p8_aes_ctr_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -64,7 +64,7 @@ static void p8_aes_ctr_exit(struct crypto_tfm *tfm)
struct p8_aes_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -83,7 +83,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -119,8 +119,8 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = crypto_skcipher_encrypt(req);
diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
index e9954a7d4694..ecd64e5cc5bb 100644
--- a/drivers/crypto/vmx/aes_xts.c
+++ b/drivers/crypto/vmx/aes_xts.c
@@ -33,7 +33,7 @@
#include "aesp8-ppc.h"
struct p8_aes_xts_ctx {
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct aes_key enc_key;
struct aes_key dec_key;
struct aes_key tweak_key;
@@ -42,11 +42,11 @@ struct p8_aes_xts_ctx {
static int p8_aes_xts_init(struct crypto_tfm *tfm)
{
const char *alg = crypto_tfm_alg_name(tfm);
- struct crypto_skcipher *fallback;
+ struct crypto_sync_skcipher *fallback;
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
- fallback = crypto_alloc_skcipher(alg, 0,
- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+ fallback = crypto_alloc_sync_skcipher(alg, 0,
+ CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(fallback)) {
printk(KERN_ERR
"Failed to allocate transformation for '%s': %ld\n",
@@ -54,7 +54,7 @@ static int p8_aes_xts_init(struct crypto_tfm *tfm)
return PTR_ERR(fallback);
}
- crypto_skcipher_set_flags(
+ crypto_sync_skcipher_set_flags(
fallback,
crypto_skcipher_get_flags((struct crypto_skcipher *)tfm));
ctx->fallback = fallback;
@@ -67,7 +67,7 @@ static void p8_aes_xts_exit(struct crypto_tfm *tfm)
struct p8_aes_xts_ctx *ctx = crypto_tfm_ctx(tfm);
if (ctx->fallback) {
- crypto_free_skcipher(ctx->fallback);
+ crypto_free_sync_skcipher(ctx->fallback);
ctx->fallback = NULL;
}
}
@@ -92,7 +92,7 @@ static int p8_aes_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
pagefault_enable();
preempt_enable();
- ret += crypto_skcipher_setkey(ctx->fallback, key, keylen);
+ ret += crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);
return ret;
}
@@ -109,8 +109,8 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm));
if (in_interrupt()) {
- SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
- skcipher_request_set_tfm(req, ctx->fallback);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, ctx->fallback);
+ skcipher_request_set_sync_tfm(req, ctx->fallback);
skcipher_request_set_callback(req, desc->flags, NULL, NULL);
skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index dacf3f42426d..de511db021cc 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -143,7 +143,7 @@ config DMA_JZ4740
config DMA_JZ4780
tristate "JZ4780 DMA support"
- depends on MACH_JZ4780 || COMPILE_TEST
+ depends on MIPS || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
@@ -321,6 +321,17 @@ config LPC18XX_DMAMUX
Enable support for DMA on NXP LPC18xx/43xx platforms
with PL080 and multiplexed DMA request lines.
+config MCF_EDMA
+ tristate "Freescale eDMA engine support, ColdFire mcf5441x SoCs"
+ depends on M5441x || COMPILE_TEST
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Support the Freescale ColdFire eDMA engine, a 64-channel
+ implementation that performs complex data transfers with
+ minimal intervention from the host processor.
+ This module can be found on Freescale ColdFire mcf5441x SoCs.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on ARCH_MMP || ARCH_PXA || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index c91702d88b95..7fcc4d8e336d 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -31,7 +31,8 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
-obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
+obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
+obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o
obj-$(CONFIG_HSU_DMA) += hsu/
obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 75f38d19fcbe..7cbac6e8c113 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1320,7 +1320,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
if (unlikely(!is_slave_direction(direction)))
goto err_out;
- if (sconfig->direction == DMA_MEM_TO_DEV)
+ if (direction == DMA_MEM_TO_DEV)
reg_width = convert_buswidth(sconfig->dst_addr_width);
else
reg_width = convert_buswidth(sconfig->src_addr_width);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf72561667c..4e557684f792 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1600,7 +1600,7 @@ static void at_xdmac_tasklet(unsigned long data)
if (atchan->status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_bh(&atchan->lock);
+ spin_lock(&atchan->lock);
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc,
xfer_node);
@@ -1610,7 +1610,7 @@ static void at_xdmac_tasklet(unsigned long data)
txd = &desc->tx_dma_desc;
at_xdmac_remove_xfer(atchan, desc);
- spin_unlock_bh(&atchan->lock);
+ spin_unlock(&atchan->lock);
if (!at_xdmac_chan_is_cyclic(atchan)) {
dma_cookie_complete(txd);
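
Dropping the _bh variants here is safe because a tasklet already runs in softirq context with bottom halves disabled; plain spin_lock() is sufficient as long as the lock is only shared with other BH-context users, as it is in this tasklet. Condensed:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_tasklet(unsigned long data)
{
	/*
	 * BHs are already disabled in tasklet context, so spin_lock()
	 * suffices; spin_lock_bh() here would be redundant.
	 */
	spin_lock(&demo_lock);
	/* ... touch state shared with other softirq-context users ... */
	spin_unlock(&demo_lock);
}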
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 847f84a41a69..cad55ab80d41 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -778,14 +778,6 @@ static int bcm2835_dma_slave_config(struct dma_chan *chan,
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
- if ((cfg->direction == DMA_DEV_TO_MEM &&
- cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- (cfg->direction == DMA_MEM_TO_DEV &&
- cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
- !is_slave_direction(cfg->direction)) {
- return -EINVAL;
- }
-
c->cfg = *cfg;
return 0;
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index da74fd74636b..eebaba3d9e78 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1306,6 +1306,7 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
+ struct dma_slave_config config;
u32 addr;
u32 ctrl;
@@ -1402,6 +1403,10 @@ static inline struct coh901318_chan *to_coh901318_chan(struct dma_chan *chan)
return container_of(chan, struct coh901318_chan, chan);
}
+static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction);
+
static inline const struct coh901318_params *
cohc_chan_param(struct coh901318_chan *cohc)
{
@@ -2360,6 +2365,8 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (lli == NULL)
goto err_dma_alloc;
+ coh901318_dma_set_runtimeconfig(chan, &cohc->config, direction);
+
/* initiate allocated lli list */
ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len,
cohc->addr,
@@ -2499,7 +2506,8 @@ static const struct burst_table burst_sizes[] = {
};
static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
- struct dma_slave_config *config)
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
dma_addr_t addr;
@@ -2509,11 +2517,11 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_MEM_TO_DEV) {
+ } else if (direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -2579,6 +2587,16 @@ static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
return 0;
}
+static int coh901318_dma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct coh901318_chan *cohc = to_coh901318_chan(chan);
+
+ memcpy(&cohc->config, config, sizeof(*config));
+
+ return 0;
+}
+
static void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
struct coh901318_base *base)
{
@@ -2684,7 +2702,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
base->dma_slave.device_tx_status = coh901318_tx_status;
base->dma_slave.device_issue_pending = coh901318_issue_pending;
- base->dma_slave.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_slave.device_config = coh901318_dma_slave_config;
base->dma_slave.device_pause = coh901318_pause;
base->dma_slave.device_resume = coh901318_resume;
base->dma_slave.device_terminate_all = coh901318_terminate_all;
@@ -2707,7 +2725,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
base->dma_memcpy.device_tx_status = coh901318_tx_status;
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
- base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig;
+ base->dma_memcpy.device_config = coh901318_dma_slave_config;
base->dma_memcpy.device_pause = coh901318_pause;
base->dma_memcpy.device_resume = coh901318_resume;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
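
This driver and dma-jz4740 below get the same treatment: the device_config callback now only caches the dma_slave_config, and the cached copy is applied in the prep callback, where the real transfer direction arrives as an argument — the config->direction field is deprecated. The pattern distilled (names hypothetical):

#include <linux/dmaengine.h>

/* Hypothetical channel wrapper caching the most recent slave config. */
struct my_chan {
	struct dma_chan chan;
	struct dma_slave_config config;
};

static int my_slave_config(struct dma_chan *c, struct dma_slave_config *cfg)
{
	struct my_chan *mc = container_of(c, struct my_chan, chan);

	mc->config = *cfg;	/* cache only; hardware untouched here */
	return 0;
}

/*
 * The prep callback (device_prep_slave_sg etc.) later selects the src_*
 * or dst_* half of mc->config based on its own direction argument.
 */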
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index afd5e10f8927..5253e3c0dc04 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -113,6 +113,7 @@ struct jz4740_dma_desc {
struct jz4740_dmaengine_chan {
struct virt_dma_chan vchan;
unsigned int id;
+ struct dma_slave_config config;
dma_addr_t fifo_addr;
unsigned int transfer_shift;
@@ -203,8 +204,9 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst)
return JZ4740_DMA_TRANSFER_SIZE_32BYTE;
}
-static int jz4740_dma_slave_config(struct dma_chan *c,
- struct dma_slave_config *config)
+static int jz4740_dma_slave_config_write(struct dma_chan *c,
+ struct dma_slave_config *config,
+ enum dma_transfer_direction direction)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan);
@@ -214,7 +216,7 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
enum jz4740_dma_flags flags;
uint32_t cmd;
- switch (config->direction) {
+ switch (direction) {
case DMA_MEM_TO_DEV:
flags = JZ4740_DMA_SRC_AUTOINC;
transfer_size = jz4740_dma_maxburst(config->dst_maxburst);
@@ -265,6 +267,15 @@ static int jz4740_dma_slave_config(struct dma_chan *c,
return 0;
}
+static int jz4740_dma_slave_config(struct dma_chan *c,
+ struct dma_slave_config *config)
+{
+ struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
+
+ memcpy(&chan->config, config, sizeof(*config));
+ return 0;
+}
+
static int jz4740_dma_terminate_all(struct dma_chan *c)
{
struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
@@ -407,6 +418,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
desc->direction = direction;
desc->cyclic = false;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
@@ -438,6 +451,8 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
desc->direction = direction;
desc->cyclic = true;
+ jz4740_dma_slave_config_write(c, &chan->config, direction);
+
return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
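
The jz4740 conversion above is the same cache-then-apply split, wired into both the slave_sg and cyclic prep paths. From the consumer side the only visible rule is that the direction travels with the prep call, so a single cached config can describe both directions. A hedged usage sketch (chan, sgl, sg_len and fifo_phys are assumed to exist in the caller):

	struct dma_slave_config cfg = {
		.src_addr	= fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;

	dmaengine_slave_config(chan, &cfg);	/* only cached on these drivers */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
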
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 85820a2d69d4..a8b6225faa12 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -23,33 +24,35 @@
#include "dmaengine.h"
#include "virt-dma.h"
-#define JZ_DMA_NR_CHANNELS 32
-
/* Global registers. */
-#define JZ_DMA_REG_DMAC 0x1000
-#define JZ_DMA_REG_DIRQP 0x1004
-#define JZ_DMA_REG_DDR 0x1008
-#define JZ_DMA_REG_DDRS 0x100c
-#define JZ_DMA_REG_DMACP 0x101c
-#define JZ_DMA_REG_DSIRQP 0x1020
-#define JZ_DMA_REG_DSIRQM 0x1024
-#define JZ_DMA_REG_DCIRQP 0x1028
-#define JZ_DMA_REG_DCIRQM 0x102c
+#define JZ_DMA_REG_DMAC 0x00
+#define JZ_DMA_REG_DIRQP 0x04
+#define JZ_DMA_REG_DDR 0x08
+#define JZ_DMA_REG_DDRS 0x0c
+#define JZ_DMA_REG_DCKE 0x10
+#define JZ_DMA_REG_DCKES 0x14
+#define JZ_DMA_REG_DCKEC 0x18
+#define JZ_DMA_REG_DMACP 0x1c
+#define JZ_DMA_REG_DSIRQP 0x20
+#define JZ_DMA_REG_DSIRQM 0x24
+#define JZ_DMA_REG_DCIRQP 0x28
+#define JZ_DMA_REG_DCIRQM 0x2c
/* Per-channel registers. */
#define JZ_DMA_REG_CHAN(n) (n * 0x20)
-#define JZ_DMA_REG_DSA(n) (0x00 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTA(n) (0x04 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DTC(n) (0x08 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DRT(n) (0x0c + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCS(n) (0x10 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DCM(n) (0x14 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DDA(n) (0x18 + JZ_DMA_REG_CHAN(n))
-#define JZ_DMA_REG_DSD(n) (0x1c + JZ_DMA_REG_CHAN(n))
+#define JZ_DMA_REG_DSA 0x00
+#define JZ_DMA_REG_DTA 0x04
+#define JZ_DMA_REG_DTC 0x08
+#define JZ_DMA_REG_DRT 0x0c
+#define JZ_DMA_REG_DCS 0x10
+#define JZ_DMA_REG_DCM 0x14
+#define JZ_DMA_REG_DDA 0x18
+#define JZ_DMA_REG_DSD 0x1c
#define JZ_DMA_DMAC_DMAE BIT(0)
#define JZ_DMA_DMAC_AR BIT(2)
#define JZ_DMA_DMAC_HLT BIT(3)
+#define JZ_DMA_DMAC_FAIC BIT(27)
#define JZ_DMA_DMAC_FMSC BIT(31)
#define JZ_DMA_DRT_AUTO 0x8
@@ -86,6 +89,14 @@
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
+#define JZ4780_DMA_CTRL_OFFSET 0x1000
+
+/* macros for use with jz4780_dma_soc_data.flags */
+#define JZ_SOC_DATA_ALLOW_LEGACY_DT BIT(0)
+#define JZ_SOC_DATA_PROGRAMMABLE_DMA BIT(1)
+#define JZ_SOC_DATA_PER_CHAN_PM BIT(2)
+#define JZ_SOC_DATA_NO_DCKES_DCKEC BIT(3)
+
/**
* struct jz4780_dma_hwdesc - descriptor structure read by the DMA controller.
* @dcm: value for the DCM (channel command) register
@@ -94,17 +105,12 @@
* @dtc: transfer count (number of blocks of the transfer size specified in DCM
* to transfer) in the low 24 bits, offset of the next descriptor from the
* descriptor base address in the upper 8 bits.
- * @sd: target/source stride difference (in stride transfer mode).
- * @drt: request type
*/
struct jz4780_dma_hwdesc {
uint32_t dcm;
uint32_t dsa;
uint32_t dta;
uint32_t dtc;
- uint32_t sd;
- uint32_t drt;
- uint32_t reserved[2];
};
/* Size of allocations for hardware descriptor blocks. */
@@ -135,14 +141,22 @@ struct jz4780_dma_chan {
unsigned int curr_hwdesc;
};
+struct jz4780_dma_soc_data {
+ unsigned int nb_channels;
+ unsigned int transfer_ord_max;
+ unsigned long flags;
+};
+
struct jz4780_dma_dev {
struct dma_device dma_device;
- void __iomem *base;
+ void __iomem *chn_base;
+ void __iomem *ctrl_base;
struct clk *clk;
unsigned int irq;
+ const struct jz4780_dma_soc_data *soc_data;
uint32_t chan_reserved;
- struct jz4780_dma_chan chan[JZ_DMA_NR_CHANNELS];
+ struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
@@ -169,16 +183,51 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
-static inline uint32_t jz4780_dma_readl(struct jz4780_dma_dev *jzdma,
+static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg)
+{
+ return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
+ unsigned int chn, unsigned int reg, uint32_t val)
+{
+ writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
+}
+
+static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
- return readl(jzdma->base + reg);
+ return readl(jzdma->ctrl_base + reg);
}
-static inline void jz4780_dma_writel(struct jz4780_dma_dev *jzdma,
+static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
unsigned int reg, uint32_t val)
{
- writel(val, jzdma->base + reg);
+ writel(val, jzdma->ctrl_base + reg);
+}
+
+static inline void jz4780_dma_chan_enable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) {
+ unsigned int reg;
+
+ if (jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC)
+ reg = JZ_DMA_REG_DCKE;
+ else
+ reg = JZ_DMA_REG_DCKES;
+
+ jz4780_dma_ctrl_writel(jzdma, reg, BIT(chn));
+ }
+}
+
+static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
+ unsigned int chn)
+{
+ if ((jzdma->soc_data->flags & JZ_SOC_DATA_PER_CHAN_PM) &&
+ !(jzdma->soc_data->flags & JZ_SOC_DATA_NO_DCKES_DCKEC))
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
@@ -215,8 +264,10 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
+static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+ unsigned long val, uint32_t *shift)
{
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
/*
@@ -228,8 +279,8 @@ static uint32_t jz4780_dma_transfer_size(unsigned long val, uint32_t *shift)
*/
if (ord == 3)
ord = 2;
- else if (ord > 7)
- ord = 7;
+ else if (ord > jzdma->soc_data->transfer_ord_max)
+ ord = jzdma->soc_data->transfer_ord_max;
*shift = ord;
@@ -262,7 +313,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_SAI;
desc->dsa = addr;
desc->dta = config->dst_addr;
- desc->drt = jzchan->transfer_type;
width = config->dst_addr_width;
maxburst = config->dst_maxburst;
@@ -270,7 +320,6 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
desc->dcm = JZ_DMA_DCM_DAI;
desc->dsa = config->src_addr;
desc->dta = addr;
- desc->drt = jzchan->transfer_type;
width = config->src_addr_width;
maxburst = config->src_maxburst;
@@ -283,7 +332,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
* divisible by the transfer size, and we must not use more than the
* maximum burst specified by the user.
*/
- tsz = jz4780_dma_transfer_size(addr | len | (width * maxburst),
+ tsz = jz4780_dma_transfer_size(jzchan, addr | len | (width * maxburst),
&jzchan->transfer_shift);
switch (width) {
@@ -412,12 +461,13 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
if (!desc)
return NULL;
- tsz = jz4780_dma_transfer_size(dest | src | len,
+ tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
+ jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
- desc->desc[0].drt = JZ_DMA_DRT_AUTO;
desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI |
tsz << JZ_DMA_DCM_TSZ_SHIFT |
JZ_DMA_WIDTH_32_BIT << JZ_DMA_DCM_SP_SHIFT |
@@ -472,18 +522,34 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
(jzchan->curr_hwdesc + 1) % jzchan->desc->count;
}
- /* Use 8-word descriptors. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), JZ_DMA_DCS_DES8);
+ /* Enable the channel's clock. */
+ jz4780_dma_chan_enable(jzdma, jzchan->id);
+
+ /* Use 4-word descriptors. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
+
+ /* Set transfer type. */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
+ jzchan->transfer_type);
+
+ /*
+ * Set the transfer count. This is redundant for a descriptor-driven
+ * transfer. However, there can be a delay between the transfer start
+ * time and when the DTCn register contains the new transfer count.
+ * Setting it explicitly ensures the residue is computed correctly at
+ * all times.
+ */
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DTC,
+ jzchan->desc->desc[jzchan->curr_hwdesc].dtc);
/* Write descriptor address and initiate descriptor fetch. */
desc_phys = jzchan->desc->desc_phys +
(jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDA(jzchan->id), desc_phys);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DDA, desc_phys);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DDRS, BIT(jzchan->id));
/* Enable the channel. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id),
- JZ_DMA_DCS_DES8 | JZ_DMA_DCS_CTE);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS,
+ JZ_DMA_DCS_CTE);
}
static void jz4780_dma_issue_pending(struct dma_chan *chan)
@@ -509,12 +575,14 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&jzchan->vchan.lock, flags);
/* Clear the DMA status and stop the transfer. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (jzchan->desc) {
vchan_terminate_vdesc(&jzchan->desc->vdesc);
jzchan->desc = NULL;
}
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
+
vchan_get_all_descriptors(&jzchan->vchan, &head);
spin_unlock_irqrestore(&jzchan->vchan.lock, flags);
@@ -526,8 +594,10 @@ static int jz4780_dma_terminate_all(struct dma_chan *chan)
static void jz4780_dma_synchronize(struct dma_chan *chan)
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
+ struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
vchan_synchronize(&jzchan->vchan);
+ jz4780_dma_chan_disable(jzdma, jzchan->id);
}
static int jz4780_dma_config(struct dma_chan *chan,
@@ -549,21 +619,17 @@ static size_t jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan,
struct jz4780_dma_desc *desc, unsigned int next_sg)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
- unsigned int residue, count;
+ unsigned int count = 0;
unsigned int i;
- residue = 0;
-
for (i = next_sg; i < desc->count; i++)
- residue += desc->desc[i].dtc << jzchan->transfer_shift;
+ count += desc->desc[i].dtc & GENMASK(23, 0);
- if (next_sg != 0) {
- count = jz4780_dma_readl(jzdma,
- JZ_DMA_REG_DTC(jzchan->id));
- residue += count << jzchan->transfer_shift;
- }
+ if (next_sg != 0)
+ count += jz4780_dma_chn_readl(jzdma, jzchan->id,
+ JZ_DMA_REG_DTC);
- return residue;
+ return count << jzchan->transfer_shift;
}
static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
@@ -573,6 +639,7 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
+ unsigned long residue = 0;
status = dma_cookie_status(chan, cookie, txstate);
if ((status == DMA_COMPLETE) || (txstate == NULL))
@@ -583,13 +650,13 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
vdesc = vchan_find_desc(&jzchan->vchan, cookie);
if (vdesc) {
/* On the issued list, so hasn't been processed yet */
- txstate->residue = jz4780_dma_desc_residue(jzchan,
+ residue = jz4780_dma_desc_residue(jzchan,
to_jz4780_dma_desc(vdesc), 0);
} else if (cookie == jzchan->desc->vdesc.tx.cookie) {
- txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
- (jzchan->curr_hwdesc + 1) % jzchan->desc->count);
- } else
- txstate->residue = 0;
+ residue = jz4780_dma_desc_residue(jzchan, jzchan->desc,
+ jzchan->curr_hwdesc + 1);
+ }
+ dma_set_residue(txstate, residue);
if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc
&& jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT))
@@ -606,8 +673,8 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
spin_lock(&jzchan->vchan.lock);
- dcs = jz4780_dma_readl(jzdma, JZ_DMA_REG_DCS(jzchan->id));
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DCS(jzchan->id), 0);
+ dcs = jz4780_dma_chn_readl(jzdma, jzchan->id, JZ_DMA_REG_DCS);
+ jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DCS, 0);
if (dcs & JZ_DMA_DCS_AR) {
dev_warn(&jzchan->vchan.chan.dev->device,
@@ -646,9 +713,9 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
uint32_t pending, dmac;
int i;
- pending = jz4780_dma_readl(jzdma, JZ_DMA_REG_DIRQP);
+ pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
if (!(pending & (1<<i)))
continue;
@@ -656,12 +723,12 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
}
/* Clear halt and address error status of all channels. */
- dmac = jz4780_dma_readl(jzdma, JZ_DMA_REG_DMAC);
+ dmac = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DMAC);
dmac &= ~(JZ_DMA_DMAC_HLT | JZ_DMA_DMAC_AR);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
/* Clear interrupt pending status. */
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
return IRQ_HANDLED;
}
@@ -728,7 +795,7 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
data.channel = dma_spec->args[1];
if (data.channel > -1) {
- if (data.channel >= JZ_DMA_NR_CHANNELS) {
+ if (data.channel >= jzdma->soc_data->nb_channels) {
dev_err(jzdma->dma_device.dev,
"device requested non-existent channel %u\n",
data.channel);
@@ -755,16 +822,29 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
static int jz4780_dma_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct jz4780_dma_soc_data *soc_data;
struct jz4780_dma_dev *jzdma;
struct jz4780_dma_chan *jzchan;
struct dma_device *dd;
struct resource *res;
int i, ret;
- jzdma = devm_kzalloc(dev, sizeof(*jzdma), GFP_KERNEL);
+ if (!dev->of_node) {
+ dev_err(dev, "This driver must be probed from devicetree\n");
+ return -EINVAL;
+ }
+
+ soc_data = device_get_match_data(dev);
+ if (!soc_data)
+ return -EINVAL;
+
+ jzdma = devm_kzalloc(dev, sizeof(*jzdma)
+ + sizeof(*jzdma->chan) * soc_data->nb_channels,
+ GFP_KERNEL);
if (!jzdma)
return -ENOMEM;
+ jzdma->soc_data = soc_data;
platform_set_drvdata(pdev, jzdma);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -773,9 +853,26 @@ static int jz4780_dma_probe(struct platform_device *pdev)
return -EINVAL;
}
- jzdma->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(jzdma->base))
- return PTR_ERR(jzdma->base);
+ jzdma->chn_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->chn_base))
+ return PTR_ERR(jzdma->chn_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ jzdma->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(jzdma->ctrl_base))
+ return PTR_ERR(jzdma->ctrl_base);
+ } else if (soc_data->flags & JZ_SOC_DATA_ALLOW_LEGACY_DT) {
+ /*
+ * On JZ4780, if the second memory resource was not supplied,
+ * assume we're using an old devicetree, and calculate the
+ * offset to the control registers.
+ */
+ jzdma->ctrl_base = jzdma->chn_base + JZ4780_DMA_CTRL_OFFSET;
+ } else {
+ dev_err(dev, "failed to get I/O memory\n");
+ return -EINVAL;
+ }
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
@@ -833,13 +930,15 @@ static int jz4780_dma_probe(struct platform_device *pdev)
* Also set the FMSC bit - it increases MSC performance, so it makes
* little sense not to enable it.
*/
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMAC,
- JZ_DMA_DMAC_DMAE | JZ_DMA_DMAC_FMSC);
- jz4780_dma_writel(jzdma, JZ_DMA_REG_DMACP, 0);
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, JZ_DMA_DMAC_DMAE |
+ JZ_DMA_DMAC_FAIC | JZ_DMA_DMAC_FMSC);
+
+ if (soc_data->flags & JZ_SOC_DATA_PROGRAMMABLE_DMA)
+ jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMACP, 0);
INIT_LIST_HEAD(&dd->channels);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++) {
+ for (i = 0; i < soc_data->nb_channels; i++) {
jzchan = &jzdma->chan[i];
jzchan->id = i;
@@ -847,7 +946,7 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret) {
dev_err(dev, "failed to register device\n");
goto err_disable_clk;
@@ -858,15 +957,12 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzdma);
if (ret) {
dev_err(dev, "failed to register OF DMA controller\n");
- goto err_unregister_dev;
+ goto err_disable_clk;
}
dev_info(dev, "JZ4780 DMA controller initialised\n");
return 0;
-err_unregister_dev:
- dma_async_device_unregister(dd);
-
err_disable_clk:
clk_disable_unprepare(jzdma->clk);
@@ -884,15 +980,40 @@ static int jz4780_dma_remove(struct platform_device *pdev)
free_irq(jzdma->irq, jzdma);
- for (i = 0; i < JZ_DMA_NR_CHANNELS; i++)
+ for (i = 0; i < jzdma->soc_data->nb_channels; i++)
tasklet_kill(&jzdma->chan[i].vchan.task);
- dma_async_device_unregister(&jzdma->dma_device);
return 0;
}
+static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+};
+
+static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 5,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
+ .nb_channels = 6,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4780_dma_soc_data = {
+ .nb_channels = 32,
+ .transfer_ord_max = 7,
+ .flags = JZ_SOC_DATA_ALLOW_LEGACY_DT | JZ_SOC_DATA_PROGRAMMABLE_DMA,
+};
+
static const struct of_device_id jz4780_dma_dt_match[] = {
- { .compatible = "ingenic,jz4780-dma", .data = NULL },
+ { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
+ { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
+ { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
+ { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{},
};
MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match);
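
Two jz4780 details above are easy to miss. First, the hwdesc kernel-doc notes that dtc packs the transfer count into bits 23:0 and the next-descriptor offset into bits 31:24, which is why the reworked residue loop masks with GENMASK(23, 0) before accumulating, and converts blocks to bytes only once at the end. A worked example under assumed values:

/* Assume 4-byte transfer units, i.e. transfer_shift = 2.
 * dtc = 0x05000010 -> next-descriptor offset 0x05 (bits 31:24),
 *                     transfer count 0x10 (bits 23:0).
 * residue contribution = (0x05000010 & GENMASK(23, 0)) << 2
 *                      = 0x10 << 2 = 64 bytes
 */

Second, the new per-SoC jz4780_dma_soc_data sizes the flexible chan[] array, so the probe hunk must (and does) account for nb_channels in its devm_kzalloc() of the device structure.
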
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index c4eb55e3011c..b2ac1d2c5b86 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -934,7 +934,7 @@ static int dw_probe(struct platform_device *pdev)
pm_runtime_put(chip->dev);
- ret = dma_async_device_register(&dw->dma);
+ ret = dmaenginem_async_device_register(&dw->dma);
if (ret)
goto err_pm_disable;
@@ -977,8 +977,6 @@ static int dw_remove(struct platform_device *pdev)
tasklet_kill(&chan->vc.task);
}
- dma_async_device_unregister(&dw->dma);
-
return 0;
}
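
Here, and in the jz4780 probe above, dma_async_device_register() is replaced by dmaenginem_async_device_register(), the device-managed variant: the dmaengine core unregisters the device automatically through devres when the driver unbinds. That is why the explicit dma_async_device_unregister() calls disappear from both the remove paths and the probe error unwinding. The resulting probe shape, condensed from the hunk:

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;	/* no dmaengine cleanup needed later */
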
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index f43e6dafe446..d0c3e50b39fb 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -886,12 +886,7 @@ static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
*/
u32 s = dw->pdata->is_idma32 ? 1 : 2;
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(sconfig->direction))
- return -EINVAL;
-
memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));
- dwc->direction = sconfig->direction;
sc->src_maxburst = sc->src_maxburst > 1 ? fls(sc->src_maxburst) - s : 0;
sc->dst_maxburst = sc->dst_maxburst > 1 ? fls(sc->dst_maxburst) - s : 0;
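
The dw/core hunk drops the direction check (and the cached dwc->direction) from dwc_config() for the same reason as the drivers above: the direction is now supplied per transfer. Prep callbacks can validate it with the dmaengine helper, which at the time of this series is defined in include/linux/dmaengine.h as:

static inline bool is_slave_direction(enum dma_transfer_direction direction)
{
	return (direction == DMA_MEM_TO_DEV) || (direction == DMA_DEV_TO_MEM);
}
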
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index f62dd0944908..f01b2c173fa6 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -284,6 +284,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
#ifdef CONFIG_ACPI
static const struct acpi_device_id dw_dma_acpi_id_table[] = {
{ "INTL9C60", 0 },
+ { "80862286", 0 },
+ { "808622C0", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table);
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index a15592383d4e..f674eb5fbbef 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -109,6 +109,9 @@
#define DMA_MAX_CHAN_DESCRIPTORS 32
struct ep93xx_dma_engine;
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config);
/**
* struct ep93xx_dma_desc - EP93xx specific transaction descriptor
@@ -180,6 +183,7 @@ struct ep93xx_dma_chan {
struct list_head free_list;
u32 runtime_addr;
u32 runtime_ctrl;
+ struct dma_slave_config slave_config;
};
/**
@@ -1051,6 +1055,8 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
first = NULL;
for_each_sg(sgl, sg, sg_len, i) {
size_t len = sg_dma_len(sg);
@@ -1136,6 +1142,8 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
return NULL;
}
+ ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);
+
/* Split the buffer into period size chunks */
first = NULL;
for (offset = 0; offset < buf_len; offset += period_len) {
@@ -1227,6 +1235,17 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
struct dma_slave_config *config)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
+
+ memcpy(&edmac->slave_config, config, sizeof(*config));
+
+ return 0;
+}
+
+static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *config)
+{
+ struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
enum dma_slave_buswidth width;
unsigned long flags;
u32 addr, ctrl;
@@ -1234,7 +1253,7 @@ static int ep93xx_dma_slave_config(struct dma_chan *chan,
if (!edmac->edma->m2m)
return -EINVAL;
- switch (config->direction) {
+ switch (dir) {
case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
new file mode 100644
index 000000000000..8876c4c1bb2c
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/dmapool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CR 0x00
+#define EDMA_ES 0x04
+#define EDMA_ERQ 0x0C
+#define EDMA_EEI 0x14
+#define EDMA_SERQ 0x1B
+#define EDMA_CERQ 0x1A
+#define EDMA_SEEI 0x19
+#define EDMA_CEEI 0x18
+#define EDMA_CINT 0x1F
+#define EDMA_CERR 0x1E
+#define EDMA_SSRT 0x1D
+#define EDMA_CDNE 0x1C
+#define EDMA_INTR 0x24
+#define EDMA_ERR 0x2C
+
+#define EDMA64_ERQH 0x08
+#define EDMA64_EEIH 0x10
+#define EDMA64_SERQ 0x18
+#define EDMA64_CERQ 0x19
+#define EDMA64_SEEI 0x1a
+#define EDMA64_CEEI 0x1b
+#define EDMA64_CINT 0x1c
+#define EDMA64_CERR 0x1d
+#define EDMA64_SSRT 0x1e
+#define EDMA64_CDNE 0x1f
+#define EDMA64_INTH 0x20
+#define EDMA64_INTL 0x24
+#define EDMA64_ERRH 0x28
+#define EDMA64_ERRL 0x2c
+
+#define EDMA_TCD 0x1000
+
+static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
+ edma_writeb(fsl_chan->edma, ch, regs->serq);
+ } else {
+ /* ColdFire is big-endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
+ iowrite8(ch, regs->serq);
+ }
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
+{
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ if (fsl_chan->edma->version == v1) {
+ edma_writeb(fsl_chan->edma, ch, regs->cerq);
+ edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
+ } else {
+ /* ColdFire is big-endian and natively accesses
+ * big-endian I/O peripherals.
+ */
+ iowrite8(ch, regs->cerq);
+ iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
+
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable)
+{
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ void __iomem *muxaddr;
+ unsigned int chans_per_mux, ch_off;
+
+ chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
+ ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
+ muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+ slot = EDMAMUX_CHCFG_SOURCE(slot);
+
+ if (enable)
+ iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
+ else
+ iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
+
+static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
+{
+ switch (addr_width) {
+ case 1:
+ return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
+ case 2:
+ return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
+ case 4:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ case 8:
+ return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
+ default:
+ return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
+ }
+}
+
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = to_fsl_edma_desc(vdesc);
+ for (i = 0; i < fsl_desc->n_tcds; i++)
+ dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
+
+int fsl_edma_terminate_all(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->edesc = NULL;
+ fsl_chan->idle = true;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
+
+int fsl_edma_pause(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_disable_request(fsl_chan);
+ fsl_chan->status = DMA_PAUSED;
+ fsl_chan->idle = true;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_pause);
+
+int fsl_edma_resume(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ if (fsl_chan->edesc) {
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+ }
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_resume);
+
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
+
+static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
+ struct virt_dma_desc *vdesc, bool in_progress)
+{
+ struct fsl_edma_desc *edesc = fsl_chan->edesc;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+ enum dma_transfer_direction dir = edesc->dirn;
+ dma_addr_t cur_addr, dma_addr;
+ size_t len, size;
+ int i;
+
+ /* calculate the total size in this desc */
+ for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
+ len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+
+ if (!in_progress)
+ return len;
+
+ if (dir == DMA_MEM_TO_DEV)
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
+ else
+ cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
+
+ /* figure out which TCDs have finished and calculate the residue */
+ for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
+ size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+ * le16_to_cpu(edesc->tcd[i].vtcd->biter);
+ if (dir == DMA_MEM_TO_DEV)
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
+ else
+ dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
+
+ len -= size;
+ if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
+ len += dma_addr + size - cur_addr;
+ break;
+ }
+ }
+
+ return len;
+}
+
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct virt_dma_desc *vdesc;
+ enum dma_status status;
+ unsigned long flags;
+
+ status = dma_cookie_status(chan, cookie, txstate);
+ if (status == DMA_COMPLETE)
+ return status;
+
+ if (!txstate)
+ return fsl_chan->status;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
+ if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, true);
+ else if (vdesc)
+ txstate->residue =
+ fsl_edma_desc_residue(fsl_chan, vdesc, false);
+ else
+ txstate->residue = 0;
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ return fsl_chan->status;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
+
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+ struct fsl_edma_hw_tcd *tcd)
+{
+ struct fsl_edma_engine *edma = fsl_chan->edma;
+ struct edma_regs *regs = &fsl_chan->edma->regs;
+ u32 ch = fsl_chan->vchan.chan.chan_id;
+
+ /*
+ * TCD parameters are stored in struct fsl_edma_hw_tcd in little-
+ * endian format. However, the TCD registers must be loaded in big-
+ * or little-endian order, matching the endianness of the eDMA
+ * engine model.
+ */
+ edma_writew(edma, 0, &regs->tcd[ch].csr);
+ edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
+ edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
+
+ edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
+ edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+
+ edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
+ edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+
+ edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
+ edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
+ edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+
+ edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ &regs->tcd[ch].dlast_sga);
+
+ edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+ u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+ u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+ bool disable_req, bool enable_sg)
+{
+ u16 csr = 0;
+
+ /*
+ * eDMA hardware SGs require the TCDs to be stored in little
+ * endian format irrespective of the register endian model.
+ * So we store the values in little-endian in memory and let
+ * fsl_edma_set_tcd_regs do the swap when loading the registers.
+ */
+ tcd->saddr = cpu_to_le32(src);
+ tcd->daddr = cpu_to_le32(dst);
+
+ tcd->attr = cpu_to_le16(attr);
+
+ tcd->soff = cpu_to_le16(soff);
+
+ tcd->nbytes = cpu_to_le32(nbytes);
+ tcd->slast = cpu_to_le32(slast);
+
+ tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+ tcd->doff = cpu_to_le16(doff);
+
+ tcd->dlast_sga = cpu_to_le32(dlast_sga);
+
+ tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
+ if (major_int)
+ csr |= EDMA_TCD_CSR_INT_MAJOR;
+
+ if (disable_req)
+ csr |= EDMA_TCD_CSR_D_REQ;
+
+ if (enable_sg)
+ csr |= EDMA_TCD_CSR_E_SG;
+
+ tcd->csr = cpu_to_le16(csr);
+}
+
+static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
+ int sg_len)
+{
+ struct fsl_edma_desc *fsl_desc;
+ int i;
+
+ fsl_desc = kzalloc(sizeof(*fsl_desc) +
+ sizeof(struct fsl_edma_sw_tcd) *
+ sg_len, GFP_NOWAIT);
+ if (!fsl_desc)
+ return NULL;
+
+ fsl_desc->echan = fsl_chan;
+ fsl_desc->n_tcds = sg_len;
+ for (i = 0; i < sg_len; i++) {
+ fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
+ GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
+ if (!fsl_desc->tcd[i].vtcd)
+ goto err;
+ }
+ return fsl_desc;
+
+err:
+ while (--i >= 0)
+ dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
+ fsl_desc->tcd[i].ptcd);
+ kfree(fsl_desc);
+ return NULL;
+}
+
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ dma_addr_t dma_buf_next;
+ int sg_len, i;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ sg_len = buf_len / period_len;
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = true;
+ fsl_desc->dirn = direction;
+
+ dma_buf_next = dma_addr;
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ iter = period_len / nbytes;
+
+ for (i = 0; i < sg_len; i++) {
+ if (dma_buf_next >= dma_addr + buf_len)
+ dma_buf_next = dma_addr;
+
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = dma_buf_next;
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = dma_buf_next;
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+ fsl_chan->attr, soff, nbytes, 0, iter,
+ iter, doff, last_sg, true, false, true);
+ dma_buf_next += period_len;
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
+
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ struct fsl_edma_desc *fsl_desc;
+ struct scatterlist *sg;
+ u32 src_addr, dst_addr, last_sg, nbytes;
+ u16 soff, doff, iter;
+ int i;
+
+ if (!is_slave_direction(direction))
+ return NULL;
+
+ fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
+ if (!fsl_desc)
+ return NULL;
+ fsl_desc->iscyclic = false;
+ fsl_desc->dirn = direction;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
+ nbytes = fsl_chan->cfg.dst_addr_width *
+ fsl_chan->cfg.dst_maxburst;
+ } else {
+ fsl_chan->attr =
+ fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
+ nbytes = fsl_chan->cfg.src_addr_width *
+ fsl_chan->cfg.src_maxburst;
+ }
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ /* get next sg's physical address */
+ last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
+
+ if (direction == DMA_MEM_TO_DEV) {
+ src_addr = sg_dma_address(sg);
+ dst_addr = fsl_chan->cfg.dst_addr;
+ soff = fsl_chan->cfg.dst_addr_width;
+ doff = 0;
+ } else {
+ src_addr = fsl_chan->cfg.src_addr;
+ dst_addr = sg_dma_address(sg);
+ soff = 0;
+ doff = fsl_chan->cfg.src_addr_width;
+ }
+
+ iter = sg_dma_len(sg) / nbytes;
+ if (i < sg_len - 1) {
+ last_sg = fsl_desc->tcd[(i + 1)].ptcd;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ false, false, true);
+ } else {
+ last_sg = 0;
+ fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+ dst_addr, fsl_chan->attr, soff,
+ nbytes, 0, iter, iter, doff, last_sg,
+ true, true, false);
+ }
+ }
+
+ return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
+
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
+{
+ struct virt_dma_desc *vdesc;
+
+ vdesc = vchan_next_desc(&fsl_chan->vchan);
+ if (!vdesc)
+ return;
+ fsl_chan->edesc = to_fsl_edma_desc(vdesc);
+ fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
+ fsl_edma_enable_request(fsl_chan);
+ fsl_chan->status = DMA_IN_PROGRESS;
+ fsl_chan->idle = false;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
+
+void fsl_edma_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+
+ if (unlikely(fsl_chan->pm_state != RUNNING)) {
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+ /* cannot submit due to suspend */
+ return;
+ }
+
+ if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
+ fsl_edma_xfer_desc(fsl_chan);
+
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+}
+EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
+
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+
+ fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
+ sizeof(struct fsl_edma_hw_tcd),
+ 32, 0);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
+
+void fsl_edma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
+ fsl_edma_disable_request(fsl_chan);
+ fsl_edma_chan_mux(fsl_chan, 0, false);
+ fsl_chan->edesc = NULL;
+ vchan_get_all_descriptors(&fsl_chan->vchan, &head);
+ spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
+
+ vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
+ dma_pool_destroy(fsl_chan->tcd_pool);
+ fsl_chan->tcd_pool = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
+
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
+{
+ struct fsl_edma_chan *chan, *_chan;
+
+ list_for_each_entry_safe(chan, _chan,
+ &dmadev->channels, vchan.chan.device_node) {
+ list_del(&chan->vchan.chan.device_node);
+ tasklet_kill(&chan->vchan.task);
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
+
+/*
+ * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
+ * the register offsets differ from those of the 64-channel ColdFire
+ * mcf5441x eDMA (here called "v2").
+ *
+ * This function sets up the register offsets for the declared version,
+ * so it must be called in xxx_edma_probe() just after setting the
+ * edma "version" and "membase" fields appropriately.
+ */
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
+{
+ edma->regs.cr = edma->membase + EDMA_CR;
+ edma->regs.es = edma->membase + EDMA_ES;
+ edma->regs.erql = edma->membase + EDMA_ERQ;
+ edma->regs.eeil = edma->membase + EDMA_EEI;
+
+ edma->regs.serq = edma->membase + ((edma->version == v1) ?
+ EDMA_SERQ : EDMA64_SERQ);
+ edma->regs.cerq = edma->membase + ((edma->version == v1) ?
+ EDMA_CERQ : EDMA64_CERQ);
+ edma->regs.seei = edma->membase + ((edma->version == v1) ?
+ EDMA_SEEI : EDMA64_SEEI);
+ edma->regs.ceei = edma->membase + ((edma->version == v1) ?
+ EDMA_CEEI : EDMA64_CEEI);
+ edma->regs.cint = edma->membase + ((edma->version == v1) ?
+ EDMA_CINT : EDMA64_CINT);
+ edma->regs.cerr = edma->membase + ((edma->version == v1) ?
+ EDMA_CERR : EDMA64_CERR);
+ edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
+ EDMA_SSRT : EDMA64_SSRT);
+ edma->regs.cdne = edma->membase + ((edma->version == v1) ?
+ EDMA_CDNE : EDMA64_CDNE);
+ edma->regs.intl = edma->membase + ((edma->version == v1) ?
+ EDMA_INTR : EDMA64_INTL);
+ edma->regs.errl = edma->membase + ((edma->version == v1) ?
+ EDMA_ERR : EDMA64_ERRL);
+
+ if (edma->version == v2) {
+ edma->regs.erqh = edma->membase + EDMA64_ERQH;
+ edma->regs.eeih = edma->membase + EDMA64_EEIH;
+ edma->regs.errh = edma->membase + EDMA64_ERRH;
+ edma->regs.inth = edma->membase + EDMA64_INTH;
+ }
+
+ edma->regs.tcd = edma->membase + EDMA_TCD;
+}
+EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
+
+MODULE_LICENSE("GPL v2");
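
The comment above fsl_edma_setup_regs() implies a strict probe-time ordering: version, endianness and membase first, then the register-offset setup. A hedged sketch of the platform glue (pdev, np, res and fsl_edma are assumed to come from the surrounding probe code):

	fsl_edma->version = v1;		/* v2 on the ColdFire mcf5441x */
	fsl_edma->big_endian = of_property_read_bool(np, "big-endian");
	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_edma->membase))
		return PTR_ERR(fsl_edma->membase);

	fsl_edma_setup_regs(fsl_edma);	/* regs.* now point into membase */
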
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
new file mode 100644
index 000000000000..8917e8865959
--- /dev/null
+++ b/drivers/dma/fsl-edma-common.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2013-2014 Freescale Semiconductor, Inc.
+ * Copyright 2018 Angelo Dureghello <angelo@sysam.it>
+ */
+#ifndef _FSL_EDMA_COMMON_H_
+#define _FSL_EDMA_COMMON_H_
+
+#include "virt-dma.h"
+
+#define EDMA_CR_EDBG BIT(1)
+#define EDMA_CR_ERCA BIT(2)
+#define EDMA_CR_ERGA BIT(3)
+#define EDMA_CR_HOE BIT(4)
+#define EDMA_CR_HALT BIT(5)
+#define EDMA_CR_CLM BIT(6)
+#define EDMA_CR_EMLM BIT(7)
+#define EDMA_CR_ECX BIT(16)
+#define EDMA_CR_CX BIT(17)
+
+#define EDMA_SEEI_SEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CEEI_CEEI(x) ((x) & GENMASK(4, 0))
+#define EDMA_CINT_CINT(x) ((x) & GENMASK(4, 0))
+#define EDMA_CERR_CERR(x) ((x) & GENMASK(4, 0))
+
+#define EDMA_TCD_ATTR_DSIZE(x) (((x) & GENMASK(2, 0)))
+#define EDMA_TCD_ATTR_DMOD(x) (((x) & GENMASK(4, 0)) << 3)
+#define EDMA_TCD_ATTR_SSIZE(x) (((x) & GENMASK(2, 0)) << 8)
+#define EDMA_TCD_ATTR_SMOD(x) (((x) & GENMASK(4, 0)) << 11)
+#define EDMA_TCD_ATTR_DSIZE_8BIT 0
+#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
+#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
+#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_SSIZE_8BIT 0
+#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_64BIT (EDMA_TCD_ATTR_DSIZE_64BIT << 8)
+#define EDMA_TCD_ATTR_SSIZE_32BYTE (EDMA_TCD_ATTR_DSIZE_32BYTE << 8)
+
+#define EDMA_TCD_CITER_CITER(x) ((x) & GENMASK(14, 0))
+#define EDMA_TCD_BITER_BITER(x) ((x) & GENMASK(14, 0))
+
+#define EDMA_TCD_CSR_START BIT(0)
+#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
+#define EDMA_TCD_CSR_INT_HALF BIT(2)
+#define EDMA_TCD_CSR_D_REQ BIT(3)
+#define EDMA_TCD_CSR_E_SG BIT(4)
+#define EDMA_TCD_CSR_E_LINK BIT(5)
+#define EDMA_TCD_CSR_ACTIVE BIT(6)
+#define EDMA_TCD_CSR_DONE BIT(7)
+
+#define EDMAMUX_CHCFG_DIS 0x0
+#define EDMAMUX_CHCFG_ENBL 0x80
+#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
+
+#define DMAMUX_NR 2
+
+#define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
+enum fsl_edma_pm_state {
+ RUNNING = 0,
+ SUSPENDED,
+};
+
+struct fsl_edma_hw_tcd {
+ __le32 saddr;
+ __le16 soff;
+ __le16 attr;
+ __le32 nbytes;
+ __le32 slast;
+ __le32 daddr;
+ __le16 doff;
+ __le16 citer;
+ __le32 dlast_sga;
+ __le16 csr;
+ __le16 biter;
+};
+
+/*
+ * These are iomem pointers, for both v1 (32-channel) and v2 (64-channel).
+ */
+struct edma_regs {
+ void __iomem *cr;
+ void __iomem *es;
+ void __iomem *erqh;
+ void __iomem *erql; /* aka erq on v1 */
+ void __iomem *eeih;
+ void __iomem *eeil; /* aka eei on v1 */
+ void __iomem *seei;
+ void __iomem *ceei;
+ void __iomem *serq;
+ void __iomem *cerq;
+ void __iomem *cint;
+ void __iomem *cerr;
+ void __iomem *ssrt;
+ void __iomem *cdne;
+ void __iomem *inth;
+ void __iomem *intl;
+ void __iomem *errh;
+ void __iomem *errl;
+ struct fsl_edma_hw_tcd __iomem *tcd;
+};
+
+struct fsl_edma_sw_tcd {
+ dma_addr_t ptcd;
+ struct fsl_edma_hw_tcd *vtcd;
+};
+
+struct fsl_edma_chan {
+ struct virt_dma_chan vchan;
+ enum dma_status status;
+ enum fsl_edma_pm_state pm_state;
+ bool idle;
+ u32 slave_id;
+ struct fsl_edma_engine *edma;
+ struct fsl_edma_desc *edesc;
+ struct dma_slave_config cfg;
+ u32 attr;
+ struct dma_pool *tcd_pool;
+};
+
+struct fsl_edma_desc {
+ struct virt_dma_desc vdesc;
+ struct fsl_edma_chan *echan;
+ bool iscyclic;
+ enum dma_transfer_direction dirn;
+ unsigned int n_tcds;
+ struct fsl_edma_sw_tcd tcd[];
+};
+
+enum edma_version {
+ v1, /* 32ch, Vybrid, mpc577x, etc */
+ v2, /* 64ch ColdFire */
+};
+
+struct fsl_edma_engine {
+ struct dma_device dma_dev;
+ void __iomem *membase;
+ void __iomem *muxbase[DMAMUX_NR];
+ struct clk *muxclk[DMAMUX_NR];
+ struct mutex fsl_edma_mutex;
+ u32 n_chans;
+ int txirq;
+ int errirq;
+ bool big_endian;
+ enum edma_version version;
+ struct edma_regs regs;
+ struct fsl_edma_chan chans[];
+};
+
+/*
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endianness is independent of the CPU core's.
+ * For the big-endian IP module, the offsets of 8-bit and 16-bit
+ * registers must also be swapped relative to the little-endian IP.
+ */
+static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
+{
+ if (edma->big_endian)
+ return ioread32be(addr);
+ else
+ return ioread32(addr);
+}
+
+static inline void edma_writeb(struct fsl_edma_engine *edma,
+ u8 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+ else
+ iowrite8(val, addr);
+}
+
+static inline void edma_writew(struct fsl_edma_engine *edma,
+ u16 val, void __iomem *addr)
+{
+ /* swap the reg offset for these in big-endian mode */
+ if (edma->big_endian)
+ iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
+ else
+ iowrite16(val, addr);
+}
+
+static inline void edma_writel(struct fsl_edma_engine *edma,
+ u32 val, void __iomem *addr)
+{
+ if (edma->big_endian)
+ iowrite32be(val, addr);
+ else
+ iowrite32(val, addr);
+}
+
+static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct fsl_edma_chan, vchan.chan);
+}
+
+static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct fsl_edma_desc, vdesc);
+}
+
+void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
+ unsigned int slot, bool enable);
+void fsl_edma_free_desc(struct virt_dma_desc *vdesc);
+int fsl_edma_terminate_all(struct dma_chan *chan);
+int fsl_edma_pause(struct dma_chan *chan);
+int fsl_edma_resume(struct dma_chan *chan);
+int fsl_edma_slave_config(struct dma_chan *chan,
+ struct dma_slave_config *cfg);
+enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate);
+struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
+ struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
+ size_t period_len, enum dma_transfer_direction direction,
+ unsigned long flags);
+struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context);
+void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan);
+void fsl_edma_issue_pending(struct dma_chan *chan);
+int fsl_edma_alloc_chan_resources(struct dma_chan *chan);
+void fsl_edma_free_chan_resources(struct dma_chan *chan);
+void fsl_edma_cleanup_vchan(struct dma_device *dmadev);
+void fsl_edma_setup_regs(struct fsl_edma_engine *edma);
+
+#endif /* _FSL_EDMA_COMMON_H_ */
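
The 8- and 16-bit accessors above encode the byte-lane mirroring between the two endiannesses of the IP: within a 32-bit word, a byte at little-endian offset b sits at offset b ^ 0x3 on the big-endian module, and a 16-bit half-word at offset h sits at h ^ 0x2. A worked example against the register map in fsl-edma-common.c (edma and ch are assumed locals):

/* EDMA_SEEI is 0x19 on the little-endian map; on a big-endian IP the
 * same 8-bit register is written at 0x19 ^ 0x3 = 0x1a within its word.
 * Likewise a 16-bit TCD field at LE offset 0x16 goes to 0x16 ^ 0x2 = 0x14.
 */
edma_writeb(edma, EDMA_SEEI_SEEI(ch), edma->regs.seei);	/* 8-bit  */
edma_writew(edma, 0, &edma->regs.tcd[ch].csr);		/* 16-bit */
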
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index c7568869284e..34d70112fcc9 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -13,671 +13,31 @@
* option) any later version.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
-#include "virt-dma.h"
-
-#define EDMA_CR 0x00
-#define EDMA_ES 0x04
-#define EDMA_ERQ 0x0C
-#define EDMA_EEI 0x14
-#define EDMA_SERQ 0x1B
-#define EDMA_CERQ 0x1A
-#define EDMA_SEEI 0x19
-#define EDMA_CEEI 0x18
-#define EDMA_CINT 0x1F
-#define EDMA_CERR 0x1E
-#define EDMA_SSRT 0x1D
-#define EDMA_CDNE 0x1C
-#define EDMA_INTR 0x24
-#define EDMA_ERR 0x2C
-
-#define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x))
-#define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x))
-#define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x))
-#define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x))
-#define EDMA_TCD_SLAST(x) (0x100C + 32 * (x))
-#define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x))
-#define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x))
-#define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_CITER(x) (0x1016 + 32 * (x))
-#define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x))
-#define EDMA_TCD_CSR(x) (0x101C + 32 * (x))
-#define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x))
-#define EDMA_TCD_BITER(x) (0x101E + 32 * (x))
-
-#define EDMA_CR_EDBG BIT(1)
-#define EDMA_CR_ERCA BIT(2)
-#define EDMA_CR_ERGA BIT(3)
-#define EDMA_CR_HOE BIT(4)
-#define EDMA_CR_HALT BIT(5)
-#define EDMA_CR_CLM BIT(6)
-#define EDMA_CR_EMLM BIT(7)
-#define EDMA_CR_ECX BIT(16)
-#define EDMA_CR_CX BIT(17)
-
-#define EDMA_SEEI_SEEI(x) ((x) & 0x1F)
-#define EDMA_CEEI_CEEI(x) ((x) & 0x1F)
-#define EDMA_CINT_CINT(x) ((x) & 0x1F)
-#define EDMA_CERR_CERR(x) ((x) & 0x1F)
-
-#define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007))
-#define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3)
-#define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8)
-#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11)
-#define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100)
-#define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200)
-#define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300)
-#define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500)
-#define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000)
-#define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001)
-#define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002)
-#define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003)
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005)
-
-#define EDMA_TCD_SOFF_SOFF(x) (x)
-#define EDMA_TCD_NBYTES_NBYTES(x) (x)
-#define EDMA_TCD_SLAST_SLAST(x) (x)
-#define EDMA_TCD_DADDR_DADDR(x) (x)
-#define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF)
-#define EDMA_TCD_DOFF_DOFF(x) (x)
-#define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x)
-#define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF)
-
-#define EDMA_TCD_CSR_START BIT(0)
-#define EDMA_TCD_CSR_INT_MAJOR BIT(1)
-#define EDMA_TCD_CSR_INT_HALF BIT(2)
-#define EDMA_TCD_CSR_D_REQ BIT(3)
-#define EDMA_TCD_CSR_E_SG BIT(4)
-#define EDMA_TCD_CSR_E_LINK BIT(5)
-#define EDMA_TCD_CSR_ACTIVE BIT(6)
-#define EDMA_TCD_CSR_DONE BIT(7)
-
-#define EDMAMUX_CHCFG_DIS 0x0
-#define EDMAMUX_CHCFG_ENBL 0x80
-#define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F)
-
-#define DMAMUX_NR 2
-
-#define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
- BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
- BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
-enum fsl_edma_pm_state {
- RUNNING = 0,
- SUSPENDED,
-};
-
-struct fsl_edma_hw_tcd {
- __le32 saddr;
- __le16 soff;
- __le16 attr;
- __le32 nbytes;
- __le32 slast;
- __le32 daddr;
- __le16 doff;
- __le16 citer;
- __le32 dlast_sga;
- __le16 csr;
- __le16 biter;
-};
-
-struct fsl_edma_sw_tcd {
- dma_addr_t ptcd;
- struct fsl_edma_hw_tcd *vtcd;
-};
-
-struct fsl_edma_slave_config {
- enum dma_transfer_direction dir;
- enum dma_slave_buswidth addr_width;
- u32 dev_addr;
- u32 burst;
- u32 attr;
-};
-
-struct fsl_edma_chan {
- struct virt_dma_chan vchan;
- enum dma_status status;
- enum fsl_edma_pm_state pm_state;
- bool idle;
- u32 slave_id;
- struct fsl_edma_engine *edma;
- struct fsl_edma_desc *edesc;
- struct fsl_edma_slave_config fsc;
- struct dma_pool *tcd_pool;
-};
-
-struct fsl_edma_desc {
- struct virt_dma_desc vdesc;
- struct fsl_edma_chan *echan;
- bool iscyclic;
- unsigned int n_tcds;
- struct fsl_edma_sw_tcd tcd[];
-};
-
-struct fsl_edma_engine {
- struct dma_device dma_dev;
- void __iomem *membase;
- void __iomem *muxbase[DMAMUX_NR];
- struct clk *muxclk[DMAMUX_NR];
- struct mutex fsl_edma_mutex;
- u32 n_chans;
- int txirq;
- int errirq;
- bool big_endian;
- struct fsl_edma_chan chans[];
-};
-
-/*
- * R/W functions for big- or little-endian registers:
- * The eDMA controller's endian is independent of the CPU core's endian.
- * For the big-endian IP module, the offset for 8-bit or 16-bit registers
- * should also be swapped opposite to that in little-endian IP.
- */
-
-static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
-{
- if (edma->big_endian)
- return ioread32be(addr);
- else
- return ioread32(addr);
-}
-
-static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
- else
- iowrite8(val, addr);
-}
-
-static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
-{
- /* swap the reg offset for these in big-endian mode */
- if (edma->big_endian)
- iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
- else
- iowrite16(val, addr);
-}
-
-static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr)
-{
- if (edma->big_endian)
- iowrite32be(val, addr);
- else
- iowrite32(val, addr);
-}
-
-static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
-{
- return container_of(chan, struct fsl_edma_chan, vchan.chan);
-}
-
-static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
-{
- return container_of(vd, struct fsl_edma_desc, vdesc);
-}
-
-static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI);
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ);
-}
-
-static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
-{
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ);
- edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI);
-}
-
-static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
- unsigned int slot, bool enable)
-{
- u32 ch = fsl_chan->vchan.chan.chan_id;
- void __iomem *muxaddr;
- unsigned chans_per_mux, ch_off;
-
- chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
- ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
- muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
- slot = EDMAMUX_CHCFG_SOURCE(slot);
-
- if (enable)
- iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
- else
- iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
-}
-
-static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
-{
- switch (addr_width) {
- case 1:
- return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
- case 2:
- return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
- case 4:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- case 8:
- return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
- default:
- return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
- }
-}
-
-static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = to_fsl_edma_desc(vdesc);
- for (i = 0; i < fsl_desc->n_tcds; i++)
- dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
-}
-
-static int fsl_edma_terminate_all(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->edesc = NULL;
- fsl_chan->idle = true;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- return 0;
-}
-
-static int fsl_edma_pause(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_disable_request(fsl_chan);
- fsl_chan->status = DMA_PAUSED;
- fsl_chan->idle = true;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_resume(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- if (fsl_chan->edesc) {
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
- }
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- return 0;
-}
-
-static int fsl_edma_slave_config(struct dma_chan *chan,
- struct dma_slave_config *cfg)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->fsc.dir = cfg->direction;
- if (cfg->direction == DMA_DEV_TO_MEM) {
- fsl_chan->fsc.dev_addr = cfg->src_addr;
- fsl_chan->fsc.addr_width = cfg->src_addr_width;
- fsl_chan->fsc.burst = cfg->src_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width);
- } else if (cfg->direction == DMA_MEM_TO_DEV) {
- fsl_chan->fsc.dev_addr = cfg->dst_addr;
- fsl_chan->fsc.addr_width = cfg->dst_addr_width;
- fsl_chan->fsc.burst = cfg->dst_maxburst;
- fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width);
- } else {
- return -EINVAL;
- }
- return 0;
-}
-
-static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
- struct virt_dma_desc *vdesc, bool in_progress)
-{
- struct fsl_edma_desc *edesc = fsl_chan->edesc;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
- enum dma_transfer_direction dir = fsl_chan->fsc.dir;
- dma_addr_t cur_addr, dma_addr;
- size_t len, size;
- int i;
-
- /* calculate the total size in this desc */
- for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
- len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
-
- if (!in_progress)
- return len;
-
- if (dir == DMA_MEM_TO_DEV)
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch));
- else
- cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch));
-
- /* figure out the finished and calculate the residue */
- for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
- size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
- * le16_to_cpu(edesc->tcd[i].vtcd->biter);
- if (dir == DMA_MEM_TO_DEV)
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
- else
- dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
-
- len -= size;
- if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
- len += dma_addr + size - cur_addr;
- break;
- }
- }
-
- return len;
-}
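
The walk above first sums every TCD's payload (nbytes times biter), then subtracts completed TCDs until it finds the one containing the hardware's current source or destination address, adding back that TCD's untransferred tail. A minimal standalone model of the same arithmetic, with hypothetical types:

#include <stddef.h>

struct seg { unsigned long addr; size_t size; };

static size_t residue(const struct seg *s, int n, unsigned long cur)
{
	size_t len = 0;
	int i;

	for (i = 0; i < n; i++)
		len += s[i].size;		/* total bytes queued */

	for (i = 0; i < n; i++) {
		len -= s[i].size;		/* assume fully transferred... */
		if (cur >= s[i].addr && cur < s[i].addr + s[i].size) {
			/* ...unless cur lies inside: add back the tail */
			len += s[i].addr + s[i].size - cur;
			break;
		}
	}
	return len;
}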
-
-static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie, struct dma_tx_state *txstate)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
-
- status = dma_cookie_status(chan, cookie, txstate);
- if (status == DMA_COMPLETE)
- return status;
-
- if (!txstate)
- return fsl_chan->status;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
- if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true);
- else if (vdesc)
- txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false);
- else
- txstate->residue = 0;
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- return fsl_chan->status;
-}
-
-static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
- struct fsl_edma_hw_tcd *tcd)
-{
- struct fsl_edma_engine *edma = fsl_chan->edma;
- void __iomem *addr = fsl_chan->edma->membase;
- u32 ch = fsl_chan->vchan.chan.chan_id;
-
- /*
- * TCD parameters are stored in struct fsl_edma_hw_tcd in little
- * endian format. However, we need to load the TCD registers in
- * big- or little-endian obeying the eDMA engine model endian.
- */
- edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
- edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
- edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
- edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
- edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
- edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
-
- edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
-
- edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
-}
-
-static inline
-void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
- u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
- u16 biter, u16 doff, u32 dlast_sga, bool major_int,
- bool disable_req, bool enable_sg)
-{
- u16 csr = 0;
-
- /*
- * eDMA hardware SGs require the TCDs to be stored in little
- * endian format irrespective of the register endian model.
- * So we put the value in little endian in memory, waiting
- * for fsl_edma_set_tcd_regs doing the swap.
- */
- tcd->saddr = cpu_to_le32(src);
- tcd->daddr = cpu_to_le32(dst);
-
- tcd->attr = cpu_to_le16(attr);
-
- tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
-
- tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
- tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
-
- tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
- tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
-
- tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
-
- tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
- if (major_int)
- csr |= EDMA_TCD_CSR_INT_MAJOR;
-
- if (disable_req)
- csr |= EDMA_TCD_CSR_D_REQ;
-
- if (enable_sg)
- csr |= EDMA_TCD_CSR_E_SG;
-
- tcd->csr = cpu_to_le16(csr);
-}
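
Both comments describe a single convention: the in-memory TCD image is always little-endian, because hardware scatter-gather reloads it directly from RAM, and only the MMIO path in fsl_edma_set_tcd_regs() honours the engine's register endianness. A minimal sketch, assuming GCC/Clang builtins, of keeping a memory image LE regardless of host byte order:

#include <stdint.h>

/* store little-endian in memory; swap only if the host is big-endian */
static inline uint32_t to_le32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

struct tcd_image { uint32_t saddr, daddr; };	/* always LE in RAM */

static void fill(struct tcd_image *t, uint32_t s, uint32_t d)
{
	t->saddr = to_le32(s);
	t->daddr = to_le32(d);
}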
-
-static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
- int sg_len)
-{
- struct fsl_edma_desc *fsl_desc;
- int i;
-
- fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len,
- GFP_NOWAIT);
- if (!fsl_desc)
- return NULL;
-
- fsl_desc->echan = fsl_chan;
- fsl_desc->n_tcds = sg_len;
- for (i = 0; i < sg_len; i++) {
- fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
- GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
- if (!fsl_desc->tcd[i].vtcd)
- goto err;
- }
- return fsl_desc;
-
-err:
- while (--i >= 0)
- dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
- fsl_desc->tcd[i].ptcd);
- kfree(fsl_desc);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
- struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- dma_addr_t dma_buf_next;
- int sg_len, i;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- sg_len = buf_len / period_len;
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = true;
-
- dma_buf_next = dma_addr;
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- iter = period_len / nbytes;
-
- for (i = 0; i < sg_len; i++) {
- if (dma_buf_next >= dma_addr + buf_len)
- dma_buf_next = dma_addr;
-
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = dma_buf_next;
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = dma_buf_next;
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
- fsl_chan->fsc.attr, soff, nbytes, 0, iter,
- iter, doff, last_sg, true, false, true);
- dma_buf_next += period_len;
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- struct fsl_edma_desc *fsl_desc;
- struct scatterlist *sg;
- u32 src_addr, dst_addr, last_sg, nbytes;
- u16 soff, doff, iter;
- int i;
-
- if (!is_slave_direction(fsl_chan->fsc.dir))
- return NULL;
-
- fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
- if (!fsl_desc)
- return NULL;
- fsl_desc->iscyclic = false;
-
- nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst;
- for_each_sg(sgl, sg, sg_len, i) {
- /* get next sg's physical address */
- last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
-
- if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) {
- src_addr = sg_dma_address(sg);
- dst_addr = fsl_chan->fsc.dev_addr;
- soff = fsl_chan->fsc.addr_width;
- doff = 0;
- } else {
- src_addr = fsl_chan->fsc.dev_addr;
- dst_addr = sg_dma_address(sg);
- soff = 0;
- doff = fsl_chan->fsc.addr_width;
- }
-
- iter = sg_dma_len(sg) / nbytes;
- if (i < sg_len - 1) {
- last_sg = fsl_desc->tcd[(i + 1)].ptcd;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- false, false, true);
- } else {
- last_sg = 0;
- fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
- dst_addr, fsl_chan->fsc.attr, soff,
- nbytes, 0, iter, iter, doff, last_sg,
- true, true, false);
- }
- }
-
- return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
-}
-
-static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&fsl_chan->vchan);
- if (!vdesc)
- return;
- fsl_chan->edesc = to_fsl_edma_desc(vdesc);
- fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
- fsl_edma_enable_request(fsl_chan);
- fsl_chan->status = DMA_IN_PROGRESS;
- fsl_chan->idle = false;
-}
+#include "fsl-edma-common.h"
static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int intr, ch;
- void __iomem *base_addr;
+ struct edma_regs *regs = &fsl_edma->regs;
struct fsl_edma_chan *fsl_chan;
- base_addr = fsl_edma->membase;
-
- intr = edma_readl(fsl_edma, base_addr + EDMA_INTR);
+ intr = edma_readl(fsl_edma, regs->intl);
if (!intr)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (intr & (0x1 << ch)) {
- edma_writeb(fsl_edma, EDMA_CINT_CINT(ch),
- base_addr + EDMA_CINT);
+ edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint);
fsl_chan = &fsl_edma->chans[ch];
@@ -705,16 +65,16 @@ static irqreturn_t fsl_edma_err_handler(int irq, void *dev_id)
{
struct fsl_edma_engine *fsl_edma = dev_id;
unsigned int err, ch;
+ struct edma_regs *regs = &fsl_edma->regs;
- err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR);
+ err = edma_readl(fsl_edma, regs->errl);
if (!err)
return IRQ_NONE;
for (ch = 0; ch < fsl_edma->n_chans; ch++) {
if (err & (0x1 << ch)) {
fsl_edma_disable_request(&fsl_edma->chans[ch]);
- edma_writeb(fsl_edma, EDMA_CERR_CERR(ch),
- fsl_edma->membase + EDMA_CERR);
+ edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr);
fsl_edma->chans[ch].status = DMA_ERROR;
fsl_edma->chans[ch].idle = true;
}
@@ -730,25 +90,6 @@ static irqreturn_t fsl_edma_irq_handler(int irq, void *dev_id)
return fsl_edma_err_handler(irq, dev_id);
}
-static void fsl_edma_issue_pending(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
-
- if (unlikely(fsl_chan->pm_state != RUNNING)) {
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
- /* cannot submit due to suspend */
- return;
- }
-
- if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
- fsl_edma_xfer_desc(fsl_chan);
-
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-}
-
static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct of_dma *ofdma)
{
@@ -781,34 +122,6 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
-static int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
-
- fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
- sizeof(struct fsl_edma_hw_tcd),
- 32, 0);
- return 0;
-}
-
-static void fsl_edma_free_chan_resources(struct dma_chan *chan)
-{
- struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
- unsigned long flags;
- LIST_HEAD(head);
-
- spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
- fsl_edma_disable_request(fsl_chan);
- fsl_edma_chan_mux(fsl_chan, 0, false);
- fsl_chan->edesc = NULL;
- vchan_get_all_descriptors(&fsl_chan->vchan, &head);
- spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
- dma_pool_destroy(fsl_chan->tcd_pool);
- fsl_chan->tcd_pool = NULL;
-}
-
static int
fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
@@ -876,6 +189,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
@@ -891,6 +205,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
+ fsl_edma->version = v1;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
@@ -899,6 +214,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (IS_ERR(fsl_edma->membase))
return PTR_ERR(fsl_edma->membase);
+ fsl_edma_setup_regs(fsl_edma);
+ regs = &fsl_edma->regs;
+
for (i = 0; i < DMAMUX_NR; i++) {
char clkname[32];
@@ -939,11 +257,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->vchan.desc_free = fsl_edma_free_desc;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
fsl_edma_chan_mux(fsl_chan, 0, false);
}
- edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR);
+ edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
if (ret)
return ret;
@@ -990,22 +308,11 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
/* enable round robin arbitration */
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
-static void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
-{
- struct fsl_edma_chan *chan, *_chan;
-
- list_for_each_entry_safe(chan, _chan,
- &dmadev->channels, vchan.chan.device_node) {
- list_del(&chan->vchan.chan.device_node);
- tasklet_kill(&chan->vchan.task);
- }
-}
-
static int fsl_edma_remove(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -1048,18 +355,18 @@ static int fsl_edma_resume_early(struct device *dev)
{
struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev);
struct fsl_edma_chan *fsl_chan;
+ struct edma_regs *regs = &fsl_edma->regs;
int i;
for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i];
fsl_chan->pm_state = RUNNING;
- edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i));
+ edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr);
if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
}
- edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA,
- fsl_edma->membase + EDMA_CR);
+ edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0;
}
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 1117b5123a6f..9d360a3fbae3 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -987,7 +987,7 @@ static void dma_do_tasklet(unsigned long data)
chan_dbg(chan, "tasklet entry\n");
- spin_lock_bh(&chan->desc_lock);
+ spin_lock(&chan->desc_lock);
/* the hardware is now idle and ready for more */
chan->idle = true;
@@ -995,7 +995,7 @@ static void dma_do_tasklet(unsigned long data)
/* Run all cleanup for descriptors which have been completed */
fsldma_cleanup_descriptors(chan);
- spin_unlock_bh(&chan->desc_lock);
+ spin_unlock(&chan->desc_lock);
chan_dbg(chan, "tasklet exit\n");
}
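
The _bh suffix is dropped because dma_do_tasklet() already runs in bottom-half context, so spin_lock_bh() would only re-disable what is already disabled; the mv_xor and idma64 hunks below make the same change for their tasklet and hard-irq paths. Sketched as a context note:

/*
 * tasklet context:   BHs disabled  ->  spin_lock() is enough
 * hard-irq context:  IRQs disabled ->  spin_lock() is enough,
 *                                      no need to save/restore flags
 * process context:   must still use spin_lock_bh()/_irqsave() when
 *                    racing with the handlers above
 */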
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 202ffa9f7611..e06f20272fd7 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -348,10 +348,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
{
struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&hsuc->config, config, sizeof(hsuc->config));
return 0;
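
Dropping the is_slave_direction() check reflects that the direction field of struct dma_slave_config is deprecated: the authoritative direction now arrives with each prep call rather than at configuration time. A hedged sketch of the consumer-side sequence, using the generic dmaengine wrappers:

#include <linux/dmaengine.h>

static struct dma_async_tx_descriptor *
prep_tx(struct dma_chan *chan, struct scatterlist *sgl, unsigned int n,
	struct dma_slave_config *cfg)
{
	/* cfg carries addresses/widths only; no direction needed here */
	dmaengine_slave_config(chan, cfg);

	/* the direction travels with the prep call itself */
	return dmaengine_prep_slave_sg(chan, sgl, n, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
}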
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 1fbf9cb9b742..0baf9797cc09 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -142,9 +142,8 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
{
struct idma64_chan *idma64c = &idma64->chan[c];
struct idma64_desc *desc;
- unsigned long flags;
- spin_lock_irqsave(&idma64c->vchan.lock, flags);
+ spin_lock(&idma64c->vchan.lock);
desc = idma64c->desc;
if (desc) {
if (status_err & (1 << c)) {
@@ -161,7 +160,7 @@ static void idma64_chan_irq(struct idma64 *idma64, unsigned short c,
if (idma64c->desc == NULL || desc->status == DMA_ERROR)
idma64_stop_transfer(idma64c);
}
- spin_unlock_irqrestore(&idma64c->vchan.lock, flags);
+ spin_unlock(&idma64c->vchan.lock);
}
static irqreturn_t idma64_irq(int irq, void *dev)
@@ -408,10 +407,6 @@ static int idma64_slave_config(struct dma_chan *chan,
{
struct idma64_chan *idma64c = to_idma64_chan(chan);
- /* Check if chan will be configured for slave transfers */
- if (!is_slave_direction(config->direction))
- return -EINVAL;
-
memcpy(&idma64c->config, config, sizeof(idma64c->config));
convert_burst(&idma64c->config.src_maxburst);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 75b6ff0415ee..c2fff3f6c9ca 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -162,6 +162,7 @@ struct imxdma_channel {
bool enabled_2d;
int slot_2d;
unsigned int irq;
+ struct dma_slave_config config;
};
enum imx_dma_type {
@@ -675,14 +676,15 @@ static int imxdma_terminate_all(struct dma_chan *chan)
return 0;
}
-static int imxdma_config(struct dma_chan *chan,
- struct dma_slave_config *dmaengine_cfg)
+static int imxdma_config_write(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg,
+ enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
unsigned int mode = 0;
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ if (direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -723,6 +725,16 @@ static int imxdma_config(struct dma_chan *chan,
return 0;
}
+static int imxdma_config(struct dma_chan *chan,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
+
+ memcpy(&imxdmac->config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
dma_cookie_t cookie,
struct dma_tx_state *txstate)
@@ -905,6 +917,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
desc->desc.callback = NULL;
desc->desc.callback_param = NULL;
+ imxdma_config_write(chan, &imxdmac->config, direction);
+
return &desc->desc;
}
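
imxdma_config() now only caches the channel configuration, and imxdma_config_write() applies it once the prep callback supplies the real transfer direction; k3dma and mmp_tdma below adopt the same store-then-apply split. A self-contained model of the pattern, with hypothetical names:

#include <string.h>

enum dir { DEV_TO_MEM, MEM_TO_DEV };

struct slave_config { unsigned long src_addr, dst_addr; };

struct chan {
	struct slave_config cfg;	/* cached by config() */
	unsigned long dev_addr;		/* resolved by config_write() */
};

/* device_config callback: cache only, direction unknown yet */
static int config(struct chan *c, const struct slave_config *cfg)
{
	memcpy(&c->cfg, cfg, sizeof(*cfg));
	return 0;
}

/* called from prep_slave_sg()/prep_dma_cyclic() with the real direction */
static void config_write(struct chan *c, enum dir d)
{
	c->dev_addr = (d == DEV_TO_MEM) ? c->cfg.src_addr : c->cfg.dst_addr;
}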
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4fa4c06c9edb..2d810dfcdc48 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -129,7 +129,7 @@ static void
ioat_init_channel(struct ioatdma_device *ioat_dma,
struct ioatdma_chan *ioat_chan, int idx);
static void ioat_intr_quirk(struct ioatdma_device *ioat_dma);
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
static int ioat3_dma_self_test(struct ioatdma_device *ioat_dma);
static int ioat_dca_enabled = 1;
@@ -575,7 +575,7 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma)
* ioat_enumerate_channels - find and initialize the device's channels
* @ioat_dma: the ioat dma device to be enumerated
*/
-static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
+static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
struct ioatdma_chan *ioat_chan;
struct device *dev = &ioat_dma->pdev->dev;
@@ -594,7 +594,7 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
xfercap_log &= 0x1f; /* bits [4:0] valid */
if (xfercap_log == 0)
- return 0;
+ return;
dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);
for (i = 0; i < dma->chancnt; i++) {
@@ -611,7 +611,6 @@ static int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
}
}
dma->chancnt = i;
- return i;
}
/**
@@ -1205,8 +1204,15 @@ static void ioat_shutdown(struct pci_dev *pdev)
spin_lock_bh(&ioat_chan->prep_lock);
set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
- del_timer_sync(&ioat_chan->timer);
spin_unlock_bh(&ioat_chan->prep_lock);
+ /*
+ * Synchronization rule for del_timer_sync():
+ * - The caller must not hold locks which would prevent
+ * completion of the timer's handler.
+ * So prep_lock cannot be held before calling it.
+ */
+ del_timer_sync(&ioat_chan->timer);
+
/* this should quiesce then reset */
ioat_reset_hw(ioat_chan);
}
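
Moving del_timer_sync() outside prep_lock removes a deadlock: the synchronous cancel spins until the timer handler has finished, while the handler itself may be waiting for prep_lock. A comment-form sketch of the forbidden interleaving (hypothetical trace):

/*
 *   CPU0: ioat_shutdown()            CPU1: timer handler
 *   spin_lock_bh(&prep_lock)
 *                                    fires, needs prep_lock -> spins
 *   del_timer_sync(&timer)
 *     waits for handler to end  -->  never happens: circular wait
 *
 * Cancelling the timer only after the unlock breaks the cycle.
 */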
@@ -1252,7 +1258,6 @@ static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev,
static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
{
pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
- int err;
dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME);
@@ -1267,12 +1272,6 @@ static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "AER uncorrect error status clear failed: %#x\n", err);
- }
-
return result;
}
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index 6bfa217ed6d0..fdec2b6cfbb0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -87,10 +87,10 @@ struct k3_dma_chan {
struct virt_dma_chan vc;
struct k3_dma_phy *phy;
struct list_head node;
- enum dma_transfer_direction dir;
dma_addr_t dev_addr;
enum dma_status status;
bool cyclic;
+ struct dma_slave_config slave_config;
};
struct k3_dma_phy {
@@ -118,6 +118,10 @@ struct k3_dma_dev {
#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg);
+
static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
return container_of(chan, struct k3_dma_chan, vc.chan);
@@ -501,14 +505,8 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
copy = min_t(size_t, len, DMA_MAX_SIZE);
k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);
- if (c->dir == DMA_MEM_TO_DEV) {
- src += copy;
- } else if (c->dir == DMA_DEV_TO_MEM) {
- dst += copy;
- } else {
- src += copy;
- dst += copy;
- }
+ src += copy;
+ dst += copy;
len -= copy;
} while (len);
@@ -542,6 +540,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
if (!ds)
return NULL;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
for_each_sg(sgl, sg, sglen, i) {
addr = sg_dma_address(sg);
@@ -602,6 +601,7 @@ k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
avail = buf_len;
total = avail;
num = 0;
+ k3_dma_config_write(chan, dir, &c->slave_config);
if (period_len < modulo)
modulo = period_len;
@@ -642,18 +642,26 @@ static int k3_dma_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
struct k3_dma_chan *c = to_k3_chan(chan);
+
+ memcpy(&c->slave_config, cfg, sizeof(*cfg));
+
+ return 0;
+}
+
+static int k3_dma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *cfg)
+{
+ struct k3_dma_chan *c = to_k3_chan(chan);
u32 maxburst = 0, val = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
- if (cfg == NULL)
- return -EINVAL;
- c->dir = cfg->direction;
- if (c->dir == DMA_DEV_TO_MEM) {
+ if (dir == DMA_DEV_TO_MEM) {
c->ccfg = CX_CFG_DSTINCR;
c->dev_addr = cfg->src_addr;
maxburst = cfg->src_maxburst;
width = cfg->src_addr_width;
- } else if (c->dir == DMA_MEM_TO_DEV) {
+ } else if (dir == DMA_MEM_TO_DEV) {
c->ccfg = CX_CFG_SRCINCR;
c->dev_addr = cfg->dst_addr;
maxburst = cfg->dst_maxburst;
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
new file mode 100644
index 000000000000..5de1b07eddff
--- /dev/null
+++ b/drivers/dma/mcf-edma.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
+// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/dma-mcf-edma.h>
+
+#include "fsl-edma-common.h"
+
+#define EDMA_CHANNELS 64
+#define EDMA_MASK_CH(x) ((x) & GENMASK(5, 0))
+
+static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int ch;
+ struct fsl_edma_chan *mcf_chan;
+ u64 intmap;
+
+ intmap = ioread32(regs->inth);
+ intmap <<= 32;
+ intmap |= ioread32(regs->intl);
+ if (!intmap)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < mcf_edma->n_chans; ch++) {
+ if (intmap & BIT(ch)) {
+ iowrite8(EDMA_MASK_CH(ch), regs->cint);
+
+ mcf_chan = &mcf_edma->chans[ch];
+
+ spin_lock(&mcf_chan->vchan.lock);
+ if (!mcf_chan->edesc->iscyclic) {
+ list_del(&mcf_chan->edesc->vdesc.node);
+ vchan_cookie_complete(&mcf_chan->edesc->vdesc);
+ mcf_chan->edesc = NULL;
+ mcf_chan->status = DMA_COMPLETE;
+ mcf_chan->idle = true;
+ } else {
+ vchan_cyclic_callback(&mcf_chan->edesc->vdesc);
+ }
+
+ if (!mcf_chan->edesc)
+ fsl_edma_xfer_desc(mcf_chan);
+
+ spin_unlock(&mcf_chan->vchan.lock);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
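
The handler folds two 32-bit status registers into a single 64-bit channel map: INTH reports channels 63..32 and INTL channels 31..0. A standalone model of the assembly:

#include <stdint.h>

static uint64_t combine(uint32_t inth, uint32_t intl)
{
	uint64_t map = inth;

	map <<= 32;	/* channels 63..32 */
	map |= intl;	/* channels 31..0  */
	return map;
}

/* combine(0x1, 0x80000000) has bits 32 and 31 set: channels 32 and 31 pending */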
+
+static irqreturn_t mcf_edma_err_handler(int irq, void *dev_id)
+{
+ struct fsl_edma_engine *mcf_edma = dev_id;
+ struct edma_regs *regs = &mcf_edma->regs;
+ unsigned int err, ch;
+
+ err = ioread32(regs->errl);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = 0; ch < (EDMA_CHANNELS / 2); ch++) {
+ if (err & BIT(ch)) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ err = ioread32(regs->errh);
+ if (!err)
+ return IRQ_NONE;
+
+ for (ch = (EDMA_CHANNELS / 2); ch < EDMA_CHANNELS; ch++) {
+ if (err & (BIT(ch - (EDMA_CHANNELS / 2)))) {
+ fsl_edma_disable_request(&mcf_edma->chans[ch]);
+ iowrite8(EDMA_CERR_CERR(ch), regs->cerr);
+ mcf_edma->chans[ch].status = DMA_ERROR;
+ mcf_edma->chans[ch].idle = true;
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mcf_edma_irq_init(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int ret = 0, i;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (!res)
+ return -1;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (!res)
+ return -1;
+
+ for (ret = 0, i = res->start; i <= res->end; ++i)
+ ret |= request_irq(i, mcf_edma_tx_handler, 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+
+ ret = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_tx_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, "edma-err");
+ if (ret != -ENXIO) {
+ ret = request_irq(ret, mcf_edma_err_handler,
+ 0, "eDMA", mcf_edma);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mcf_edma_irq_free(struct platform_device *pdev,
+ struct fsl_edma_engine *mcf_edma)
+{
+ int irq;
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-00-15");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_IRQ, "edma-tx-16-55");
+ if (res) {
+ for (irq = res->start; irq <= res->end; irq++)
+ free_irq(irq, mcf_edma);
+ }
+
+ irq = platform_get_irq_byname(pdev, "edma-tx-56-63");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+
+ irq = platform_get_irq_byname(pdev, "edma-err");
+ if (irq != -ENXIO)
+ free_irq(irq, mcf_edma);
+}
+
+static int mcf_edma_probe(struct platform_device *pdev)
+{
+ struct mcf_edma_platform_data *pdata;
+ struct fsl_edma_engine *mcf_edma;
+ struct fsl_edma_chan *mcf_chan;
+ struct edma_regs *regs;
+ struct resource *res;
+ int ret, i, len, chans;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data supplied\n");
+ return -EINVAL;
+ }
+
+ chans = pdata->dma_channels;
+ len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans;
+ mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
+ if (!mcf_edma)
+ return -ENOMEM;
+
+ mcf_edma->n_chans = chans;
+
+ /* Set up version for ColdFire edma */
+ mcf_edma->version = v2;
+ mcf_edma->big_endian = 1;
+
+ if (!mcf_edma->n_chans) {
+ dev_info(&pdev->dev, "setting default channel number to 64");
+ mcf_edma->n_chans = 64;
+ }
+
+ mutex_init(&mcf_edma->fsl_edma_mutex);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ mcf_edma->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcf_edma->membase))
+ return PTR_ERR(mcf_edma->membase);
+
+ fsl_edma_setup_regs(mcf_edma);
+ regs = &mcf_edma->regs;
+
+ INIT_LIST_HEAD(&mcf_edma->dma_dev.channels);
+ for (i = 0; i < mcf_edma->n_chans; i++) {
+ struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
+
+ mcf_chan->edma = mcf_edma;
+ mcf_chan->slave_id = i;
+ mcf_chan->idle = true;
+ mcf_chan->vchan.desc_free = fsl_edma_free_desc;
+ vchan_init(&mcf_chan->vchan, &mcf_edma->dma_dev);
+ iowrite32(0x0, &regs->tcd[i].csr);
+ }
+
+ iowrite32(~0, regs->inth);
+ iowrite32(~0, regs->intl);
+
+ ret = mcf_edma_irq_init(pdev, mcf_edma);
+ if (ret)
+ return ret;
+
+ dma_cap_set(DMA_PRIVATE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_SLAVE, mcf_edma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, mcf_edma->dma_dev.cap_mask);
+
+ mcf_edma->dma_dev.dev = &pdev->dev;
+ mcf_edma->dma_dev.device_alloc_chan_resources =
+ fsl_edma_alloc_chan_resources;
+ mcf_edma->dma_dev.device_free_chan_resources =
+ fsl_edma_free_chan_resources;
+ mcf_edma->dma_dev.device_config = fsl_edma_slave_config;
+ mcf_edma->dma_dev.device_prep_dma_cyclic =
+ fsl_edma_prep_dma_cyclic;
+ mcf_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg;
+ mcf_edma->dma_dev.device_tx_status = fsl_edma_tx_status;
+ mcf_edma->dma_dev.device_pause = fsl_edma_pause;
+ mcf_edma->dma_dev.device_resume = fsl_edma_resume;
+ mcf_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all;
+ mcf_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending;
+
+ mcf_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS;
+ mcf_edma->dma_dev.directions =
+ BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+
+ mcf_edma->dma_dev.filter.fn = mcf_edma_filter_fn;
+ mcf_edma->dma_dev.filter.map = pdata->slave_map;
+ mcf_edma->dma_dev.filter.mapcnt = pdata->slavecnt;
+
+ platform_set_drvdata(pdev, mcf_edma);
+
+ ret = dma_async_device_register(&mcf_edma->dma_dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Can't register Freescale eDMA engine. (%d)\n", ret);
+ return ret;
+ }
+
+ /* Enable round robin arbitration */
+ iowrite32(EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
+
+ return 0;
+}
+
+static int mcf_edma_remove(struct platform_device *pdev)
+{
+ struct fsl_edma_engine *mcf_edma = platform_get_drvdata(pdev);
+
+ mcf_edma_irq_free(pdev, mcf_edma);
+ fsl_edma_cleanup_vchan(&mcf_edma->dma_dev);
+ dma_async_device_unregister(&mcf_edma->dma_dev);
+
+ return 0;
+}
+
+static struct platform_driver mcf_edma_driver = {
+ .driver = {
+ .name = "mcf-edma",
+ },
+ .probe = mcf_edma_probe,
+ .remove = mcf_edma_remove,
+};
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
+{
+ if (chan->device->dev->driver == &mcf_edma_driver.driver) {
+ struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
+
+ return (mcf_chan->slave_id == (uintptr_t)param);
+ }
+
+ return false;
+}
+EXPORT_SYMBOL(mcf_edma_filter_fn);
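
A consumer pairs this filter with dma_request_channel(), passing the wanted slave ID as the opaque cookie that the filter compares against. A hedged client-side sketch; the filter prototype is assumed to be exposed by the dma-mcf-edma platform-data header:

#include <linux/dmaengine.h>
#include <linux/platform_data/dma-mcf-edma.h>

static struct dma_chan *get_edma_chan(unsigned int slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* matches only the channel whose slave_id equals the cookie */
	return dma_request_channel(mask, mcf_edma_filter_fn,
				   (void *)(uintptr_t)slave_id);
}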
+
+static int __init mcf_edma_init(void)
+{
+ return platform_driver_register(&mcf_edma_driver);
+}
+subsys_initcall(mcf_edma_init);
+
+static void __exit mcf_edma_exit(void)
+{
+ platform_driver_unregister(&mcf_edma_driver);
+}
+module_exit(mcf_edma_exit);
+
+MODULE_ALIAS("platform:mcf-edma");
+MODULE_DESCRIPTION("Freescale eDMA engine driver, ColdFire family");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 13c68b6434ce..0c56faa03e9a 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -116,6 +116,7 @@ struct mmp_tdma_chan {
u32 burst_sz;
enum dma_slave_buswidth buswidth;
enum dma_status status;
+ struct dma_slave_config slave_config;
int idx;
enum mmp_tdma_type type;
@@ -139,6 +140,10 @@ struct mmp_tdma_device {
#define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan)
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg);
+
static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys)
{
writel(phys, tdmac->reg_base + TDNDPR);
@@ -442,6 +447,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
if (!desc)
goto err_out;
+ mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+
while (buf < buf_len) {
desc = &tdmac->desc_arr[i];
@@ -495,7 +502,18 @@ static int mmp_tdma_config(struct dma_chan *chan,
{
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
- if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
+ memcpy(&tdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
+
+ return 0;
+}
+
+static int mmp_tdma_config_write(struct dma_chan *chan,
+ enum dma_transfer_direction dir,
+ struct dma_slave_config *dmaengine_cfg)
+{
+ struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
+
+ if (dir == DMA_DEV_TO_MEM) {
tdmac->dev_addr = dmaengine_cfg->src_addr;
tdmac->burst_sz = dmaengine_cfg->src_maxburst;
tdmac->buswidth = dmaengine_cfg->src_addr_width;
@@ -504,7 +522,7 @@ static int mmp_tdma_config(struct dma_chan *chan,
tdmac->burst_sz = dmaengine_cfg->dst_maxburst;
tdmac->buswidth = dmaengine_cfg->dst_addr_width;
}
- tdmac->dir = dmaengine_cfg->direction;
+ tdmac->dir = dir;
return mmp_tdma_config_chan(chan);
}
@@ -530,9 +548,6 @@ static void mmp_tdma_issue_pending(struct dma_chan *chan)
static int mmp_tdma_remove(struct platform_device *pdev)
{
- struct mmp_tdma_device *tdev = platform_get_drvdata(pdev);
-
- dma_async_device_unregister(&tdev->device);
return 0;
}
@@ -696,7 +711,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
platform_set_drvdata(pdev, tdev);
- ret = dma_async_device_register(&tdev->device);
+ ret = dmaenginem_async_device_register(&tdev->device);
if (ret) {
dev_err(tdev->device.dev, "unable to register\n");
return ret;
@@ -708,7 +723,7 @@ static int mmp_tdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(tdev->device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&tdev->device);
+ return ret;
}
}
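
dmaenginem_async_device_register() is the device-managed variant of registration: the engine is unregistered automatically when the device is unbound, which is why the explicit unregister calls disappear from the remove and error paths here and in the mxs-dma and pxa_dma hunks below. A minimal probe skeleton using the pattern, with hypothetical names:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {
	struct dma_device ddev;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_dev *fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);

	if (!fd)
		return -ENOMEM;

	fd->ddev.dev = &pdev->dev;
	/* ... capability mask and callbacks ... */

	/* unregisters itself when the device goes away */
	return dmaenginem_async_device_register(&fd->ddev);
}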
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 969534c1a6c6..7f595355fb79 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -348,9 +348,9 @@ static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
- spin_lock_bh(&chan->lock);
+ spin_lock(&chan->lock);
mv_chan_slot_cleanup(chan);
- spin_unlock_bh(&chan->lock);
+ spin_unlock(&chan->lock);
}
static struct mv_xor_desc_slot *
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index ae5182ff0128..35193b31a9e0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -847,7 +847,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan;
- ret = dma_async_device_register(&mxs_dma->dma_device);
+ ret = dmaenginem_async_device_register(&mxs_dma->dma_device);
if (ret) {
dev_err(mxs_dma->dma_device.dev, "unable to register\n");
return ret;
@@ -857,7 +857,6 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
if (ret) {
dev_err(mxs_dma->dma_device.dev,
"failed to register controller\n");
- dma_async_device_unregister(&mxs_dma->dma_device);
}
dev_info(mxs_dma->dma_device.dev, "initialized\n");
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 8c7b2e8703da..a67b292190f4 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1,10 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2013-2014 Renesas Electronics Europe Ltd.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/bitmap.h>
@@ -1095,8 +1092,8 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
if (!dchan)
return NULL;
- dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__,
- dma_spec->np->name);
+ dev_dbg(dchan->device->dev, "Entry %s(%pOFn)\n", __func__,
+ dma_spec->np);
chan = nbpf_to_chan(dchan);
diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
index 7812a6338acd..90bbcef99ef8 100644
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -21,6 +21,7 @@
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/of_dma.h>
#include <linux/slab.h>
#include "virt-dma.h"
@@ -161,10 +162,12 @@ struct owl_dma_lli {
* struct owl_dma_txd - Wrapper for struct dma_async_tx_descriptor
* @vd: virtual DMA descriptor
* @lli_list: link list of lli nodes
+ * @cyclic: flag to indicate cyclic transfers
*/
struct owl_dma_txd {
struct virt_dma_desc vd;
struct list_head lli_list;
+ bool cyclic;
};
/**
@@ -186,11 +189,15 @@ struct owl_dma_pchan {
 * @vc: wrapped virtual channel
* @pchan: the physical channel utilized by this channel
* @txd: active transaction on this channel
+ * @cfg: slave configuration for this channel
+ * @drq: physical DMA request ID for this channel
*/
struct owl_dma_vchan {
struct virt_dma_chan vc;
struct owl_dma_pchan *pchan;
struct owl_dma_txd *txd;
+ struct dma_slave_config cfg;
+ u8 drq;
};
/**
@@ -200,6 +207,7 @@ struct owl_dma_vchan {
* @clk: clock for the DMA controller
* @lock: a lock to use when change DMA controller global register
* @lli_pool: a pool for the LLI descriptors
+ * @irq: interrupt ID for the DMA controller
* @nr_pchans: the number of physical channels
* @pchans: array of data for the physical channels
 * @nr_vchans: the number of virtual channels
@@ -336,9 +344,11 @@ static struct owl_dma_lli *owl_dma_alloc_lli(struct owl_dma *od)
static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
struct owl_dma_lli *prev,
- struct owl_dma_lli *next)
+ struct owl_dma_lli *next,
+ bool is_cyclic)
{
- list_add_tail(&next->node, &txd->lli_list);
+ if (!is_cyclic)
+ list_add_tail(&next->node, &txd->lli_list);
if (prev) {
prev->hw.next_lli = next->phys;
@@ -351,7 +361,9 @@ static struct owl_dma_lli *owl_dma_add_lli(struct owl_dma_txd *txd,
static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
struct owl_dma_lli *lli,
dma_addr_t src, dma_addr_t dst,
- u32 len, enum dma_transfer_direction dir)
+ u32 len, enum dma_transfer_direction dir,
+ struct dma_slave_config *sconfig,
+ bool is_cyclic)
{
struct owl_dma_lli_hw *hw = &lli->hw;
u32 mode;
@@ -365,6 +377,32 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_MODE_DAM_INC;
break;
+ case DMA_MEM_TO_DEV:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DCU | OWL_DMA_MODE_DT_DEV
+ | OWL_DMA_MODE_SAM_INC | OWL_DMA_MODE_DAM_CONST;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->dst_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
+ case DMA_DEV_TO_MEM:
+ mode |= OWL_DMA_MODE_TS(vchan->drq)
+ | OWL_DMA_MODE_ST_DEV | OWL_DMA_MODE_DT_DCU
+ | OWL_DMA_MODE_SAM_CONST | OWL_DMA_MODE_DAM_INC;
+
+ /*
+ * Hardware only supports 32bit and 8bit buswidth. Since the
+ * default is 32bit, select 8bit only when requested.
+ */
+ if (sconfig->src_addr_width == DMA_SLAVE_BUSWIDTH_1_BYTE)
+ mode |= OWL_DMA_MODE_NDDBW_8BIT;
+
+ break;
default:
return -EINVAL;
}
@@ -381,7 +419,10 @@ static inline int owl_dma_cfg_lli(struct owl_dma_vchan *vchan,
OWL_DMA_LLC_SAV_LOAD_NEXT |
OWL_DMA_LLC_DAV_LOAD_NEXT);
- hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
+ if (is_cyclic)
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_BLOCK);
+ else
+ hw->ctrlb = llc_hw_ctrlb(OWL_DMA_INTCTL_SUPER_BLOCK);
return 0;
}
@@ -443,6 +484,16 @@ static void owl_dma_terminate_pchan(struct owl_dma *od,
spin_unlock_irqrestore(&od->lock, flags);
}
+static void owl_dma_pause_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 1, OWL_DMAX_PAUSE);
+}
+
+static void owl_dma_resume_pchan(struct owl_dma_pchan *pchan)
+{
+ pchan_writel(pchan, 0, OWL_DMAX_PAUSE);
+}
+
static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
{
struct owl_dma *od = to_owl_dma(vchan->vc.chan.device);
@@ -464,7 +515,10 @@ static int owl_dma_start_next_txd(struct owl_dma_vchan *vchan)
lli = list_first_entry(&txd->lli_list,
struct owl_dma_lli, node);
- int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
+ if (txd->cyclic)
+ int_ctl = OWL_DMA_INTCTL_BLOCK;
+ else
+ int_ctl = OWL_DMA_INTCTL_SUPER_BLOCK;
pchan_writel(pchan, OWL_DMAX_MODE, OWL_DMA_MODE_LME);
pchan_writel(pchan, OWL_DMAX_LINKLIST_CTL,
@@ -627,6 +681,54 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
return 0;
}
+static int owl_dma_config(struct dma_chan *chan,
+ struct dma_slave_config *config)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+
+ /* Reject definitely invalid configurations */
+ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
+ config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
+ return -EINVAL;
+
+ memcpy(&vchan->cfg, config, sizeof(struct dma_slave_config));
+
+ return 0;
+}
+
+static int owl_dma_pause(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_pause_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
+static int owl_dma_resume(struct dma_chan *chan)
+{
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ unsigned long flags;
+
+ if (!vchan->pchan && !vchan->txd)
+ return 0;
+
+ dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);
+
+ spin_lock_irqsave(&vchan->vc.lock, flags);
+
+ owl_dma_resume_pchan(vchan->pchan);
+
+ spin_unlock_irqrestore(&vchan->vc.lock, flags);
+
+ return 0;
+}
+
static u32 owl_dma_getbytes_chan(struct owl_dma_vchan *vchan)
{
struct owl_dma_pchan *pchan;
@@ -754,13 +856,14 @@ static struct dma_async_tx_descriptor
bytes = min_t(size_t, (len - offset), OWL_DMA_FRAME_MAX_LENGTH);
ret = owl_dma_cfg_lli(vchan, lli, src + offset, dst + offset,
- bytes, DMA_MEM_TO_MEM);
+ bytes, DMA_MEM_TO_MEM,
+ &vchan->cfg, txd->cyclic);
if (ret) {
dev_warn(chan2dev(chan), "failed to config lli\n");
goto err_txd_free;
}
- prev = owl_dma_add_lli(txd, prev, lli);
+ prev = owl_dma_add_lli(txd, prev, lli, false);
}
return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
@@ -770,6 +873,133 @@ err_txd_free:
return NULL;
}
+static struct dma_async_tx_descriptor
+ *owl_dma_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags, void *context)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL;
+ struct scatterlist *sg;
+ dma_addr_t addr, src = 0, dst = 0;
+ size_t len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if (len > OWL_DMA_FRAME_MAX_LENGTH) {
+ dev_err(od->dma.dev,
+ "frame length exceeds max supported length");
+ goto err_txd_free;
+ }
+
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_err(chan2dev(chan), "failed to allocate lli");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = addr;
+ dst = sconfig->dst_addr;
+ } else {
+ src = sconfig->src_addr;
+ dst = addr;
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, len, dir, sconfig,
+ txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli");
+ goto err_txd_free;
+ }
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
+
+static struct dma_async_tx_descriptor
+ *owl_prep_dma_cyclic(struct dma_chan *chan,
+ dma_addr_t buf_addr, size_t buf_len,
+ size_t period_len,
+ enum dma_transfer_direction dir,
+ unsigned long flags)
+{
+ struct owl_dma *od = to_owl_dma(chan->device);
+ struct owl_dma_vchan *vchan = to_owl_vchan(chan);
+ struct dma_slave_config *sconfig = &vchan->cfg;
+ struct owl_dma_txd *txd;
+ struct owl_dma_lli *lli, *prev = NULL, *first = NULL;
+ dma_addr_t src = 0, dst = 0;
+ unsigned int periods = buf_len / period_len;
+ int ret, i;
+
+ txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
+ if (!txd)
+ return NULL;
+
+ INIT_LIST_HEAD(&txd->lli_list);
+ txd->cyclic = true;
+
+ for (i = 0; i < periods; i++) {
+ lli = owl_dma_alloc_lli(od);
+ if (!lli) {
+ dev_warn(chan2dev(chan), "failed to allocate lli");
+ goto err_txd_free;
+ }
+
+ if (dir == DMA_MEM_TO_DEV) {
+ src = buf_addr + (period_len * i);
+ dst = sconfig->dst_addr;
+ } else if (dir == DMA_DEV_TO_MEM) {
+ src = sconfig->src_addr;
+ dst = buf_addr + (period_len * i);
+ }
+
+ ret = owl_dma_cfg_lli(vchan, lli, src, dst, period_len,
+ dir, sconfig, txd->cyclic);
+ if (ret) {
+ dev_warn(chan2dev(chan), "failed to config lli");
+ goto err_txd_free;
+ }
+
+ if (!first)
+ first = lli;
+
+ prev = owl_dma_add_lli(txd, prev, lli, false);
+ }
+
+ /* close the cyclic list */
+ owl_dma_add_lli(txd, prev, first, true);
+
+ return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
+
+err_txd_free:
+ owl_dma_free_txd(od, txd);
+
+ return NULL;
+}
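
The cyclic prep builds one lli per period and then closes the ring by pointing the last element back at the first; passing true to owl_dma_add_lli() for that final link keeps the closing element off lli_list, so software list walks stay finite while the hardware chain is circular. A tiny standalone sketch:

struct lli { struct lli *next; };

static void close_ring(struct lli *first, struct lli *last)
{
	/* hardware chain becomes circular; the software list stays linear
	   because this element is never re-queued on lli_list */
	last->next = first;
}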
+
static void owl_dma_free_chan_resources(struct dma_chan *chan)
{
struct owl_dma_vchan *vchan = to_owl_vchan(chan);
@@ -790,6 +1020,27 @@ static inline void owl_dma_free(struct owl_dma *od)
}
}
+static struct dma_chan *owl_dma_of_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct owl_dma *od = ofdma->of_dma_data;
+ struct owl_dma_vchan *vchan;
+ struct dma_chan *chan;
+ u8 drq = dma_spec->args[0];
+
+ if (drq > od->nr_vchans)
+ return NULL;
+
+ chan = dma_get_any_slave_channel(&od->dma);
+ if (!chan)
+ return NULL;
+
+ vchan = to_owl_vchan(chan);
+ vchan->drq = drq;
+
+ return chan;
+}
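
The xlate callback turns a one-cell DT specifier into a channel: args[0] carries the device request line (DRQ), which is stored on whichever virtual channel dma_get_any_slave_channel() hands back. A comment-form example of a matching consumer node, assuming a single-cell binding:

/*
 *	uart0: serial@... {
 *		dmas = <&dma 22>, <&dma 23>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * args[0] (22 or 23 here) becomes vchan->drq in owl_dma_of_xlate().
 */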
+
static int owl_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -833,12 +1084,19 @@ static int owl_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
dma_cap_set(DMA_MEMCPY, od->dma.cap_mask);
+ dma_cap_set(DMA_SLAVE, od->dma.cap_mask);
+ dma_cap_set(DMA_CYCLIC, od->dma.cap_mask);
od->dma.dev = &pdev->dev;
od->dma.device_free_chan_resources = owl_dma_free_chan_resources;
od->dma.device_tx_status = owl_dma_tx_status;
od->dma.device_issue_pending = owl_dma_issue_pending;
od->dma.device_prep_dma_memcpy = owl_dma_prep_memcpy;
+ od->dma.device_prep_slave_sg = owl_dma_prep_slave_sg;
+ od->dma.device_prep_dma_cyclic = owl_prep_dma_cyclic;
+ od->dma.device_config = owl_dma_config;
+ od->dma.device_pause = owl_dma_pause;
+ od->dma.device_resume = owl_dma_resume;
od->dma.device_terminate_all = owl_dma_terminate_all;
od->dma.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
od->dma.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
@@ -910,8 +1168,18 @@ static int owl_dma_probe(struct platform_device *pdev)
goto err_pool_free;
}
+ /* Device-tree DMA controller registration */
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ owl_dma_of_xlate, od);
+ if (ret) {
+ dev_err(&pdev->dev, "of_dma_controller_register failed\n");
+ goto err_dma_unregister;
+ }
+
return 0;
+err_dma_unregister:
+ dma_async_device_unregister(&od->dma);
err_pool_free:
clk_disable_unprepare(od->clk);
dma_pool_destroy(od->lli_pool);
@@ -923,6 +1191,7 @@ static int owl_dma_remove(struct platform_device *pdev)
{
struct owl_dma *od = platform_get_drvdata(pdev);
+ of_dma_controller_free(pdev->dev.of_node);
dma_async_device_unregister(&od->dma);
/* Mask all interrupts for this execution environment */
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 4cf0d4d0cecf..25610286979f 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4360,7 +4360,7 @@ static ssize_t enable_store(struct device_driver *dev, const char *buf,
}
static DRIVER_ATTR_RW(enable);
-static ssize_t poly_store(struct device_driver *dev, char *buf)
+static ssize_t poly_show(struct device_driver *dev, char *buf)
{
ssize_t size = 0;
u32 reg;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index b31c28b67ad3..825725057e00 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -1285,7 +1285,6 @@ static int pxad_remove(struct platform_device *op)
pxad_cleanup_debugfs(pdev);
pxad_free_channels(&pdev->slave);
- dma_async_device_unregister(&pdev->slave);
return 0;
}
@@ -1396,7 +1395,7 @@ static int pxad_init_dmadev(struct platform_device *op,
init_waitqueue_head(&c->wq_state);
}
- return dma_async_device_register(&pdev->slave);
+ return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
@@ -1433,7 +1432,7 @@ static int pxad_probe(struct platform_device *op)
"#dma-requests set to default 32 as missing in OF: %d",
ret);
nb_requestors = 32;
- };
+ }
} else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
nb_requestors = pdata->nb_requestors;
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 48ee35e2bce6..74fa2b1a6a86 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -198,6 +198,7 @@ struct rcar_dmac {
struct dma_device engine;
struct device *dev;
void __iomem *iomem;
+ struct device_dma_parameters parms;
unsigned int n_channels;
struct rcar_dmac_chan *channels;
@@ -1792,6 +1793,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
+ dmac->dev->dma_parms = &dmac->parms;
+ dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
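
Registering dma_parms and clamping the maximum segment size keeps scatterlist segments within what the DMATCR transfer counter can express, so dma_map_sg() callers that honour dma_get_max_seg_size() never hand the driver an oversized segment. A sketch of the pattern using the real helpers:

#include <linux/dma-mapping.h>

static void clamp_segments(struct device *dev,
			   struct device_dma_parameters *parms,
			   unsigned int max)
{
	dev->dma_parms = parms;		/* storage for the per-device limit */
	dma_set_max_seg_size(dev, max);	/* dma_get_max_seg_size() now
					   returns max to mapping code */
}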
diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h
index a1b0ef45d6a2..7459f9a13b5b 100644
--- a/drivers/dma/sh/shdma-arm.h
+++ b/drivers/dma/sh/shdma-arm.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#ifndef SHDMA_ARM_H
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 6b5626e299b2..c51de498b5b4 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Dmaengine driver base library for DMA controllers, found on SH-based SoCs
*
@@ -7,10 +8,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index f999f9b0d314..be89dd894328 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SHDMA Device Tree glue
*
* Copyright (C) 2013 Renesas Electronics Inc.
* Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/shdma-r8a73a4.c b/drivers/dma/sh/shdma-r8a73a4.c
index 96ea3828c3eb..ddc9a3578353 100644
--- a/drivers/dma/sh/shdma-r8a73a4.c
+++ b/drivers/dma/sh/shdma-r8a73a4.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SuperH DMA Engine support for r8a73a4 (APE6) SoCs
*
* Copyright (C) 2013 Renesas Electronics, Inc.
- *
- * This is free software; you can redistribute it and/or modify it under the
- * terms of version 2 the GNU General Public License as published by the Free
- * Software Foundation.
*/
#include <linux/sh_dma.h>
diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h
index 2c0a969adc9f..bfb69909bd19 100644
--- a/drivers/dma/sh/shdma.h
+++ b/drivers/dma/sh/shdma.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Renesas SuperH DMA Engine support
*
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#ifndef __DMA_SHDMA_H
#define __DMA_SHDMA_H
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 04a74e0a95b7..7971ea275387 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas SuperH DMA Engine support
*
@@ -8,11 +9,6 @@
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
*
- * This is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
* - DMA of SuperH does not have Hardware DMA chain mode.
* - MAX DMA size is 16MB.
*
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 69b9564dc9d9..30cc3553cb8b 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas SUDMAC support
*
@@ -8,10 +9,6 @@
* Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
* Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
* Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/dmaengine.h>
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 1bb1a8e09025..7f7184c3cf95 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas USB DMA Controller Driver
*
@@ -6,10 +7,6 @@
* based on rcar-dmac.c
* Copyright (C) 2014 Renesas Electronics Inc.
* Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
- *
- * This is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
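An aside on the SPDX conversions above: the kernel's license-rules convention, visible in these hunks themselves, is that .c sources take the C++-style identifier on line 1 while headers (shdma.h above) keep the C-style comment. A minimal sketch of the two forms:

// SPDX-License-Identifier: GPL-2.0
/* example.c: C source files use the C++ comment style for the tag. */

and for a header:

/* SPDX-License-Identifier: GPL-2.0 */
/* example.h: headers keep the C comment style for the tag. */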
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 55df0d41355b..38d4e4f07c66 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -68,6 +68,7 @@
/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN BIT(0)
+#define SPRD_DMA_LINKLIST_EN BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET 24
#define SPRD_DMA_DONOT_WAIT_BDONE 1
@@ -103,7 +104,7 @@
#define SPRD_DMA_REQ_MODE_MASK GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET 21
#define SPRD_DMA_FIX_EN_OFFSET 20
-#define SPRD_DMA_LLIST_END_OFFSET 19
+#define SPRD_DMA_LLIST_END BIT(19)
#define SPRD_DMA_FRG_LEN_MASK GENMASK(16, 0)
/* SPRD_DMA_CHN_BLK_LEN register definition */
@@ -164,6 +165,7 @@ struct sprd_dma_desc {
struct sprd_dma_chn {
struct virt_dma_chan vc;
void __iomem *chn_base;
+ struct sprd_dma_linklist linklist;
struct dma_slave_config slave_cfg;
u32 chn_num;
u32 dev_id;
@@ -582,7 +584,8 @@ static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
}
static int sprd_dma_fill_desc(struct dma_chan *chan,
- struct sprd_dma_desc *sdesc,
+ struct sprd_dma_chn_hw *hw,
+ unsigned int sglen, int sg_index,
dma_addr_t src, dma_addr_t dst, u32 len,
enum dma_transfer_direction dir,
unsigned long flags,
@@ -590,7 +593,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
{
struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
- struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;
u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
u32 int_mode = flags & SPRD_DMA_INT_MASK;
int src_datawidth, dst_datawidth, src_step, dst_step;
@@ -670,12 +672,52 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
hw->trsf_step = temp;
+ /* link-list configuration */
+ if (schan->linklist.phy_addr) {
+ if (sg_index == sglen - 1)
+ hw->frg_len |= SPRD_DMA_LLIST_END;
+
+ hw->cfg |= SPRD_DMA_LINKLIST_EN;
+
+ /* link-list index */
+ temp = (sg_index + 1) % sglen;
+ /* Next link-list configuration's physical address offset */
+ temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
+ /*
+ * Set the link-list pointer to point to the next link-list
+ * configuration's physical address.
+ */
+ hw->llist_ptr = schan->linklist.phy_addr + temp;
+ } else {
+ hw->llist_ptr = 0;
+ }
+
hw->frg_step = 0;
hw->src_blk_step = 0;
hw->des_blk_step = 0;
return 0;
}
+static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
+ unsigned int sglen, int sg_index,
+ dma_addr_t src, dma_addr_t dst, u32 len,
+ enum dma_transfer_direction dir,
+ unsigned long flags,
+ struct dma_slave_config *slave_cfg)
+{
+ struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+ struct sprd_dma_chn_hw *hw;
+
+ if (!schan->linklist.virt_addr)
+ return -EINVAL;
+
+ hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
+ sg_index * sizeof(*hw));
+
+ return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
+ dir, flags, slave_cfg);
+}
+
static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
size_t len, unsigned long flags)
@@ -744,10 +786,20 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 len = 0;
int ret, i;
- /* TODO: now we only support one sg for each DMA configuration. */
- if (!is_slave_direction(dir) || sglen > 1)
+ if (!is_slave_direction(dir))
return NULL;
+ if (context) {
+ struct sprd_dma_linklist *ll_cfg =
+ (struct sprd_dma_linklist *)context;
+
+ schan->linklist.phy_addr = ll_cfg->phy_addr;
+ schan->linklist.virt_addr = ll_cfg->virt_addr;
+ } else {
+ schan->linklist.phy_addr = 0;
+ schan->linklist.virt_addr = 0;
+ }
+
sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
if (!sdesc)
return NULL;
@@ -762,10 +814,25 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
src = slave_cfg->src_addr;
dst = sg_dma_address(sg);
}
+
+ /*
+ * Link-list mode needs at least 2 link-list
+ * configurations. If there is only one sg, there is
+ * nothing to chain, so skip the link-list configuration.
+ */
+ if (sglen < 2)
+ break;
+
+ ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
+ dir, flags, slave_cfg);
+ if (ret) {
+ kfree(sdesc);
+ return NULL;
+ }
}
- ret = sprd_dma_fill_desc(chan, sdesc, src, dst, len, dir, flags,
- slave_cfg);
+ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
+ dir, flags, slave_cfg);
if (ret) {
kfree(sdesc);
return NULL;
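For orientation, a minimal client-side sketch of how link-list memory might reach this driver: sprd_dma_prep_slave_sg() above casts the opaque context argument of device_prep_slave_sg() to struct sprd_dma_linklist and copies its phy_addr/virt_addr fields. The helper name below is hypothetical, error/lifetime handling is elided, and the sizing assumes the per-entry hardware descriptor size (struct sprd_dma_chn_hw above) is known to the client:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

static struct dma_async_tx_descriptor *
client_prep_linklist_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sglen, enum dma_transfer_direction dir,
                        unsigned long flags)
{
        struct sprd_dma_linklist ll_cfg;
        dma_addr_t phy_addr;
        void *cpu_addr;

        /*
         * One hardware configuration slot per sg entry; the driver chains
         * them by pointing each llist_ptr at the next slot's physical
         * address, wrapping at sglen.
         */
        cpu_addr = dma_alloc_coherent(chan->device->dev,
                                      sglen * sizeof(struct sprd_dma_chn_hw),
                                      &phy_addr, GFP_KERNEL);
        if (!cpu_addr)
                return NULL;

        ll_cfg.virt_addr = (unsigned long)cpu_addr;
        ll_cfg.phy_addr = phy_addr;

        /*
         * The prep hook copies ll_cfg's fields, so stack storage is fine;
         * the coherent buffer itself must outlive the transfer.
         */
        return chan->device->device_prep_slave_sg(chan, sgl, sglen, dir,
                                                  flags, &ll_cfg);
}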
diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
index bfb79bd0c6de..07c20aa2e955 100644
--- a/drivers/dma/st_fdma.c
+++ b/drivers/dma/st_fdma.c
@@ -833,7 +833,7 @@ static int st_fdma_probe(struct platform_device *pdev)
fdev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
- ret = dma_async_device_register(&fdev->dma_device);
+ ret = dmaenginem_async_device_register(&fdev->dma_device);
if (ret) {
dev_err(&pdev->dev,
"Failed to register DMA device (%d)\n", ret);
@@ -844,15 +844,13 @@ static int st_fdma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Failed to register controller (%d)\n", ret);
- goto err_dma_dev;
+ goto err_rproc;
}
dev_info(&pdev->dev, "ST FDMA engine driver, irq:%d\n", fdev->irq);
return 0;
-err_dma_dev:
- dma_async_device_unregister(&fdev->dma_device);
err_rproc:
st_fdma_free(fdev);
st_slim_rproc_put(fdev->slim_rproc);
@@ -867,7 +865,6 @@ static int st_fdma_remove(struct platform_device *pdev)
devm_free_irq(&pdev->dev, fdev->irq, fdev);
st_slim_rproc_put(fdev->slim_rproc);
of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&fdev->dma_device);
return 0;
}
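The st_fdma hunks (and the ste_dma40 and stm32-mdma ones below) switch to the device-managed registration helper, which is why the matching dma_async_device_unregister() calls disappear from the error paths and .remove(). A minimal probe sketch of the pattern, with hypothetical names:

static int foo_dma_probe(struct platform_device *pdev)
{
        struct foo_dev *fdev;
        int ret;

        fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
        if (!fdev)
                return -ENOMEM;

        /* ... fill in fdev->dma_device capabilities and channel lists ... */

        /*
         * Managed variant: the core unregisters the device automatically
         * on driver detach, so later error paths need no unwind step.
         */
        ret = dmaenginem_async_device_register(&fdev->dma_device);
        if (ret)
                dev_err(&pdev->dev, "failed to register DMA device (%d)\n",
                        ret);

        return ret;
}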
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index f4edfc56f34e..5e328bd10c27 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2839,7 +2839,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_slave);
- err = dma_async_device_register(&base->dma_slave);
+ err = dmaenginem_async_device_register(&base->dma_slave);
if (err) {
d40_err(base->dev, "Failed to register slave channels\n");
@@ -2854,12 +2854,12 @@ static int __init d40_dmaengine_init(struct d40_base *base,
d40_ops_init(base, &base->dma_memcpy);
- err = dma_async_device_register(&base->dma_memcpy);
+ err = dmaenginem_async_device_register(&base->dma_memcpy);
if (err) {
d40_err(base->dev,
"Failed to register memcpy only channels\n");
- goto unregister_slave;
+ goto exit;
}
d40_chan_init(base, &base->dma_both, base->phy_chans,
@@ -2871,18 +2871,14 @@ static int __init d40_dmaengine_init(struct d40_base *base,
dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
d40_ops_init(base, &base->dma_both);
- err = dma_async_device_register(&base->dma_both);
+ err = dmaenginem_async_device_register(&base->dma_both);
if (err) {
d40_err(base->dev,
"Failed to register logical and physical capable channels\n");
- goto unregister_memcpy;
+ goto exit;
}
return 0;
- unregister_memcpy:
- dma_async_device_unregister(&base->dma_memcpy);
- unregister_slave:
- dma_async_device_unregister(&base->dma_slave);
exit:
return err;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 379e8d534e61..4903a408fc14 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -308,20 +308,12 @@ static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
- switch (threshold) {
- case STM32_DMA_FIFO_THRESHOLD_FULL:
- if (buf_len >= STM32_DMA_MAX_BURST)
- return true;
- else
- return false;
- case STM32_DMA_FIFO_THRESHOLD_HALFFULL:
- if (buf_len >= STM32_DMA_MAX_BURST / 2)
- return true;
- else
- return false;
- default:
- return false;
- }
+ /*
+ * The buffer or period length has to be a multiple of the FIFO
+ * threshold depth. Otherwise bytes may be left stuck in the FIFO
+ * at the end of the buffer or period.
+ */
+ return ((buf_len % ((threshold + 1) * 4)) == 0);
}
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
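The rewritten stm32_dma_is_burst_possible() folds the old switch into one alignment test: with the threshold encoded so that (threshold + 1) * 4 is the selected FIFO level in bytes, bursting is safe only when the buffer or period length is a multiple of that level. A standalone check of the arithmetic, assuming the usual encodings (HALFFULL = 1, FULL = 3):

#include <assert.h>

/* Mirrors the new test: FIFO level in bytes is (threshold + 1) * 4. */
static int burst_possible(unsigned int buf_len, unsigned int threshold)
{
        return (buf_len % ((threshold + 1) * 4)) == 0;
}

int main(void)
{
        assert(burst_possible(32, 3));   /* 32 % 16 == 0: full FIFO ok   */
        assert(!burst_possible(24, 3));  /* 24 % 16 == 8: bytes stranded */
        assert(burst_possible(24, 1));   /* 24 % 8 == 0: half FIFO ok    */
        return 0;
}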
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 06dd1725375e..390e4cae0e1a 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1656,7 +1656,7 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return ret;
}
- ret = dma_async_device_register(dd);
+ ret = dmaenginem_async_device_register(dd);
if (ret)
return ret;
@@ -1674,8 +1674,6 @@ static int stm32_mdma_probe(struct platform_device *pdev)
return 0;
err_unregister:
- dma_async_device_unregister(dd);
-
return ret;
}
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 395c698edb4d..fc0f9c8766a8 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -545,7 +545,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_MEM_TO_DEV);
+ td_desc->desc_list_len, DMA_TO_DEVICE);
return &td_desc->txd;
}
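The timb_dma one-liner is a type fix as much as a semantic one: dma_sync_single_for_device() takes an enum dma_data_direction describing how the mapped buffer moves between CPU and device, and the descriptor list here is CPU-written and device-read, so it must be synced DMA_TO_DEVICE with the same direction used at map time; DMA_MEM_TO_DEV is an unrelated enum dma_transfer_direction value. The correct pairing, sketched with hypothetical buffer names:

#include <linux/dma-mapping.h>

/* Map the CPU-built descriptor list for the device to read. */
dma_addr_t handle = dma_map_single(dev, desc_list, desc_list_len,
                                   DMA_TO_DEVICE);

/* ... CPU fills in or edits desc_list ... */

/*
 * Flush the CPU's writes so the device sees the updated descriptors;
 * the direction must match the one given to dma_map_single().
 */
dma_sync_single_for_device(dev, handle, desc_list_len, DMA_TO_DEVICE);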
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index ab7c5a937ab0..c89d82aa2776 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -69,25 +69,6 @@ static const struct altr_sdram_prv_data a10_data = {
.ue_set_mask = A10_DIAGINT_TDERRA_MASK,
};
-static const struct altr_sdram_prv_data s10_data = {
- .ecc_ctrl_offset = S10_ECCCTRL1_OFST,
- .ecc_ctl_en_mask = A10_ECCCTRL1_ECC_EN,
- .ecc_stat_offset = S10_INTSTAT_OFST,
- .ecc_stat_ce_mask = A10_INTSTAT_SBEERR,
- .ecc_stat_ue_mask = A10_INTSTAT_DBEERR,
- .ecc_saddr_offset = S10_SERRADDR_OFST,
- .ecc_daddr_offset = S10_DERRADDR_OFST,
- .ecc_irq_en_offset = S10_ERRINTEN_OFST,
- .ecc_irq_en_mask = A10_ECC_IRQ_EN_MASK,
- .ecc_irq_clr_offset = S10_INTSTAT_OFST,
- .ecc_irq_clr_mask = (A10_INTSTAT_SBEERR | A10_INTSTAT_DBEERR),
- .ecc_cnt_rst_offset = S10_ECCCTRL1_OFST,
- .ecc_cnt_rst_mask = A10_ECC_CNT_RESET_MASK,
- .ce_ue_trgr_offset = S10_DIAGINTTEST_OFST,
- .ce_set_mask = A10_DIAGINT_TSERRA_MASK,
- .ue_set_mask = A10_DIAGINT_TDERRA_MASK,
-};
-
/*********************** EDAC Memory Controller Functions ****************/
/* The SDRAM controller uses the EDAC Memory Controller framework. */
@@ -239,7 +220,7 @@ static unsigned long get_total_mem(void)
static const struct of_device_id altr_sdram_ctrl_of_match[] = {
{ .compatible = "altr,sdram-edac", .data = &c5_data},
{ .compatible = "altr,sdram-edac-a10", .data = &a10_data},
- { .compatible = "altr,sdram-edac-s10", .data = &s10_data},
+ { .compatible = "altr,sdram-edac-s10", .data = &a10_data},
{},
};
MODULE_DEVICE_TABLE(of, altr_sdram_ctrl_of_match);
@@ -293,6 +274,7 @@ release:
return ret;
}
+static int socfpga_is_a10(void);
static int altr_sdram_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
@@ -416,7 +398,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
goto err;
/* Only the Arria10 has separate IRQs */
- if (irq2 > 0) {
+ if (socfpga_is_a10()) {
/* Arria10 specific initialization */
res = a10_init(mc_vbase);
if (res < 0)
@@ -502,8 +484,9 @@ static int s10_protected_reg_write(void *context, unsigned int reg,
unsigned int val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, reg, val, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_WRITE, offset + reg, val, 0, 0,
0, 0, 0, &result);
return (int)result.a0;
@@ -523,8 +506,9 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
unsigned int *val)
{
struct arm_smccc_res result;
+ unsigned long offset = (unsigned long)context;
- arm_smccc_smc(INTEL_SIP_SMC_REG_READ, reg, 0, 0, 0,
+ arm_smccc_smc(INTEL_SIP_SMC_REG_READ, offset + reg, 0, 0, 0,
0, 0, 0, &result);
*val = (unsigned int)result.a1;
@@ -532,246 +516,18 @@ static int s10_protected_reg_read(void *context, unsigned int reg,
return (int)result.a0;
}
-static bool s10_sdram_writeable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_readable_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
-static bool s10_sdram_volatile_reg(struct device *dev, unsigned int reg)
-{
- switch (reg) {
- case S10_ECCCTRL1_OFST:
- case S10_ERRINTEN_OFST:
- case S10_INTMODE_OFST:
- case S10_INTSTAT_OFST:
- case S10_DERRADDR_OFST:
- case S10_SERRADDR_OFST:
- case S10_DIAGINTTEST_OFST:
- case S10_SYSMGR_ECC_INTMASK_VAL_OFST:
- case S10_SYSMGR_ECC_INTMASK_SET_OFST:
- case S10_SYSMGR_ECC_INTMASK_CLR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_SERR_OFST:
- case S10_SYSMGR_ECC_INTSTAT_DERR_OFST:
- return true;
- }
- return false;
-}
-
static const struct regmap_config s10_sdram_regmap_cfg = {
.name = "s10_ddr",
.reg_bits = 32,
.reg_stride = 4,
.val_bits = 32,
- .max_register = 0xffffffff,
- .writeable_reg = s10_sdram_writeable_reg,
- .readable_reg = s10_sdram_readable_reg,
- .volatile_reg = s10_sdram_volatile_reg,
+ .max_register = 0xffd12228,
.reg_read = s10_protected_reg_read,
.reg_write = s10_protected_reg_write,
.use_single_read = true,
.use_single_write = true,
};
-static int altr_s10_sdram_probe(struct platform_device *pdev)
-{
- const struct of_device_id *id;
- struct edac_mc_layer layers[2];
- struct mem_ctl_info *mci;
- struct altr_sdram_mc_data *drvdata;
- const struct altr_sdram_prv_data *priv;
- struct regmap *regmap;
- struct dimm_info *dimm;
- u32 read_reg;
- int irq, ret = 0;
- unsigned long mem_size;
-
- id = of_match_device(altr_sdram_ctrl_of_match, &pdev->dev);
- if (!id)
- return -ENODEV;
-
- /* Grab specific offsets and masks for Stratix10 */
- priv = of_match_node(altr_sdram_ctrl_of_match,
- pdev->dev.of_node)->data;
-
- regmap = devm_regmap_init(&pdev->dev, NULL, (void *)priv,
- &s10_sdram_regmap_cfg);
- if (IS_ERR(regmap))
- return PTR_ERR(regmap);
-
- /* Validate the SDRAM controller has ECC enabled */
- if (regmap_read(regmap, priv->ecc_ctrl_offset, &read_reg) ||
- ((read_reg & priv->ecc_ctl_en_mask) != priv->ecc_ctl_en_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No ECC/ECC disabled [0x%08X]\n", read_reg);
- return -ENODEV;
- }
-
- /* Grab memory size from device tree. */
- mem_size = get_total_mem();
- if (!mem_size) {
- edac_printk(KERN_ERR, EDAC_MC, "Unable to calculate memory size\n");
- return -ENODEV;
- }
-
- /* Ensure the SDRAM Interrupt is disabled */
- if (regmap_update_bits(regmap, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error disabling SDRAM ECC IRQ\n");
- return -ENODEV;
- }
-
- /* Toggle to clear the SDRAM Error count */
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask,
- priv->ecc_cnt_rst_mask)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- if (regmap_update_bits(regmap, priv->ecc_cnt_rst_offset,
- priv->ecc_cnt_rst_mask, 0)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- edac_printk(KERN_ERR, EDAC_MC,
- "No irq %d in DT\n", irq);
- return -ENODEV;
- }
-
- layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
- layers[0].size = 1;
- layers[0].is_virt_csrow = true;
- layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = 1;
- layers[1].is_virt_csrow = false;
- mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
- sizeof(struct altr_sdram_mc_data));
- if (!mci)
- return -ENOMEM;
-
- mci->pdev = &pdev->dev;
- drvdata = mci->pvt_info;
- drvdata->mc_vbase = regmap;
- drvdata->data = priv;
- platform_set_drvdata(pdev, mci);
-
- if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Unable to get managed device resource\n");
- ret = -ENOMEM;
- goto free;
- }
-
- mci->mtype_cap = MEM_FLAG_DDR3;
- mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
- mci->edac_cap = EDAC_FLAG_SECDED;
- mci->mod_name = EDAC_MOD_STR;
- mci->ctl_name = dev_name(&pdev->dev);
- mci->scrub_mode = SCRUB_SW_SRC;
- mci->dev_name = dev_name(&pdev->dev);
-
- dimm = *mci->dimms;
- dimm->nr_pages = ((mem_size - 1) >> PAGE_SHIFT) + 1;
- dimm->grain = 8;
- dimm->dtype = DEV_X8;
- dimm->mtype = MEM_DDR3;
- dimm->edac_mode = EDAC_SECDED;
-
- ret = edac_mc_add_mc(mci);
- if (ret < 0)
- goto err;
-
- ret = devm_request_irq(&pdev->dev, irq, altr_sdram_mc_err_handler,
- IRQF_SHARED, dev_name(&pdev->dev), mci);
- if (ret < 0) {
- edac_mc_printk(mci, KERN_ERR,
- "Unable to request irq %d\n", irq);
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_write(regmap, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- S10_DDR0_IRQ_MASK)) {
- edac_printk(KERN_ERR, EDAC_MC,
- "Error clearing SDRAM ECC count\n");
- ret = -ENODEV;
- goto err2;
- }
-
- if (regmap_update_bits(drvdata->mc_vbase, priv->ecc_irq_en_offset,
- priv->ecc_irq_en_mask, priv->ecc_irq_en_mask)) {
- edac_mc_printk(mci, KERN_ERR,
- "Error enabling SDRAM ECC IRQ\n");
- ret = -ENODEV;
- goto err2;
- }
-
- altr_sdr_mc_create_debugfs_nodes(mci);
-
- devres_close_group(&pdev->dev, NULL);
-
- return 0;
-
-err2:
- edac_mc_del_mc(&pdev->dev);
-err:
- devres_release_group(&pdev->dev, NULL);
-free:
- edac_mc_free(mci);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC Probe Failed; Error %d\n", ret);
-
- return ret;
-}
-
-static int altr_s10_sdram_remove(struct platform_device *pdev)
-{
- struct mem_ctl_info *mci = platform_get_drvdata(pdev);
-
- edac_mc_del_mc(&pdev->dev);
- edac_mc_free(mci);
- platform_set_drvdata(pdev, NULL);
-
- return 0;
-}
-
/************** </Stratix10 EDAC Memory Controller Functions> ***********/
/*
@@ -805,20 +561,6 @@ static struct platform_driver altr_sdram_edac_driver = {
module_platform_driver(altr_sdram_edac_driver);
-static struct platform_driver altr_s10_sdram_edac_driver = {
- .probe = altr_s10_sdram_probe,
- .remove = altr_s10_sdram_remove,
- .driver = {
- .name = "altr_s10_sdram_edac",
-#ifdef CONFIG_PM
- .pm = &altr_sdram_pm_ops,
-#endif
- .of_match_table = altr_sdram_ctrl_of_match,
- },
-};
-
-module_platform_driver(altr_s10_sdram_edac_driver);
-
/************************* EDAC Parent Probe *************************/
static const struct of_device_id altr_edac_device_of_match[];
@@ -972,6 +714,16 @@ static const struct file_operations altr_edac_a10_device_inject_fops = {
.llseek = generic_file_llseek,
};
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos);
+
+static const struct file_operations altr_edac_a10_device_inject2_fops = {
+ .open = simple_open,
+ .write = altr_edac_a10_device_trig2,
+ .llseek = generic_file_llseek,
+};
+
static void altr_create_edacdev_dbgfs(struct edac_device_ctl_info *edac_dci,
const struct edac_device_prv_data *priv)
{
@@ -1253,6 +1005,16 @@ static int __maybe_unused altr_init_memory_port(void __iomem *ioaddr, int port)
return ret;
}
+static int socfpga_is_a10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-arria10");
+}
+
+static int socfpga_is_s10(void)
+{
+ return of_machine_is_compatible("altr,socfpga-stratix10");
+}
+
static __init int __maybe_unused
altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
u32 ecc_ctrl_en_mask, bool dual_port)
@@ -1267,8 +1029,32 @@ altr_init_a10_ecc_block(struct device_node *np, u32 irq_mask,
/* Get the ECC Manager - parent of the device EDACs */
np_eccmgr = of_get_parent(np);
- ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
- "altr,sysmgr-syscon");
+
+ if (socfpga_is_a10()) {
+ ecc_mgr_map = syscon_regmap_lookup_by_phandle(np_eccmgr,
+ "altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(np_eccmgr,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need the physical address for the SMC call */
+ base = res.start;
+
+ ecc_mgr_map = regmap_init(NULL, NULL, (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
of_node_put(np_eccmgr);
if (IS_ERR(ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
@@ -1326,11 +1112,6 @@ out:
return ret;
}
-static int socfpga_is_a10(void)
-{
- return of_machine_is_compatible("altr,socfpga-arria10");
-}
-
static int validate_parent_available(struct device_node *np);
static const struct of_device_id altr_edac_a10_device_of_match[];
static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
@@ -1338,7 +1119,7 @@ static int __init __maybe_unused altr_init_a10_ecc_device_type(char *compat)
int irq;
struct device_node *child, *np;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
np = of_find_compatible_node(NULL, NULL,
@@ -1584,7 +1365,7 @@ static const struct edac_device_prv_data a10_enetecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_ethernet_ecc(void)
@@ -1662,7 +1443,7 @@ static const struct edac_device_prv_data a10_usbecc_data = {
.ue_set_mask = ALTR_A10_ECC_TDERRA,
.set_err_ofst = ALTR_A10_ECC_INTTEST_OFST,
.ecc_irq_handler = altr_edac_a10_ecc_irq,
- .inject_fops = &altr_edac_a10_device_inject_fops,
+ .inject_fops = &altr_edac_a10_device_inject2_fops,
};
static int __init socfpga_init_usb_ecc(void)
@@ -1860,7 +1641,7 @@ static int __init socfpga_init_sdmmc_ecc(void)
int rc = -ENODEV;
struct device_node *child;
- if (!socfpga_is_a10())
+ if (!socfpga_is_a10() && !socfpga_is_s10())
return -ENODEV;
child = of_find_compatible_node(NULL, NULL, "altr,socfpga-sdmmc-ecc");
@@ -1944,6 +1725,74 @@ static ssize_t altr_edac_a10_device_trig(struct file *file,
writel(priv->ue_set_mask, set_addr);
else
writel(priv->ce_set_mask, set_addr);
+
+ /* Ensure the interrupt test bits are set */
+ wmb();
+ local_irq_restore(flags);
+
+ return count;
+}
+
+/*
+ * The Stratix10 EDAC error injection functions differ slightly from
+ * the Arria10 ones. A few Arria10 peripherals can also use this
+ * injection function. Inject the error into the memory and then read
+ * it back to trigger the IRQ.
+ */
+static ssize_t altr_edac_a10_device_trig2(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct edac_device_ctl_info *edac_dci = file->private_data;
+ struct altr_edac_device_dev *drvdata = edac_dci->pvt_info;
+ const struct edac_device_prv_data *priv = drvdata->data;
+ void __iomem *set_addr = (drvdata->base + priv->set_err_ofst);
+ unsigned long flags;
+ u8 trig_type;
+
+ if (!user_buf || get_user(trig_type, user_buf))
+ return -EFAULT;
+
+ local_irq_save(flags);
+ if (trig_type == ALTR_UE_TRIGGER_CHAR) {
+ writel(priv->ue_set_mask, set_addr);
+ } else {
+ /* Setup write of 0 to first 4 bytes */
+ writel(0x0, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Setup write of 4 bytes */
+ writel(ECC_WORD_WRITE, drvdata->base + ECC_BLK_DBYTECTRL_OFST);
+ /* Setup Address to 0 */
+ writel(0x0, drvdata->base + ECC_BLK_ADDRESS_OFST);
+ /* Setup accctrl to write & data override */
+ writel(ECC_WRITE_DOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc override */
+ writel(ECC_READ_EOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup write for single bit change */
+ writel(0x1, drvdata->base + ECC_BLK_WDATA0_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA1_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA2_OFST);
+ writel(0x0, drvdata->base + ECC_BLK_WDATA3_OFST);
+ /* Copy Read ECC to Write ECC */
+ writel(readl(drvdata->base + ECC_BLK_RECC0_OFST),
+ drvdata->base + ECC_BLK_WECC0_OFST);
+ writel(readl(drvdata->base + ECC_BLK_RECC1_OFST),
+ drvdata->base + ECC_BLK_WECC1_OFST);
+ /* Setup accctrl to write & ecc override & data override */
+ writel(ECC_WRITE_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ /* Setup accctrl to read & ecc override & data override */
+ writel(ECC_READ_EDOVR, drvdata->base + ECC_BLK_ACCCTRL_OFST);
+ /* Kick it. */
+ writel(ECC_XACT_KICK, drvdata->base + ECC_BLK_STARTACC_OFST);
+ }
+
/* Ensure the interrupt test bits are set */
wmb();
local_irq_restore(flags);
@@ -2147,6 +1996,35 @@ static const struct irq_domain_ops a10_eccmgr_ic_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/************** Stratix 10 EDAC Double Bit Error Handler ************/
+#define to_a10edac(p, m) container_of(p, struct altr_arria10_edac, m)
+
+/*
+ * A double bit error is handled through an SError, which is fatal. This
+ * handler is called as a panic notifier to print the ECC error info as
+ * part of the panic output.
+ */
+static int s10_edac_dberr_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct altr_arria10_edac *edac = to_a10edac(this, panic_notifier);
+ int err_addr, dberror;
+
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
+ &dberror);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST, dberror);
+ if (dberror & S10_DDR0_IRQ_MASK) {
+ regmap_read(edac->ecc_mgr_map, A10_DERRADDR_OFST, &err_addr);
+ regmap_write(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ err_addr);
+ edac_printk(KERN_ERR, EDAC_MC,
+ "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
+ err_addr);
+ }
+
+ return NOTIFY_DONE;
+}
+
+/****************** Arria 10 EDAC Probe Function *********************/
static int altr_edac_a10_probe(struct platform_device *pdev)
{
struct altr_arria10_edac *edac;
@@ -2160,8 +2038,34 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, edac);
INIT_LIST_HEAD(&edac->a10_ecc_devices);
- edac->ecc_mgr_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ if (socfpga_is_a10()) {
+ edac->ecc_mgr_map =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"altr,sysmgr-syscon");
+ } else {
+ struct device_node *sysmgr_np;
+ struct resource res;
+ uintptr_t base;
+
+ sysmgr_np = of_parse_phandle(pdev->dev.of_node,
+ "altr,sysmgr-syscon", 0);
+ if (!sysmgr_np) {
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Unable to find altr,sysmgr-syscon\n");
+ return -ENODEV;
+ }
+
+ if (of_address_to_resource(sysmgr_np, 0, &res))
+ return -ENOMEM;
+
+ /* Need the physical address for the SMC call */
+ base = res.start;
+
+ edac->ecc_mgr_map = devm_regmap_init(&pdev->dev, NULL,
+ (void *)base,
+ &s10_sdram_regmap_cfg);
+ }
+
if (IS_ERR(edac->ecc_mgr_map)) {
edac_printk(KERN_ERR, EDAC_DEVICE,
"Unable to get syscon altr,sysmgr-syscon\n");
@@ -2188,14 +2092,38 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_irq_handler,
edac);
- edac->db_irq = platform_get_irq(pdev, 1);
- if (edac->db_irq < 0) {
- dev_err(&pdev->dev, "No DBERR IRQ resource\n");
- return edac->db_irq;
+ if (socfpga_is_a10()) {
+ edac->db_irq = platform_get_irq(pdev, 1);
+ if (edac->db_irq < 0) {
+ dev_err(&pdev->dev, "No DBERR IRQ resource\n");
+ return edac->db_irq;
+ }
+ irq_set_chained_handler_and_data(edac->db_irq,
+ altr_edac_a10_irq_handler,
+ edac);
+ } else {
+ int dberror, err_addr;
+
+ edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &edac->panic_notifier);
+
+ /* Print a message if an uncorrectable error occurred previously. */
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_VAL_OFST,
+ &dberror);
+ if (dberror) {
+ regmap_read(edac->ecc_mgr_map, S10_SYSMGR_UE_ADDR_OFST,
+ &err_addr);
+ edac_printk(KERN_ERR, EDAC_DEVICE,
+ "Previous Boot UE detected[0x%X] @ 0x%X\n",
+ dberror, err_addr);
+ /* Reset the sticky registers */
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_VAL_OFST, 0);
+ regmap_write(edac->ecc_mgr_map,
+ S10_SYSMGR_UE_ADDR_OFST, 0);
+ }
}
- irq_set_chained_handler_and_data(edac->db_irq,
- altr_edac_a10_irq_handler,
- edac);
for_each_child_of_node(pdev->dev.of_node, child) {
if (!of_device_is_available(child))
@@ -2212,7 +2140,8 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
altr_edac_a10_device_add(edac, child);
- else if (of_device_is_compatible(child, "altr,sdram-edac-a10"))
+ else if ((of_device_is_compatible(child, "altr,sdram-edac-a10")) ||
+ (of_device_is_compatible(child, "altr,sdram-edac-s10")))
of_platform_populate(pdev->dev.of_node,
altr_sdram_ctrl_of_match,
NULL, &pdev->dev);
@@ -2223,6 +2152,7 @@ static int altr_edac_a10_probe(struct platform_device *pdev)
static const struct of_device_id altr_edac_a10_of_match[] = {
{ .compatible = "altr,socfpga-a10-ecc-manager" },
+ { .compatible = "altr,socfpga-s10-ecc-manager" },
{},
};
MODULE_DEVICE_TABLE(of, altr_edac_a10_of_match);
@@ -2236,171 +2166,6 @@ static struct platform_driver altr_edac_a10_driver = {
};
module_platform_driver(altr_edac_a10_driver);
-/************** Stratix 10 EDAC Device Controller Functions> ************/
-
-#define to_s10edac(p, m) container_of(p, struct altr_stratix10_edac, m)
-
-/*
- * The double bit error is handled through SError which is fatal. This is
- * called as a panic notifier to printout ECC error info as part of the panic.
- */
-static int s10_edac_dberr_handler(struct notifier_block *this,
- unsigned long event, void *ptr)
-{
- struct altr_stratix10_edac *edac = to_s10edac(this, panic_notifier);
- int err_addr, dberror;
-
- s10_protected_reg_read(edac, S10_SYSMGR_ECC_INTSTAT_DERR_OFST,
- &dberror);
- /* Remember the UE Errors for a reboot */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, dberror);
- if (dberror & S10_DDR0_IRQ_MASK) {
- s10_protected_reg_read(edac, S10_DERRADDR_OFST, &err_addr);
- /* Remember the UE Error address */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST,
- err_addr);
- edac_printk(KERN_ERR, EDAC_MC,
- "EDAC: [Uncorrectable errors @ 0x%08X]\n\n",
- err_addr);
- }
-
- return NOTIFY_DONE;
-}
-
-static void altr_edac_s10_irq_handler(struct irq_desc *desc)
-{
- struct altr_stratix10_edac *edac = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_chip(desc);
- int irq = irq_desc_get_irq(desc);
- int bit, sm_offset, irq_status;
-
- sm_offset = S10_SYSMGR_ECC_INTSTAT_SERR_OFST;
-
- chained_irq_enter(chip, desc);
-
- s10_protected_reg_read(NULL, sm_offset, &irq_status);
-
- for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
- irq = irq_linear_revmap(edac->domain, bit);
- if (irq)
- generic_handle_irq(irq);
- }
-
- chained_irq_exit(chip, desc);
-}
-
-static void s10_eccmgr_irq_mask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_SET_OFST,
- BIT(d->hwirq));
-}
-
-static void s10_eccmgr_irq_unmask(struct irq_data *d)
-{
- struct altr_stratix10_edac *edac = irq_data_get_irq_chip_data(d);
-
- s10_protected_reg_write(edac, S10_SYSMGR_ECC_INTMASK_CLR_OFST,
- BIT(d->hwirq));
-}
-
-static int s10_eccmgr_irqdomain_map(struct irq_domain *d, unsigned int irq,
- irq_hw_number_t hwirq)
-{
- struct altr_stratix10_edac *edac = d->host_data;
-
- irq_set_chip_and_handler(irq, &edac->irq_chip, handle_simple_irq);
- irq_set_chip_data(irq, edac);
- irq_set_noprobe(irq);
-
- return 0;
-}
-
-static const struct irq_domain_ops s10_eccmgr_ic_ops = {
- .map = s10_eccmgr_irqdomain_map,
- .xlate = irq_domain_xlate_twocell,
-};
-
-static int altr_edac_s10_probe(struct platform_device *pdev)
-{
- struct altr_stratix10_edac *edac;
- struct device_node *child;
- int dberror, err_addr;
-
- edac = devm_kzalloc(&pdev->dev, sizeof(*edac), GFP_KERNEL);
- if (!edac)
- return -ENOMEM;
-
- edac->dev = &pdev->dev;
- platform_set_drvdata(pdev, edac);
- INIT_LIST_HEAD(&edac->s10_ecc_devices);
-
- edac->irq_chip.name = pdev->dev.of_node->name;
- edac->irq_chip.irq_mask = s10_eccmgr_irq_mask;
- edac->irq_chip.irq_unmask = s10_eccmgr_irq_unmask;
- edac->domain = irq_domain_add_linear(pdev->dev.of_node, 64,
- &s10_eccmgr_ic_ops, edac);
- if (!edac->domain) {
- dev_err(&pdev->dev, "Error adding IRQ domain\n");
- return -ENOMEM;
- }
-
- edac->sb_irq = platform_get_irq(pdev, 0);
- if (edac->sb_irq < 0) {
- dev_err(&pdev->dev, "No SBERR IRQ resource\n");
- return edac->sb_irq;
- }
-
- irq_set_chained_handler_and_data(edac->sb_irq,
- altr_edac_s10_irq_handler,
- edac);
-
- edac->panic_notifier.notifier_call = s10_edac_dberr_handler;
- atomic_notifier_chain_register(&panic_notifier_list,
- &edac->panic_notifier);
-
- /* Printout a message if uncorrectable error previously. */
- s10_protected_reg_read(edac, S10_SYSMGR_UE_VAL_OFST, &dberror);
- if (dberror) {
- s10_protected_reg_read(edac, S10_SYSMGR_UE_ADDR_OFST,
- &err_addr);
- edac_printk(KERN_ERR, EDAC_DEVICE,
- "Previous Boot UE detected[0x%X] @ 0x%X\n",
- dberror, err_addr);
- /* Reset the sticky registers */
- s10_protected_reg_write(edac, S10_SYSMGR_UE_VAL_OFST, 0);
- s10_protected_reg_write(edac, S10_SYSMGR_UE_ADDR_OFST, 0);
- }
-
- for_each_child_of_node(pdev->dev.of_node, child) {
- if (!of_device_is_available(child))
- continue;
-
- if (of_device_is_compatible(child, "altr,sdram-edac-s10"))
- of_platform_populate(pdev->dev.of_node,
- altr_sdram_ctrl_of_match,
- NULL, &pdev->dev);
- }
-
- return 0;
-}
-
-static const struct of_device_id altr_edac_s10_of_match[] = {
- { .compatible = "altr,socfpga-s10-ecc-manager" },
- {},
-};
-MODULE_DEVICE_TABLE(of, altr_edac_s10_of_match);
-
-static struct platform_driver altr_edac_s10_driver = {
- .probe = altr_edac_s10_probe,
- .driver = {
- .name = "socfpga_s10_ecc_manager",
- .of_match_table = altr_edac_s10_of_match,
- },
-};
-module_platform_driver(altr_edac_s10_driver);
-
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Thor Thayer");
MODULE_DESCRIPTION("EDAC Driver for Altera Memories");
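Since the Stratix10 double-bit error now arrives as a fatal SError rather than a maskable IRQ, the probe path above hooks the panic notifier chain instead of installing a chained handler. The registration pattern, reduced to a sketch with a hypothetical private struct:

#include <linux/notifier.h>

struct my_edac {
        struct notifier_block panic_notifier;
        /* ... regmap, device pointers ... */
};

static int my_dberr_handler(struct notifier_block *this,
                            unsigned long event, void *ptr)
{
        struct my_edac *edac = container_of(this, struct my_edac,
                                            panic_notifier);

        /*
         * Log and latch the error state into sticky registers so it can
         * be reported after the reboot, as the probe code above does.
         */
        return NOTIFY_DONE;
}

/*
 * In probe (panic_notifier_list is the global atomic chain the kernel
 * walks on panic):
 */
edac->panic_notifier.notifier_call = my_dberr_handler;
atomic_notifier_chain_register(&panic_notifier_list, &edac->panic_notifier);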
diff --git a/drivers/edac/altera_edac.h b/drivers/edac/altera_edac.h
index 81f0554e09de..4213cb0bb2a7 100644
--- a/drivers/edac/altera_edac.h
+++ b/drivers/edac/altera_edac.h
@@ -156,34 +156,6 @@
#define A10_INTMASK_CLR_OFST 0x10
#define A10_DDR0_IRQ_MASK BIT(17)
-/************* Stratix10 Defines **************/
-
-/* SDRAM Controller EccCtrl Register */
-#define S10_ECCCTRL1_OFST 0xF8011100
-
-/* SDRAM Controller DRAM IRQ Register */
-#define S10_ERRINTEN_OFST 0xF8011110
-
-/* SDRAM Interrupt Mode Register */
-#define S10_INTMODE_OFST 0xF801111C
-
-/* SDRAM Controller Error Status Register */
-#define S10_INTSTAT_OFST 0xF8011120
-
-/* SDRAM Controller ECC Error Address Register */
-#define S10_DERRADDR_OFST 0xF801112C
-#define S10_SERRADDR_OFST 0xF8011130
-
-/* SDRAM Controller ECC Diagnostic Register */
-#define S10_DIAGINTTEST_OFST 0xF8011124
-
-/* SDRAM Single Bit Error Count Compare Set Register */
-#define S10_SERRCNTREG_OFST 0xF801113C
-
-/* Sticky registers for Uncorrected Errors */
-#define S10_SYSMGR_UE_VAL_OFST 0xFFD12220
-#define S10_SYSMGR_UE_ADDR_OFST 0xFFD12224
-
struct altr_sdram_prv_data {
int ecc_ctrl_offset;
int ecc_ctl_en_mask;
@@ -319,15 +291,40 @@ struct altr_sdram_mc_data {
/************* Stratix10 Defines **************/
/* Stratix10 ECC Manager Defines */
-#define S10_SYSMGR_ECC_INTMASK_VAL_OFST 0xFFD12090
-#define S10_SYSMGR_ECC_INTMASK_SET_OFST 0xFFD12094
-#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0xFFD12098
+#define S10_SYSMGR_ECC_INTMASK_CLR_OFST 0x98
+#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xA0
-#define S10_SYSMGR_ECC_INTSTAT_SERR_OFST 0xFFD1209C
-#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST 0xFFD120A0
+/* Sticky registers for Uncorrected Errors */
+#define S10_SYSMGR_UE_VAL_OFST 0x120
+#define S10_SYSMGR_UE_ADDR_OFST 0x124
#define S10_DDR0_IRQ_MASK BIT(16)
+/* Define ECC Block Offsets for peripherals */
+#define ECC_BLK_ADDRESS_OFST 0x40
+#define ECC_BLK_RDATA0_OFST 0x44
+#define ECC_BLK_RDATA1_OFST 0x48
+#define ECC_BLK_RDATA2_OFST 0x4C
+#define ECC_BLK_RDATA3_OFST 0x50
+#define ECC_BLK_WDATA0_OFST 0x54
+#define ECC_BLK_WDATA1_OFST 0x58
+#define ECC_BLK_WDATA2_OFST 0x5C
+#define ECC_BLK_WDATA3_OFST 0x60
+#define ECC_BLK_RECC0_OFST 0x64
+#define ECC_BLK_RECC1_OFST 0x68
+#define ECC_BLK_WECC0_OFST 0x6C
+#define ECC_BLK_WECC1_OFST 0x70
+#define ECC_BLK_DBYTECTRL_OFST 0x74
+#define ECC_BLK_ACCCTRL_OFST 0x78
+#define ECC_BLK_STARTACC_OFST 0x7C
+
+#define ECC_XACT_KICK 0x10000
+#define ECC_WORD_WRITE 0xF
+#define ECC_WRITE_DOVR 0x101
+#define ECC_WRITE_EDOVR 0x103
+#define ECC_READ_EOVR 0x2
+#define ECC_READ_EDOVR 0x3
+
struct altr_edac_device_dev;
struct edac_device_prv_data {
@@ -370,6 +367,7 @@ struct altr_arria10_edac {
struct irq_domain *domain;
struct irq_chip irq_chip;
struct list_head a10_ecc_devices;
+ struct notifier_block panic_notifier;
};
/*
@@ -437,13 +435,4 @@ struct altr_arria10_edac {
#define INTEL_SIP_SMC_REG_WRITE \
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_REG_WRITE)
-struct altr_stratix10_edac {
- struct device *dev;
- int sb_irq;
- struct irq_domain *domain;
- struct irq_chip irq_chip;
- struct list_head s10_ecc_devices;
- struct notifier_block panic_notifier;
-};
-
#endif /* #ifndef _ALTERA_EDAC_H */
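The header change from absolute S10_* addresses to offsets pairs with the regmap_config in altera_edac.c: register access goes through reg_read/reg_write callbacks that trampoline into secure firmware via arm_smccc_smc(), and the regmap bus context carries the peripheral's physical base so offsets can be rebased before the call. The pattern, compressed into a sketch using the INTEL_SIP_SMC_REG_* IDs from this header:

static int s10_reg_read(void *context, unsigned int reg, unsigned int *val)
{
        unsigned long base = (unsigned long)context; /* physical base */
        struct arm_smccc_res res;

        /* Secure firmware performs the MMIO access on our behalf. */
        arm_smccc_smc(INTEL_SIP_SMC_REG_READ, base + reg, 0, 0, 0, 0, 0, 0,
                      &res);
        *val = (unsigned int)res.a1;
        return (int)res.a0;
}

static const struct regmap_config s10_cfg = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
        .reg_read = s10_reg_read,
        /* .reg_write is analogous, via INTEL_SIP_SMC_REG_WRITE */
        .use_single_read = true,
        .use_single_write = true,
};

/* A NULL bus plus a bus_context select the callback-based access: */
map = devm_regmap_init(dev, NULL, (void *)base, &s10_cfg);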
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 18aeabb1d5ee..6ea98575a402 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -211,7 +211,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate)
scrubval = scrubrates[i].scrubval;
- if (pvt->fam == 0x17) {
+ if (pvt->fam == 0x17 || pvt->fam == 0x18) {
__f17h_set_scrubval(pvt, scrubval);
} else if (pvt->fam == 0x15 && pvt->model == 0x60) {
f15h_select_dct(pvt, 0);
@@ -264,6 +264,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
break;
case 0x17:
+ case 0x18:
amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval);
if (scrubval & BIT(0)) {
amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval);
@@ -1044,6 +1045,7 @@ static void determine_memory_type(struct amd64_pvt *pvt)
goto ddr3;
case 0x17:
+ case 0x18:
if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5))
pvt->dram_type = MEM_LRDDR4;
else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4))
@@ -2200,6 +2202,15 @@ static struct amd64_family_type family_types[] = {
.dbam_to_cs = f17_base_addr_to_cs_size,
}
},
+ [F17_M10H_CPUS] = {
+ .ctl_name = "F17h_M10h",
+ .f0_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F0,
+ .f6_id = PCI_DEVICE_ID_AMD_17H_M10H_DF_F6,
+ .ops = {
+ .early_channel_count = f17_early_channel_count,
+ .dbam_to_cs = f17_base_addr_to_cs_size,
+ }
+ },
};
/*
@@ -3188,8 +3199,18 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
break;
case 0x17:
+ if (pvt->model >= 0x10 && pvt->model <= 0x2f) {
+ fam_type = &family_types[F17_M10H_CPUS];
+ pvt->ops = &family_types[F17_M10H_CPUS].ops;
+ break;
+ }
+ /* fall through */
+ case 0x18:
fam_type = &family_types[F17_CPUS];
pvt->ops = &family_types[F17_CPUS].ops;
+
+ if (pvt->fam == 0x18)
+ family_types[F17_CPUS].ctl_name = "F18h";
break;
default:
@@ -3428,6 +3449,7 @@ static const struct x86_cpu_id amd64_cpuids[] = {
{ X86_VENDOR_AMD, 0x15, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
+ { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 },
{ }
};
MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids);
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 1d4b74e9a037..4242f8e39c18 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -115,6 +115,8 @@
#define PCI_DEVICE_ID_AMD_16H_M30H_NB_F2 0x1582
#define PCI_DEVICE_ID_AMD_17H_DF_F0 0x1460
#define PCI_DEVICE_ID_AMD_17H_DF_F6 0x1466
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F0 0x15e8
+#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F6 0x15ee
/*
* Function 1 - Address Map
@@ -281,6 +283,7 @@ enum amd_families {
F16_CPUS,
F16_M30H_CPUS,
F17_CPUS,
+ F17_M10H_CPUS,
NUM_FAMILIES,
};
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 2c98e020df05..3c0881ac9880 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -593,8 +593,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
/******************** CPU err device********************************/
static u32 cpc925_cpu_mask_disabled(void)
{
- struct device_node *cpus;
- struct device_node *cpunode = NULL;
+ struct device_node *cpunode;
static u32 mask = 0;
/* use cached value if available */
@@ -603,20 +602,8 @@ static u32 cpc925_cpu_mask_disabled(void)
mask = APIMASK_ADI0 | APIMASK_ADI1;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
- cpc925_printk(KERN_DEBUG, "No /cpus node !\n");
- return 0;
- }
-
- while ((cpunode = of_get_next_child(cpus, cpunode)) != NULL) {
+ for_each_of_cpu_node(cpunode) {
const u32 *reg = of_get_property(cpunode, "reg", NULL);
-
- if (strcmp(cpunode->type, "cpu")) {
- cpc925_printk(KERN_ERR, "Not a cpu node in /cpus: %s\n", cpunode->name);
- continue;
- }
-
if (reg == NULL || *reg > 2) {
cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
continue;
@@ -633,9 +620,6 @@ static u32 cpc925_cpu_mask_disabled(void)
"Assuming PI id is equal to CPU MPIC id!\n");
}
- of_node_put(cpunode);
- of_node_put(cpus);
-
return mask;
}
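The cpc925 cleanup replaces the hand-rolled walk of /cpus (including its device_type filtering and of_node_put() bookkeeping) with the for_each_of_cpu_node() iterator, which yields only CPU nodes and manages the reference counts across iterations. The idiom, sketched with an illustrative wanted_id; note that breaking out early leaves one reference held:

#include <linux/of.h>

struct device_node *cpunode;

for_each_of_cpu_node(cpunode) {
        const u32 *reg = of_get_property(cpunode, "reg", NULL);

        if (reg && *reg == wanted_id) {
                /*
                 * The iterator still holds a reference here; drop it
                 * (or keep using the node) before breaking out.
                 */
                of_node_put(cpunode);
                break;
        }
}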
diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
index 473aeec4b1da..49396bf6ad88 100644
--- a/drivers/edac/ghes_edac.c
+++ b/drivers/edac/ghes_edac.c
@@ -81,6 +81,18 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
(*num_dimm)++;
}
+static int get_dimm_smbios_index(u16 handle)
+{
+ struct mem_ctl_info *mci = ghes_pvt->mci;
+ int i;
+
+ for (i = 0; i < mci->tot_dimms; i++) {
+ if (mci->dimms[i]->smbios_handle == handle)
+ return i;
+ }
+ return -1;
+}
+
static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
{
struct ghes_edac_dimm_fill *dimm_fill = arg;
@@ -177,6 +189,8 @@ static void ghes_edac_dmidecode(const struct dmi_header *dh, void *arg)
entry->total_width, entry->data_width);
}
+ dimm->smbios_handle = entry->handle;
+
dimm_fill->count++;
}
}
@@ -327,12 +341,21 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
p += sprintf(p, "bit_pos:%d ", mem_err->bit_pos);
if (mem_err->validation_bits & CPER_MEM_VALID_MODULE_HANDLE) {
const char *bank = NULL, *device = NULL;
+ int index = -1;
+
dmi_memdev_name(mem_err->mem_dev_handle, &bank, &device);
if (bank != NULL && device != NULL)
p += sprintf(p, "DIMM location:%s %s ", bank, device);
else
p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
mem_err->mem_dev_handle);
+
+ index = get_dimm_smbios_index(mem_err->mem_dev_handle);
+ if (index >= 0) {
+ e->top_layer = index;
+ e->enable_per_layer_report = true;
+ }
+
}
if (p > e->location)
*(p - 1) = '\0';
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index d92d56cee101..299b441647cd 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -399,7 +399,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
if (nr_pages == 0)
continue;
- edac_dbg(0, "csrow %d, channel %d%s, size = %ld Mb\n", i, j,
+ edac_dbg(0, "csrow %d, channel %d%s, size = %ld MiB\n", i, j,
stacked ? " (stacked)" : "", PAGES_TO_MiB(nr_pages));
dimm->nr_pages = nr_pages;
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8e120bf60624..9ef448fef12f 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -597,7 +597,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
/* DDR3 has 8 I/O banks */
size = (rows * cols * banks * ranks) >> (20 - 3);
- edac_dbg(0, "\tdimm %d %d Mb offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "\tdimm %d %d MiB offset: %x, bank: %d, rank: %d, row: %#x, col: %#x\n",
j, size,
RANKOFFSET(dimm_dod[j]),
banks, ranks, rows, cols);
@@ -1711,6 +1711,7 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
u32 errnum = find_first_bit(&error, 32);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv)
tp_event = HW_EVENT_ERR_FATAL;
else
@@ -1815,14 +1816,12 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
struct mce *mce = (struct mce *)data;
struct i7core_dev *i7_dev;
struct mem_ctl_info *mci;
- struct i7core_pvt *pvt;
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
return NOTIFY_DONE;
mci = i7_dev->mci;
- pvt = mci->pvt_info;
/*
* Just let mcelog handle it if the error is
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index 2ab4d61ee47e..c605089d899f 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -1059,7 +1059,8 @@ static int __init mce_amd_init(void)
{
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (c->x86_vendor != X86_VENDOR_AMD)
+ if (c->x86_vendor != X86_VENDOR_AMD &&
+ c->x86_vendor != X86_VENDOR_HYGON)
return -ENODEV;
fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL);
@@ -1113,6 +1114,7 @@ static int __init mce_amd_init(void)
break;
case 0x17:
+ case 0x18:
xec_mask = 0x3f;
if (!boot_cpu_has(X86_FEATURE_SMCA)) {
printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n");
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 07726fb00321..9353c3fc7c05 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -326,6 +326,7 @@ struct sbridge_info {
const struct interleave_pkg *interleave_pkg;
u8 max_sad;
u8 (*get_node_id)(struct sbridge_pvt *pvt);
+ u8 (*get_ha)(u8 bank);
enum mem_type (*get_memory_type)(struct sbridge_pvt *pvt);
enum dev_type (*get_width)(struct sbridge_pvt *pvt, u32 mtr);
struct pci_dev *pci_vtd;
@@ -1002,6 +1003,39 @@ static u8 knl_get_node_id(struct sbridge_pvt *pvt)
return GET_BITFIELD(reg, 0, 2);
}
+/*
+ * Use the reporting bank number to determine which memory
+ * controller ("ha", short for "home agent") is involved. Sandy
+ * Bridge only has one memory controller per socket, so the
+ * answer is always zero.
+ */
+static u8 sbridge_get_ha(u8 bank)
+{
+ return 0;
+}
+
+/*
+ * On Ivy Bridge, Haswell and Broadwell the error may be in a
+ * home agent bank (7, 8), or one of the per-channel memory
+ * controller banks (9 .. 16).
+ */
+static u8 ibridge_get_ha(u8 bank)
+{
+ switch (bank) {
+ case 7 ... 8:
+ return bank - 7;
+ case 9 ... 16:
+ return (bank - 9) / 4;
+ default:
+ return 0xff;
+ }
+}
+
+/* Not used, but included for safety/symmetry */
+static u8 knl_get_ha(u8 bank)
+{
+ return 0xff;
+}
static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
@@ -1622,7 +1656,7 @@ static int __populate_dimms(struct mem_ctl_info *mci,
size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
size, npages,
banks, ranks, rows, cols);
@@ -2207,6 +2241,60 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
return 0;
}
+static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
+ const struct mce *m, u8 *socket,
+ u8 *ha, long *channel_mask,
+ char *msg)
+{
+ u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
+ struct mem_ctl_info *new_mci;
+ struct sbridge_pvt *pvt;
+ struct pci_dev *pci_ha;
+ bool tad0;
+
+ if (channel >= NUM_CHANNELS) {
+ sprintf(msg, "Invalid channel 0x%x", channel);
+ return -EINVAL;
+ }
+
+ pvt = mci->pvt_info;
+ if (!pvt->info.get_ha) {
+ sprintf(msg, "No get_ha()");
+ return -EINVAL;
+ }
+ *ha = pvt->info.get_ha(m->bank);
+ if (*ha != 0 && *ha != 1) {
+ sprintf(msg, "Impossible bank %d", m->bank);
+ return -EINVAL;
+ }
+
+ *socket = m->socketid;
+ new_mci = get_mci_for_node_id(*socket, *ha);
+ if (!new_mci) {
+ strcpy(msg, "mci socket got corrupted!");
+ return -EINVAL;
+ }
+
+ pvt = new_mci->pvt_info;
+ pci_ha = pvt->pci_ha;
+ pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
+ tad0 = m->addr <= TAD_LIMIT(reg);
+
+ *channel_mask = 1 << channel;
+ if (pvt->mirror_mode == FULL_MIRRORING ||
+ (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
+ *channel_mask |= 1 << ((channel + 2) % 4);
+ pvt->is_cur_addr_mirrored = true;
+ } else {
+ pvt->is_cur_addr_mirrored = false;
+ }
+
+ if (pvt->is_lockstep)
+ *channel_mask |= 1 << ((channel + 1) % 4);
+
+ return 0;
+}
+
/****************************************************************************
Device initialization routines: put/get, init/exit
****************************************************************************/
@@ -2877,10 +2965,16 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
u32 errcode = GET_BITFIELD(m->status, 0, 15);
u32 channel = GET_BITFIELD(m->status, 0, 3);
u32 optypenum = GET_BITFIELD(m->status, 4, 6);
+ /*
+ * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
+ * A value of 6 indicates a cache-line-aligned address; a value of 12
+ * indicates a page-aligned address reported by the patrol scrubber.
+ */
+ u32 lsb = GET_BITFIELD(m->misc, 0, 5);
long channel_mask, first_channel;
- u8 rank, socket, ha;
+ u8 rank = 0xff, socket, ha;
int rc, dimm;
- char *area_type = NULL;
+ char *area_type = "DRAM";
if (pvt->info.type != SANDY_BRIDGE)
recoverable = true;
@@ -2888,6 +2982,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
@@ -2911,35 +3006,27 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
* cccc = channel
* If the mask doesn't match, report an error to the parsing logic
*/
- if (! ((errcode & 0xef80) == 0x80)) {
- optype = "Can't parse: it is not a mem";
- } else {
- switch (optypenum) {
- case 0:
- optype = "generic undef request error";
- break;
- case 1:
- optype = "memory read error";
- break;
- case 2:
- optype = "memory write error";
- break;
- case 3:
- optype = "addr/cmd error";
- break;
- case 4:
- optype = "memory scrubbing error";
- break;
- default:
- optype = "reserved";
- break;
- }
+ switch (optypenum) {
+ case 0:
+ optype = "generic undef request error";
+ break;
+ case 1:
+ optype = "memory read error";
+ break;
+ case 2:
+ optype = "memory write error";
+ break;
+ case 3:
+ optype = "addr/cmd error";
+ break;
+ case 4:
+ optype = "memory scrubbing error";
+ break;
+ default:
+ optype = "reserved";
+ break;
}
- /* Only decode errors with an valid address (ADDRV) */
- if (!GET_BITFIELD(m->status, 58, 58))
- return;
-
if (pvt->info.type == KNIGHTS_LANDING) {
if (channel == 14) {
edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
@@ -2972,9 +3059,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
optype, msg);
}
return;
- } else {
+ } else if (lsb < 12) {
rc = get_memory_error_data(mci, m->addr, &socket, &ha,
- &channel_mask, &rank, &area_type, msg);
+ &channel_mask, &rank,
+ &area_type, msg);
+ } else {
+ rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
+ &channel_mask, msg);
}
if (rc < 0)
@@ -2989,14 +3080,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
- if (rank < 4)
+ if (rank == 0xff)
+ dimm = -1;
+ else if (rank < 4)
dimm = 0;
else if (rank < 8)
dimm = 1;
else
dimm = 2;
-
/*
* FIXME: On some memory configurations (mirror, lockstep), the
* Memory Controller can't point the error to a single DIMM. The
@@ -3045,17 +3137,11 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
{
struct mce *mce = (struct mce *)data;
struct mem_ctl_info *mci;
- struct sbridge_pvt *pvt;
char *type;
if (edac_get_report_status() == EDAC_REPORTING_DISABLED)
return NOTIFY_DONE;
- mci = get_mci_for_node_id(mce->socketid, IMC0);
- if (!mci)
- return NOTIFY_DONE;
- pvt = mci->pvt_info;
-
/*
* Just let mcelog handle it if the error is
* outside the memory controller. A memory error
@@ -3065,6 +3151,22 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
if ((mce->status & 0xefff) >> 7 != 1)
return NOTIFY_DONE;
+ /* Check ADDRV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 58, 58))
+ return NOTIFY_DONE;
+
+ /* Check MISCV bit in STATUS */
+ if (!GET_BITFIELD(mce->status, 59, 59))
+ return NOTIFY_DONE;
+
+ /* Check address type in MISC (physical address only) */
+ if (GET_BITFIELD(mce->misc, 6, 8) != 2)
+ return NOTIFY_DONE;
+
+ mci = get_mci_for_node_id(mce->socketid, IMC0);
+ if (!mci)
+ return NOTIFY_DONE;
+
if (mce->mcgstatus & MCG_STATUS_MCIP)
type = "Exception";
else
@@ -3173,6 +3275,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3197,6 +3300,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = sbridge_dram_rule;
pvt->info.get_memory_type = get_memory_type;
pvt->info.get_node_id = get_node_id;
+ pvt->info.get_ha = sbridge_get_ha;
pvt->info.rir_limit = rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3221,6 +3325,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3245,6 +3350,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = ibridge_dram_rule;
pvt->info.get_memory_type = haswell_get_memory_type;
pvt->info.get_node_id = haswell_get_node_id;
+ pvt->info.get_ha = ibridge_get_ha;
pvt->info.rir_limit = haswell_rir_limit;
pvt->info.sad_limit = sad_limit;
pvt->info.interleave_mode = interleave_mode;
@@ -3269,6 +3375,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
pvt->info.dram_rule = knl_dram_rule;
pvt->info.get_memory_type = knl_get_memory_type;
pvt->info.get_node_id = knl_get_node_id;
+ pvt->info.get_ha = knl_get_ha;
pvt->info.rir_limit = NULL;
pvt->info.sad_limit = knl_sad_limit;
pvt->info.interleave_mode = knl_interleave_mode;
@@ -3320,17 +3427,14 @@ fail0:
return rc;
}
-#define ICPU(model, table) \
- { X86_VENDOR_INTEL, 6, model, 0, (unsigned long)&table }
-
static const struct x86_cpu_id sbridge_cpuids[] = {
- ICPU(INTEL_FAM6_SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
- ICPU(INTEL_FAM6_IVYBRIDGE_X, pci_dev_descr_ibridge_table),
- ICPU(INTEL_FAM6_HASWELL_X, pci_dev_descr_haswell_table),
- ICPU(INTEL_FAM6_BROADWELL_X, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNL, pci_dev_descr_knl_table),
- ICPU(INTEL_FAM6_XEON_PHI_KNM, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(SANDYBRIDGE_X, pci_dev_descr_sbridge_table),
+ INTEL_CPU_FAM6(IVYBRIDGE_X, pci_dev_descr_ibridge_table),
+ INTEL_CPU_FAM6(HASWELL_X, pci_dev_descr_haswell_table),
+ INTEL_CPU_FAM6(BROADWELL_X, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(BROADWELL_XEON_D, pci_dev_descr_broadwell_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNL, pci_dev_descr_knl_table),
+ INTEL_CPU_FAM6(XEON_PHI_KNM, pci_dev_descr_knl_table),
{ }
};
MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
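The new get_ha() callbacks map a reporting MCA bank number to the home agent that owns it, which get_memory_error_data_from_mce() needs when decoding straight from the machine check. The Ivy Bridge/Haswell/Broadwell mapping above is easy to sanity-check by enumeration in plain userspace C:

#include <stdio.h>

/*
 * Mirrors ibridge_get_ha(): banks 7-8 are the two home agent banks,
 * banks 9-16 the per-channel IMC banks, four channels per agent.
 */
static unsigned int get_ha(unsigned int bank)
{
        if (bank >= 7 && bank <= 8)
                return bank - 7;
        if (bank >= 9 && bank <= 16)
                return (bank - 9) / 4;
        return 0xff;
}

int main(void)
{
        for (unsigned int bank = 7; bank <= 16; bank++)
                printf("bank %2u -> ha %u\n", bank, get_ha(bank));
        /* 7->0, 8->1, 9..12->0, 13..16->1 */
        return 0;
}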
diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c
index fae095162c01..dd209e0dd9ab 100644
--- a/drivers/edac/skx_edac.c
+++ b/drivers/edac/skx_edac.c
@@ -364,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm,
size = ((1ull << (rows + cols + ranks)) * banks) >> (20 - 3);
npages = MiB_TO_PAGES(size);
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
imc->mc, chan, dimmno, size, npages,
banks, 1 << ranks, rows, cols);
@@ -424,7 +424,7 @@ unknown_size:
dimm->mtype = MEM_NVDIMM;
dimm->edac_mode = EDAC_SECDED; /* likely better than this */
- edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu Mb (%u pages)\n",
+ edac_dbg(0, "mc#%d: channel %d, dimm %d, %llu MiB (%u pages)\n",
imc->mc, chan, dimmno, size >> 20, dimm->nr_pages);
snprintf(dimm->label, sizeof(dimm->label), "CPU_SrcID#%u_MC#%u_Chan#%u_DIMM#%u",
@@ -668,7 +668,7 @@ sad_found:
break;
case 2:
lchan = (addr >> shift) % 2;
- lchan = (lchan << 1) | ~lchan;
+ lchan = (lchan << 1) | !lchan;
break;
case 3:
lchan = ((addr >> shift) % 2) << 1;
@@ -959,6 +959,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
recoverable = GET_BITFIELD(m->status, 56, 56);
if (uncorrected_error) {
+ core_err_cnt = 1;
if (ripv) {
type = "FATAL";
tp_event = HW_EVENT_ERR_FATAL;
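The two-way interleave fix earlier in this file works because lchan is a logical value of 0 or 1, not a bit mask: bitwise NOT of 0 sets every bit and produced a nonsense channel number, while logical NOT yields the intended pair swap. Worked out for both inputs:

	/*
	 * lchan = 0:  (0 << 1) | ~0  = 0xffffffff   old code, bogus
	 *             (0 << 1) | !0  = 1            new code, swapped pair
	 * lchan = 1:  (1 << 1) | ~1  = 0xfffffffe   old code, bogus
	 *             (1 << 1) | !1  = 2            new code, swapped pair
	 */
	lchan = (lchan << 1) | !lchan;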
diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c
index c009d94f40c5..34be60fe6892 100644
--- a/drivers/edac/thunderx_edac.c
+++ b/drivers/edac/thunderx_edac.c
@@ -1884,7 +1884,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
default:
dev_err(&l2c->pdev->dev, "Unsupported device: %04x\n",
l2c->pdev->device);
- return IRQ_NONE;
+ goto err_free;
}
while (CIRC_CNT(l2c->ring_head, l2c->ring_tail,
@@ -1906,7 +1906,7 @@ static irqreturn_t thunderx_l2c_threaded_isr(int irq, void *irq_id)
l2c->ring_tail++;
}
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
err_free:
kfree(other);
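The threaded-ISR change above exists because the handler allocates a scratch buffer before the device-type switch; returning IRQ_NONE directly from the unsupported-device arm leaked it. A minimal sketch of the resulting goto-cleanup shape, with the device check and buffer size as stand-ins:

static irqreturn_t sketch_threaded_isr(int irq, void *irq_id)
{
	irqreturn_t ret = IRQ_NONE;
	void *other;

	other = kmalloc(SZ_4K, GFP_KERNEL);	/* scratch copy of ring entries */
	if (!other)
		return IRQ_NONE;

	if (!sketch_device_supported())		/* stand-in for the switch default */
		goto err_free;			/* was "return IRQ_NONE": leaked other */

	/* ... drain the circular buffer and report the errors ... */

	ret = IRQ_HANDLED;
err_free:
	kfree(other);
	return ret;
}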
diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
index 5e1dd2772278..5ef215297101 100644
--- a/drivers/extcon/extcon-intel-cht-wc.c
+++ b/drivers/extcon/extcon-intel-cht-wc.c
@@ -1,18 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Extcon charger detection driver for Intel Cherrytrail Whiskey Cove PMIC
* Copyright (C) 2017 Hans de Goede <hdegoede@redhat.com>
*
* Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#include <linux/extcon-provider.h>
@@ -32,10 +24,10 @@
#define CHT_WC_CHGRCTRL0_EMRGCHREN BIT(1)
#define CHT_WC_CHGRCTRL0_EXTCHRDIS BIT(2)
#define CHT_WC_CHGRCTRL0_SWCONTROL BIT(3)
-#define CHT_WC_CHGRCTRL0_TTLCK_MASK BIT(4)
-#define CHT_WC_CHGRCTRL0_CCSM_OFF_MASK BIT(5)
-#define CHT_WC_CHGRCTRL0_DBPOFF_MASK BIT(6)
-#define CHT_WC_CHGRCTRL0_WDT_NOKICK BIT(7)
+#define CHT_WC_CHGRCTRL0_TTLCK BIT(4)
+#define CHT_WC_CHGRCTRL0_CCSM_OFF BIT(5)
+#define CHT_WC_CHGRCTRL0_DBPOFF BIT(6)
+#define CHT_WC_CHGRCTRL0_CHR_WDT_NOKICK BIT(7)
#define CHT_WC_CHGRCTRL1 0x5e17
@@ -52,7 +44,7 @@
#define CHT_WC_USBSRC_TYPE_ACA 4
#define CHT_WC_USBSRC_TYPE_SE1 5
#define CHT_WC_USBSRC_TYPE_MHL 6
-#define CHT_WC_USBSRC_TYPE_FLOAT_DP_DN 7
+#define CHT_WC_USBSRC_TYPE_FLOATING 7
#define CHT_WC_USBSRC_TYPE_OTHER 8
#define CHT_WC_USBSRC_TYPE_DCP_EXTPHY 9
@@ -61,9 +53,12 @@
#define CHT_WC_PWRSRC_STS 0x6e1e
#define CHT_WC_PWRSRC_VBUS BIT(0)
#define CHT_WC_PWRSRC_DC BIT(1)
-#define CHT_WC_PWRSRC_BAT BIT(2)
-#define CHT_WC_PWRSRC_ID_GND BIT(3)
-#define CHT_WC_PWRSRC_ID_FLOAT BIT(4)
+#define CHT_WC_PWRSRC_BATT BIT(2)
+#define CHT_WC_PWRSRC_USBID_MASK GENMASK(4, 3)
+#define CHT_WC_PWRSRC_USBID_SHIFT 3
+#define CHT_WC_PWRSRC_RID_ACA 0
+#define CHT_WC_PWRSRC_RID_GND 1
+#define CHT_WC_PWRSRC_RID_FLOAT 2
#define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
#define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
@@ -104,16 +99,20 @@ struct cht_wc_extcon_data {
static int cht_wc_extcon_get_id(struct cht_wc_extcon_data *ext, int pwrsrc_sts)
{
- if (pwrsrc_sts & CHT_WC_PWRSRC_ID_GND)
+ switch ((pwrsrc_sts & CHT_WC_PWRSRC_USBID_MASK) >> CHT_WC_PWRSRC_USBID_SHIFT) {
+ case CHT_WC_PWRSRC_RID_GND:
return USB_ID_GND;
- if (pwrsrc_sts & CHT_WC_PWRSRC_ID_FLOAT)
+ case CHT_WC_PWRSRC_RID_FLOAT:
return USB_ID_FLOAT;
-
- /*
- * Once we have iio support for the gpadc we should read the USBID
- * gpadc channel here and determine ACA role based on that.
- */
- return USB_ID_FLOAT;
+ case CHT_WC_PWRSRC_RID_ACA:
+ default:
+ /*
+ * Once we have IIO support for the GPADC we should read
+ * the USBID GPADC channel here and determine ACA role
+ * based on that.
+ */
+ return USB_ID_FLOAT;
+ }
}
static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
@@ -156,9 +155,9 @@ static int cht_wc_extcon_get_charger(struct cht_wc_extcon_data *ext,
dev_warn(ext->dev,
"Unhandled charger type %d, defaulting to SDP\n",
ret);
- /* Fall through, treat as SDP */
+ return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_SDP:
- case CHT_WC_USBSRC_TYPE_FLOAT_DP_DN:
+ case CHT_WC_USBSRC_TYPE_FLOATING:
case CHT_WC_USBSRC_TYPE_OTHER:
return EXTCON_CHG_USB_SDP;
case CHT_WC_USBSRC_TYPE_CDP:
@@ -279,7 +278,7 @@ static int cht_wc_extcon_sw_control(struct cht_wc_extcon_data *ext, bool enable)
{
int ret, mask, val;
- mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF_MASK;
+ mask = CHT_WC_CHGRCTRL0_SWCONTROL | CHT_WC_CHGRCTRL0_CCSM_OFF;
val = enable ? mask : 0;
ret = regmap_update_bits(ext->regmap, CHT_WC_CHGRCTRL0, mask, val);
if (ret)
@@ -292,6 +291,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
{
struct intel_soc_pmic *pmic = dev_get_drvdata(pdev->dev.parent);
struct cht_wc_extcon_data *ext;
+ unsigned long mask = ~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_USBID_MASK);
int irq, ret;
irq = platform_get_irq(pdev, 0);
@@ -352,9 +352,7 @@ static int cht_wc_extcon_probe(struct platform_device *pdev)
}
/* Unmask irqs */
- ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK,
- (int)~(CHT_WC_PWRSRC_VBUS | CHT_WC_PWRSRC_ID_GND |
- CHT_WC_PWRSRC_ID_FLOAT));
+ ret = regmap_write(ext->regmap, CHT_WC_PWRSRC_IRQ_MASK, mask);
if (ret) {
dev_err(ext->dev, "Error writing irq-mask: %d\n", ret);
goto disable_sw_control;
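The PWRSRC rework above replaces two independent ID-pin bits with a two-bit resistor-ID (RID) field decoded by mask and shift, which is also why the irq-unmask value can now be computed from the same mask. A minimal sketch of the field extraction, with the register names shortened:

#include <linux/bitops.h>

#define SKETCH_USBID_MASK	GENMASK(4, 3)
#define SKETCH_USBID_SHIFT	3

static unsigned int sketch_decode_rid(unsigned int pwrsrc_sts)
{
	/* 0 = ACA resistance, 1 = ID pin grounded, 2 = ID pin floating */
	return (pwrsrc_sts & SKETCH_USBID_MASK) >> SKETCH_USBID_SHIFT;
}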
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index fd24debe58a3..80c9abcc3f97 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Intel INT3496 ACPI device extcon driver
*
@@ -7,15 +8,6 @@
*
* Copyright (c) 2014, Intel Corporation.
* Author: David Cohen <david.a.cohen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/acpi.h>
@@ -192,4 +184,4 @@ module_platform_driver(int3496_driver);
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index b871836da8a4..22d2feb1f8bc 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,20 +1,10 @@
-/*
- * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 227651ff9666..a79537ebb671 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -1,19 +1,9 @@
-/*
- * extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
- *
- * Copyright (C) 2012 Samsung Electrnoics
- * Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max77693.c - MAX77693 extcon driver to support MAX77693 MUIC
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
index c9fcd6cd41cb..b98cbd0362f5 100644
--- a/drivers/extcon/extcon-max77843.c
+++ b/drivers/extcon/extcon-max77843.c
@@ -1,15 +1,10 @@
-/*
- * extcon-max77843.c - Maxim MAX77843 extcon driver to support
- * MUIC(Micro USB Interface Controller)
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max77843.c - Maxim MAX77843 extcon driver to support
+// MUIC (Micro USB Interface Controller)
+//
+// Copyright (C) 2015 Samsung Electronics
+// Author: Jaewon Kim <jaewon02.kim@samsung.com>
#include <linux/extcon-provider.h>
#include <linux/i2c.h>
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 9f30f4929b72..bdabb2479e0d 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -1,19 +1,9 @@
-/*
- * extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
- *
- * Copyright (C) 2012 Samsung Electronics
- * Donggeun Kim <dg77.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// extcon-max8997.c - MAX8997 extcon driver to support MAX8997 MUIC
+//
+// Copyright (C) 2012 Samsung Electronics
+// Donggeun Kim <dg77.kim@samsung.com>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index b9d27c8fe57e..5ab0498be652 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -628,7 +628,7 @@ int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned long flags;
int index, ret = 0;
- *prop_val = (union extcon_property_value)(0);
+ *prop_val = (union extcon_property_value){0};
if (!edev)
return -EINVAL;
@@ -1123,7 +1123,6 @@ int extcon_dev_register(struct extcon_dev *edev)
(unsigned long)atomic_inc_return(&edev_no));
if (edev->max_supported) {
- char buf[10];
char *str;
struct extcon_cable *cable;
@@ -1137,9 +1136,7 @@ int extcon_dev_register(struct extcon_dev *edev)
for (index = 0; index < edev->max_supported; index++) {
cable = &edev->cables[index];
- snprintf(buf, 10, "cable.%d", index);
- str = kzalloc(strlen(buf) + 1,
- GFP_KERNEL);
+ str = kasprintf(GFP_KERNEL, "cable.%d", index);
if (!str) {
for (index--; index >= 0; index--) {
cable = &edev->cables[index];
@@ -1149,7 +1146,6 @@ int extcon_dev_register(struct extcon_dev *edev)
goto err_alloc_cables;
}
- strcpy(str, buf);
cable->edev = edev;
cable->cable_index = index;
@@ -1172,7 +1168,6 @@ int extcon_dev_register(struct extcon_dev *edev)
}
if (edev->max_supported && edev->mutually_exclusive) {
- char buf[80];
char *name;
/* Count the size of mutually_exclusive array */
@@ -1197,9 +1192,8 @@ int extcon_dev_register(struct extcon_dev *edev)
}
for (index = 0; edev->mutually_exclusive[index]; index++) {
- sprintf(buf, "0x%x", edev->mutually_exclusive[index]);
- name = kzalloc(strlen(buf) + 1,
- GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "0x%x",
+ edev->mutually_exclusive[index]);
if (!name) {
for (index--; index >= 0; index--) {
kfree(edev->d_attrs_muex[index].attr.
@@ -1210,7 +1204,6 @@ int extcon_dev_register(struct extcon_dev *edev)
ret = -ENOMEM;
goto err_muex;
}
- strcpy(name, buf);
sysfs_attr_init(&edev->d_attrs_muex[index].attr);
edev->d_attrs_muex[index].attr.name = name;
edev->d_attrs_muex[index].attr.mode = 0000;
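Both hunks above collapse the same three-step pattern, snprintf() into a fixed stack buffer, kzalloc(strlen() + 1), then strcpy(), into a single kasprintf() call that sizes and fills the allocation in one step. Side by side, using the cable name as the example:

	/* before: fixed-size stack buffer, three calls, two copies */
	char buf[10];
	snprintf(buf, sizeof(buf), "cable.%d", index);
	str = kzalloc(strlen(buf) + 1, GFP_KERNEL);
	if (str)
		strcpy(str, buf);

	/* after: exact-size heap allocation in one call */
	str = kasprintf(GFP_KERNEL, "cable.%d", index);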
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index a456a000048b..91a0404affe2 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -10,37 +10,31 @@ if GOOGLE_FIRMWARE
config GOOGLE_SMI
tristate "SMI interface for Google platforms"
- depends on X86 && ACPI && DMI && EFI
- select EFI_VARS
+ depends on X86 && ACPI && DMI
help
Say Y here if you want to enable SMI callbacks for Google
platforms. This provides an interface for writing to and
- clearing the EFI event log and reading and writing NVRAM
+ clearing the event log. If EFI_VARS is also enabled this
+ driver provides an interface for reading and writing NVRAM
variables.
config GOOGLE_COREBOOT_TABLE
- tristate
- depends on GOOGLE_COREBOOT_TABLE_ACPI || GOOGLE_COREBOOT_TABLE_OF
-
-config GOOGLE_COREBOOT_TABLE_ACPI
- tristate "Coreboot Table Access - ACPI"
- depends on ACPI
- select GOOGLE_COREBOOT_TABLE
+ tristate "Coreboot Table Access"
+ depends on ACPI || OF
help
This option enables the coreboot_table module, which provides other
- firmware modules to access to the coreboot table. The coreboot table
- pointer is accessed through the ACPI "GOOGCB00" object.
+ firmware modules access to the coreboot table. The coreboot table
+ pointer is accessed through the ACPI "GOOGCB00" object or the
+ device tree node /firmware/coreboot.
If unsure say N.
+config GOOGLE_COREBOOT_TABLE_ACPI
+ tristate
+ select GOOGLE_COREBOOT_TABLE
+
config GOOGLE_COREBOOT_TABLE_OF
- tristate "Coreboot Table Access - Device Tree"
- depends on OF
+ tristate
select GOOGLE_COREBOOT_TABLE
- help
- This option enable the coreboot_table module, which provide other
- firmware modules to access coreboot table. The coreboot table pointer
- is accessed through the device tree node /firmware/coreboot.
- If unsure say N.
config GOOGLE_MEMCONSOLE
tristate
diff --git a/drivers/firmware/google/Makefile b/drivers/firmware/google/Makefile
index d0b3fba96194..d17caded5d88 100644
--- a/drivers/firmware/google/Makefile
+++ b/drivers/firmware/google/Makefile
@@ -2,8 +2,6 @@
obj-$(CONFIG_GOOGLE_SMI) += gsmi.o
obj-$(CONFIG_GOOGLE_COREBOOT_TABLE) += coreboot_table.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_ACPI) += coreboot_table-acpi.o
-obj-$(CONFIG_GOOGLE_COREBOOT_TABLE_OF) += coreboot_table-of.o
obj-$(CONFIG_GOOGLE_FRAMEBUFFER_COREBOOT) += framebuffer-coreboot.o
obj-$(CONFIG_GOOGLE_MEMCONSOLE) += memconsole.o
obj-$(CONFIG_GOOGLE_MEMCONSOLE_COREBOOT) += memconsole-coreboot.o
diff --git a/drivers/firmware/google/coreboot_table-acpi.c b/drivers/firmware/google/coreboot_table-acpi.c
deleted file mode 100644
index 77197fe3d42f..000000000000
--- a/drivers/firmware/google/coreboot_table-acpi.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * coreboot_table-acpi.c
- *
- * Using ACPI to locate Coreboot table and provide coreboot table access.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_acpi_probe(struct platform_device *pdev)
-{
- phys_addr_t phyaddr;
- resource_size_t len;
- struct coreboot_table_header __iomem *header = NULL;
- struct resource *res;
- void __iomem *ptr = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
- len = resource_size(res);
- if (!res->start || !len)
- return -EINVAL;
-
- phyaddr = res->start;
- header = ioremap_cache(phyaddr, sizeof(*header));
- if (header == NULL)
- return -ENOMEM;
-
- ptr = ioremap_cache(phyaddr,
- header->header_bytes + header->table_bytes);
- iounmap(header);
- if (!ptr)
- return -ENOMEM;
-
- return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_acpi_remove(struct platform_device *pdev)
-{
- return coreboot_table_exit();
-}
-
-static const struct acpi_device_id cros_coreboot_acpi_match[] = {
- { "GOOGCB00", 0 },
- { "BOOT0000", 0 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
-
-static struct platform_driver coreboot_table_acpi_driver = {
- .probe = coreboot_table_acpi_probe,
- .remove = coreboot_table_acpi_remove,
- .driver = {
- .name = "coreboot_table_acpi",
- .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
- },
-};
-
-static int __init coreboot_table_acpi_init(void)
-{
- return platform_driver_register(&coreboot_table_acpi_driver);
-}
-
-module_init(coreboot_table_acpi_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
deleted file mode 100644
index f15bf404c579..000000000000
--- a/drivers/firmware/google/coreboot_table-of.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * coreboot_table-of.c
- *
- * Coreboot table access through open firmware.
- *
- * Copyright 2017 Google Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License v2.0 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-
-#include "coreboot_table.h"
-
-static int coreboot_table_of_probe(struct platform_device *pdev)
-{
- struct device_node *fw_dn = pdev->dev.of_node;
- void __iomem *ptr;
-
- ptr = of_iomap(fw_dn, 0);
- of_node_put(fw_dn);
- if (!ptr)
- return -ENOMEM;
-
- return coreboot_table_init(&pdev->dev, ptr);
-}
-
-static int coreboot_table_of_remove(struct platform_device *pdev)
-{
- return coreboot_table_exit();
-}
-
-static const struct of_device_id coreboot_of_match[] = {
- { .compatible = "coreboot" },
- {},
-};
-
-static struct platform_driver coreboot_table_of_driver = {
- .probe = coreboot_table_of_probe,
- .remove = coreboot_table_of_remove,
- .driver = {
- .name = "coreboot_table_of",
- .of_match_table = coreboot_of_match,
- },
-};
-
-static int __init platform_coreboot_table_of_init(void)
-{
- struct platform_device *pdev;
- struct device_node *of_node;
-
- /* Limit device creation to the presence of /firmware/coreboot node */
- of_node = of_find_node_by_path("/firmware/coreboot");
- if (!of_node)
- return -ENODEV;
-
- if (!of_match_node(coreboot_of_match, of_node))
- return -ENODEV;
-
- pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
- if (!pdev)
- return -ENODEV;
-
- return platform_driver_register(&coreboot_table_of_driver);
-}
-
-module_init(platform_coreboot_table_of_init);
-
-MODULE_AUTHOR("Google, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c
index 19db5709ae28..078d3bbe632f 100644
--- a/drivers/firmware/google/coreboot_table.c
+++ b/drivers/firmware/google/coreboot_table.c
@@ -16,12 +16,15 @@
* GNU General Public License for more details.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "coreboot_table.h"
@@ -29,8 +32,6 @@
#define CB_DEV(d) container_of(d, struct coreboot_device, dev)
#define CB_DRV(d) container_of(d, struct coreboot_driver, drv)
-static struct coreboot_table_header __iomem *ptr_header;
-
static int coreboot_bus_match(struct device *dev, struct device_driver *drv)
{
struct coreboot_device *device = CB_DEV(dev);
@@ -70,12 +71,6 @@ static struct bus_type coreboot_bus_type = {
.remove = coreboot_bus_remove,
};
-static int __init coreboot_bus_init(void)
-{
- return bus_register(&coreboot_bus_type);
-}
-module_init(coreboot_bus_init);
-
static void coreboot_device_release(struct device *dev)
{
struct coreboot_device *device = CB_DEV(dev);
@@ -97,62 +92,117 @@ void coreboot_driver_unregister(struct coreboot_driver *driver)
}
EXPORT_SYMBOL(coreboot_driver_unregister);
-int coreboot_table_init(struct device *dev, void __iomem *ptr)
+static int coreboot_table_populate(struct device *dev, void *ptr)
{
int i, ret;
void *ptr_entry;
struct coreboot_device *device;
- struct coreboot_table_entry entry;
- struct coreboot_table_header header;
-
- ptr_header = ptr;
- memcpy_fromio(&header, ptr_header, sizeof(header));
-
- if (strncmp(header.signature, "LBIO", sizeof(header.signature))) {
- pr_warn("coreboot_table: coreboot table missing or corrupt!\n");
- return -ENODEV;
- }
+ struct coreboot_table_entry *entry;
+ struct coreboot_table_header *header = ptr;
- ptr_entry = (void *)ptr_header + header.header_bytes;
- for (i = 0; i < header.table_entries; i++) {
- memcpy_fromio(&entry, ptr_entry, sizeof(entry));
+ ptr_entry = ptr + header->header_bytes;
+ for (i = 0; i < header->table_entries; i++) {
+ entry = ptr_entry;
- device = kzalloc(sizeof(struct device) + entry.size, GFP_KERNEL);
- if (!device) {
- ret = -ENOMEM;
- break;
- }
+ device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
dev_set_name(&device->dev, "coreboot%d", i);
device->dev.parent = dev;
device->dev.bus = &coreboot_bus_type;
device->dev.release = coreboot_device_release;
- memcpy_fromio(&device->entry, ptr_entry, entry.size);
+ memcpy(&device->entry, ptr_entry, entry->size);
ret = device_register(&device->dev);
if (ret) {
put_device(&device->dev);
- break;
+ return ret;
}
- ptr_entry += entry.size;
+ ptr_entry += entry->size;
}
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(coreboot_table_init);
-int coreboot_table_exit(void)
+static int coreboot_table_probe(struct platform_device *pdev)
{
- if (ptr_header) {
- bus_unregister(&coreboot_bus_type);
- iounmap(ptr_header);
- ptr_header = NULL;
+ resource_size_t len;
+ struct coreboot_table_header *header;
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ void *ptr;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ len = resource_size(res);
+ if (!res->start || !len)
+ return -EINVAL;
+
+ /* Check just the header first to make sure things are sane */
+ header = memremap(res->start, sizeof(*header), MEMREMAP_WB);
+ if (!header)
+ return -ENOMEM;
+
+ len = header->header_bytes + header->table_bytes;
+ ret = strncmp(header->signature, "LBIO", sizeof(header->signature));
+ memunmap(header);
+ if (ret) {
+ dev_warn(dev, "coreboot table missing or corrupt!\n");
+ return -ENODEV;
}
+ ptr = memremap(res->start, len, MEMREMAP_WB);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = bus_register(&coreboot_bus_type);
+ if (!ret) {
+ ret = coreboot_table_populate(dev, ptr);
+ if (ret)
+ bus_unregister(&coreboot_bus_type);
+ }
+ memunmap(ptr);
+
+ return ret;
+}
+
+static int coreboot_table_remove(struct platform_device *pdev)
+{
+ bus_unregister(&coreboot_bus_type);
return 0;
}
-EXPORT_SYMBOL(coreboot_table_exit);
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_coreboot_acpi_match[] = {
+ { "GOOGCB00", 0 },
+ { "BOOT0000", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_coreboot_acpi_match);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, coreboot_of_match);
+#endif
+
+static struct platform_driver coreboot_table_driver = {
+ .probe = coreboot_table_probe,
+ .remove = coreboot_table_remove,
+ .driver = {
+ .name = "coreboot_table",
+ .acpi_match_table = ACPI_PTR(cros_coreboot_acpi_match),
+ .of_match_table = of_match_ptr(coreboot_of_match),
+ },
+};
+module_platform_driver(coreboot_table_driver);
MODULE_AUTHOR("Google, Inc.");
MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/google/coreboot_table.h b/drivers/firmware/google/coreboot_table.h
index 8ad95a94481b..71a9de6b15fa 100644
--- a/drivers/firmware/google/coreboot_table.h
+++ b/drivers/firmware/google/coreboot_table.h
@@ -91,10 +91,4 @@ int coreboot_driver_register(struct coreboot_driver *driver);
/* Unregister a driver that uses the data from a coreboot table. */
void coreboot_driver_unregister(struct coreboot_driver *driver);
-/* Initialize coreboot table module given a pointer to iomem */
-int coreboot_table_init(struct device *dev, void __iomem *ptr);
-
-/* Cleanup coreboot table module */
-int coreboot_table_exit(void);
-
#endif /* __COREBOOT_TABLE_H */
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c8f169bf2e27..82ce1e6d261e 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -29,6 +29,7 @@
#include <linux/efi.h>
#include <linux/module.h>
#include <linux/ucs2_string.h>
+#include <linux/suspend.h>
#define GSMI_SHUTDOWN_CLEAN 0 /* Clean Shutdown */
/* TODO(mikew@google.com): Tie in HARDLOCKUP_DETECTOR with NMIWDT */
@@ -70,6 +71,8 @@
#define GSMI_CMD_SET_NVRAM_VAR 0x03
#define GSMI_CMD_SET_EVENT_LOG 0x08
#define GSMI_CMD_CLEAR_EVENT_LOG 0x09
+#define GSMI_CMD_LOG_S0IX_SUSPEND 0x0a
+#define GSMI_CMD_LOG_S0IX_RESUME 0x0b
#define GSMI_CMD_CLEAR_CONFIG 0x20
#define GSMI_CMD_HANDSHAKE_TYPE 0xC1
@@ -84,7 +87,7 @@ struct gsmi_buf {
u32 address; /* physical address of buffer */
};
-struct gsmi_device {
+static struct gsmi_device {
struct platform_device *pdev; /* platform device */
struct gsmi_buf *name_buf; /* variable name buffer */
struct gsmi_buf *data_buf; /* generic data buffer */
@@ -122,7 +125,6 @@ struct gsmi_log_entry_type_1 {
u32 instance;
} __packed;
-
/*
* Some platforms don't have explicit SMI handshake
* and need to wait for SMI to complete.
@@ -133,6 +135,15 @@ module_param(spincount, uint, 0600);
MODULE_PARM_DESC(spincount,
"The number of loop iterations to use when using the spin handshake.");
+/*
+ * Platforms might not support S0ix logging in their GSMI handlers. In order to
+ * avoid any side-effects of generating an SMI for S0ix logging, use the S0ix
+ * related GSMI commands only for those platforms that explicitly enable this
+ * option.
+ */
+static bool s0ix_logging_enable;
+module_param(s0ix_logging_enable, bool, 0600);
+
static struct gsmi_buf *gsmi_buf_alloc(void)
{
struct gsmi_buf *smibuf;
@@ -289,6 +300,10 @@ static int gsmi_exec(u8 func, u8 sub)
return rc;
}
+#ifdef CONFIG_EFI_VARS
+
+static struct efivars efivars;
+
static efi_status_t gsmi_get_variable(efi_char16_t *name,
efi_guid_t *vendor, u32 *attr,
unsigned long *data_size,
@@ -466,6 +481,8 @@ static const struct efivar_operations efivar_ops = {
.get_next_variable = gsmi_get_next_variable,
};
+#endif /* CONFIG_EFI_VARS */
+
static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t pos, size_t count)
@@ -480,11 +497,10 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
if (count < sizeof(u32))
return -EINVAL;
param.type = *(u32 *)buf;
- count -= sizeof(u32);
buf += sizeof(u32);
/* The remaining buffer is the data payload */
- if (count > gsmi_dev.data_buf->length)
+ if ((count - sizeof(u32)) > gsmi_dev.data_buf->length)
return -EINVAL;
param.data_len = count - sizeof(u32);
@@ -504,7 +520,7 @@ static ssize_t eventlog_write(struct file *filp, struct kobject *kobj,
spin_unlock_irqrestore(&gsmi_dev.lock, flags);
- return rc;
+ return (rc == 0) ? count : rc;
}
@@ -716,6 +732,12 @@ static const struct dmi_system_id gsmi_dmi_table[] __initconst = {
DMI_MATCH(DMI_BOARD_VENDOR, "Google, Inc."),
},
},
+ {
+ .ident = "Coreboot Firmware",
+ .matches = {
+ DMI_MATCH(DMI_BIOS_VENDOR, "coreboot"),
+ },
+ },
{}
};
MODULE_DEVICE_TABLE(dmi, gsmi_dmi_table);
@@ -762,7 +784,6 @@ static __init int gsmi_system_valid(void)
}
static struct kobject *gsmi_kobj;
-static struct efivars efivars;
static const struct platform_device_info gsmi_dev_info = {
.name = "gsmi",
@@ -771,6 +792,78 @@ static const struct platform_device_info gsmi_dev_info = {
.dma_mask = DMA_BIT_MASK(32),
};
+#ifdef CONFIG_PM
+static void gsmi_log_s0ix_info(u8 cmd)
+{
+ unsigned long flags;
+
+ /*
+ * If platform has not enabled S0ix logging, then no action is
+ * necessary.
+ */
+ if (!s0ix_logging_enable)
+ return;
+
+ spin_lock_irqsave(&gsmi_dev.lock, flags);
+
+ memset(gsmi_dev.param_buf->start, 0, gsmi_dev.param_buf->length);
+
+ gsmi_exec(GSMI_CALLBACK, cmd);
+
+ spin_unlock_irqrestore(&gsmi_dev.lock, flags);
+}
+
+static int gsmi_log_s0ix_suspend(struct device *dev)
+{
+ /*
+ * If system is not suspending via firmware using the standard ACPI Sx
+ * types, then make a GSMI call to log the suspend info.
+ */
+ if (!pm_suspend_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_SUSPEND);
+
+ /*
+ * Always return success, since we do not want suspend
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static int gsmi_log_s0ix_resume(struct device *dev)
+{
+ /*
+ * If system did not resume via firmware, then make a GSMI call to log
+ * the resume info and wake source.
+ */
+ if (!pm_resume_via_firmware())
+ gsmi_log_s0ix_info(GSMI_CMD_LOG_S0IX_RESUME);
+
+ /*
+ * Always return success, since we do not want resume
+ * to fail just because of logging failure.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops gsmi_pm_ops = {
+ .suspend_noirq = gsmi_log_s0ix_suspend,
+ .resume_noirq = gsmi_log_s0ix_resume,
+};
+
+static int gsmi_platform_driver_probe(struct platform_device *dev)
+{
+ return 0;
+}
+
+static struct platform_driver gsmi_driver_info = {
+ .driver = {
+ .name = "gsmi",
+ .pm = &gsmi_pm_ops,
+ },
+ .probe = gsmi_platform_driver_probe,
+};
+#endif
+
static __init int gsmi_init(void)
{
unsigned long flags;
@@ -782,6 +875,14 @@ static __init int gsmi_init(void)
gsmi_dev.smi_cmd = acpi_gbl_FADT.smi_command;
+#ifdef CONFIG_PM
+ ret = platform_driver_register(&gsmi_driver_info);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "gsmi: unable to register platform driver\n");
+ return ret;
+ }
+#endif
+
/* register device */
gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
if (IS_ERR(gsmi_dev.pdev)) {
@@ -886,11 +987,14 @@ static __init int gsmi_init(void)
goto out_remove_bin_file;
}
+#ifdef CONFIG_EFI_VARS
ret = efivars_register(&efivars, &efivar_ops, gsmi_kobj);
if (ret) {
printk(KERN_INFO "gsmi: Failed to register efivars\n");
- goto out_remove_sysfs_files;
+ sysfs_remove_files(gsmi_kobj, gsmi_attrs);
+ goto out_remove_bin_file;
}
+#endif
register_reboot_notifier(&gsmi_reboot_notifier);
register_die_notifier(&gsmi_die_notifier);
@@ -901,8 +1005,6 @@ static __init int gsmi_init(void)
return 0;
-out_remove_sysfs_files:
- sysfs_remove_files(gsmi_kobj, gsmi_attrs);
out_remove_bin_file:
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
out_err:
@@ -922,7 +1024,9 @@ static void __exit gsmi_exit(void)
unregister_die_notifier(&gsmi_die_notifier);
atomic_notifier_chain_unregister(&panic_notifier_list,
&gsmi_panic_notifier);
+#ifdef CONFIG_EFI_VARS
efivars_unregister(&efivars);
+#endif
sysfs_remove_files(gsmi_kobj, gsmi_attrs);
sysfs_remove_bin_file(gsmi_kobj, &eventlog_bin_attr);
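The S0ix hooks above run in the noirq phase and deliberately never fail the transition; they also fire only when the suspend path is not firmware-mediated, which is what pm_suspend_via_firmware()/pm_resume_via_firmware() from <linux/suspend.h> report. The wiring, as a sketch with the SMI call stubbed out:

#include <linux/suspend.h>

static int sketch_suspend_noirq(struct device *dev)
{
	if (!pm_suspend_via_firmware())		/* S0ix-style entry, not ACPI Sx */
		sketch_log_smi(SKETCH_CMD_SUSPEND);	/* stand-in for gsmi_exec() */
	return 0;	/* never fail suspend over a logging miss */
}

static int sketch_resume_noirq(struct device *dev)
{
	if (!pm_resume_via_firmware())
		sketch_log_smi(SKETCH_CMD_RESUME);	/* stand-in */
	return 0;
}

static const struct dev_pm_ops sketch_pm_ops = {
	.suspend_noirq	= sketch_suspend_noirq,
	.resume_noirq	= sketch_resume_noirq,
};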
diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c
index 1aa67bb5d8c0..c0c0b4e4e281 100644
--- a/drivers/firmware/google/vpd.c
+++ b/drivers/firmware/google/vpd.c
@@ -198,7 +198,7 @@ static int vpd_section_init(const char *name, struct vpd_section *sec,
sec->name = name;
- /* We want to export the raw partion with name ${name}_raw */
+ /* We want to export the raw partition with name ${name}_raw */
sec->raw_name = kasprintf(GFP_KERNEL, "%s_raw", name);
if (!sec->raw_name) {
err = -ENOMEM;
diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
index f395dec27113..390aa13391e4 100644
--- a/drivers/firmware/scpi_pm_domain.c
+++ b/drivers/firmware/scpi_pm_domain.c
@@ -121,7 +121,7 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
scpi_pd->domain = i;
scpi_pd->ops = scpi_ops;
- sprintf(scpi_pd->name, "%s.%d", np->name, i);
+ sprintf(scpi_pd->name, "%pOFn.%d", np, i);
scpi_pd->genpd.name = scpi_pd->name;
scpi_pd->genpd.power_off = scpi_pd_power_off;
scpi_pd->genpd.power_on = scpi_pd_power_on;
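%pOFn is the printk extension that prints a device_node's name, letting callers drop the direct np->name dereference. A one-line sketch, assuming np is a struct device_node * and the buffer is illustrative:

	char label[32];
	snprintf(label, sizeof(label), "%pOFn.%d", np, i);	/* e.g. "power-controller.0" */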
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 7fa793672a7a..610a1558e0ed 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -453,8 +453,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
ALTERA_CVP_MGR_NAME, pci_name(pdev));
- mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
- &altera_cvp_ops, conf);
+ mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
if (!mgr) {
ret = -ENOMEM;
goto err_unmap;
@@ -463,10 +463,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, mgr);
ret = fpga_mgr_register(mgr);
- if (ret) {
- fpga_mgr_free(mgr);
+ if (ret)
goto err_unmap;
- }
ret = driver_create_file(&altera_cvp_driver.driver,
&driver_attr_chkcfg);
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index 23660ccd634b..a78e49c63c64 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -121,18 +121,16 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
/* Get f2s bridge configuration saved in handoff register */
regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
- br = fpga_bridge_create(dev, F2S_BRIDGE_NAME,
- &altera_fpga2sdram_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
ret = fpga_bridge_register(br);
- if (ret) {
- fpga_bridge_free(br);
+ if (ret)
return ret;
- }
dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index ffd586c48ecf..dd58c4aea92e 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -213,7 +213,6 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
struct fpga_bridge *br;
struct resource *res;
u32 status, revision;
- int ret;
if (!np)
return -ENODEV;
@@ -245,20 +244,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
priv->base_addr = base_addr;
- br = fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
- &altera_freeze_br_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret) {
- fpga_bridge_free(br);
- return ret;
- }
-
- return 0;
+ return fpga_bridge_register(br);
}
static int altera_freeze_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index a974d3f60321..77b95f251821 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -180,7 +180,8 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
}
}
- br = fpga_bridge_create(dev, priv->name, &altera_hps2fpga_br_ops, priv);
+ br = devm_fpga_bridge_create(dev, priv->name,
+ &altera_hps2fpga_br_ops, priv);
if (!br) {
ret = -ENOMEM;
goto err;
@@ -190,12 +191,10 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
ret = fpga_bridge_register(br);
if (ret)
- goto err_free;
+ goto err;
return 0;
-err_free:
- fpga_bridge_free(br);
err:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index 65e0b6a2c031..a7a3bf0b5202 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -177,7 +177,6 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
{
struct alt_pr_priv *priv;
struct fpga_manager *mgr;
- int ret;
u32 val;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -192,17 +191,13 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
(val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
(int)(val & ALT_PR_CSR_PR_START));
- mgr = fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
if (!mgr)
return -ENOMEM;
dev_set_drvdata(dev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
EXPORT_SYMBOL_GPL(alt_pr_register);
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 24b25c626036..33aafda50af5 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -239,7 +239,6 @@ static int altera_ps_probe(struct spi_device *spi)
struct altera_ps_conf *conf;
const struct of_device_id *of_id;
struct fpga_manager *mgr;
- int ret;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
@@ -275,18 +274,14 @@ static int altera_ps_probe(struct spi_device *spi)
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
dev_driver_string(&spi->dev), dev_name(&spi->dev));
- mgr = fpga_mgr_create(&spi->dev, conf->mgr_name,
- &altera_ps_ops, conf);
+ mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int altera_ps_remove(struct spi_device *spi)
diff --git a/drivers/fpga/dfl-afu-dma-region.c b/drivers/fpga/dfl-afu-dma-region.c
index 0e81d33af856..025aba3ea76c 100644
--- a/drivers/fpga/dfl-afu-dma-region.c
+++ b/drivers/fpga/dfl-afu-dma-region.c
@@ -70,7 +70,7 @@ static int afu_dma_adjust_locked_vm(struct device *dev, long npages, bool incr)
dev_dbg(dev, "[%d] RLIMIT_MEMLOCK %c%ld %ld/%ld%s\n", current->pid,
incr ? '+' : '-', npages << PAGE_SHIFT,
current->mm->locked_vm << PAGE_SHIFT, rlimit(RLIMIT_MEMLOCK),
- ret ? "- execeeded" : "");
+ ret ? "- exceeded" : "");
up_write(&current->mm->mmap_sem);
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 7cc041def8b3..3ff9f3a687ce 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -61,7 +61,6 @@ static int fme_br_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct fme_br_priv *priv;
struct fpga_bridge *br;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -69,18 +68,14 @@ static int fme_br_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(dev);
- br = fpga_bridge_create(dev, "DFL FPGA FME Bridge",
- &fme_bridge_ops, priv);
+ br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
if (!br)
return -ENOMEM;
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- fpga_bridge_free(br);
-
- return ret;
+ return fpga_bridge_register(br);
}
static int fme_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index b5ef405b6d88..76f37709dd1a 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -201,7 +201,7 @@ static int fme_mgr_write(struct fpga_manager *mgr,
}
if (count < 4) {
- dev_err(dev, "Invaild PR bitstream size\n");
+ dev_err(dev, "Invalid PR bitstream size\n");
return -EINVAL;
}
@@ -287,7 +287,6 @@ static int fme_mgr_probe(struct platform_device *pdev)
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
struct resource *res;
- int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -309,19 +308,15 @@ static int fme_mgr_probe(struct platform_device *pdev)
fme_mgr_get_compat_id(priv->ioaddr, compat_id);
- mgr = fpga_mgr_create(dev, "DFL FME FPGA Manager",
- &fme_mgr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager",
+ &fme_mgr_ops, priv);
if (!mgr)
return -ENOMEM;
mgr->compat_id = compat_id;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int fme_mgr_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 51a5ac2293a7..ec134ec93f08 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -39,7 +39,7 @@ static int fme_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = fpga_region_create(dev, mgr, fme_region_get_bridges);
+ region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges);
if (!region) {
ret = -ENOMEM;
goto eprobe_mgr_put;
@@ -51,14 +51,12 @@ static int fme_region_probe(struct platform_device *pdev)
ret = fpga_region_register(region);
if (ret)
- goto region_free;
+ goto eprobe_mgr_put;
dev_dbg(dev, "DFL FME FPGA Region probed\n");
return 0;
-region_free:
- fpga_region_free(region);
eprobe_mgr_put:
fpga_mgr_put(mgr);
return ret;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index a9b521bccb06..2c09e502e721 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -899,7 +899,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
if (!cdev)
return ERR_PTR(-ENOMEM);
- cdev->region = fpga_region_create(info->dev, NULL, NULL);
+ cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
if (!cdev->region) {
ret = -ENOMEM;
goto free_cdev_exit;
@@ -911,7 +911,7 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
ret = fpga_region_register(cdev->region);
if (ret)
- goto free_region_exit;
+ goto free_cdev_exit;
/* create and init build info for enumeration */
binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
@@ -942,8 +942,6 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
unregister_region_exit:
fpga_region_unregister(cdev->region);
-free_region_exit:
- fpga_region_free(cdev->region);
free_cdev_exit:
devm_kfree(info->dev, cdev);
return ERR_PTR(ret);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index c983dac97501..80bd8f1b2aa6 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -324,6 +324,9 @@ ATTRIBUTE_GROUPS(fpga_bridge);
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
*
+ * The caller of this function is responsible for freeing the bridge with
+ * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended.
+ *
* Return: struct fpga_bridge or NULL
*/
struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
@@ -378,8 +381,8 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_bridge_create);
/**
- * fpga_bridge_free - free a fpga bridge and its id
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_free - free a fpga bridge created by fpga_bridge_create()
+ * @bridge: FPGA bridge struct
*/
void fpga_bridge_free(struct fpga_bridge *bridge)
{
@@ -388,9 +391,56 @@ void fpga_bridge_free(struct fpga_bridge *bridge)
}
EXPORT_SYMBOL_GPL(fpga_bridge_free);
+static void devm_fpga_bridge_release(struct device *dev, void *res)
+{
+ struct fpga_bridge *bridge = *(struct fpga_bridge **)res;
+
+ fpga_bridge_free(bridge);
+}
+
/**
- * fpga_bridge_register - register a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * devm_fpga_bridge_create - create and init a managed struct fpga_bridge
+ * @dev: FPGA bridge device from pdev
+ * @name: FPGA bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: FPGA bridge private data
+ *
+ * This function is intended for use in a FPGA bridge driver's probe function.
+ * After the bridge driver creates the struct with devm_fpga_bridge_create(), it
+ * should register the bridge with fpga_bridge_register(). The bridge driver's
+ * remove function should call fpga_bridge_unregister(). The bridge struct
+ * allocated with this function will be freed automatically on driver detach.
+ * This includes the case of a probe function returning error before calling
+ * fpga_bridge_register(), the struct will still get cleaned up.
+ *
+ * Return: struct fpga_bridge or NULL
+ */
+struct fpga_bridge
+*devm_fpga_bridge_create(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv)
+{
+ struct fpga_bridge **ptr, *bridge;
+
+ ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ bridge = fpga_bridge_create(dev, name, br_ops, priv);
+ if (!bridge) {
+ devres_free(ptr);
+ } else {
+ *ptr = bridge;
+ devres_add(dev, ptr);
+ }
+
+ return bridge;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
+
+/**
+ * fpga_bridge_register - register a FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
*
* Return: 0 for success, error code otherwise.
*/
@@ -412,8 +462,11 @@ int fpga_bridge_register(struct fpga_bridge *bridge)
EXPORT_SYMBOL_GPL(fpga_bridge_register);
/**
- * fpga_bridge_unregister - unregister and free a fpga bridge
- * @bridge: FPGA bridge struct created by fpga_bridge_create
+ * fpga_bridge_unregister - unregister a FPGA bridge
+ *
+ * @bridge: FPGA bridge struct
+ *
+ * This function is intended for use in a FPGA bridge driver's remove function.
*/
void fpga_bridge_unregister(struct fpga_bridge *bridge)
{
@@ -430,9 +483,6 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
static void fpga_bridge_dev_release(struct device *dev)
{
- struct fpga_bridge *bridge = to_fpga_bridge(dev);
-
- fpga_bridge_free(bridge);
}
static int __init fpga_bridge_dev_init(void)
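devm_fpga_bridge_create() above, and its manager and region counterparts below, all follow the standard devres wrapper recipe: allocate a release record big enough for one pointer, build the object with the unmanaged constructor, then either free the record or hand it to devres. The recipe in the abstract, with obj standing in for bridge/manager/region:

static void devm_obj_release(struct device *dev, void *res)
{
	obj_free(*(struct obj **)res);		/* plain destructor */
}

struct obj *devm_obj_create(struct device *dev, const char *name)
{
	struct obj **ptr, *o;

	ptr = devres_alloc(devm_obj_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	o = obj_create(dev, name);		/* unmanaged constructor */
	if (!o) {
		devres_free(ptr);		/* nothing to manage */
	} else {
		*ptr = o;
		devres_add(dev, ptr);		/* released on driver detach */
	}
	return o;
}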
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index a41b07e37884..c3866816456a 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -558,6 +558,9 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
+ * The caller of this function is responsible for freeing the struct with
+ * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended.
+ *
* Return: pointer to struct fpga_manager or NULL
*/
struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
@@ -618,8 +621,8 @@ error_kfree:
EXPORT_SYMBOL_GPL(fpga_mgr_create);
/**
- * fpga_mgr_free - deallocate a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * fpga_mgr_free - free a FPGA manager created with fpga_mgr_create()
+ * @mgr: fpga manager struct
*/
void fpga_mgr_free(struct fpga_manager *mgr)
{
@@ -628,9 +631,55 @@ void fpga_mgr_free(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_free);
+static void devm_fpga_mgr_release(struct device *dev, void *res)
+{
+ struct fpga_manager *mgr = *(struct fpga_manager **)res;
+
+ fpga_mgr_free(mgr);
+}
+
+/**
+ * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
+ * @dev: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * This function is intended for use in a FPGA manager driver's probe function.
+ * After the manager driver creates the manager struct with
+ * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
+ * manager driver's remove function should call fpga_mgr_unregister(). The
+ * manager struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning error
+ * before calling fpga_mgr_register(); the struct will still get cleaned up.
+ *
+ * Return: pointer to struct fpga_manager or NULL
+ */
+struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops,
+ void *priv)
+{
+ struct fpga_manager **ptr, *mgr;
+
+ ptr = devres_alloc(devm_fpga_mgr_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ mgr = fpga_mgr_create(dev, name, mops, priv);
+ if (!mgr) {
+ devres_free(ptr);
+ } else {
+ *ptr = mgr;
+ devres_add(dev, ptr);
+ }
+
+ return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
+
/**
* fpga_mgr_register - register a FPGA manager
- * @mgr: fpga manager struct created by fpga_mgr_create
+ * @mgr: fpga manager struct
*
* Return: 0 on success, negative error code otherwise.
*/
@@ -661,8 +710,10 @@ error_device:
EXPORT_SYMBOL_GPL(fpga_mgr_register);
/**
- * fpga_mgr_unregister - unregister and free a FPGA manager
- * @mgr: fpga manager struct
+ * fpga_mgr_unregister - unregister a FPGA manager
+ * @mgr: fpga manager struct
+ *
+ * This function is intended for use in a FPGA manager driver's remove function.
*/
void fpga_mgr_unregister(struct fpga_manager *mgr)
{
@@ -681,9 +732,6 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
static void fpga_mgr_dev_release(struct device *dev)
{
- struct fpga_manager *mgr = to_fpga_manager(dev);
-
- fpga_mgr_free(mgr);
}
static int __init fpga_mgr_class_init(void)
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index 0d65220d5ec5..bde5a9d460c5 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -185,6 +185,10 @@ ATTRIBUTE_GROUPS(fpga_region);
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
+ * The caller of this function is responsible for freeing the resulting region
+ * struct with fpga_region_free(). Using devm_fpga_region_create() instead is
+ * recommended.
+ *
* Return: struct fpga_region or NULL
*/
struct fpga_region
@@ -230,8 +234,8 @@ err_free:
EXPORT_SYMBOL_GPL(fpga_region_create);
/**
- * fpga_region_free - free a struct fpga_region
- * @region: FPGA region created by fpga_region_create
+ * fpga_region_free - free a FPGA region created by fpga_region_create()
+ * @region: FPGA region
*/
void fpga_region_free(struct fpga_region *region)
{
@@ -240,21 +244,69 @@ void fpga_region_free(struct fpga_region *region)
}
EXPORT_SYMBOL_GPL(fpga_region_free);
+static void devm_fpga_region_release(struct device *dev, void *res)
+{
+ struct fpga_region *region = *(struct fpga_region **)res;
+
+ fpga_region_free(region);
+}
+
+/**
+ * devm_fpga_region_create - create and initialize a managed FPGA region struct
+ * @dev: device parent
+ * @mgr: manager that programs this region
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * This function is intended for use in a FPGA region driver's probe function.
+ * After the region driver creates the region struct with
+ * devm_fpga_region_create(), it should register it with fpga_region_register().
+ * The region driver's remove function should call fpga_region_unregister().
+ * The region struct allocated with this function will be freed automatically on
+ * driver detach. This includes the case of a probe function returning error
+ * before calling fpga_region_register(); the struct will still get cleaned up.
+ *
+ * Return: struct fpga_region or NULL
+ */
+struct fpga_region
+*devm_fpga_region_create(struct device *dev,
+ struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *))
+{
+ struct fpga_region **ptr, *region;
+
+ ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ region = fpga_region_create(dev, mgr, get_bridges);
+ if (!region) {
+ devres_free(ptr);
+ } else {
+ *ptr = region;
+ devres_add(dev, ptr);
+ }
+
+ return region;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_region_create);
+
/**
* fpga_region_register - register a FPGA region
- * @region: FPGA region created by fpga_region_create
+ * @region: FPGA region
+ *
* Return: 0 or -errno
*/
int fpga_region_register(struct fpga_region *region)
{
return device_add(&region->dev);
-
}
EXPORT_SYMBOL_GPL(fpga_region_register);
/**
- * fpga_region_unregister - unregister and free a FPGA region
+ * fpga_region_unregister - unregister a FPGA region
* @region: FPGA region
+ *
+ * This function is intended for use in a FPGA region driver's remove function.
*/
void fpga_region_unregister(struct fpga_region *region)
{
@@ -264,9 +316,6 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
static void fpga_region_dev_release(struct device *dev)
{
- struct fpga_region *region = to_fpga_region(dev);
-
- fpga_region_free(region);
}
/**
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 5981c7ee7a7d..6154661b8f76 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -175,18 +175,14 @@ static int ice40_fpga_probe(struct spi_device *spi)
return ret;
}
- mgr = fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
- &ice40_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int ice40_fpga_remove(struct spi_device *spi)
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index a582e0000c97..4d8a87641587 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -356,25 +356,20 @@ static int machxo2_spi_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct fpga_manager *mgr;
- int ret;
if (spi->max_speed_hz > MACHXO2_MAX_SPEED) {
dev_err(dev, "Speed is too high\n");
return -EINVAL;
}
- mgr = fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
- &machxo2_ops, spi);
+ mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
+ &machxo2_ops, spi);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int machxo2_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index 052a1342ab7e..122286fd255a 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -410,7 +410,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
+ region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
if (!region) {
ret = -ENOMEM;
goto eprobe_mgr_put;
@@ -418,7 +418,7 @@ static int of_fpga_region_probe(struct platform_device *pdev)
ret = fpga_region_register(region);
if (ret)
- goto eprobe_free;
+ goto eprobe_mgr_put;
of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
dev_set_drvdata(dev, region);
@@ -427,8 +427,6 @@ static int of_fpga_region_probe(struct platform_device *pdev)
return 0;
-eprobe_free:
- fpga_region_free(region);
eprobe_mgr_put:
fpga_mgr_put(mgr);
return ret;
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index be30c48eb6e4..573d88bdf730 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -508,8 +508,8 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -EBUSY;
}
- mgr = fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
- &socfpga_a10_fpga_mgr_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
if (!mgr)
return -ENOMEM;
@@ -517,7 +517,6 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
ret = fpga_mgr_register(mgr);
if (ret) {
- fpga_mgr_free(mgr);
clk_disable_unprepare(priv->clk);
return ret;
}
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 959d71f26896..4a8a2fcd4e6c 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -571,18 +571,14 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
if (ret)
return ret;
- mgr = fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int socfpga_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 08efd1895b1b..dc22a5842609 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -118,7 +118,6 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
struct ts73xx_fpga_priv *priv;
struct fpga_manager *mgr;
struct resource *res;
- int ret;
priv = devm_kzalloc(kdev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -133,18 +132,14 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
return PTR_ERR(priv->io_base);
}
- mgr = fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
- &ts73xx_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
platform_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int ts73xx_fpga_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index 07ba1539e82c..641036135207 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -121,8 +121,8 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- br = fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
- &xlnx_pr_decoupler_br_ops, priv);
+ br = devm_fpga_bridge_create(&pdev->dev, "Xilinx PR Decoupler",
+ &xlnx_pr_decoupler_br_ops, priv);
if (!br) {
err = -ENOMEM;
goto err_clk;
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index 8d1945966533..469486be20c4 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -144,7 +144,6 @@ static int xilinx_spi_probe(struct spi_device *spi)
{
struct xilinx_spi_conf *conf;
struct fpga_manager *mgr;
- int ret;
conf = devm_kzalloc(&spi->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
@@ -167,18 +166,15 @@ static int xilinx_spi_probe(struct spi_device *spi)
return PTR_ERR(conf->done);
}
- mgr = fpga_mgr_create(&spi->dev, "Xilinx Slave Serial FPGA Manager",
- &xilinx_spi_ops, conf);
+ mgr = devm_fpga_mgr_create(&spi->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
if (!mgr)
return -ENOMEM;
spi_set_drvdata(spi, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- fpga_mgr_free(mgr);
-
- return ret;
+ return fpga_mgr_register(mgr);
}
static int xilinx_spi_remove(struct spi_device *spi)
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 3110e00121ca..bb82efeebb9d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -614,8 +614,8 @@ static int zynq_fpga_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- mgr = fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
- &zynq_fpga_ops, priv);
+ mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
+ &zynq_fpga_ops, priv);
if (!mgr)
return -ENOMEM;
@@ -624,7 +624,6 @@ static int zynq_fpga_probe(struct platform_device *pdev)
err = fpga_mgr_register(mgr);
if (err) {
dev_err(dev, "unable to register FPGA manager\n");
- fpga_mgr_free(mgr);
clk_unprepare(priv->clk);
return err;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 2b1a7b455aa8..55b72fbe1631 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1194,7 +1194,7 @@ int acpi_gpio_count(struct device *dev, const char *con_id)
bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
{
/* Never allow fallback if the device has properties */
- if (adev->data.properties || adev->driver_gpios)
+ if (acpi_dev_has_props(adev) || adev->driver_gpios)
return false;
return con_id == NULL;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 2d45d1dd9554..643f5edd68fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1446,8 +1446,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
}
/* The CEC module handles HDMI hotplug detection */
- cec_np = of_find_compatible_node(np->parent, NULL,
- "mediatek,mt8173-cec");
+ cec_np = of_get_compatible_child(np->parent, "mediatek,mt8173-cec");
if (!cec_np) {
dev_err(dev, "Failed to find CEC node\n");
return -EINVAL;
@@ -1457,8 +1456,10 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
if (!cec_pdev) {
dev_err(hdmi->dev, "Waiting for CEC device %pOF\n",
cec_np);
+ of_node_put(cec_np);
return -EPROBE_DEFER;
}
+ of_node_put(cec_np);
hdmi->cec_dev = &cec_pdev->dev;
/*
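The switch to of_get_compatible_child() narrows the search to direct children of the parent node, but like of_find_compatible_node() it returns the node with its refcount raised, so every exit path, including the -EPROBE_DEFER one added above, must drop it with of_node_put(). A small sketch of the pattern, with placeholder node and compatible names:

struct device_node *child;

child = of_get_compatible_child(parent, "vendor,example-block");
if (!child)
	return -ENODEV;

/* ... look up resources via the child node ... */

of_node_put(child);	/* balance the reference taken by the lookup */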
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index da1363a0c54d..93d70f4a2154 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -633,8 +633,7 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
struct device_node *child, *node;
int ret;
- node = of_find_compatible_node(dev->of_node, NULL,
- "qcom,gpu-pwrlevels");
+ node = of_get_compatible_child(dev->of_node, "qcom,gpu-pwrlevels");
if (!node) {
dev_err(dev, "Could not find the GPU powerlevels\n");
return -ENXIO;
@@ -655,6 +654,8 @@ static int adreno_get_legacy_pwrlevels(struct device *dev)
dev_pm_opp_add(dev, val, 0);
}
+ of_node_put(node);
+
return 0;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 61e1953ff921..18c846477ba2 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -182,6 +182,19 @@ config HID_BETOP_FF
Currently the following devices are known to be supported:
- BETOP 2185 PC & BFM MODE
+config HID_BIGBEN_FF
+ tristate "BigBen Interactive Kids' gamepad support"
+ depends on USB_HID
+ depends on NEW_LEDS
+ depends on LEDS_CLASS
+ select INPUT_FF_MEMLESS
+ default !EXPERT
+ help
+ Support for the "Kid-friendly Wired Controller" PS3OFMINIPAD
+ gamepad made by BigBen Interactive, originally sold as a PS3
+ accessory. This driver fixes input mapping and adds support for
+ force feedback effects and LEDs on the device.
+
config HID_CHERRY
tristate "Cherry Cymotion keyboard"
depends on HID
@@ -351,7 +364,7 @@ config HOLTEK_FF
config HID_GOOGLE_HAMMER
tristate "Google Hammer Keyboard"
- depends on USB_HID && LEDS_CLASS
+ depends on USB_HID && LEDS_CLASS && MFD_CROS_EC
---help---
Say Y here if you have a Google Hammer device.
@@ -596,6 +609,7 @@ config HID_MICROSOFT
tristate "Microsoft non-fully HID-compliant devices"
depends on HID
default !EXPERT
+ select INPUT_FF_MEMLESS
---help---
Support for Microsoft devices that are not fully compliant with HID standard.
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index bd7ac53b75c5..896a51ce7ce0 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_HID_ASUS) += hid-asus.o
obj-$(CONFIG_HID_AUREAL) += hid-aureal.o
obj-$(CONFIG_HID_BELKIN) += hid-belkin.o
obj-$(CONFIG_HID_BETOP_FF) += hid-betopff.o
+obj-$(CONFIG_HID_BIGBEN_FF) += hid-bigbenff.o
obj-$(CONFIG_HID_CHERRY) += hid-cherry.o
obj-$(CONFIG_HID_CHICONY) += hid-chicony.o
obj-$(CONFIG_HID_CMEDIA) += hid-cmedia.o
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
new file mode 100644
index 000000000000..3f6abd190df4
--- /dev/null
+++ b/drivers/hid/hid-bigbenff.c
@@ -0,0 +1,414 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * LED & force feedback support for BigBen Interactive
+ *
+ * 0x146b:0x0902 "Bigben Interactive Bigben Game Pad"
+ * "Kid-friendly Wired Controller" PS3OFMINIPAD SONY
+ * sold for use with the PS3
+ *
+ * Copyright (c) 2018 Hanno Zulla <kontakt@hanno.de>
+ */
+
+#include <linux/input.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/leds.h>
+#include <linux/hid.h>
+
+#include "hid-ids.h"
+
+
+/*
+ * The original descriptor for 0x146b:0x0902
+ *
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x09, 0x05, // Usage (Game Pad)
+ * 0xA1, 0x01, // Collection (Application)
+ * 0x15, 0x00, // Logical Minimum (0)
+ * 0x25, 0x01, // Logical Maximum (1)
+ * 0x35, 0x00, // Physical Minimum (0)
+ * 0x45, 0x01, // Physical Maximum (1)
+ * 0x75, 0x01, // Report Size (1)
+ * 0x95, 0x0D, // Report Count (13)
+ * 0x05, 0x09, // Usage Page (Button)
+ * 0x19, 0x01, // Usage Minimum (0x01)
+ * 0x29, 0x0D, // Usage Maximum (0x0D)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x95, 0x03, // Report Count (3)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x05, 0x01, // Usage Page (Generic Desktop Ctrls)
+ * 0x25, 0x07, // Logical Maximum (7)
+ * 0x46, 0x3B, 0x01, // Physical Maximum (315)
+ * 0x75, 0x04, // Report Size (4)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x65, 0x14, // Unit (System: English Rotation, Length: Centimeter)
+ * 0x09, 0x39, // Usage (Hat switch)
+ * 0x81, 0x42, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State)
+ * 0x65, 0x00, // Unit (None)
+ * 0x95, 0x01, // Report Count (1)
+ * 0x81, 0x01, // Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x26, 0xFF, 0x00, // Logical Maximum (255)
+ * 0x46, 0xFF, 0x00, // Physical Maximum (255)
+ * 0x09, 0x30, // Usage (X)
+ * 0x09, 0x31, // Usage (Y)
+ * 0x09, 0x32, // Usage (Z)
+ * 0x09, 0x35, // Usage (Rz)
+ * 0x75, 0x08, // Report Size (8)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x06, 0x00, 0xFF, // Usage Page (Vendor Defined 0xFF00)
+ * 0x09, 0x20, // Usage (0x20)
+ * 0x09, 0x21, // Usage (0x21)
+ * 0x09, 0x22, // Usage (0x22)
+ * 0x09, 0x23, // Usage (0x23)
+ * 0x09, 0x24, // Usage (0x24)
+ * 0x09, 0x25, // Usage (0x25)
+ * 0x09, 0x26, // Usage (0x26)
+ * 0x09, 0x27, // Usage (0x27)
+ * 0x09, 0x28, // Usage (0x28)
+ * 0x09, 0x29, // Usage (0x29)
+ * 0x09, 0x2A, // Usage (0x2A)
+ * 0x09, 0x2B, // Usage (0x2B)
+ * 0x95, 0x0C, // Report Count (12)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x95, 0x08, // Report Count (8)
+ * 0xB1, 0x02, // Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x0A, 0x21, 0x26, // Usage (0x2621)
+ * 0x91, 0x02, // Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile)
+ * 0x26, 0xFF, 0x03, // Logical Maximum (1023)
+ * 0x46, 0xFF, 0x03, // Physical Maximum (1023)
+ * 0x09, 0x2C, // Usage (0x2C)
+ * 0x09, 0x2D, // Usage (0x2D)
+ * 0x09, 0x2E, // Usage (0x2E)
+ * 0x09, 0x2F, // Usage (0x2F)
+ * 0x75, 0x10, // Report Size (16)
+ * 0x95, 0x04, // Report Count (4)
+ * 0x81, 0x02, // Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position)
+ * 0xC0, // End Collection
+ */
+
+#define PID0902_RDESC_ORIG_SIZE 137
+
+/*
+ * The fixed descriptor for 0x146b:0x0902
+ *
+ * - map buttons according to gamepad.rst
+ * - assign right stick from Z/Rz to Rx/Ry
+ * - map previously unused analog trigger data to Z/RZ
+ * - simplify feature and output descriptor
+ */
+static __u8 pid0902_rdesc_fixed[] = {
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, 0x05, /* Usage (Game Pad) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x25, 0x01, /* Logical Maximum (1) */
+ 0x35, 0x00, /* Physical Minimum (0) */
+ 0x45, 0x01, /* Physical Maximum (1) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x0D, /* Report Count (13) */
+ 0x05, 0x09, /* Usage Page (Button) */
+ 0x09, 0x05, /* Usage (BTN_WEST) */
+ 0x09, 0x01, /* Usage (BTN_SOUTH) */
+ 0x09, 0x02, /* Usage (BTN_EAST) */
+ 0x09, 0x04, /* Usage (BTN_NORTH) */
+ 0x09, 0x07, /* Usage (BTN_TL) */
+ 0x09, 0x08, /* Usage (BTN_TR) */
+ 0x09, 0x09, /* Usage (BTN_TL2) */
+ 0x09, 0x0A, /* Usage (BTN_TR2) */
+ 0x09, 0x0B, /* Usage (BTN_SELECT) */
+ 0x09, 0x0C, /* Usage (BTN_START) */
+ 0x09, 0x0E, /* Usage (BTN_THUMBL) */
+ 0x09, 0x0F, /* Usage (BTN_THUMBR) */
+ 0x09, 0x0D, /* Usage (BTN_MODE) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x75, 0x01, /* Report Size (1) */
+ 0x95, 0x03, /* Report Count (3) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x25, 0x07, /* Logical Maximum (7) */
+ 0x46, 0x3B, 0x01, /* Physical Maximum (315) */
+ 0x75, 0x04, /* Report Size (4) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x65, 0x14, /* Unit (System: English Rotation, Length: Centimeter) */
+ 0x09, 0x39, /* Usage (Hat switch) */
+ 0x81, 0x42, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,Null State) */
+ 0x65, 0x00, /* Unit (None) */
+ 0x95, 0x01, /* Report Count (1) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x30, /* Usage (X) */
+ 0x09, 0x31, /* Usage (Y) */
+ 0x09, 0x33, /* Usage (Rx) */
+ 0x09, 0x34, /* Usage (Ry) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x95, 0x04, /* Report Count (4) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x0A, /* Report Count (10) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x05, 0x01, /* Usage Page (Generic Desktop Ctrls) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x46, 0xFF, 0x00, /* Physical Maximum (255) */
+ 0x09, 0x32, /* Usage (Z) */
+ 0x09, 0x35, /* Usage (Rz) */
+ 0x95, 0x02, /* Report Count (2) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x01, /* Input (Const,Array,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0x06, 0x00, 0xFF, /* Usage Page (Vendor Defined 0xFF00) */
+ 0xB1, 0x02, /* Feature (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x91, 0x02, /* Output (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position,Non-volatile) */
+ 0x0A, 0x21, 0x26, /* Usage (0x2621) */
+ 0x95, 0x08, /* Report Count (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs,No Wrap,Linear,Preferred State,No Null Position) */
+ 0xC0, /* End Collection */
+};
+
+#define NUM_LEDS 4
+
+struct bigben_device {
+ struct hid_device *hid;
+ struct hid_report *report;
+ u8 led_state; /* LED1 = 1 .. LED4 = 8 */
+ u8 right_motor_on; /* right motor off/on 0/1 */
+ u8 left_motor_force; /* left motor force 0-255 */
+ struct led_classdev *leds[NUM_LEDS];
+ bool work_led;
+ bool work_ff;
+ struct work_struct worker;
+};
+
+
+static void bigben_worker(struct work_struct *work)
+{
+ struct bigben_device *bigben = container_of(work,
+ struct bigben_device, worker);
+ struct hid_field *report_field = bigben->report->field[0];
+
+ if (bigben->work_led) {
+ bigben->work_led = false;
+ report_field->value[0] = 0x01; /* 1 = led message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->led_state;
+ report_field->value[3] = 0x00; /* padding */
+ report_field->value[4] = 0x00; /* padding */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+
+ if (bigben->work_ff) {
+ bigben->work_ff = false;
+ report_field->value[0] = 0x02; /* 2 = rumble effect message */
+ report_field->value[1] = 0x08; /* reserved value, always 8 */
+ report_field->value[2] = bigben->right_motor_on;
+ report_field->value[3] = bigben->left_motor_force;
+ report_field->value[4] = 0xff; /* duration 0-254 (255 = nonstop) */
+ report_field->value[5] = 0x00; /* padding */
+ report_field->value[6] = 0x00; /* padding */
+ report_field->value[7] = 0x00; /* padding */
+ hid_hw_request(bigben->hid, bigben->report, HID_REQ_SET_REPORT);
+ }
+}
+
+static int hid_bigben_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct bigben_device *bigben = data;
+ u8 right_motor_on;
+ u8 left_motor_force;
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ right_motor_on = effect->u.rumble.weak_magnitude ? 1 : 0;
+ left_motor_force = effect->u.rumble.strong_magnitude / 256;
+
+ if (right_motor_on != bigben->right_motor_on ||
+ left_motor_force != bigben->left_motor_force) {
+ bigben->right_motor_on = right_motor_on;
+ bigben->left_motor_force = left_motor_force;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+ }
+
+ return 0;
+}
+
+static void bigben_set_led(struct led_classdev *led,
+ enum led_brightness value)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+ bool work;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n]) {
+ if (value == LED_OFF) {
+ work = (bigben->led_state & BIT(n));
+ bigben->led_state &= ~BIT(n);
+ } else {
+ work = !(bigben->led_state & BIT(n));
+ bigben->led_state |= BIT(n);
+ }
+
+ if (work) {
+ bigben->work_led = true;
+ schedule_work(&bigben->worker);
+ }
+ return;
+ }
+ }
+}
+
+static enum led_brightness bigben_get_led(struct led_classdev *led)
+{
+ struct device *dev = led->dev->parent;
+ struct hid_device *hid = to_hid_device(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+ int n;
+
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return LED_OFF;
+ }
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ if (led == bigben->leds[n])
+ return (bigben->led_state & BIT(n)) ? LED_ON : LED_OFF;
+ }
+
+ return LED_OFF;
+}
+
+static void bigben_remove(struct hid_device *hid)
+{
+ struct bigben_device *bigben = hid_get_drvdata(hid);
+
+ cancel_work_sync(&bigben->worker);
+ hid_hw_close(hid);
+ hid_hw_stop(hid);
+}
+
+static int bigben_probe(struct hid_device *hid,
+ const struct hid_device_id *id)
+{
+ struct bigben_device *bigben;
+ struct hid_input *hidinput;
+ struct list_head *report_list;
+ struct led_classdev *led;
+ char *name;
+ size_t name_sz;
+ int n, error;
+
+ bigben = devm_kzalloc(&hid->dev, sizeof(*bigben), GFP_KERNEL);
+ if (!bigben)
+ return -ENOMEM;
+ hid_set_drvdata(hid, bigben);
+ bigben->hid = hid;
+
+ error = hid_parse(hid);
+ if (error) {
+ hid_err(hid, "parse failed\n");
+ return error;
+ }
+
+ error = hid_hw_start(hid, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
+ if (error) {
+ hid_err(hid, "hw start failed\n");
+ return error;
+ }
+
+ report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+ bigben->report = list_entry(report_list->next,
+ struct hid_report, list);
+
+ hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+ set_bit(FF_RUMBLE, hidinput->input->ffbit);
+
+ INIT_WORK(&bigben->worker, bigben_worker);
+
+ error = input_ff_create_memless(hidinput->input, bigben,
+ hid_bigben_play_effect);
+ if (error)
+ return error;
+
+ name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
+
+ for (n = 0; n < NUM_LEDS; n++) {
+ led = devm_kzalloc(
+ &hid->dev,
+ sizeof(struct led_classdev) + name_sz,
+ GFP_KERNEL
+ );
+ if (!led)
+ return -ENOMEM;
+ name = (void *)(&led[1]);
+ snprintf(name, name_sz,
+ "%s:red:bigben%d",
+ dev_name(&hid->dev), n + 1
+ );
+ led->name = name;
+ led->brightness = (n == 0) ? LED_ON : LED_OFF;
+ led->max_brightness = 1;
+ led->brightness_get = bigben_get_led;
+ led->brightness_set = bigben_set_led;
+ bigben->leds[n] = led;
+ error = devm_led_classdev_register(&hid->dev, led);
+ if (error)
+ return error;
+ }
+
+ /* initial state: LED1 is on, no rumble effect */
+ bigben->led_state = BIT(0);
+ bigben->right_motor_on = 0;
+ bigben->left_motor_force = 0;
+ bigben->work_led = true;
+ bigben->work_ff = true;
+ schedule_work(&bigben->worker);
+
+ hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
+
+ return 0;
+}
+
+static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
+ unsigned int *rsize)
+{
+ if (*rsize == PID0902_RDESC_ORIG_SIZE) {
+ rdesc = pid0902_rdesc_fixed;
+ *rsize = sizeof(pid0902_rdesc_fixed);
+ } else
+ hid_warn(hid, "unexpected rdesc, please submit for review\n");
+ return rdesc;
+}
+
+static const struct hid_device_id bigben_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_BIGBEN, USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD) },
+ { }
+};
+MODULE_DEVICE_TABLE(hid, bigben_devices);
+
+static struct hid_driver bigben_driver = {
+ .name = "bigben",
+ .id_table = bigben_devices,
+ .probe = bigben_probe,
+ .report_fixup = bigben_report_fixup,
+ .remove = bigben_remove,
+};
+module_hid_driver(bigben_driver);
+
+MODULE_LICENSE("GPL");
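Ordering note, based on the HID core's hid_open_report() path: report_fixup() runs from hid_parse() before the descriptor is parsed, so by the time bigben_probe() calls hid_hw_start() the fixed descriptor above is already in effect. The size check against PID0902_RDESC_ORIG_SIZE keeps the fixup from touching unknown firmware variants; those keep their original descriptor and log the "please submit for review" warning instead.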
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 44564f61e9cc..5bec9244c45b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -406,7 +406,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 128) {
+ if (parser->global.report_size > 256) {
hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
diff --git a/drivers/hid/hid-cougar.c b/drivers/hid/hid-cougar.c
index ad2e87de7dc5..3f0916b64c60 100644
--- a/drivers/hid/hid-cougar.c
+++ b/drivers/hid/hid-cougar.c
@@ -7,6 +7,7 @@
#include <linux/hid.h>
#include <linux/module.h>
+#include <linux/printk.h>
#include "hid-ids.h"
@@ -15,11 +16,9 @@ MODULE_DESCRIPTION("Cougar 500k Gaming Keyboard");
MODULE_LICENSE("GPL");
MODULE_INFO(key_mappings, "G1-G6 are mapped to F13-F18");
-static int cougar_g6_is_space = 1;
-module_param_named(g6_is_space, cougar_g6_is_space, int, 0600);
+static bool g6_is_space = true;
MODULE_PARM_DESC(g6_is_space,
- "If set, G6 programmable key sends SPACE instead of F18 (0=off, 1=on) (default=1)");
-
+ "If true, G6 programmable key sends SPACE instead of F18 (default=true)");
#define COUGAR_VENDOR_USAGE 0xff00ff00
@@ -82,20 +81,23 @@ struct cougar {
static LIST_HEAD(cougar_udev_list);
static DEFINE_MUTEX(cougar_udev_list_lock);
-static void cougar_fix_g6_mapping(struct hid_device *hdev)
+/**
+ * cougar_fix_g6_mapping - configure the mapping for key G6/Spacebar
+ */
+static void cougar_fix_g6_mapping(void)
{
int i;
for (i = 0; cougar_mapping[i][0]; i++) {
if (cougar_mapping[i][0] == COUGAR_KEY_G6) {
cougar_mapping[i][1] =
- cougar_g6_is_space ? KEY_SPACE : KEY_F18;
- hid_info(hdev, "G6 mapped to %s\n",
- cougar_g6_is_space ? "space" : "F18");
+ g6_is_space ? KEY_SPACE : KEY_F18;
+ pr_info("cougar: G6 mapped to %s\n",
+ g6_is_space ? "space" : "F18");
return;
}
}
- hid_warn(hdev, "no mapping defined for G6/spacebar");
+ pr_warn("cougar: no mappings defined for G6/spacebar");
}
/*
@@ -154,7 +156,8 @@ static void cougar_remove_shared_data(void *resource)
* Bind the device group's shared data to this cougar struct.
* If no shared data exists for this group, create and initialize it.
*/
-static int cougar_bind_shared_data(struct hid_device *hdev, struct cougar *cougar)
+static int cougar_bind_shared_data(struct hid_device *hdev,
+ struct cougar *cougar)
{
struct cougar_shared *shared;
int error = 0;
@@ -228,7 +231,6 @@ static int cougar_probe(struct hid_device *hdev,
* to it.
*/
if (hdev->collection->usage == HID_GD_KEYBOARD) {
- cougar_fix_g6_mapping(hdev);
list_for_each_entry_safe(hidinput, next, &hdev->inputs, list) {
if (hidinput->registered && hidinput->input != NULL) {
cougar->shared->input = hidinput->input;
@@ -237,6 +239,8 @@ static int cougar_probe(struct hid_device *hdev,
}
}
} else if (hdev->collection->usage == COUGAR_VENDOR_USAGE) {
+ /* Preinit the mapping table */
+ cougar_fix_g6_mapping();
error = hid_hw_open(hdev);
if (error)
goto fail_stop_and_cleanup;
@@ -257,26 +261,32 @@ static int cougar_raw_event(struct hid_device *hdev, struct hid_report *report,
u8 *data, int size)
{
struct cougar *cougar;
+ struct cougar_shared *shared;
unsigned char code, action;
int i;
cougar = hid_get_drvdata(hdev);
- if (!cougar->special_intf || !cougar->shared ||
- !cougar->shared->input || !cougar->shared->enabled)
+ shared = cougar->shared;
+ if (!cougar->special_intf || !shared)
return 0;
+ if (!shared->enabled || !shared->input)
+ return -EPERM;
+
code = data[COUGAR_FIELD_CODE];
action = data[COUGAR_FIELD_ACTION];
for (i = 0; cougar_mapping[i][0]; i++) {
if (code == cougar_mapping[i][0]) {
- input_event(cougar->shared->input, EV_KEY,
+ input_event(shared->input, EV_KEY,
cougar_mapping[i][1], action);
- input_sync(cougar->shared->input);
- return 0;
+ input_sync(shared->input);
+ return -EPERM;
}
}
- hid_warn(hdev, "unmapped special key code %x: ignoring\n", code);
- return 0;
+ /* Avoid warnings on the same unmapped key twice */
+ if (action != 0)
+ hid_warn(hdev, "unmapped special key code %0x: ignoring\n", code);
+ return -EPERM;
}
static void cougar_remove(struct hid_device *hdev)
@@ -293,6 +303,26 @@ static void cougar_remove(struct hid_device *hdev)
hid_hw_stop(hdev);
}
+static int cougar_param_set_g6_is_space(const char *val,
+ const struct kernel_param *kp)
+{
+ int ret;
+
+ ret = param_set_bool(val, kp);
+ if (ret)
+ return ret;
+
+ cougar_fix_g6_mapping();
+
+ return 0;
+}
+
+static const struct kernel_param_ops cougar_g6_is_space_ops = {
+ .set = cougar_param_set_g6_is_space,
+ .get = param_get_bool,
+};
+module_param_cb(g6_is_space, &cougar_g6_is_space_ops, &g6_is_space, 0644);
+
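Since the parameter is now registered via module_param_cb() with mode 0644, it can presumably be flipped at runtime as well, e.g. by writing to /sys/module/hid_cougar/parameters/g6_is_space (assuming the default module name); the custom set handler then re-runs cougar_fix_g6_mapping() immediately instead of waiting for the next probe.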
static struct hid_device_id cougar_id_table[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOLID_YEAR,
USB_DEVICE_ID_COUGAR_500K_GAMING_KEYBOARD) },
diff --git a/drivers/hid/hid-elan.c b/drivers/hid/hid-elan.c
index 07e26c3567eb..0bfd6d1b44c1 100644
--- a/drivers/hid/hid-elan.c
+++ b/drivers/hid/hid-elan.c
@@ -497,7 +497,7 @@ static int elan_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
if (!drvdata->input) {
- hid_err(hdev, "Input device is not registred\n");
+ hid_err(hdev, "Input device is not registered\n");
ret = -ENAVAIL;
goto err;
}
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 6bf4da7ad63a..ee5e0bdcf078 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -13,16 +13,268 @@
* any later version.
*/
+#include <linux/acpi.h>
#include <linux/hid.h>
#include <linux/leds.h>
+#include <linux/mfd/cros_ec.h>
+#include <linux/mfd/cros_ec_commands.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeup.h>
+#include <asm/unaligned.h>
#include "hid-ids.h"
-#define MAX_BRIGHTNESS 100
+/*
+ * C(hrome)B(ase)A(ttached)S(witch) - a switch exported by the Chrome EC that
+ * reports the state of the "Whiskers" base - attached or detached. The Whiskers
+ * USB device also reports the position of the keyboard - folded or not.
+ * Combining the base state and the keyboard position lets us generate proper
+ * "Tablet mode" events.
+ */
+struct cbas_ec {
+ struct device *dev; /* The platform device (EC) */
+ struct input_dev *input;
+ bool base_present;
+ struct notifier_block notifier;
+};
-/* HID usage for keyboard backlight (Alphanumeric display brightness) */
-#define HID_AD_BRIGHTNESS 0x00140046
+static struct cbas_ec cbas_ec;
+static DEFINE_SPINLOCK(cbas_ec_lock);
+static DEFINE_MUTEX(cbas_ec_reglock);
+
+static bool cbas_parse_base_state(const void *data)
+{
+ u32 switches = get_unaligned_le32(data);
+
+ return !!(switches & BIT(EC_MKBP_BASE_ATTACHED));
+}
+
+static int cbas_ec_query_base(struct cros_ec_device *ec_dev, bool get_state,
+ bool *state)
+{
+ struct ec_params_mkbp_info *params;
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(sizeof(u32), sizeof(*params)),
+ GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MKBP_INFO;
+ msg->version = 1;
+ msg->outsize = sizeof(*params);
+ msg->insize = sizeof(u32);
+ params = (struct ec_params_mkbp_info *)msg->data;
+ params->info_type = get_state ?
+ EC_MKBP_INFO_CURRENT : EC_MKBP_INFO_SUPPORTED;
+ params->event_type = EC_MKBP_EVENT_SWITCH;
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret >= 0) {
+ if (ret != sizeof(u32)) {
+ dev_warn(ec_dev->dev, "wrong result size: %d != %zu\n",
+ ret, sizeof(u32));
+ ret = -EPROTO;
+ } else {
+ *state = cbas_parse_base_state(msg->data);
+ ret = 0;
+ }
+ }
+
+ kfree(msg);
+
+ return ret;
+}
+
+static int cbas_ec_notify(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec = _notify;
+ unsigned long flags;
+ bool base_present;
+
+ if (ec->event_data.event_type == EC_MKBP_EVENT_SWITCH) {
+ base_present = cbas_parse_base_state(
+ &ec->event_data.data.switches);
+ dev_dbg(cbas_ec.dev,
+ "%s: base: %d\n", __func__, base_present);
+
+ if (device_may_wakeup(cbas_ec.dev) ||
+ !queued_during_suspend) {
+
+ pm_wakeup_event(cbas_ec.dev, 0);
+
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ /*
+ * While input layer dedupes the events, we do not want
+ * to disrupt the state reported by the base by
+ * overriding it with state reported by the LID. Only
+ * report changes, as we assume that on attach the base
+ * is not folded.
+ */
+ if (base_present != cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE,
+ !base_present);
+ input_sync(cbas_ec.input);
+ cbas_ec.base_present = base_present;
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static __maybe_unused int cbas_ec_resume(struct device *dev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(dev->parent);
+ bool base_present;
+ int error;
+
+ error = cbas_ec_query_base(ec, true, &base_present);
+ if (error) {
+ dev_warn(dev, "failed to fetch base state on resume: %d\n",
+ error);
+ } else {
+ spin_lock_irq(&cbas_ec_lock);
+
+ cbas_ec.base_present = base_present;
+
+ /*
+ * Only report if base is disconnected. If base is connected,
+ * it will resend its state on resume, and we'll update it
+ * in hammer_event().
+ */
+ if (!cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input, SW_TABLET_MODE, 1);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irq(&cbas_ec_lock);
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cbas_ec_pm_ops, NULL, cbas_ec_resume);
+
+static void cbas_ec_set_input(struct input_dev *input)
+{
+ /* Take the lock so hammer_event() does not race with us here */
+ spin_lock_irq(&cbas_ec_lock);
+ cbas_ec.input = input;
+ spin_unlock_irq(&cbas_ec_lock);
+}
+
+static int __cbas_ec_probe(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+ struct input_dev *input;
+ bool base_supported;
+ int error;
+
+ error = cbas_ec_query_base(ec, false, &base_supported);
+ if (error)
+ return error;
+
+ if (!base_supported)
+ return -ENXIO;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->name = "Whiskers Tablet Mode Switch";
+ input->id.bustype = BUS_HOST;
+
+ input_set_capability(input, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(input);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register input device: %d\n",
+ error);
+ return error;
+ }
+
+ /* Seed the state */
+ error = cbas_ec_query_base(ec, true, &cbas_ec.base_present);
+ if (error) {
+ dev_err(&pdev->dev, "cannot query base state: %d\n", error);
+ return error;
+ }
+
+ input_report_switch(input, SW_TABLET_MODE, !cbas_ec.base_present);
+
+ cbas_ec_set_input(input);
+
+ cbas_ec.dev = &pdev->dev;
+ cbas_ec.notifier.notifier_call = cbas_ec_notify;
+ error = blocking_notifier_chain_register(&ec->event_notifier,
+ &cbas_ec.notifier);
+ if (error) {
+ dev_err(&pdev->dev, "cannot register notifier: %d\n", error);
+ cbas_ec_set_input(NULL);
+ return error;
+ }
+
+ device_init_wakeup(&pdev->dev, true);
+ return 0;
+}
+
+static int cbas_ec_probe(struct platform_device *pdev)
+{
+ int retval;
+
+ mutex_lock(&cbas_ec_reglock);
+
+ if (cbas_ec.input) {
+ retval = -EBUSY;
+ goto out;
+ }
+
+ retval = __cbas_ec_probe(pdev);
+
+out:
+ mutex_unlock(&cbas_ec_reglock);
+ return retval;
+}
+
+static int cbas_ec_remove(struct platform_device *pdev)
+{
+ struct cros_ec_device *ec = dev_get_drvdata(pdev->dev.parent);
+
+ mutex_lock(&cbas_ec_reglock);
+
+ blocking_notifier_chain_unregister(&ec->event_notifier,
+ &cbas_ec.notifier);
+ cbas_ec_set_input(NULL);
+
+ mutex_unlock(&cbas_ec_reglock);
+ return 0;
+}
+
+static const struct acpi_device_id cbas_ec_acpi_ids[] = {
+ { "GOOG000B", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+
+static struct platform_driver cbas_ec_driver = {
+ .probe = cbas_ec_probe,
+ .remove = cbas_ec_remove,
+ .driver = {
+ .name = "cbas_ec",
+ .acpi_match_table = ACPI_PTR(cbas_ec_acpi_ids),
+ .pm = &cbas_ec_pm_ops,
+ },
+};
+
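The tablet-mode handling is deliberately split in two: this platform driver owns the input device and listens for EC switch notifications (base attached/detached), while the HID driver later in this file feeds keyboard-fold events from the Whiskers USB interface into the same cbas_ec state. cbas_ec_lock serializes the two event producers, and cbas_ec_reglock guards against a second platform device probing while one is already bound.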
+#define MAX_BRIGHTNESS 100
struct hammer_kbd_leds {
struct led_classdev cdev;
@@ -90,33 +342,130 @@ static int hammer_register_leds(struct hid_device *hdev)
return devm_led_classdev_register(&hdev->dev, &kbd_backlight->cdev);
}
-static int hammer_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
+#define HID_UP_GOOGLEVENDOR 0xffd10000
+#define HID_VD_KBD_FOLDED 0x00000019
+#define WHISKERS_KBD_FOLDED (HID_UP_GOOGLEVENDOR | HID_VD_KBD_FOLDED)
+
+/* HID usage for keyboard backlight (Alphanumeric display brightness) */
+#define HID_AD_BRIGHTNESS 0x00140046
+
+static int hammer_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field,
+ struct hid_usage *usage,
+ unsigned long **bit, int *max)
{
- struct list_head *report_list =
- &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ /*
+ * We do not want to have this usage mapped, as it would get
+ * mixed in with the "base attached" signal and delivered over
+ * a separate input device for tablet switch mode.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int hammer_event(struct hid_device *hid, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ unsigned long flags;
+
+ if (hid->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ usage->hid == WHISKERS_KBD_FOLDED) {
+ spin_lock_irqsave(&cbas_ec_lock, flags);
+
+ hid_dbg(hid, "%s: base: %d, folded: %d\n", __func__,
+ cbas_ec.base_present, value);
+
+ /*
+ * We should not get event if base is detached, but in case
+ * we happen to service HID and EC notifications out of order
+ * let's still check the "base present" flag.
+ */
+ if (cbas_ec.input && cbas_ec.base_present) {
+ input_report_switch(cbas_ec.input,
+ SW_TABLET_MODE, value);
+ input_sync(cbas_ec.input);
+ }
+
+ spin_unlock_irqrestore(&cbas_ec_lock, flags);
+ return 1; /* We handled this event */
+ }
+
+ return 0;
+}
+
+static bool hammer_is_keyboard_interface(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_INPUT_REPORT];
struct hid_report *report;
- if (list_empty(report_list))
- return 0;
+ list_for_each_entry(report, &re->report_list, list)
+ if (report->application == HID_GD_KEYBOARD)
+ return true;
- report = list_first_entry(report_list, struct hid_report, list);
+ return false;
+}
+
+static bool hammer_has_backlight_control(struct hid_device *hdev)
+{
+ struct hid_report_enum *re = &hdev->report_enum[HID_OUTPUT_REPORT];
+ struct hid_report *report;
+ int i, j;
- if (report->maxfield == 1 &&
- report->field[0]->application == HID_GD_KEYBOARD &&
- report->field[0]->maxusage == 1 &&
- report->field[0]->usage[0].hid == HID_AD_BRIGHTNESS) {
- int err = hammer_register_leds(hdev);
+ list_for_each_entry(report, &re->report_list, list) {
+ if (report->application != HID_GD_KEYBOARD)
+ continue;
- if (err)
+ for (i = 0; i < report->maxfield; i++) {
+ struct hid_field *field = report->field[i];
+
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].hid == HID_AD_BRIGHTNESS)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int hammer_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int error;
+
+ /*
+ * We always want to poll for, and handle, tablet mode events from
+ * Whiskers, even when nobody has opened the input device. This also
+ * prevents the HID core from dropping early tablet mode events from
+ * the device.
+ */
+ if (hdev->product == USB_DEVICE_ID_GOOGLE_WHISKERS &&
+ hammer_is_keyboard_interface(hdev))
+ hdev->quirks |= HID_QUIRK_ALWAYS_POLL;
+
+ error = hid_parse(hdev);
+ if (error)
+ return error;
+
+ error = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (error)
+ return error;
+
+ if (hammer_has_backlight_control(hdev)) {
+ error = hammer_register_leds(hdev);
+ if (error)
hid_warn(hdev,
"Failed to register keyboard backlight: %d\n",
- err);
+ error);
}
return 0;
}
+
static const struct hid_device_id hammer_devices[] = {
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
@@ -133,8 +482,34 @@ MODULE_DEVICE_TABLE(hid, hammer_devices);
static struct hid_driver hammer_driver = {
.name = "hammer",
.id_table = hammer_devices,
- .input_configured = hammer_input_configured,
+ .probe = hammer_probe,
+ .input_mapping = hammer_input_mapping,
+ .event = hammer_event,
};
-module_hid_driver(hammer_driver);
+
+static int __init hammer_init(void)
+{
+ int error;
+
+ error = platform_driver_register(&cbas_ec_driver);
+ if (error)
+ return error;
+
+ error = hid_register_driver(&hammer_driver);
+ if (error) {
+ platform_driver_unregister(&cbas_ec_driver);
+ return error;
+ }
+
+ return 0;
+}
+module_init(hammer_init);
+
+static void __exit hammer_exit(void)
+{
+ hid_unregister_driver(&hammer_driver);
+ platform_driver_unregister(&cbas_ec_driver);
+}
+module_exit(hammer_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index bc49909aba8e..f63489c882bb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -92,6 +92,7 @@
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
+#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 0x0265
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI 0x020e
#define USB_DEVICE_ID_APPLE_FOUNTAIN_ISO 0x020f
#define USB_DEVICE_ID_APPLE_GEYSER_ANSI 0x0214
@@ -229,6 +230,9 @@
#define USB_VENDOR_ID_BETOP_2185V2PC 0x8380
#define USB_VENDOR_ID_BETOP_2185V2BFM 0x20bc
+#define USB_VENDOR_ID_BIGBEN 0x146b
+#define USB_DEVICE_ID_BIGBEN_PS3OFMINIPAD 0x0902
+
#define USB_VENDOR_ID_BTC 0x046e
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE 0x5578
#define USB_DEVICE_ID_BTC_EMPREX_REMOTE_2 0x5577
@@ -342,6 +346,7 @@
#define USB_DEVICE_ID_DMI_ENC 0x5fab
#define USB_VENDOR_ID_DRAGONRISE 0x0079
+#define USB_DEVICE_ID_REDRAGON_SEYMUR2 0x0006
#define USB_DEVICE_ID_DRAGONRISE_WIIU 0x1800
#define USB_DEVICE_ID_DRAGONRISE_PS3 0x1801
#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR 0x1803
@@ -799,6 +804,7 @@
#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_DEVICE_ID_MS_POWER_COVER 0x07da
+#define USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER 0x02fd
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index a481eaf39e88..567c3bf64515 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -758,6 +758,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
break;
case HID_UP_DIGITIZER:
+ if ((field->application & 0xff) == 0x01) /* Digitizer */
+ __set_bit(INPUT_PROP_POINTER, input->propbit);
+ else if ((field->application & 0xff) == 0x02) /* Pen */
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+
switch (usage->hid & 0xff) {
case 0x00: /* Undefined */
goto ignore;
@@ -1516,6 +1521,7 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
struct hid_input *hidinput = kzalloc(sizeof(*hidinput), GFP_KERNEL);
struct input_dev *input_dev = input_allocate_device();
const char *suffix = NULL;
+ size_t suffix_len, name_len;
if (!hidinput || !input_dev)
goto fail;
@@ -1559,10 +1565,15 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid,
}
if (suffix) {
- hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
- hid->name, suffix);
- if (!hidinput->name)
- goto fail;
+ name_len = strlen(hid->name);
+ suffix_len = strlen(suffix);
+ if ((name_len < suffix_len) ||
+ strcmp(hid->name + name_len - suffix_len, suffix)) {
+ hidinput->name = kasprintf(GFP_KERNEL, "%s %s",
+ hid->name, suffix);
+ if (!hidinput->name)
+ goto fail;
+ }
}
input_set_drvdata(input_dev, hid);
@@ -1827,3 +1838,48 @@ void hidinput_disconnect(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
+/**
+ * hid_scroll_counter_handle_scroll() - Send high- and low-resolution scroll
+ * events given a high-resolution wheel
+ * movement.
+ * @counter: a hid_scroll_counter struct describing the wheel.
+ * @hi_res_value: the movement of the wheel, in the mouse's high-resolution
+ * units.
+ *
+ * Given a high-resolution movement, this function converts the movement into
+ * microns and emits high-resolution scroll events for the input device. It also
+ * uses the multiplier from &struct hid_scroll_counter to emit low-resolution
+ * scroll events when appropriate for backwards-compatibility with userspace
+ * input libraries.
+ */
+void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
+ int hi_res_value)
+{
+ int low_res_scroll_amount;
+ /* Some wheels will rest 7/8ths of a notch from the previous notch
+ * after slow movement, so we want the threshold for low-res events to
+ * be in the middle of the notches (e.g. after 4/8ths) as opposed to on
+ * the notches themselves (8/8ths).
+ */
+ int threshold = counter->resolution_multiplier / 2;
+
+ input_report_rel(counter->dev, REL_WHEEL_HI_RES,
+ hi_res_value * counter->microns_per_hi_res_unit);
+
+ counter->remainder += hi_res_value;
+ if (abs(counter->remainder) >= threshold) {
+ /* Add (or subtract) 1 because we want to trigger when the wheel
+ * is half-way to the next notch (i.e. scroll 1 notch after a
+ * 1/2 notch movement, 2 notches after a 1 1/2 notch movement,
+ * etc.).
+ */
+ low_res_scroll_amount =
+ counter->remainder / counter->resolution_multiplier
+ + (hi_res_value > 0 ? 1 : -1);
+ input_report_rel(counter->dev, REL_WHEEL,
+ low_res_scroll_amount);
+ counter->remainder -=
+ low_res_scroll_amount * counter->resolution_multiplier;
+ }
+}
+EXPORT_SYMBOL_GPL(hid_scroll_counter_handle_scroll);
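As a worked example of the arithmetic above: assume a resolution multiplier of 8 (so the threshold is 4) and three successive hi_res_value movements of 3. The remainder reaches 3 (below threshold, no low-res event), then 6, which crosses the threshold and emits one REL_WHEEL notch of 6/8 + 1 = 1 (integer division), leaving a remainder of 6 - 8 = -2; the third movement brings it to 1, again below threshold. Nine hi-res units therefore produce exactly one low-res notch, fired half-way to the detent rather than on it.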
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 19cc980eebce..f01280898b24 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -64,6 +64,14 @@ MODULE_PARM_DESC(disable_tap_to_click,
#define HIDPP_QUIRK_NO_HIDINPUT BIT(23)
#define HIDPP_QUIRK_FORCE_OUTPUT_REPORTS BIT(24)
#define HIDPP_QUIRK_UNIFYING BIT(25)
+#define HIDPP_QUIRK_HI_RES_SCROLL_1P0 BIT(26)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2120 BIT(27)
+#define HIDPP_QUIRK_HI_RES_SCROLL_X2121 BIT(28)
+
+/* Convenience constant to check for any high-res support. */
+#define HIDPP_QUIRK_HI_RES_SCROLL (HIDPP_QUIRK_HI_RES_SCROLL_1P0 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2120 | \
+ HIDPP_QUIRK_HI_RES_SCROLL_X2121)
#define HIDPP_QUIRK_DELAYED_INIT HIDPP_QUIRK_NO_HIDINPUT
@@ -149,6 +157,7 @@ struct hidpp_device {
unsigned long capabilities;
struct hidpp_battery battery;
+ struct hid_scroll_counter vertical_wheel_counter;
};
/* HID++ 1.0 error codes */
@@ -400,32 +409,53 @@ static void hidpp_prefix_name(char **name, int name_length)
#define HIDPP_SET_LONG_REGISTER 0x82
#define HIDPP_GET_LONG_REGISTER 0x83
-#define HIDPP_REG_GENERAL 0x00
-
-static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+/**
+ * hidpp10_set_register_bit() - Sets a single bit in a HID++ 1.0 register.
+ * @hidpp_dev: the device to set the register on.
+ * @register_address: the address of the register to modify.
+ * @byte: the byte of the register to modify. Should be less than 3.
+ * @bit: the bit within the byte to set.
+ *
+ * Return: 0 if successful, otherwise a negative error code.
+ */
+static int hidpp10_set_register_bit(struct hidpp_device *hidpp_dev,
+ u8 register_address, u8 byte, u8 bit)
{
struct hidpp_report response;
int ret;
u8 params[3] = { 0 };
ret = hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_GET_REGISTER,
- HIDPP_REG_GENERAL,
- NULL, 0, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_GET_REGISTER,
+ register_address,
+ NULL, 0, &response);
if (ret)
return ret;
memcpy(params, response.rap.params, 3);
- /* Set the battery bit */
- params[0] |= BIT(4);
+ params[byte] |= BIT(bit);
return hidpp_send_rap_command_sync(hidpp_dev,
- REPORT_ID_HIDPP_SHORT,
- HIDPP_SET_REGISTER,
- HIDPP_REG_GENERAL,
- params, 3, &response);
+ REPORT_ID_HIDPP_SHORT,
+ HIDPP_SET_REGISTER,
+ register_address,
+ params, 3, &response);
+}
+
+
+#define HIDPP_REG_GENERAL 0x00
+
+static int hidpp10_enable_battery_reporting(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_GENERAL, 0, 4);
+}
+
+#define HIDPP_REG_FEATURES 0x01
+
+/* On HID++ 1.0 devices, high-res scroll was called "scrolling acceleration". */
+static int hidpp10_enable_scrolling_acceleration(struct hidpp_device *hidpp_dev)
+{
+ return hidpp10_set_register_bit(hidpp_dev, HIDPP_REG_FEATURES, 0, 6);
}
#define HIDPP_REG_BATTERY_STATUS 0x07
@@ -1137,6 +1167,100 @@ static int hidpp_battery_get_property(struct power_supply *psy,
}
/* -------------------------------------------------------------------------- */
+/* 0x2120: Hi-resolution scrolling */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HI_RESOLUTION_SCROLLING 0x2120
+
+#define CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE 0x10
+
+static int hidpp_hrs_set_highres_scrolling_mode(struct hidpp_device *hidpp,
+ bool enabled, u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp,
+ HIDPP_PAGE_HI_RESOLUTION_SCROLLING,
+ &feature_index,
+ &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = enabled ? BIT(0) : 0;
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HI_RESOLUTION_SCROLLING_SET_HIGHRES_SCROLLING_MODE,
+ params, sizeof(params), &response);
+ if (ret)
+ return ret;
+ *multiplier = response.fap.params[1];
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+/* 0x2121: HiRes Wheel */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_PAGE_HIRES_WHEEL 0x2121
+
+#define CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY 0x00
+#define CMD_HIRES_WHEEL_SET_WHEEL_MODE 0x20
+
+static int hidpp_hrw_get_wheel_capability(struct hidpp_device *hidpp,
+ u8 *multiplier)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ goto return_default;
+
+ ret = hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_GET_WHEEL_CAPABILITY,
+ NULL, 0, &response);
+ if (ret)
+ goto return_default;
+
+ *multiplier = response.fap.params[0];
+ return 0;
+return_default:
+ hid_warn(hidpp->hid_dev,
+ "Couldn't get wheel multiplier (error %d), assuming %d.\n",
+ ret, *multiplier);
+ return ret;
+}
+
+static int hidpp_hrw_set_wheel_mode(struct hidpp_device *hidpp, bool invert,
+ bool high_resolution, bool use_hidpp)
+{
+ u8 feature_index;
+ u8 feature_type;
+ int ret;
+ u8 params[1];
+ struct hidpp_report response;
+
+ ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_HIRES_WHEEL,
+ &feature_index, &feature_type);
+ if (ret)
+ return ret;
+
+ params[0] = (invert ? BIT(2) : 0) |
+ (high_resolution ? BIT(1) : 0) |
+ (use_hidpp ? BIT(0) : 0);
+
+ return hidpp_send_fap_command_sync(hidpp, feature_index,
+ CMD_HIRES_WHEEL_SET_WHEEL_MODE,
+ params, sizeof(params), &response);
+}
+
+/* -------------------------------------------------------------------------- */
/* 0x4301: Solar Keyboard */
/* -------------------------------------------------------------------------- */
@@ -2399,7 +2523,8 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
input_report_rel(mydata->input, REL_Y, v);
v = hid_snto32(data[6], 8);
- input_report_rel(mydata->input, REL_WHEEL, v);
+ hid_scroll_counter_handle_scroll(
+ &hidpp->vertical_wheel_counter, v);
input_sync(mydata->input);
}
@@ -2528,6 +2653,72 @@ static int g920_get_config(struct hidpp_device *hidpp)
}
/* -------------------------------------------------------------------------- */
+/* High-resolution scroll wheels */
+/* -------------------------------------------------------------------------- */
+
+/**
+ * struct hi_res_scroll_info - Stores info on a device's high-res scroll wheel.
+ * @product_id: the HID product ID of the device being described.
+ * @microns_per_hi_res_unit: the distance moved by the user's finger for each
+ *                           high-resolution unit reported by the device, in
+ *                           microns.
+ */
+struct hi_res_scroll_info {
+ __u32 product_id;
+ int microns_per_hi_res_unit;
+};
+
+static struct hi_res_scroll_info hi_res_scroll_devices[] = {
+ { /* Anywhere MX */
+ .product_id = 0x1017, .microns_per_hi_res_unit = 445 },
+ { /* Performance MX */
+ .product_id = 0x101a, .microns_per_hi_res_unit = 406 },
+ { /* M560 */
+ .product_id = 0x402d, .microns_per_hi_res_unit = 435 },
+ { /* MX Master 2S */
+ .product_id = 0x4069, .microns_per_hi_res_unit = 406 },
+};
+
+static int hi_res_scroll_look_up_microns(__u32 product_id)
+{
+ int i;
+ int num_devices = sizeof(hi_res_scroll_devices)
+ / sizeof(hi_res_scroll_devices[0]);
+ for (i = 0; i < num_devices; i++) {
+ if (hi_res_scroll_devices[i].product_id == product_id)
+ return hi_res_scroll_devices[i].microns_per_hi_res_unit;
+ }
+ /* We don't have a value for this device, so use a sensible default. */
+ return 406;
+}
+
+static int hi_res_scroll_enable(struct hidpp_device *hidpp)
+{
+ int ret;
+ u8 multiplier = 8;
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2121) {
+ ret = hidpp_hrw_set_wheel_mode(hidpp, false, true, false);
+ hidpp_hrw_get_wheel_capability(hidpp, &multiplier);
+ } else if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_X2120) {
+ ret = hidpp_hrs_set_highres_scrolling_mode(hidpp, true,
+ &multiplier);
+ } else /* if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL_1P0) */
+ ret = hidpp10_enable_scrolling_acceleration(hidpp);
+
+ if (ret)
+ return ret;
+
+ hidpp->vertical_wheel_counter.resolution_multiplier = multiplier;
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit =
+ hi_res_scroll_look_up_microns(hidpp->hid_dev->product);
+ hid_info(hidpp->hid_dev, "multiplier = %d, microns = %d\n",
+ multiplier,
+ hidpp->vertical_wheel_counter.microns_per_hi_res_unit);
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
/* Generic HID++ devices */
/* -------------------------------------------------------------------------- */
@@ -2572,6 +2763,11 @@ static void hidpp_populate_input(struct hidpp_device *hidpp,
wtp_populate_input(hidpp, input, origin_is_hid_core);
else if (hidpp->quirks & HIDPP_QUIRK_CLASS_M560)
m560_populate_input(hidpp, input, origin_is_hid_core);
+
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) {
+ input_set_capability(input, EV_REL, REL_WHEEL_HI_RES);
+ hidpp->vertical_wheel_counter.dev = input;
+ }
}
static int hidpp_input_configured(struct hid_device *hdev,
@@ -2690,6 +2886,27 @@ static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
return 0;
}
+static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ /* This function will only be called for scroll events, due to the
+ * restriction imposed in hidpp_usages.
+ */
+ struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+ struct hid_scroll_counter *counter = &hidpp->vertical_wheel_counter;
+ /* A scroll event may occur before the multiplier has been retrieved or
+ * the input device set, or high-res scroll enabling may fail. In such
+ * cases we must return early (falling back to default behaviour) to
+ * avoid a crash in hid_scroll_counter_handle_scroll.
+ */
+ if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL) || value == 0
+ || counter->dev == NULL || counter->resolution_multiplier == 0)
+ return 0;
+
+ hid_scroll_counter_handle_scroll(counter, value);
+ return 1;
+}
+
static int hidpp_initialize_battery(struct hidpp_device *hidpp)
{
static atomic_t battery_no = ATOMIC_INIT(0);
@@ -2901,6 +3118,9 @@ static void hidpp_connect_event(struct hidpp_device *hidpp)
if (hidpp->battery.ps)
power_supply_changed(hidpp->battery.ps);
+ if (hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL)
+ hi_res_scroll_enable(hidpp);
+
if (!(hidpp->quirks & HIDPP_QUIRK_NO_HIDINPUT) || hidpp->delayed_input)
/* if the input nodes are already created, we can stop now */
return;
@@ -3086,35 +3306,63 @@ static void hidpp_remove(struct hid_device *hdev)
mutex_destroy(&hidpp->send_mutex);
}
+#define LDJ_DEVICE(product) \
+ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE, \
+ USB_VENDOR_ID_LOGITECH, (product))
+
static const struct hid_device_id hidpp_devices[] = {
{ /* wireless touchpad */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4011),
+ LDJ_DEVICE(0x4011),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
{ /* wireless touchpad T650 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4101),
+ LDJ_DEVICE(0x4101),
.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
{ /* wireless touchpad T651 */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
+ { /* Mouse Logitech Anywhere MX */
+ LDJ_DEVICE(0x1017), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech Cube */
+ LDJ_DEVICE(0x4010), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M335 */
+ LDJ_DEVICE(0x4050), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M515 */
+ LDJ_DEVICE(0x4007), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
{ /* Mouse logitech M560 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x402d),
- .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560 },
+ LDJ_DEVICE(0x402d),
+ .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_CLASS_M560
+ | HIDPP_QUIRK_HI_RES_SCROLL_X2120 },
+ { /* Mouse Logitech M705 (firmware RQM17) */
+ LDJ_DEVICE(0x101b), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
+ { /* Mouse Logitech M705 (firmware RQM67) */
+ LDJ_DEVICE(0x406d), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech M720 */
+ LDJ_DEVICE(0x405e), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2 */
+ LDJ_DEVICE(0x404a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb013), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb018), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0xb01f), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Anywhere 2S */
+ LDJ_DEVICE(0x406a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master */
+ LDJ_DEVICE(0x4041), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4060), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech MX Master 2S */
+ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 },
+ { /* Mouse Logitech Performance MX */
+ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 },
{ /* Keyboard logitech K400 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4024),
+ LDJ_DEVICE(0x4024),
.driver_data = HIDPP_QUIRK_CLASS_K400 },
{ /* Solar Keyboard Logitech K750 */
- HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, 0x4002),
+ LDJ_DEVICE(0x4002),
.driver_data = HIDPP_QUIRK_CLASS_K750 },
- { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
- USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
+ { LDJ_DEVICE(HID_ANY_ID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
.driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
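The LDJ_DEVICE() helper above only factors out the bus/group/vendor triple that every Logitech DJ entry repeats. Going by the HID_DEVICE() initializer in include/linux/hid.h, which assigns the four match fields of struct hid_device_id, the first table entry denotes the following after expansion (a sketch for illustration, not code from this patch):

	/* Expansion sketch of { LDJ_DEVICE(0x4011), ... }; HID_DEVICE()
	 * fills the four match fields of struct hid_device_id. */
	static const struct hid_device_id wtp_entry = {
		.bus     = BUS_USB,
		.group   = HID_GROUP_LOGITECH_DJ_DEVICE,
		.vendor  = USB_VENDOR_ID_LOGITECH,
		.product = 0x4011,
		.driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
			       HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS,
	};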
@@ -3123,12 +3371,19 @@ static const struct hid_device_id hidpp_devices[] = {
MODULE_DEVICE_TABLE(hid, hidpp_devices);
+static const struct hid_usage_id hidpp_usages[] = {
+ { HID_GD_WHEEL, EV_REL, REL_WHEEL },
+ { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
+};
+
static struct hid_driver hidpp_driver = {
.name = "logitech-hidpp-device",
.id_table = hidpp_devices,
.probe = hidpp_probe,
.remove = hidpp_remove,
.raw_event = hidpp_raw_event,
+ .usage_table = hidpp_usages,
+ .event = hidpp_event,
.input_configured = hidpp_input_configured,
.input_mapping = hidpp_input_mapping,
.input_mapped = hidpp_input_mapped,
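The new .usage_table/.event pair is what lets the scroll quirks act on wheel input: hid-core matches each parsed usage against hidpp_usages[] and hands matching events to hidpp_event() before hid-input emits them, and a nonzero return consumes the event. hidpp_event() itself is outside the hunks shown, so the sketch below gives only the expected shape, with the body an assumption:

	/* Sketch; the real hidpp_event() is not part of the hunks shown.
	 * Only usages matching hidpp_usages[] (REL_WHEEL) arrive here. */
	static int hidpp_event(struct hid_device *hdev, struct hid_field *field,
			       struct hid_usage *usage, __s32 value)
	{
		struct hidpp_device *hidpp = hid_get_drvdata(hdev);

		if (!(hidpp->quirks & HIDPP_QUIRK_HI_RES_SCROLL))
			return 0;	/* let hid-input report it as usual */

		/* ...convert the low-res click into hi-res motion here... */
		return 1;		/* consumed; skip default handling */
	}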
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index b454c4386157..1d5ea678d268 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -54,6 +54,8 @@ module_param(report_undeciphered, bool, 0644);
MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state field using a MSC_RAW event");
#define TRACKPAD_REPORT_ID 0x28
+#define TRACKPAD2_USB_REPORT_ID 0x02
+#define TRACKPAD2_BT_REPORT_ID 0x31
#define MOUSE_REPORT_ID 0x29
#define DOUBLE_REPORT_ID 0xf7
/* These definitions are not precise, but they're close enough. (Bits
@@ -91,6 +93,17 @@ MODULE_PARM_DESC(report_undeciphered, "Report undeciphered multi-touch state fie
#define TRACKPAD_RES_Y \
((TRACKPAD_MAX_Y - TRACKPAD_MIN_Y) / (TRACKPAD_DIMENSION_Y / 100))
+#define TRACKPAD2_DIMENSION_X (float)16000
+#define TRACKPAD2_MIN_X -3678
+#define TRACKPAD2_MAX_X 3934
+#define TRACKPAD2_RES_X \
+ ((TRACKPAD2_MAX_X - TRACKPAD2_MIN_X) / (TRACKPAD2_DIMENSION_X / 100))
+#define TRACKPAD2_DIMENSION_Y (float)11490
+#define TRACKPAD2_MIN_Y -2478
+#define TRACKPAD2_MAX_Y 2587
+#define TRACKPAD2_RES_Y \
+ ((TRACKPAD2_MAX_Y - TRACKPAD2_MIN_Y) / (TRACKPAD2_DIMENSION_Y / 100))
+
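The *_RES_* macros all encode the same idea: DIMENSION is the physical edge length in hundredths of a millimetre, so dividing the logical range by (DIMENSION / 100) yields logical units per millimetre, the unit input_abs_set_res() expects. A stand-alone check of the arithmetic (plain C, constants copied from the macros above):

	#include <stdio.h>

	int main(void)
	{
		float dim_x = 16000;		/* 160.00 mm across */
		int min_x = -3678, max_x = 3934;

		/* (MAX - MIN) / (DIMENSION / 100) = units per mm */
		float res_x = (max_x - min_x) / (dim_x / 100);
		printf("X resolution: %f units/mm\n", res_x); /* ~47.6 */
		return 0;
	}

When the macro result is later passed to input_abs_set_res(), whose resolution argument is an int, it is truncated to 47.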
/**
* struct magicmouse_sc - Tracks Magic Mouse-specific data.
* @input: Input device through which we report events.
@@ -183,6 +196,7 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
{
struct input_dev *input = msc->input;
int id, x, y, size, orientation, touch_major, touch_minor, state, down;
+ int pressure = 0;
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
id = (tdata[6] << 2 | tdata[5] >> 6) & 0xf;
@@ -194,6 +208,17 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
touch_minor = tdata[4];
state = tdata[7] & TOUCH_STATE_MASK;
down = state != TOUCH_STATE_NONE;
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ id = tdata[8] & 0xf;
+ x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
+ y = -((tdata[3] << 30 | tdata[2] << 22 | tdata[1] << 14) >> 19);
+ size = tdata[6];
+ orientation = (tdata[8] >> 5) - 4;
+ touch_major = tdata[4];
+ touch_minor = tdata[5];
+ pressure = tdata[7];
+ state = tdata[3] & 0xC0;
+ down = state == 0x80;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
id = (tdata[7] << 2 | tdata[6] >> 6) & 0xf;
x = (tdata[1] << 27 | tdata[0] << 19) >> 19;
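The x/y decode packs a signed 13-bit field using a shift pair: the raw bytes are shifted up so the field's top bit lands in bit 31, and an arithmetic right shift brings the value back down with the sign propagated. A stand-alone demonstration (plain C; the sample bytes are invented, and the trick assumes two's-complement ints with arithmetic right shift, as the kernel does):

	#include <stdio.h>
	#include <stdint.h>

	/* Same trick as the x decode above: bits 12..0 of the field end
	 * up in bits 31..19, then shifting back replicates the sign. */
	static int decode_x(const uint8_t *tdata)
	{
		return (int32_t)((uint32_t)tdata[1] << 27 |
				 (uint32_t)tdata[0] << 19) >> 19;
	}

	int main(void)
	{
		uint8_t sample[2] = { 0xf4, 0x1f }; /* 13-bit field 0x1ff4 */
		printf("x = %d\n", decode_x(sample)); /* prints x = -12 */
		return 0;
	}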
@@ -215,7 +240,8 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
/* If requested, emulate a scroll wheel by detecting small
* vertical touch motions.
*/
- if (emulate_scroll_wheel) {
+ if (emulate_scroll_wheel && (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)) {
unsigned long now = jiffies;
int step_x = msc->touches[id].scroll_x - x;
int step_y = msc->touches[id].scroll_y - y;
@@ -269,10 +295,14 @@ static void magicmouse_emit_touch(struct magicmouse_sc *msc, int raw_id, u8 *tda
input_report_abs(input, ABS_MT_POSITION_X, x);
input_report_abs(input, ABS_MT_POSITION_Y, y);
+ if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
+ input_report_abs(input, ABS_MT_PRESSURE, pressure);
+
if (report_undeciphered) {
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
input_event(input, EV_MSC, MSC_RAW, tdata[7]);
- else /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (input->id.product !=
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2)
input_event(input, EV_MSC, MSC_RAW, tdata[8]);
}
}
@@ -287,6 +317,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
switch (data[0]) {
case TRACKPAD_REPORT_ID:
+ case TRACKPAD2_BT_REPORT_ID:
/* Expect four bytes of prefix, and N*9 bytes of touch data. */
if (size < 4 || ((size - 4) % 9) != 0)
return 0;
@@ -308,6 +339,22 @@ static int magicmouse_raw_event(struct hid_device *hdev,
* ts = data[1] >> 6 | data[2] << 2 | data[3] << 10;
*/
break;
+ case TRACKPAD2_USB_REPORT_ID:
+ /* Expect twelve bytes of prefix and N*9 bytes of touch data. */
+ if (size < 12 || ((size - 12) % 9) != 0)
+ return 0;
+ npoints = (size - 12) / 9;
+ if (npoints > 15) {
+ hid_warn(hdev, "invalid size value (%d) for TRACKPAD2_USB_REPORT_ID\n",
+ size);
+ return 0;
+ }
+ msc->ntouches = 0;
+ for (ii = 0; ii < npoints; ii++)
+ magicmouse_emit_touch(msc, ii, data + ii * 9 + 12);
+
+ clicks = data[1];
+ break;
case MOUSE_REPORT_ID:
/* Expect six bytes of prefix, and N*8 bytes of touch data. */
if (size < 6 || ((size - 6) % 8) != 0)
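Every report case applies the same framing rule: a fixed-length prefix plus a whole number of fixed-size touch records, with anything else rejected. For the new TRACKPAD2_USB_REPORT_ID case above that means size = 12 + 9 * N, with N capped at 15. A runnable check of the arithmetic (plain C):

	#include <stdio.h>

	/* Mirrors the TRACKPAD2_USB_REPORT_ID check: a 12-byte prefix
	 * followed by N nine-byte touch records. */
	static int trackpad2_usb_npoints(int size)
	{
		if (size < 12 || ((size - 12) % 9) != 0)
			return -1;		/* malformed report */
		return (size - 12) / 9;		/* caller caps this at 15 */
	}

	int main(void)
	{
		printf("%d\n", trackpad2_usb_npoints(30)); /* 12 + 2*9 -> 2 */
		printf("%d\n", trackpad2_usb_npoints(20)); /* -> -1 */
		return 0;
	}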
@@ -352,6 +399,9 @@ static int magicmouse_raw_event(struct hid_device *hdev,
magicmouse_emit_buttons(msc, clicks & 3);
input_report_rel(input, REL_X, x);
input_report_rel(input, REL_Y, y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_mt_sync_frame(input);
+ input_report_key(input, BTN_MOUSE, clicks & 1);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
input_report_key(input, BTN_MOUSE, clicks & 1);
input_mt_report_pointer_emulation(input, true);
@@ -364,6 +414,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hdev)
{
int error;
+ int mt_flags = 0;
__set_bit(EV_KEY, input->evbit);
@@ -380,6 +431,22 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(REL_WHEEL, input->relbit);
__set_bit(REL_HWHEEL, input->relbit);
}
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ /* set the device name to ensure the same driver settings
+ * get loaded, whether connected through Bluetooth or USB
+ */
+ input->name = "Apple Inc. Magic Trackpad 2";
+
+ __clear_bit(EV_MSC, input->evbit);
+ __clear_bit(BTN_0, input->keybit);
+ __clear_bit(BTN_RIGHT, input->keybit);
+ __clear_bit(BTN_MIDDLE, input->keybit);
+ __set_bit(BTN_MOUSE, input->keybit);
+ __set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
+ __set_bit(BTN_TOOL_FINGER, input->keybit);
+
+ mt_flags = INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED |
+ INPUT_MT_TRACK;
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
/* input->keybit is initialized with incorrect button info
* for Magic Trackpad. There really is only one physical
@@ -402,14 +469,13 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(EV_ABS, input->evbit);
- error = input_mt_init_slots(input, 16, 0);
+ error = input_mt_init_slots(input, 16, mt_flags);
if (error)
return error;
input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2,
4, 0);
input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, 255 << 2,
4, 0);
- input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
/* Note: Touch Y position from the device is inverted relative
* to how pointer motion is reported (and relative to how USB
@@ -418,6 +484,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
* inverse of the reported Y.
*/
if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) {
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_MT_POSITION_X,
MOUSE_MIN_X, MOUSE_MAX_X, 4, 0);
input_set_abs_params(input, ABS_MT_POSITION_Y,
@@ -427,7 +494,25 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
MOUSE_RES_X);
input_abs_set_res(input, ABS_MT_POSITION_Y,
MOUSE_RES_Y);
+ } else if (input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ input_set_abs_params(input, ABS_MT_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 253, 0, 0);
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -3, 4, 0, 0);
+ input_set_abs_params(input, ABS_X, TRACKPAD2_MIN_X,
+ TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_Y, TRACKPAD2_MIN_Y,
+ TRACKPAD2_MAX_Y, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X,
+ TRACKPAD2_MIN_X, TRACKPAD2_MAX_X, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y,
+ TRACKPAD2_MIN_Y, TRACKPAD2_MAX_Y, 0, 0);
+
+ input_abs_set_res(input, ABS_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_Y, TRACKPAD2_RES_Y);
+ input_abs_set_res(input, ABS_MT_POSITION_X, TRACKPAD2_RES_X);
+ input_abs_set_res(input, ABS_MT_POSITION_Y, TRACKPAD2_RES_Y);
} else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ input_set_abs_params(input, ABS_MT_ORIENTATION, -31, 32, 1, 0);
input_set_abs_params(input, ABS_X, TRACKPAD_MIN_X,
TRACKPAD_MAX_X, 4, 0);
input_set_abs_params(input, ABS_Y, TRACKPAD_MIN_Y,
@@ -447,7 +532,8 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
input_set_events_per_packet(input, 60);
- if (report_undeciphered) {
+ if (report_undeciphered &&
+ input->id.product != USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
__set_bit(EV_MSC, input->evbit);
__set_bit(MSC_RAW, input->mscbit);
}
@@ -465,7 +551,8 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
msc->input = hi->input;
/* Magic Trackpad does not give relative data after switching to MT */
- if (hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD &&
+ if ((hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD ||
+ hi->input->id.product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) &&
field->flags & HID_MAIN_ITEM_RELATIVE)
return -1;
@@ -494,11 +581,20 @@ static int magicmouse_input_configured(struct hid_device *hdev,
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- const u8 feature[] = { 0xd7, 0x01 };
+ const u8 *feature;
+ const u8 feature_mt[] = { 0xD7, 0x01 };
+ const u8 feature_mt_trackpad2_usb[] = { 0x02, 0x01 };
+ const u8 feature_mt_trackpad2_bt[] = { 0xF1, 0x02, 0x01 };
u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
+ int feature_size;
+
+ if (id->vendor == USB_VENDOR_ID_APPLE &&
+ id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ hdev->type != HID_TYPE_USBMOUSE)
+ return 0;
msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
@@ -532,7 +628,14 @@ static int magicmouse_probe(struct hid_device *hdev,
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
report = hid_register_report(hdev, HID_INPUT_REPORT,
MOUSE_REPORT_ID, 0);
- else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
+ else if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE)
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_BT_REPORT_ID, 0);
+ else /* USB_VENDOR_ID_APPLE */
+ report = hid_register_report(hdev, HID_INPUT_REPORT,
+ TRACKPAD2_USB_REPORT_ID, 0);
+ } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */
report = hid_register_report(hdev, HID_INPUT_REPORT,
TRACKPAD_REPORT_ID, 0);
report = hid_register_report(hdev, HID_INPUT_REPORT,
@@ -546,7 +649,20 @@ static int magicmouse_probe(struct hid_device *hdev,
}
report->size = 6;
- buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2) {
+ if (id->vendor == BT_VENDOR_ID_APPLE) {
+ feature_size = sizeof(feature_mt_trackpad2_bt);
+ feature = feature_mt_trackpad2_bt;
+ } else { /* USB_VENDOR_ID_APPLE */
+ feature_size = sizeof(feature_mt_trackpad2_usb);
+ feature = feature_mt_trackpad2_usb;
+ }
+ } else {
+ feature_size = sizeof(feature_mt);
+ feature = feature_mt;
+ }
+
+ buf = kmemdup(feature, feature_size, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto err_stop_hw;
@@ -560,10 +676,10 @@ static int magicmouse_probe(struct hid_device *hdev,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
kfree(buf);
- if (ret != -EIO && ret != sizeof(feature)) {
+ if (ret != -EIO && ret != feature_size) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
}
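The probe path now picks the mode-switch payload by transport and sends it as a SET_REPORT feature request. Condensed into one helper for illustration (a sketch reusing the names above; the payload must come from kmalloc rather than the stack because the USB transport may DMA from the buffer):

	/* Sketch of the sequence above; buf[0] doubles as the report
	 * number, matching how the probe path issues the request. */
	static int magicmouse_switch_mode(struct hid_device *hdev,
					  const u8 *feature, size_t feature_size)
	{
		u8 *buf = kmemdup(feature, feature_size, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;

		ret = hid_hw_raw_request(hdev, buf[0], buf, feature_size,
					 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
		kfree(buf);

		/* Mirror the probe path's check: some firmware answers
		 * -EIO even though the switch succeeded. */
		return (ret == -EIO || ret == (int)feature_size) ? 0 : ret;
	}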
@@ -579,6 +695,10 @@ static const struct hid_device_id magic_mice[] = {
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICTRACKPAD), .driver_data = 0 },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_MAGICTRACKPAD2), .driver_data = 0 },
{ }
};
MODULE_DEVICE_TABLE(hid, magic_mice);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 72d983626afd..330cb073cb66 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -29,11 +29,41 @@
#define MS_NOGET BIT(4)
#define MS_DUPLICATE_USAGES BIT(5)
#define MS_SURFACE_DIAL BIT(6)
+#define MS_QUIRK_FF BIT(7)
+
+struct ms_data {
+ unsigned long quirks;
+ struct hid_device *hdev;
+ struct work_struct ff_worker;
+ __u8 strong;
+ __u8 weak;
+ void *output_report_dmabuf;
+};
+
+#define XB1S_FF_REPORT 3
+#define ENABLE_WEAK BIT(0)
+#define ENABLE_STRONG BIT(1)
+
+enum {
+ MAGNITUDE_STRONG = 2,
+ MAGNITUDE_WEAK,
+ MAGNITUDE_NUM
+};
+
+struct xb1s_ff_report {
+ __u8 report_id;
+ __u8 enable;
+ __u8 magnitude[MAGNITUDE_NUM];
+ __u8 duration_10ms;
+ __u8 start_delay_10ms;
+ __u8 loop_count;
+} __packed;
static __u8 *ms_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
/*
* Microsoft Wireless Desktop Receiver (Model 1028) has
@@ -159,7 +189,8 @@ static int ms_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_ERGONOMY) {
int ret = ms_ergonomy_kb_quirk(hi, usage, bit, max);
@@ -185,7 +216,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
if (quirks & MS_DUPLICATE_USAGES)
clear_bit(usage->code, *bit);
@@ -196,7 +228,8 @@ static int ms_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int ms_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- unsigned long quirks = (unsigned long)hid_get_drvdata(hdev);
+ struct ms_data *ms = hid_get_drvdata(hdev);
+ unsigned long quirks = ms->quirks;
struct input_dev *input;
if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
@@ -251,12 +284,97 @@ static int ms_event(struct hid_device *hdev, struct hid_field *field,
return 0;
}
+static void ms_ff_worker(struct work_struct *work)
+{
+ struct ms_data *ms = container_of(work, struct ms_data, ff_worker);
+ struct hid_device *hdev = ms->hdev;
+ struct xb1s_ff_report *r = ms->output_report_dmabuf;
+ int ret;
+
+ memset(r, 0, sizeof(*r));
+
+ r->report_id = XB1S_FF_REPORT;
+ r->enable = ENABLE_WEAK | ENABLE_STRONG;
+ /*
+ * Specifying maximum duration and maximum loop count should
+ * cover the maximum duration of a single effect, which is
+ * 65536 ms
+ */
+ r->duration_10ms = U8_MAX;
+ r->loop_count = U8_MAX;
+ r->magnitude[MAGNITUDE_STRONG] = ms->strong; /* left actuator */
+ r->magnitude[MAGNITUDE_WEAK] = ms->weak; /* right actuator */
+
+ ret = hid_hw_output_report(hdev, (__u8 *)r, sizeof(*r));
+ if (ret)
+ hid_warn(hdev, "failed to send FF report\n");
+}
+
+static int ms_play_effect(struct input_dev *dev, void *data,
+ struct ff_effect *effect)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct ms_data *ms = hid_get_drvdata(hid);
+
+ if (effect->type != FF_RUMBLE)
+ return 0;
+
+ /*
+ * Magnitude is 0..100 so scale the 16-bit input here
+ */
+ ms->strong = ((u32) effect->u.rumble.strong_magnitude * 100) / U16_MAX;
+ ms->weak = ((u32) effect->u.rumble.weak_magnitude * 100) / U16_MAX;
+
+ schedule_work(&ms->ff_worker);
+ return 0;
+}
+
+static int ms_init_ff(struct hid_device *hdev)
+{
+ struct hid_input *hidinput = list_entry(hdev->inputs.next,
+ struct hid_input, list);
+ struct input_dev *input_dev = hidinput->input;
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return 0;
+
+ ms->hdev = hdev;
+ INIT_WORK(&ms->ff_worker, ms_ff_worker);
+
+ ms->output_report_dmabuf = devm_kzalloc(&hdev->dev,
+ sizeof(struct xb1s_ff_report),
+ GFP_KERNEL);
+ if (ms->output_report_dmabuf == NULL)
+ return -ENOMEM;
+
+ input_set_capability(input_dev, EV_FF, FF_RUMBLE);
+ return input_ff_create_memless(input_dev, NULL, ms_play_effect);
+}
+
+static void ms_remove_ff(struct hid_device *hdev)
+{
+ struct ms_data *ms = hid_get_drvdata(hdev);
+
+ if (!(ms->quirks & MS_QUIRK_FF))
+ return;
+
+ cancel_work_sync(&ms->ff_worker);
+}
+
static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
unsigned long quirks = id->driver_data;
+ struct ms_data *ms;
int ret;
- hid_set_drvdata(hdev, (void *)quirks);
+ ms = devm_kzalloc(&hdev->dev, sizeof(*ms), GFP_KERNEL);
+ if (ms == NULL)
+ return -ENOMEM;
+
+ ms->quirks = quirks;
+
+ hid_set_drvdata(hdev, ms);
if (quirks & MS_NOGET)
hdev->quirks |= HID_QUIRK_NOGET;
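ms_play_effect() rescales the input core's 0..65535 rumble magnitudes to the 0..100 range the controller report uses; the cast to u32 keeps magnitude * 100 from overflowing before the divide. A runnable check of the scaling (plain C):

	#include <stdio.h>
	#include <stdint.h>

	/* Same scaling as ms_play_effect(). */
	static uint8_t scale_magnitude(uint16_t magnitude)
	{
		return (uint8_t)(((uint32_t)magnitude * 100) / UINT16_MAX);
	}

	int main(void)
	{
		printf("%u\n", scale_magnitude(0));	/* 0   */
		printf("%u\n", scale_magnitude(32768));	/* 50  */
		printf("%u\n", scale_magnitude(65535));	/* 100 */
		return 0;
	}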
@@ -277,11 +395,21 @@ static int ms_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_free;
}
+ ret = ms_init_ff(hdev);
+ if (ret)
+ hid_err(hdev, "could not initialize ff, continuing anyway\n");
+
return 0;
err_free:
return ret;
}
+static void ms_remove(struct hid_device *hdev)
+{
+ hid_hw_stop(hdev);
+ ms_remove_ff(hdev);
+}
+
static const struct hid_device_id ms_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV),
.driver_data = MS_HIDINPUT },
@@ -318,6 +446,8 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_PRESENTER },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, 0x091B),
.driver_data = MS_SURFACE_DIAL },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_XBOX_ONE_S_CONTROLLER),
+ .driver_data = MS_QUIRK_FF },
{ }
};
MODULE_DEVICE_TABLE(hid, ms_devices);
@@ -330,6 +460,7 @@ static struct hid_driver ms_driver = {
.input_mapped = ms_input_mapped,
.event = ms_event,
.probe = ms_probe,
+ .remove = ms_remove,
};
module_hid_driver(ms_driver);
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index da954f3f4da7..f7c6de2b6730 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1319,6 +1319,13 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
return mt_touch_input_mapping(hdev, hi, field, usage, bit, max,
application);
+ /*
+ * some eGalax touchscreens have "application == DG_TOUCHSCREEN"
+ * for the stylus. Overwrite the hid_input application.
+ */
+ if (field->physical == HID_DG_STYLUS)
+ hi->application = HID_DG_STYLUS;
+
/* let hid-core decide for the others */
return 0;
}
@@ -1507,14 +1514,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
struct mt_device *td = hid_get_drvdata(hdev);
char *name;
const char *suffix = NULL;
- unsigned int application = 0;
struct mt_report_data *rdata;
struct mt_application *mt_application = NULL;
struct hid_report *report;
int ret;
list_for_each_entry(report, &hi->reports, hidinput_list) {
- application = report->application;
rdata = mt_find_report_data(td, report);
if (!rdata) {
hid_err(hdev, "failed to allocate data for report\n");
@@ -1529,46 +1534,33 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (ret)
return ret;
}
-
- /*
- * some egalax touchscreens have "application == DG_TOUCHSCREEN"
- * for the stylus. Check this first, and then rely on
- * the application field.
- */
- if (report->field[0]->physical == HID_DG_STYLUS) {
- suffix = "Pen";
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- }
}
- if (!suffix) {
- switch (application) {
- case HID_GD_KEYBOARD:
- case HID_GD_KEYPAD:
- case HID_GD_MOUSE:
- case HID_DG_TOUCHPAD:
- case HID_GD_SYSTEM_CONTROL:
- case HID_CP_CONSUMER_CONTROL:
- case HID_GD_WIRELESS_RADIO_CTLS:
- case HID_GD_SYSTEM_MULTIAXIS:
- /* already handled by hid core */
- break;
- case HID_DG_TOUCHSCREEN:
- /* we do not set suffix = "Touchscreen" */
- hi->input->name = hdev->name;
- break;
- case HID_DG_STYLUS:
- /* force BTN_STYLUS to allow tablet matching in udev */
- __set_bit(BTN_STYLUS, hi->input->keybit);
- break;
- case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
- suffix = "Custom Media Keys";
- break;
- default:
- suffix = "UNKNOWN";
- break;
- }
+ switch (hi->application) {
+ case HID_GD_KEYBOARD:
+ case HID_GD_KEYPAD:
+ case HID_GD_MOUSE:
+ case HID_DG_TOUCHPAD:
+ case HID_GD_SYSTEM_CONTROL:
+ case HID_CP_CONSUMER_CONTROL:
+ case HID_GD_WIRELESS_RADIO_CTLS:
+ case HID_GD_SYSTEM_MULTIAXIS:
+ /* already handled by hid core */
+ break;
+ case HID_DG_TOUCHSCREEN:
+ /* we do not set suffix = "Touchscreen" */
+ hi->input->name = hdev->name;
+ break;
+ case HID_DG_STYLUS:
+ /* force BTN_STYLUS to allow tablet matching in udev */
+ __set_bit(BTN_STYLUS, hi->input->keybit);
+ break;
+ case HID_VD_ASUS_CUSTOM_MEDIA_KEYS:
+ suffix = "Custom Media Keys";
+ break;
+ default:
+ suffix = "UNKNOWN";
+ break;
}
if (suffix) {
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 249d49b6b16c..52c3b01917e7 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -70,6 +70,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_2NES2SNES), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRACAL_RAPHNET, USB_DEVICE_ID_RAPHNET_4NES4SNES), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_REDRAGON_SEYMUR2), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/i2c-hid/Makefile b/drivers/hid/i2c-hid/Makefile
index 832d8f9aaba2..099e1ce2f234 100644
--- a/drivers/hid/i2c-hid/Makefile
+++ b/drivers/hid/i2c-hid/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_I2C_HID) += i2c-hid.o
+
+i2c-hid-objs = i2c-hid-core.o
+i2c-hid-$(CONFIG_DMI) += i2c-hid-dmi-quirks.o
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 4e3592e7a3f7..4aab96cf0818 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -43,6 +43,7 @@
#include <linux/platform_data/i2c-hid.h>
#include "../hid-ids.h"
+#include "i2c-hid.h"
/* quirks to control the device */
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
@@ -668,6 +669,7 @@ static int i2c_hid_parse(struct hid_device *hid)
char *rdesc;
int ret;
int tries = 3;
+ char *use_override;
i2c_hid_dbg(ihid, "entering %s\n", __func__);
@@ -686,26 +688,37 @@ static int i2c_hid_parse(struct hid_device *hid)
if (ret)
return ret;
- rdesc = kzalloc(rsize, GFP_KERNEL);
+ use_override = i2c_hid_get_dmi_hid_report_desc_override(client->name,
+ &rsize);
- if (!rdesc) {
- dbg_hid("couldn't allocate rdesc memory\n");
- return -ENOMEM;
- }
-
- i2c_hid_dbg(ihid, "asking HID report descriptor\n");
-
- ret = i2c_hid_command(client, &hid_report_descr_cmd, rdesc, rsize);
- if (ret) {
- hid_err(hid, "reading report descriptor failed\n");
- kfree(rdesc);
- return -EIO;
+ if (use_override) {
+ rdesc = use_override;
+ i2c_hid_dbg(ihid, "Using a HID report descriptor override\n");
+ } else {
+ rdesc = kzalloc(rsize, GFP_KERNEL);
+
+ if (!rdesc) {
+ dbg_hid("couldn't allocate rdesc memory\n");
+ return -ENOMEM;
+ }
+
+ i2c_hid_dbg(ihid, "asking HID report descriptor\n");
+
+ ret = i2c_hid_command(client, &hid_report_descr_cmd,
+ rdesc, rsize);
+ if (ret) {
+ hid_err(hid, "reading report descriptor failed\n");
+ kfree(rdesc);
+ return -EIO;
+ }
}
i2c_hid_dbg(ihid, "Report Descriptor: %*ph\n", rsize, rdesc);
ret = hid_parse_report(hid, rdesc, rsize);
- kfree(rdesc);
+ if (!use_override)
+ kfree(rdesc);
+
if (ret) {
dbg_hid("parsing report descriptor failed\n");
return ret;
@@ -832,12 +845,19 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
int ret;
/* i2c hid fetch using a fixed descriptor size (30 bytes) */
- i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
- ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer,
- sizeof(struct i2c_hid_desc));
- if (ret) {
- dev_err(&client->dev, "hid_descr_cmd failed\n");
- return -ENODEV;
+ if (i2c_hid_get_dmi_i2c_hid_desc_override(client->name)) {
+ i2c_hid_dbg(ihid, "Using a HID descriptor override\n");
+ ihid->hdesc =
+ *i2c_hid_get_dmi_i2c_hid_desc_override(client->name);
+ } else {
+ i2c_hid_dbg(ihid, "Fetching the HID descriptor\n");
+ ret = i2c_hid_command(client, &hid_descr_cmd,
+ ihid->hdesc_buffer,
+ sizeof(struct i2c_hid_desc));
+ if (ret) {
+ dev_err(&client->dev, "hid_descr_cmd failed\n");
+ return -ENODEV;
+ }
}
/* Validate the length of HID descriptor, the 4 first bytes:
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
new file mode 100644
index 000000000000..1d645c9ab417
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Quirks for I2C-HID devices that do not supply proper descriptors
+ *
+ * Copyright (c) 2018 Julian Sax <jsbc@gmx.de>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/mod_devicetable.h>
+
+#include "i2c-hid.h"
+
+
+struct i2c_hid_desc_override {
+ union {
+ struct i2c_hid_desc *i2c_hid_desc;
+ uint8_t *i2c_hid_desc_buffer;
+ };
+ uint8_t *hid_report_desc;
+ unsigned int hid_report_desc_size;
+ uint8_t *i2c_name;
+};
+
+
+/*
+ * descriptors for the SIPODEV SP1064 touchpad
+ *
+ * This device does not supply any descriptors and on windows a filter
+ * driver operates between the i2c-hid layer and the device and injects
+ * these descriptors when the device is prompted. The descriptors were
+ * extracted by listening to the i2c-hid traffic that occurs between the
+ * windows filter driver and the windows i2c-hid driver.
+ */
+
+static const struct i2c_hid_desc_override sipodev_desc = {
+ .i2c_hid_desc_buffer = (uint8_t [])
+ {0x1e, 0x00, /* Length of descriptor */
+ 0x00, 0x01, /* Version of descriptor */
+ 0xdb, 0x01, /* Length of report descriptor */
+ 0x21, 0x00, /* Location of report descriptor */
+ 0x24, 0x00, /* Location of input report */
+ 0x1b, 0x00, /* Max input report length */
+ 0x25, 0x00, /* Location of output report */
+ 0x11, 0x00, /* Max output report length */
+ 0x22, 0x00, /* Location of command register */
+ 0x23, 0x00, /* Location of data register */
+ 0x11, 0x09, /* Vendor ID */
+ 0x88, 0x52, /* Product ID */
+ 0x06, 0x00, /* Version ID */
+ 0x00, 0x00, 0x00, 0x00 /* Reserved */
+ },
+
+ .hid_report_desc = (uint8_t [])
+ {0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x02, /* Usage (Mouse), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x01, /* Report ID (1), */
+ 0x09, 0x01, /* Usage (Pointer), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0x81, 0x01, /* Input (Constant), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x15, 0x81, /* Logical Minimum (-127), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x81, 0x06, /* Input (Variable, Relative), */
+ 0xC0, /* End Collection, */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x05, /* Usage (Touchpad), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x04, /* Report ID (4), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x15, 0x00, /* Logical Minimum (0), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x55, 0x0E, /* Unit Exponent (14), */
+ 0x65, 0x11, /* Unit (Centimeter), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x09, 0x47, /* Usage (Touch Valid), */
+ 0x09, 0x42, /* Usage (Tip Switch), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x75, 0x03, /* Report Size (3), */
+ 0x25, 0x05, /* Logical Maximum (5), */
+ 0x09, 0x51, /* Usage (Contact Identifier), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x03, /* Report Count (3), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x01, /* Usage Page (Desktop), */
+ 0x26, 0x44, 0x0A, /* Logical Maximum (2628), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x09, 0x30, /* Usage (X), */
+ 0x46, 0x1A, 0x04, /* Physical Maximum (1050), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x46, 0xBC, 0x02, /* Physical Maximum (700), */
+ 0x26, 0x34, 0x05, /* Logical Maximum (1332), */
+ 0x09, 0x31, /* Usage (Y), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x55, 0x0C, /* Unit Exponent (12), */
+ 0x66, 0x01, 0x10, /* Unit (Seconds), */
+ 0x47, 0xFF, 0xFF, 0x00, 0x00,/* Physical Maximum (65535), */
+ 0x27, 0xFF, 0xFF, 0x00, 0x00,/* Logical Maximum (65535), */
+ 0x75, 0x10, /* Report Size (16), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x09, 0x56, /* Usage (Scan Time), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x09, 0x54, /* Usage (Contact Count), */
+ 0x25, 0x7F, /* Logical Maximum (127), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x05, 0x09, /* Usage Page (Button), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x81, 0x02, /* Input (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0x81, 0x03, /* Input (Constant, Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x02, /* Report ID (2), */
+ 0x09, 0x55, /* Usage (Contact Count Maximum), */
+ 0x09, 0x59, /* Usage (59h), */
+ 0x75, 0x04, /* Report Size (4), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x0F, /* Logical Maximum (15), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x85, 0x07, /* Report ID (7), */
+ 0x09, 0x60, /* Usage (60h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x07, /* Report Count (7), */
+ 0xB1, 0x03, /* Feature (Constant, Variable), */
+ 0x85, 0x06, /* Report ID (6), */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0xC5, /* Usage (C5h), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x96, 0x00, 0x01, /* Report Count (256), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x06, 0x00, 0xFF, /* Usage Page (FF00h), */
+ 0x09, 0x01, /* Usage (01h), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x0D, /* Report ID (13), */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255), */
+ 0x19, 0x01, /* Usage Minimum (01h), */
+ 0x29, 0x02, /* Usage Maximum (02h), */
+ 0x75, 0x08, /* Report Size (8), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x05, 0x0D, /* Usage Page (Digitizer), */
+ 0x09, 0x0E, /* Usage (Configuration), */
+ 0xA1, 0x01, /* Collection (Application), */
+ 0x85, 0x03, /* Report ID (3), */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x02, /* Collection (Logical), */
+ 0x09, 0x52, /* Usage (Device Mode), */
+ 0x25, 0x0A, /* Logical Maximum (10), */
+ 0x95, 0x01, /* Report Count (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0xC0, /* End Collection, */
+ 0x09, 0x22, /* Usage (Finger), */
+ 0xA1, 0x00, /* Collection (Physical), */
+ 0x85, 0x05, /* Report ID (5), */
+ 0x09, 0x57, /* Usage (57h), */
+ 0x09, 0x58, /* Usage (58h), */
+ 0x75, 0x01, /* Report Size (1), */
+ 0x95, 0x02, /* Report Count (2), */
+ 0x25, 0x01, /* Logical Maximum (1), */
+ 0xB1, 0x02, /* Feature (Variable), */
+ 0x95, 0x06, /* Report Count (6), */
+ 0xB1, 0x03, /* Feature (Constant, Variable),*/
+ 0xC0, /* End Collection, */
+ 0xC0 /* End Collection */
+ },
+ .hid_report_desc_size = 475,
+ .i2c_name = "SYNA3602:00"
+};
+
+
+static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
+ {
+ .ident = "Teclast F6 Pro",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F6 Pro"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Teclast F7",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "F7"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Trekstor Primebook C11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Direkt-Tek DTLAPY116-2",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY116-2"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
+ .ident = "Mediacom Flexbook Edge 11",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook edge11 - M-FBE11"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ { }	/* sentinel: dmi_first_match() stops at an empty entry */
+};
+
+
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ return override->i2c_hid_desc;
+}
+
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{
+ struct i2c_hid_desc_override *override;
+ const struct dmi_system_id *system_id;
+
+ system_id = dmi_first_match(i2c_hid_dmi_desc_override_table);
+ if (!system_id)
+ return NULL;
+
+ override = system_id->driver_data;
+ if (strcmp(override->i2c_name, i2c_name))
+ return NULL;
+
+ *size = override->hid_report_desc_size;
+ return override->hid_report_desc;
+}
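Both lookup helpers gate on two independent identifiers, the DMI system match and the ACPI i2c device name, so an override can never attach to unrelated hardware. Covering another machine that ships the same SIPODEV part would take one more table entry shaped like this hypothetical sketch (the vendor and product strings are placeholders, not a real board):

	/* Hypothetical entry for i2c_hid_dmi_desc_override_table[];
	 * "EXAMPLE" / "Notebook X1" are placeholder DMI strings. */
	{
		.ident = "Example Vendor Notebook X1",
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Notebook X1"),
		},
		.driver_data = (void *)&sipodev_desc
	},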
diff --git a/drivers/hid/i2c-hid/i2c-hid.h b/drivers/hid/i2c-hid/i2c-hid.h
new file mode 100644
index 000000000000..a8c19aef5824
--- /dev/null
+++ b/drivers/hid/i2c-hid/i2c-hid.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef I2C_HID_H
+#define I2C_HID_H
+
+
+#ifdef CONFIG_DMI
+struct i2c_hid_desc *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name);
+char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size);
+#else
+static inline struct i2c_hid_desc
+ *i2c_hid_get_dmi_i2c_hid_desc_override(uint8_t *i2c_name)
+{ return NULL; }
+static inline char *i2c_hid_get_dmi_hid_report_desc_override(uint8_t *i2c_name,
+ unsigned int *size)
+{ return NULL; }
+#endif
+
+#endif
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index bfbca7ec54ce..742191bb24c6 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -280,14 +280,14 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
* if tx send list is empty - return 0;
* may happen, as RX_COMPLETE handler doesn't check list emptiness.
*/
- if (list_empty(&dev->wr_processing_list_head.link)) {
+ if (list_empty(&dev->wr_processing_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
out_ipc_locked = 0;
return 0;
}
- ipc_link = list_entry(dev->wr_processing_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_processing_list,
+ struct wr_msg_ctl_info, link);
/* first 4 bytes of the data is the doorbell value (IPC header) */
length = ipc_link->length - sizeof(uint32_t);
doorbell_val = *(uint32_t *)ipc_link->inline_data;
@@ -338,7 +338,7 @@ static int write_ipc_from_queue(struct ishtp_device *dev)
ipc_send_compl = ipc_link->ipc_send_compl;
ipc_send_compl_prm = ipc_link->ipc_send_compl_prm;
list_del_init(&ipc_link->link);
- list_add_tail(&ipc_link->link, &dev->wr_free_list_head.link);
+ list_add(&ipc_link->link, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/*
@@ -372,18 +372,18 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
unsigned char *msg, int length)
{
struct wr_msg_ctl_info *ipc_link;
- unsigned long flags;
+ unsigned long flags;
if (length > IPC_FULL_MSG_SIZE)
return -EMSGSIZE;
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- if (list_empty(&dev->wr_free_list_head.link)) {
+ if (list_empty(&dev->wr_free_list)) {
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
return -ENOMEM;
}
- ipc_link = list_entry(dev->wr_free_list_head.link.next,
- struct wr_msg_ctl_info, link);
+ ipc_link = list_first_entry(&dev->wr_free_list,
+ struct wr_msg_ctl_info, link);
list_del_init(&ipc_link->link);
ipc_link->ipc_send_compl = ipc_send_compl;
@@ -391,7 +391,7 @@ static int write_ipc_to_queue(struct ishtp_device *dev,
ipc_link->length = length;
memcpy(ipc_link->inline_data, msg, length);
- list_add_tail(&ipc_link->link, &dev->wr_processing_list_head.link);
+ list_add_tail(&ipc_link->link, &dev->wr_processing_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
write_ipc_from_queue(dev);
@@ -487,17 +487,13 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
{
uint32_t reset_id;
unsigned long flags;
- struct wr_msg_ctl_info *processing, *next;
/* Read reset ID */
reset_id = ish_reg_read(dev, IPC_REG_ISH2HOST_MSG) & 0xFFFF;
/* Clear IPC output queue */
spin_lock_irqsave(&dev->wr_processing_spinlock, flags);
- list_for_each_entry_safe(processing, next,
- &dev->wr_processing_list_head.link, link) {
- list_move_tail(&processing->link, &dev->wr_free_list_head.link);
- }
+ list_splice_init(&dev->wr_processing_list, &dev->wr_free_list);
spin_unlock_irqrestore(&dev->wr_processing_spinlock, flags);
/* ISHTP notification in IPC_RESET */
@@ -921,9 +917,9 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
spin_lock_init(&dev->out_ipc_spinlock);
/* Init IPC processing and free lists */
- INIT_LIST_HEAD(&dev->wr_processing_list_head.link);
- INIT_LIST_HEAD(&dev->wr_free_list_head.link);
- for (i = 0; i < IPC_TX_FIFO_SIZE; ++i) {
+ INIT_LIST_HEAD(&dev->wr_processing_list);
+ INIT_LIST_HEAD(&dev->wr_free_list);
+ for (i = 0; i < IPC_TX_FIFO_SIZE; i++) {
struct wr_msg_ctl_info *tx_buf;
tx_buf = devm_kzalloc(&pdev->dev,
@@ -939,7 +935,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
i);
break;
}
- list_add_tail(&tx_buf->link, &dev->wr_free_list_head.link);
+ list_add_tail(&tx_buf->link, &dev->wr_free_list);
}
dev->ops = &ish_hw_ops;
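The list rework in this file follows from replacing a dummy element that served as a list anchor (wr_processing_list_head.link) with a plain list_head, after which the stock helpers apply directly instead of open-coded list_entry(head.next, ...) accesses and move loops. A minimal sketch of the resulting pattern with a toy item type:

	#include <linux/list.h>

	struct item {
		struct list_head link;
		int payload;
	};

	/* Pop the oldest item, as write_ipc_from_queue() now does with
	 * list_first_entry(); returns NULL when the list is empty. */
	static struct item *pop_first(struct list_head *head)
	{
		struct item *it;

		if (list_empty(head))
			return NULL;

		it = list_first_entry(head, struct item, link);
		list_del_init(&it->link);
		return it;
	}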
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 256b3016116c..8793cc49f855 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -115,18 +115,19 @@ static const struct pci_device_id ish_invalid_pci_ids[] = {
*/
static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ishtp_device *dev;
+ int ret;
struct ish_hw *hw;
- int ret;
+ struct ishtp_device *ishtp;
+ struct device *dev = &pdev->dev;
/* Check for invalid platforms for ISH support */
if (pci_dev_present(ish_invalid_pci_ids))
return -ENODEV;
/* enable pci dev */
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to enable PCI device\n");
+ dev_err(dev, "ISH: Failed to enable PCI device\n");
return ret;
}
@@ -134,65 +135,44 @@ static int ish_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* pci request regions for ISH driver */
- ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ ret = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
if (ret) {
- dev_err(&pdev->dev, "ISH: Failed to get PCI regions\n");
- goto disable_device;
+ dev_err(dev, "ISH: Failed to get PCI regions\n");
+ return ret;
}
/* allocates and initializes the ISH dev structure */
- dev = ish_dev_init(pdev);
- if (!dev) {
+ ishtp = ish_dev_init(pdev);
+ if (!ishtp) {
ret = -ENOMEM;
- goto release_regions;
+ return ret;
}
- hw = to_ish_hw(dev);
- dev->print_log = ish_event_tracer;
+ hw = to_ish_hw(ishtp);
+ ishtp->print_log = ish_event_tracer;
/* mapping IO device memory */
- hw->mem_addr = pci_iomap(pdev, 0, 0);
- if (!hw->mem_addr) {
- dev_err(&pdev->dev, "ISH: mapping I/O range failure\n");
- ret = -ENOMEM;
- goto free_device;
- }
-
- dev->pdev = pdev;
-
+ hw->mem_addr = pcim_iomap_table(pdev)[0];
+ ishtp->pdev = pdev;
pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
/* request and enable interrupt */
- ret = request_irq(pdev->irq, ish_irq_handler, IRQF_SHARED,
- KBUILD_MODNAME, dev);
+ ret = devm_request_irq(dev, pdev->irq, ish_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, ishtp);
if (ret) {
- dev_err(&pdev->dev, "ISH: request IRQ failure (%d)\n",
- pdev->irq);
- goto free_device;
+ dev_err(dev, "ISH: request IRQ %d failed\n", pdev->irq);
+ return ret;
}
- dev_set_drvdata(dev->devc, dev);
+ dev_set_drvdata(ishtp->devc, ishtp);
- init_waitqueue_head(&dev->suspend_wait);
- init_waitqueue_head(&dev->resume_wait);
+ init_waitqueue_head(&ishtp->suspend_wait);
+ init_waitqueue_head(&ishtp->resume_wait);
- ret = ish_init(dev);
+ ret = ish_init(ishtp);
if (ret)
- goto free_irq;
+ return ret;
return 0;
-
-free_irq:
- free_irq(pdev->irq, dev);
-free_device:
- pci_iounmap(pdev, hw->mem_addr);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
- dev_err(&pdev->dev, "ISH: PCI driver initialization failed.\n");
-
- return ret;
}
/**
@@ -204,16 +184,9 @@ disable_device:
static void ish_remove(struct pci_dev *pdev)
{
struct ishtp_device *ishtp_dev = pci_get_drvdata(pdev);
- struct ish_hw *hw = to_ish_hw(ishtp_dev);
ishtp_bus_remove_all_clients(ishtp_dev, false);
ish_device_disable(ishtp_dev);
-
- free_irq(pdev->irq, ishtp_dev);
- pci_iounmap(pdev, hw->mem_addr);
- pci_release_regions(pdev);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
}
static struct device __maybe_unused *ish_resume_device;
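Switching to pcim_enable_device(), pcim_iomap_regions() and devm_request_irq() is what makes all of the goto-unwind labels and most of ish_remove() disappear: managed resources are released automatically, in reverse acquisition order, when probe fails or the device goes away. A generic sketch of the shape (names invented, not from this driver):

	#include <linux/pci.h>
	#include <linux/interrupt.h>

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		void __iomem *regs;
		int ret;

		ret = pcim_enable_device(pdev);	/* auto-disabled on detach */
		if (ret)
			return ret;

		ret = pcim_iomap_regions(pdev, 1 << 0, "example");
		if (ret)
			return ret;	/* nothing to unwind by hand */

		regs = pcim_iomap_table(pdev)[0];

		return devm_request_irq(&pdev->dev, pdev->irq, example_irq,
					IRQF_SHARED, "example", regs);
	}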
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index 2d28cffc1404..e64243bc9c96 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -320,23 +320,14 @@ do_get_report:
*/
static void ish_cl_event_cb(struct ishtp_cl_device *device)
{
- struct ishtp_cl *hid_ishtp_cl = device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(device);
struct ishtp_cl_rb *rb_in_proc;
size_t r_length;
- unsigned long flags;
if (!hid_ishtp_cl)
return;
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
- while (!list_empty(&hid_ishtp_cl->in_process_list.list)) {
- rb_in_proc = list_entry(
- hid_ishtp_cl->in_process_list.list.next,
- struct ishtp_cl_rb, list);
- list_del_init(&rb_in_proc->list);
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock,
- flags);
-
+ while ((rb_in_proc = ishtp_cl_rx_get_rb(hid_ishtp_cl)) != NULL) {
if (!rb_in_proc->buffer.data)
return;
@@ -346,9 +337,7 @@ static void ish_cl_event_cb(struct ishtp_cl_device *device)
process_recv(hid_ishtp_cl, rb_in_proc->buffer.data, r_length);
ishtp_cl_io_rb_recycle(rb_in_proc);
- spin_lock_irqsave(&hid_ishtp_cl->in_process_spinlock, flags);
}
- spin_unlock_irqrestore(&hid_ishtp_cl->in_process_spinlock, flags);
}
/**
@@ -637,8 +626,8 @@ static int ishtp_get_report_descriptor(struct ishtp_cl *hid_ishtp_cl,
static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
{
struct ishtp_device *dev;
- unsigned long flags;
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
+ struct ishtp_fw_client *fw_client;
int i;
int rv;
@@ -660,16 +649,14 @@ static int hid_ishtp_cl_init(struct ishtp_cl *hid_ishtp_cl, int reset)
hid_ishtp_cl->rx_ring_size = HID_CL_RX_RING_SIZE;
hid_ishtp_cl->tx_ring_size = HID_CL_TX_RING_SIZE;
- spin_lock_irqsave(&dev->fw_clients_lock, flags);
- i = ishtp_fw_cl_by_uuid(dev, &hid_ishtp_guid);
- if (i < 0) {
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ fw_client = ishtp_fw_cl_get_client(dev, &hid_ishtp_guid);
+ if (!fw_client) {
dev_err(&client_data->cl_device->dev,
"ish client uuid not found\n");
- return i;
+ return -ENOENT;
}
- hid_ishtp_cl->fw_client_id = dev->fw_clients[i].client_id;
- spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+
+ hid_ishtp_cl->fw_client_id = fw_client->client_id;
hid_ishtp_cl->state = ISHTP_CL_CONNECTING;
rv = ishtp_cl_connect(hid_ishtp_cl);
@@ -765,7 +752,7 @@ static void hid_ishtp_cl_reset_handler(struct work_struct *work)
if (!hid_ishtp_cl)
return;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
@@ -814,7 +801,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
if (!hid_ishtp_cl)
return -ENOMEM;
- cl_device->driver_data = hid_ishtp_cl;
+ ishtp_set_drvdata(cl_device, hid_ishtp_cl);
hid_ishtp_cl->client_data = client_data;
client_data->hid_ishtp_cl = hid_ishtp_cl;
client_data->cl_device = cl_device;
@@ -844,7 +831,7 @@ static int hid_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -874,7 +861,7 @@ static int hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
*/
static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
{
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -898,7 +885,7 @@ static int hid_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
static int hid_ishtp_cl_suspend(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
@@ -919,7 +906,7 @@ static int hid_ishtp_cl_suspend(struct device *device)
static int hid_ishtp_cl_resume(struct device *device)
{
struct ishtp_cl_device *cl_device = to_ishtp_cl_device(device);
- struct ishtp_cl *hid_ishtp_cl = cl_device->driver_data;
+ struct ishtp_cl *hid_ishtp_cl = ishtp_get_drvdata(cl_device);
struct ishtp_cl_data *client_data = hid_ishtp_cl->client_data;
hid_ishtp_trace(client_data, "%s hid_ishtp_cl %p\n", __func__,
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
index 2623a567ffba..728dc6d4561a 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.c
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
@@ -149,6 +149,31 @@ int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *uuid)
EXPORT_SYMBOL(ishtp_fw_cl_by_uuid);
/**
+ * ishtp_fw_cl_get_client() - return firmware client information to the caller
+ * @dev: the ishtp device structure
+ * @uuid: uuid of the client to search
+ *
+ * Search for the firmware client by UUID and return the related client
+ * information.
+ *
+ * Return: pointer to the client information on success, NULL on failure.
+ */
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->fw_clients_lock, flags);
+ i = ishtp_fw_cl_by_uuid(dev, uuid);
+ spin_unlock_irqrestore(&dev->fw_clients_lock, flags);
+ if (i < 0 || dev->fw_clients[i].props.fixed_address)
+ return NULL;
+
+ return &dev->fw_clients[i];
+}
+EXPORT_SYMBOL(ishtp_fw_cl_get_client);
+
+/**
* ishtp_fw_cl_by_id() - return index to fw_clients for client_id
* @dev: the ishtp device structure
* @client_id: fw client id to search
@@ -564,6 +589,33 @@ void ishtp_put_device(struct ishtp_cl_device *cl_device)
EXPORT_SYMBOL(ishtp_put_device);
/**
+ * ishtp_set_drvdata() - set client driver data
+ * @cl_device: client device instance
+ * @data: driver data need to be set
+ *
+ * Set client driver data to cl_device->driver_data.
+ */
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data)
+{
+ cl_device->driver_data = data;
+}
+EXPORT_SYMBOL(ishtp_set_drvdata);
+
+/**
+ * ishtp_get_drvdata() - get client driver data
+ * @cl_device: client device instance
+ *
+ * Get client driver data from cl_device->driver_data.
+ *
+ * Return: pointer to the driver data
+ */
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device)
+{
+ return cl_device->driver_data;
+}
+EXPORT_SYMBOL(ishtp_get_drvdata);
+
+/**
* ishtp_bus_new_client() - Create a new client
* @dev: ISHTP device instance
*
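The accessor pair exists so client drivers stop dereferencing struct ishtp_cl_device directly, which is what the matching ishtp-hid-client.c changes above do and what later allows the structure to go opaque. A sketch of a client probe/remove pair using them (illustrative only; error handling trimmed):

	/* Sketch: store and retrieve the per-device client pointer
	 * through the accessors instead of cl_device->driver_data. */
	static int example_cl_probe(struct ishtp_cl_device *cl_device)
	{
		struct ishtp_cl *cl = ishtp_cl_allocate(cl_device->ishtp_dev);

		if (!cl)
			return -ENOMEM;

		ishtp_set_drvdata(cl_device, cl);
		return 0;
	}

	static int example_cl_remove(struct ishtp_cl_device *cl_device)
	{
		struct ishtp_cl *cl = ishtp_get_drvdata(cl_device);

		ishtp_cl_free(cl);
		return 0;
	}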
diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.h b/drivers/hid/intel-ish-hid/ishtp/bus.h
index a1ffae7f26ad..b8a5bcc82536 100644
--- a/drivers/hid/intel-ish-hid/ishtp/bus.h
+++ b/drivers/hid/intel-ish-hid/ishtp/bus.h
@@ -101,6 +101,9 @@ void ishtp_reset_compl_handler(struct ishtp_device *dev);
void ishtp_put_device(struct ishtp_cl_device *);
void ishtp_get_device(struct ishtp_cl_device *);
+void ishtp_set_drvdata(struct ishtp_cl_device *cl_device, void *data);
+void *ishtp_get_drvdata(struct ishtp_cl_device *cl_device);
+
int __ishtp_cl_driver_register(struct ishtp_cl_driver *driver,
struct module *owner);
#define ishtp_cl_driver_register(driver) \
@@ -110,5 +113,7 @@ void ishtp_cl_driver_unregister(struct ishtp_cl_driver *driver);
int ishtp_register_event_cb(struct ishtp_cl_device *device,
void (*read_cb)(struct ishtp_cl_device *));
int ishtp_fw_cl_by_uuid(struct ishtp_device *dev, const uuid_le *cuuid);
+struct ishtp_fw_client *ishtp_fw_cl_get_client(struct ishtp_device *dev,
+ const uuid_le *uuid);
#endif /* _LINUX_ISHTP_CL_BUS_H */
diff --git a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
index b9b917d2d50d..248651c35497 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client-buffers.c
@@ -69,6 +69,8 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
int j;
unsigned long flags;
+ cl->tx_ring_free_size = 0;
+
/* Allocate pool to free Tx bufs */
for (j = 0; j < cl->tx_ring_size; ++j) {
struct ishtp_cl_tx_ring *tx_buf;
@@ -85,6 +87,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
spin_lock_irqsave(&cl->tx_free_list_spinlock, flags);
list_add_tail(&tx_buf->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, flags);
}
return 0;
@@ -144,6 +147,7 @@ void ishtp_cl_free_tx_ring(struct ishtp_cl *cl)
tx_buf = list_entry(cl->tx_free_list.list.next,
struct ishtp_cl_tx_ring, list);
list_del(&tx_buf->list);
+ --cl->tx_ring_free_size;
kfree(tx_buf->send_buf.data);
kfree(tx_buf);
}
@@ -255,3 +259,48 @@ int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb)
return rets;
}
EXPORT_SYMBOL(ishtp_cl_io_rb_recycle);
+
+/**
+ * ishtp_cl_tx_empty() - test whether the client device tx buffer is empty
+ * @cl: Pointer to client device instance
+ *
+ * Look at the client device tx buffer list and check whether it is empty.
+ *
+ * Return: true if the client tx buffer list is empty, otherwise false
+ */
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl)
+{
+ int tx_list_empty;
+ unsigned long tx_flags;
+
+ spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
+ tx_list_empty = list_empty(&cl->tx_list.list);
+ spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
+
+ return !!tx_list_empty;
+}
+EXPORT_SYMBOL(ishtp_cl_tx_empty);
+
+/**
+ * ishtp_cl_rx_get_rb() - get an rb from the client device rx buffer list
+ * @cl: Pointer to client device instance
+ *
+ * Check the client device in-process buffer list and take an rb from it.
+ *
+ * Return: rb pointer if the buffer list isn't empty, otherwise NULL
+ */
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl)
+{
+ unsigned long rx_flags;
+ struct ishtp_cl_rb *rb;
+
+ spin_lock_irqsave(&cl->in_process_spinlock, rx_flags);
+ rb = list_first_entry_or_null(&cl->in_process_list.list,
+ struct ishtp_cl_rb, list);
+ if (rb)
+ list_del_init(&rb->list);
+ spin_unlock_irqrestore(&cl->in_process_spinlock, rx_flags);
+
+ return rb;
+}
+EXPORT_SYMBOL(ishtp_cl_rx_get_rb);
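A sketch of a receive path built on the two helpers above; my_consume()
is hypothetical, while ishtp_cl_rx_get_rb() and ishtp_cl_io_rb_recycle()
are the exported interfaces (field names as in struct ishtp_cl_rb):

static void my_consume(void *data, size_t len); /* hypothetical */

static void my_drain_rx(struct ishtp_cl *cl)
{
        struct ishtp_cl_rb *rb;

        /* Pop completed read buffers off the in-process list */
        while ((rb = ishtp_cl_rx_get_rb(cl)) != NULL) {
                my_consume(rb->buffer.data, rb->buf_idx);
                /* Return the buffer to the free pool for reuse */
                ishtp_cl_io_rb_recycle(rb);
        }
}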
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.c b/drivers/hid/intel-ish-hid/ishtp/client.c
index 007443ef5fca..faeccdb1475b 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.c
+++ b/drivers/hid/intel-ish-hid/ishtp/client.c
@@ -22,6 +22,25 @@
#include "hbm.h"
#include "client.h"
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
+{
+ unsigned long tx_free_flags;
+ int size;
+
+ spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
+ size = cl->tx_ring_free_size * cl->device->fw_client->props.max_msg_length;
+ spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
+
+ return size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
+
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
+{
+ return cl->tx_ring_free_size;
+}
+EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
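A minimal flow-control sketch using the new counter; my_send() is
hypothetical, ishtp_cl_get_tx_free_rings() and ishtp_cl_send() are the
interfaces shown in this patch:

static int my_send(struct ishtp_cl *cl, uint8_t *buf, size_t len)
{
        /* Back off early instead of letting the send path fail */
        if (!ishtp_cl_get_tx_free_rings(cl))
                return -EBUSY;

        return ishtp_cl_send(cl, buf, len);
}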
+
/**
* ishtp_read_list_flush() - Flush read queue
* @cl: ishtp client instance
@@ -90,6 +109,7 @@ static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
+ cl->tx_ring_free_size = cl->tx_ring_size;
/* dma */
cl->last_tx_path = CL_TX_PATH_IPC;
@@ -577,6 +597,8 @@ int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
* max ISHTP message size per client
*/
list_del_init(&cl_msg->list);
+ --cl->tx_ring_free_size;
+
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
memcpy(cl_msg->send_buf.data, buf, length);
cl_msg->send_buf.size = length;
@@ -685,6 +707,7 @@ static void ipc_tx_callback(void *prm)
ishtp_write_message(dev, &ishtp_hdr, pmsg);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
tx_free_flags);
} else {
@@ -778,6 +801,7 @@ static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
+ ++cl->tx_ring_free_size;
spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
++cl->send_msg_cnt_dma;
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/client.h b/drivers/hid/intel-ish-hid/ishtp/client.h
index 79eade547f5d..042f4c4853b1 100644
--- a/drivers/hid/intel-ish-hid/ishtp/client.h
+++ b/drivers/hid/intel-ish-hid/ishtp/client.h
@@ -84,6 +84,7 @@ struct ishtp_cl {
/* Client Tx buffers list */
unsigned int tx_ring_size;
struct ishtp_cl_tx_ring tx_list, tx_free_list;
+ int tx_ring_free_size;
spinlock_t tx_list_spinlock;
spinlock_t tx_free_list_spinlock;
size_t tx_offs; /* Offset in buffer at head of 'tx_list' */
@@ -137,6 +138,8 @@ int ishtp_cl_alloc_rx_ring(struct ishtp_cl *cl);
int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_rx_ring(struct ishtp_cl *cl);
void ishtp_cl_free_tx_ring(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl);
+int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl);
/* DMA I/F functions */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
@@ -178,5 +181,7 @@ int ishtp_cl_flush_queues(struct ishtp_cl *cl);
/* exported functions from ISHTP client buffer management scope */
int ishtp_cl_io_rb_recycle(struct ishtp_cl_rb *rb);
+bool ishtp_cl_tx_empty(struct ishtp_cl *cl);
+struct ishtp_cl_rb *ishtp_cl_rx_get_rb(struct ishtp_cl *cl);
#endif /* _ISHTP_CLIENT_H_ */
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 6a6d927b78b0..e7c6bfefaf9e 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -207,7 +207,7 @@ struct ishtp_device {
struct work_struct bh_hbm_work;
/* IPC write queue */
- struct wr_msg_ctl_info wr_processing_list_head, wr_free_list_head;
+ struct list_head wr_processing_list, wr_free_list;
/* For both processing list and free list */
spinlock_t wr_processing_spinlock;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index e0a06be5ef5c..5dd3a8245f0f 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3335,6 +3335,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
void wacom_setup_device_quirks(struct wacom *wacom)
{
+ struct wacom_wac *wacom_wac = &wacom->wacom_wac;
struct wacom_features *features = &wacom->wacom_wac.features;
/* The pen and pad share the same interface on most devices */
@@ -3464,6 +3465,24 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type == REMOTE)
features->device_type |= WACOM_DEVICETYPE_WL_MONITOR;
+
+ /* HID descriptor for DTK-2451 / DTH-2452 claims to report lots
+ * of things it shouldn't. Let's fix up the damage...
+ */
+ if (wacom->hdev->product == 0x382 || wacom->hdev->product == 0x37d) {
+ features->quirks &= ~WACOM_QUIRK_TOOLSERIAL;
+ __clear_bit(BTN_TOOL_BRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_PENCIL, wacom_wac->pen_input->keybit);
+ __clear_bit(BTN_TOOL_AIRBRUSH, wacom_wac->pen_input->keybit);
+ __clear_bit(ABS_Z, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_DISTANCE, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_X, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_TILT_Y, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_WHEEL, wacom_wac->pen_input->absbit);
+ __clear_bit(ABS_MISC, wacom_wac->pen_input->absbit);
+ __clear_bit(MSC_SERIAL, wacom_wac->pen_input->mscbit);
+ __clear_bit(EV_MSC, wacom_wac->pen_input->evbit);
+ }
}
int wacom_setup_pen_input_capabilities(struct input_dev *input_dev,
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 741857d80da1..de8193f3b838 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -79,85 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
}
EXPORT_SYMBOL_GPL(vmbus_setevent);
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
- u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
- void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
+{
+ hv_ringbuffer_cleanup(&channel->outbound);
+ hv_ringbuffer_cleanup(&channel->inbound);
+
+ if (channel->ringbuffer_page) {
+ __free_pages(channel->ringbuffer_page,
+ get_order(channel->ringbuffer_pagecount
+ << PAGE_SHIFT));
+ channel->ringbuffer_page = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
+
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+ u32 send_size, u32 recv_size)
+{
+ struct page *page;
+ int order;
+
+ if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
+ return -EINVAL;
+
+ /* Allocate the ring buffer */
+ order = get_order(send_size + recv_size);
+ page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
+ GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
+
+ if (!page)
+ return -ENOMEM;
+
+ newchannel->ringbuffer_page = page;
+ newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+ newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
+
+static int __vmbus_open(struct vmbus_channel *newchannel,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
{
struct vmbus_channel_open_channel *open_msg;
struct vmbus_channel_msginfo *open_info = NULL;
+ struct page *page = newchannel->ringbuffer_page;
+ u32 send_pages, recv_pages;
unsigned long flags;
- int ret, err = 0;
- struct page *page;
+ int err;
- if (send_ringbuffer_size % PAGE_SIZE ||
- recv_ringbuffer_size % PAGE_SIZE)
+ if (userdatalen > MAX_USER_DEFINED_BYTES)
return -EINVAL;
+ send_pages = newchannel->ringbuffer_send_offset;
+ recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
spin_lock_irqsave(&newchannel->lock, flags);
- if (newchannel->state == CHANNEL_OPEN_STATE) {
- newchannel->state = CHANNEL_OPENING_STATE;
- } else {
+ if (newchannel->state != CHANNEL_OPEN_STATE) {
spin_unlock_irqrestore(&newchannel->lock, flags);
return -EINVAL;
}
spin_unlock_irqrestore(&newchannel->lock, flags);
+ newchannel->state = CHANNEL_OPENING_STATE;
newchannel->onchannel_callback = onchannelcallback;
newchannel->channel_callback_context = context;
- /* Allocate the ring buffer */
- page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
- GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
-
- if (!page)
- page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
- get_order(send_ringbuffer_size +
- recv_ringbuffer_size));
-
- if (!page) {
- err = -ENOMEM;
- goto error_set_chnstate;
- }
-
- newchannel->ringbuffer_pages = page_address(page);
- newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
- recv_ringbuffer_size) >> PAGE_SHIFT;
-
- ret = hv_ringbuffer_init(&newchannel->outbound, page,
- send_ringbuffer_size >> PAGE_SHIFT);
-
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
-
- ret = hv_ringbuffer_init(&newchannel->inbound,
- &page[send_ringbuffer_size >> PAGE_SHIFT],
- recv_ringbuffer_size >> PAGE_SHIFT);
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+ err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+ if (err)
+ goto error_clean_ring;
+ err = hv_ringbuffer_init(&newchannel->inbound,
+ &page[send_pages], recv_pages);
+ if (err)
+ goto error_clean_ring;
/* Establish the gpadl for the ring buffer */
newchannel->ringbuffer_gpadlhandle = 0;
- ret = vmbus_establish_gpadl(newchannel,
- page_address(page),
- send_ringbuffer_size +
- recv_ringbuffer_size,
+ err = vmbus_establish_gpadl(newchannel,
+ page_address(newchannel->ringbuffer_page),
+ (send_pages + recv_pages) << PAGE_SHIFT,
&newchannel->ringbuffer_gpadlhandle);
-
- if (ret != 0) {
- err = ret;
- goto error_free_pages;
- }
+ if (err)
+ goto error_clean_ring;
/* Create and init the channel open message */
open_info = kmalloc(sizeof(*open_info) +
@@ -176,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
- open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
- PAGE_SHIFT;
+ open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
open_msg->target_vp = newchannel->target_vp;
- if (userdatalen > MAX_USER_DEFINED_BYTES) {
- err = -EINVAL;
- goto error_free_gpadl;
- }
-
if (userdatalen)
memcpy(open_msg->userdata, userdata, userdatalen);
@@ -195,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
}
- ret = vmbus_post_msg(open_msg,
+ err = vmbus_post_msg(open_msg,
sizeof(struct vmbus_channel_open_channel), true);
- trace_vmbus_open(open_msg, ret);
+ trace_vmbus_open(open_msg, err);
- if (ret != 0) {
- err = ret;
+ if (err != 0)
goto error_clean_msglist;
- }
wait_for_completion(&open_info->waitevent);
@@ -216,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
if (newchannel->rescind) {
err = -ENODEV;
- goto error_free_gpadl;
+ goto error_free_info;
}
if (open_info->response.open_result.status) {
err = -EAGAIN;
- goto error_free_gpadl;
+ goto error_free_info;
}
newchannel->state = CHANNEL_OPENED_STATE;
@@ -232,19 +235,50 @@ error_clean_msglist:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+ kfree(open_info);
error_free_gpadl:
vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- kfree(open_info);
-error_free_pages:
+ newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
- __free_pages(page,
- get_order(send_ringbuffer_size + recv_ringbuffer_size));
-error_set_chnstate:
newchannel->state = CHANNEL_OPEN_STATE;
return err;
}
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+ u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+ void *userdata, u32 userdatalen,
+ void (*onchannelcallback)(void *context), void *context)
+{
+ int err;
+
+ err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+ recv_ringbuffer_size);
+ if (err)
+ return err;
+
+ err = __vmbus_open(newchannel, userdata, userdatalen,
+ onchannelcallback, context);
+ if (err)
+ vmbus_free_ring(newchannel);
+
+ return err;
+}
EXPORT_SYMBOL_GPL(vmbus_open);
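A sketch of the split-open path this refactoring enables; my_attach()
and my_onchannel_cb() are hypothetical, the vmbus_* calls are the
interfaces exported above:

static void my_onchannel_cb(void *context); /* hypothetical */

static int my_attach(struct vmbus_channel *chan)
{
        int err;

        /* One-time ring allocation; sizes must be multiples of PAGE_SIZE */
        err = vmbus_alloc_ring(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE);
        if (err)
                return err;

        /* Open the channel over the pre-allocated ring buffer */
        err = vmbus_connect_ring(chan, my_onchannel_cb, chan);
        if (err)
                vmbus_free_ring(chan);

        return err;
}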
/* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -612,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
* here we should skip most of the below cleanup work.
*/
- if (channel->state != CHANNEL_OPENED_STATE) {
- ret = -EINVAL;
- goto out;
- }
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -EINVAL;
channel->state = CHANNEL_OPEN_STATE;
@@ -637,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to post the close msg,
* it is perhaps better to leak memory.
*/
- goto out;
}
/* Tear down the gpadl for the channel's ring buffer */
- if (channel->ringbuffer_gpadlhandle) {
+ else if (channel->ringbuffer_gpadlhandle) {
ret = vmbus_teardown_gpadl(channel,
channel->ringbuffer_gpadlhandle);
if (ret) {
@@ -650,74 +681,78 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* If we failed to teardown gpadl,
* it is perhaps better to leak memory.
*/
- goto out;
}
- }
-
- /* Cleanup the ring buffers for this channel */
- hv_ringbuffer_cleanup(&channel->outbound);
- hv_ringbuffer_cleanup(&channel->inbound);
- free_pages((unsigned long)channel->ringbuffer_pages,
- get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ channel->ringbuffer_gpadlhandle = 0;
+ }
-out:
return ret;
}
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* vmbus_disconnect_ring - close sub-channels and then the primary channel */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
{
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_channel;
+ struct vmbus_channel *cur_channel, *tmp;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
- if (channel->primary_channel != NULL) {
- /*
- * We will only close sub-channels when
- * the primary is closed.
- */
- return;
- }
- /*
- * Close all the sub-channels first and then close the
- * primary channel.
- */
- list_for_each_safe(cur, tmp, &channel->sc_list) {
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
- if (cur_channel->rescind) {
+ if (channel->primary_channel != NULL)
+ return -EINVAL;
+
+ /* Snapshot the list of subchannels */
+ spin_lock_irqsave(&channel->lock, flags);
+ list_splice_init(&channel->sc_list, &list);
+ channel->num_sc = 0;
+ spin_unlock_irqrestore(&channel->lock, flags);
+
+ list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+ if (cur_channel->rescind)
wait_for_completion(&cur_channel->rescind_event);
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
- hv_process_channel_removal(
- cur_channel->offermsg.child_relid);
- } else {
- mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(cur_channel);
+
+ mutex_lock(&vmbus_connection.channel_mutex);
+ if (vmbus_close_internal(cur_channel) == 0) {
+ vmbus_free_ring(cur_channel);
+
+ if (cur_channel->rescind)
+ hv_process_channel_removal(cur_channel);
}
mutex_unlock(&vmbus_connection.channel_mutex);
}
+
/*
* Now close the primary.
*/
mutex_lock(&vmbus_connection.channel_mutex);
- vmbus_close_internal(channel);
+ ret = vmbus_close_internal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+ if (vmbus_disconnect_ring(channel) == 0)
+ vmbus_free_ring(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);
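With close split into disconnect + free, a driver can tear the channel
down while keeping its ring mapping and later re-open over the same
pages; a sketch (my_onchannel_cb() as in the sketch above):

static int my_reopen(struct vmbus_channel *chan)
{
        int err = vmbus_disconnect_ring(chan);

        if (err)
                return err;

        /* The ring pages were not freed; just reconnect over them */
        return vmbus_connect_ring(chan, my_onchannel_cb, chan);
}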
/**
* vmbus_sendpacket() - Send the specified buffer on the given channel
- * @channel: Pointer to vmbus_channel structure.
- * @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
+ * @channel: Pointer to vmbus_channel structure
+ * @buffer: Pointer to the buffer you want to send the data from.
+ * @bufferlen: Maximum size of what the buffer holds.
* @requestid: Identifier of the request
- * @type: Type of packet that is being send e.g. negotiate, time
- * packet etc.
+ * @type: Type of packet that is being sent e.g. negotiate, time
+ * packet etc.
+ * @flags: 0 or VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED
*
- * Sends data in @buffer directly to hyper-v via the vmbus
- * This will send the data unparsed to hyper-v.
+ * Sends data in @buffer directly to Hyper-V via the vmbus.
+ * This will send the data unparsed to Hyper-V.
*
* Mainly used by Hyper-V drivers.
*/
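A minimal call sketch (my_send_inband() and the payload are invented;
the packet type and completion flag are existing vmbus definitions):

static int my_send_inband(struct vmbus_channel *channel)
{
        u8 payload[16] = {};

        return vmbus_sendpacket(channel, payload, sizeof(payload),
                                0 /* requestid */, VM_PKT_DATA_INBAND,
                                VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}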
@@ -850,12 +885,13 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
/**
- * vmbus_recvpacket() - Retrieve the user packet on the specified channel
- * @channel: Pointer to vmbus_channel structure.
+ * __vmbus_recvpacket() - Retrieve the user packet on the specified channel
+ * @channel: Pointer to vmbus_channel structure
* @buffer: Pointer to the buffer you want to receive the data into.
- * @bufferlen: Maximum size of what the the buffer will hold
- * @buffer_actual_len: The actual size of the data after it was received
+ * @bufferlen: Maximum size of what the buffer can hold.
+ * @buffer_actual_len: The actual size of the data after it was received.
* @requestid: Identifier of the request
+ * @raw: true means keep the vmpacket_descriptor header in the received data.
*
* Receives directly from the hyper-v vmbus and puts the data it received
* into Buffer. This will receive the data unparsed from hyper-v.
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0f0e091c117c..6277597d3d58 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -198,24 +198,19 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
}
/**
- * vmbus_prep_negotiate_resp() - Create default response for Hyper-V Negotiate message
+ * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
* @icmsghdrp: Pointer to msg header structure
- * @icmsg_negotiate: Pointer to negotiate message structure
* @buf: Raw buffer channel data
+ * @fw_version: The framework versions we can support.
+ * @fw_vercnt: The size of @fw_version.
+ * @srv_version: The service versions we can support.
+ * @srv_vercnt: The size of @srv_version.
+ * @nego_fw_version: The selected framework version.
+ * @nego_srv_version: The selected service version.
*
- * @icmsghdrp is of type &struct icmsg_hdr.
- * Set up and fill in default negotiate response message.
- *
- * The fw_version and fw_vercnt specifies the framework version that
- * we can support.
- *
- * The srv_version and srv_vercnt specifies the service
- * versions we can support.
- *
- * Versions are given in decreasing order.
- *
- * nego_fw_version and nego_srv_version store the selected protocol versions.
+ * Note: Versions are given in decreasing order.
*
+ * Set up and fill in default negotiate response message.
* Mainly used by Hyper-V drivers.
*/
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
@@ -385,21 +380,14 @@ static void vmbus_release_relid(u32 relid)
trace_vmbus_release_relid(&msg, ret);
}
-void hv_process_channel_removal(u32 relid)
+void hv_process_channel_removal(struct vmbus_channel *channel)
{
+ struct vmbus_channel *primary_channel;
unsigned long flags;
- struct vmbus_channel *primary_channel, *channel;
BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex));
-
- /*
- * Make sure channel is valid as we may have raced.
- */
- channel = relid2channel(relid);
- if (!channel)
- return;
-
BUG_ON(!channel->rescind);
+
if (channel->target_cpu != get_cpu()) {
put_cpu();
smp_call_function_single(channel->target_cpu,
@@ -429,7 +417,7 @@ void hv_process_channel_removal(u32 relid)
cpumask_clear_cpu(channel->target_cpu,
&primary_channel->alloced_cpus_in_node);
- vmbus_release_relid(relid);
+ vmbus_release_relid(channel->offermsg.child_relid);
free_channel(channel);
}
@@ -606,16 +594,18 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
bool perf_chn = vmbus_devs[dev_type].perf_device;
struct vmbus_channel *primary = channel->primary_channel;
int next_node;
- struct cpumask available_mask;
+ cpumask_var_t available_mask;
struct cpumask *alloced_mask;
if ((vmbus_proto_version == VERSION_WS2008) ||
- (vmbus_proto_version == VERSION_WIN7) || (!perf_chn)) {
+ (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
+ !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
/*
* Prior to win8, all channel interrupts are
* delivered on cpu 0.
* Also if the channel is not a performance critical
* channel, bind it to cpu 0.
+ * In case alloc_cpumask_var() fails, bind it to cpu 0.
*/
channel->numa_node = 0;
channel->target_cpu = 0;
@@ -653,7 +643,7 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
cpumask_clear(alloced_mask);
}
- cpumask_xor(&available_mask, alloced_mask,
+ cpumask_xor(available_mask, alloced_mask,
cpumask_of_node(primary->numa_node));
cur_cpu = -1;
@@ -671,10 +661,10 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
}
while (true) {
- cur_cpu = cpumask_next(cur_cpu, &available_mask);
+ cur_cpu = cpumask_next(cur_cpu, available_mask);
if (cur_cpu >= nr_cpu_ids) {
cur_cpu = -1;
- cpumask_copy(&available_mask,
+ cpumask_copy(available_mask,
cpumask_of_node(primary->numa_node));
continue;
}
@@ -704,6 +694,8 @@ static void init_vp_index(struct vmbus_channel *channel, u16 dev_type)
channel->target_cpu = cur_cpu;
channel->target_vp = hv_cpu_number_to_vp_number(cur_cpu);
+
+ free_cpumask_var(available_mask);
}
static void vmbus_wait_for_unload(void)
@@ -943,7 +935,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
* The channel is currently not open;
* it is safe for us to cleanup the channel.
*/
- hv_process_channel_removal(rescind->child_relid);
+ hv_process_channel_removal(channel);
} else {
complete(&channel->rescind_event);
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 748a1c4172a6..332d7c34be5c 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -189,6 +189,17 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)
int hv_synic_alloc(void)
{
int cpu;
+ struct hv_per_cpu_context *hv_cpu;
+
+ /*
+ * First, zero all per-cpu memory areas so hv_synic_free() can
+ * detect what memory has been allocated and clean up properly
+ * after any failures.
+ */
+ for_each_present_cpu(cpu) {
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
+ memset(hv_cpu, 0, sizeof(*hv_cpu));
+ }
hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
GFP_KERNEL);
@@ -198,10 +209,8 @@ int hv_synic_alloc(void)
}
for_each_present_cpu(cpu) {
- struct hv_per_cpu_context *hv_cpu
- = per_cpu_ptr(hv_context.cpu_context, cpu);
+ hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
- memset(hv_cpu, 0, sizeof(*hv_cpu));
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b1b788082793..41631512ae97 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -689,7 +689,7 @@ static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
__online_page_increment_counters(pg);
__online_page_free(pg);
- WARN_ON_ONCE(!spin_is_locked(&dm_device.ha_lock));
+ lockdep_assert_held(&dm_device.ha_lock);
dm_device.num_pages_onlined++;
}
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5eed1e7da15c..a7513a8a8e37 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -353,7 +353,6 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op)
out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled;
- default:
utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id,
MAX_ADAPTER_ID_SIZE,
UTF16_LITTLE_ENDIAN,
@@ -406,7 +405,7 @@ kvp_send_key(struct work_struct *dummy)
process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO);
break;
case KVP_OP_GET_IP_INFO:
- process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO);
+ /* We only need to pass on message->kvp_hdr.operation. */
break;
case KVP_OP_SET:
switch (in_msg->body.kvp_set.data.value_type) {
@@ -421,7 +420,7 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.value,
HV_KVP_EXCHANGE_MAX_VALUE_SIZE - 1) + 1;
- break;
+ break;
case REG_U32:
/*
@@ -446,6 +445,9 @@ kvp_send_key(struct work_struct *dummy)
break;
}
+
+ break;
+
case KVP_OP_GET:
message->body.kvp_set.data.key_size =
utf16s_to_utf8s(
@@ -454,7 +456,7 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_set.data.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
- break;
+ break;
case KVP_OP_DELETE:
message->body.kvp_delete.key_size =
@@ -464,12 +466,12 @@ kvp_send_key(struct work_struct *dummy)
UTF16_LITTLE_ENDIAN,
message->body.kvp_delete.key,
HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1;
- break;
+ break;
case KVP_OP_ENUMERATE:
message->body.kvp_enum_data.index =
in_msg->body.kvp_enum_data.index;
- break;
+ break;
}
kvp_transaction.state = HVUTIL_USERSPACE_REQ;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 3e90eb91db45..64d0c85d5161 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
{
vunmap(ring_info->ring_buffer);
+ ring_info->ring_buffer = NULL;
}
/* Write to the ring buffer. */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index c71cc857b649..283d184280af 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -498,6 +498,54 @@ static ssize_t device_show(struct device *dev,
}
static DEVICE_ATTR_RO(device);
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ char *driver_override, *old, *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = hv_dev->driver_override;
+ if (strlen(driver_override)) {
+ hv_dev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ hv_dev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct hv_device *hv_dev = device_to_hv_device(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", hv_dev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
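A userspace sketch of the new attribute in use (the device path is a
placeholder; uio_hv_generic is one driver this is typically paired
with):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* e.g. path = "/sys/bus/vmbus/devices/<guid>/driver_override" */
int force_override(const char *path, const char *drv)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0)
                return -1;
        if (write(fd, drv, strlen(drv)) < 0) {
                close(fd);
                return -1;
        }
        return close(fd);
}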
/* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */
static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_id.attr,
@@ -528,6 +576,7 @@ static struct attribute *vmbus_dev_attrs[] = {
&dev_attr_channel_vp_mapping.attr,
&dev_attr_vendor.attr,
&dev_attr_device.attr,
+ &dev_attr_driver_override.attr,
NULL,
};
ATTRIBUTE_GROUPS(vmbus_dev);
@@ -563,17 +612,26 @@ static inline bool is_null_guid(const uuid_le *guid)
return true;
}
-/*
- * Return a matching hv_vmbus_device_id pointer.
- * If there is no match, return NULL.
- */
-static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
- const uuid_le *guid)
+static const struct hv_vmbus_device_id *
+hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid)
+{
+ if (id == NULL)
+ return NULL; /* empty device table */
+
+ for (; !is_null_guid(&id->guid); id++)
+ if (!uuid_le_cmp(id->guid, *guid))
+ return id;
+
+ return NULL;
+}
+
+static const struct hv_vmbus_device_id *
+hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid)
{
const struct hv_vmbus_device_id *id = NULL;
struct vmbus_dynid *dynid;
- /* Look at the dynamic ids first, before the static ones */
spin_lock(&drv->dynids.lock);
list_for_each_entry(dynid, &drv->dynids.list, node) {
if (!uuid_le_cmp(dynid->id.guid, *guid)) {
@@ -583,18 +641,37 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
}
spin_unlock(&drv->dynids.lock);
- if (id)
- return id;
+ return id;
+}
- id = drv->id_table;
- if (id == NULL)
- return NULL; /* empty device table */
+static const struct hv_vmbus_device_id vmbus_device_null = {
+ .guid = NULL_UUID_LE,
+};
- for (; !is_null_guid(&id->guid); id++)
- if (!uuid_le_cmp(id->guid, *guid))
- return id;
+/*
+ * Return a matching hv_vmbus_device_id pointer.
+ * If there is no match, return NULL.
+ */
+static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv,
+ struct hv_device *dev)
+{
+ const uuid_le *guid = &dev->dev_type;
+ const struct hv_vmbus_device_id *id;
- return NULL;
+ /* When driver_override is set, only bind to the matching driver */
+ if (dev->driver_override && strcmp(dev->driver_override, drv->name))
+ return NULL;
+
+ /* Look at the dynamic ids first, before the static ones */
+ id = hv_vmbus_dynid_match(drv, guid);
+ if (!id)
+ id = hv_vmbus_dev_match(drv->id_table, guid);
+
+ /* driver_override will always match, send a dummy id */
+ if (!id && dev->driver_override)
+ id = &vmbus_device_null;
+
+ return id;
}
/* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */
@@ -643,7 +720,7 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf,
if (retval)
return retval;
- if (hv_vmbus_get_id(drv, &guid))
+ if (hv_vmbus_dynid_match(drv, &guid))
return -EEXIST;
retval = vmbus_add_dynid(drv, &guid);
@@ -708,7 +785,7 @@ static int vmbus_match(struct device *device, struct device_driver *driver)
if (is_hvsock_channel(hv_dev->channel))
return drv->hvsock;
- if (hv_vmbus_get_id(drv, &hv_dev->dev_type))
+ if (hv_vmbus_get_id(drv, hv_dev))
return 1;
return 0;
@@ -725,7 +802,7 @@ static int vmbus_probe(struct device *child_device)
struct hv_device *dev = device_to_hv_device(child_device);
const struct hv_vmbus_device_id *dev_id;
- dev_id = hv_vmbus_get_id(drv, &dev->dev_type);
+ dev_id = hv_vmbus_get_id(drv, dev);
if (drv->probe) {
ret = drv->probe(dev, dev_id);
if (ret != 0)
@@ -787,10 +864,9 @@ static void vmbus_device_release(struct device *device)
struct vmbus_channel *channel = hv_dev->channel;
mutex_lock(&vmbus_connection.channel_mutex);
- hv_process_channel_removal(channel->offermsg.child_relid);
+ hv_process_channel_removal(channel);
mutex_unlock(&vmbus_connection.channel_mutex);
kfree(hv_dev);
-
}
/* The one and only one */
diff --git a/drivers/hwtracing/coresight/coresight-catu.c b/drivers/hwtracing/coresight/coresight-catu.c
index ff94e58845b7..170fbb66bda2 100644
--- a/drivers/hwtracing/coresight/coresight-catu.c
+++ b/drivers/hwtracing/coresight/coresight-catu.c
@@ -406,6 +406,7 @@ static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
{
+ int rc;
u32 control, mode;
struct etr_buf *etr_buf = data;
@@ -418,6 +419,10 @@ static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
return -EBUSY;
}
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ return rc;
+
control |= BIT(CATU_CONTROL_ENABLE);
if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
@@ -459,6 +464,7 @@ static int catu_disable_hw(struct catu_drvdata *drvdata)
int rc = 0;
catu_write_control(drvdata, 0);
+ coresight_disclaim_device_unlocked(drvdata->base);
if (catu_wait_for_ready(drvdata)) {
dev_info(drvdata->dev, "Timeout while waiting for READY\n");
rc = -EAGAIN;
diff --git a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
index f6d0571ab9dd..299667b887fc 100644
--- a/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-dynamic-replicator.c
@@ -34,48 +34,87 @@ struct replicator_state {
struct coresight_device *csdev;
};
+/*
+ * replicator_reset() - Reset the replicator configuration to sane values.
+ */
+static void replicator_reset(struct replicator_state *drvdata)
+{
+ CS_UNLOCK(drvdata->base);
+
+ if (!coresight_claim_device_unlocked(drvdata->base)) {
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ coresight_disclaim_device_unlocked(drvdata->base);
+ }
+
+ CS_LOCK(drvdata->base);
+}
+
static int replicator_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc = 0;
+ u32 reg;
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
CS_UNLOCK(drvdata->base);
- /*
- * Ensure that the other port is disabled
- * 0x00 - passing through the replicator unimpeded
- * 0xff - disable (or impede) the flow of ATB data
- */
- if (outport == 0) {
- writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER0);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- } else {
- writel_relaxed(0x00, drvdata->base + REPLICATOR_IDFILTER1);
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ rc = coresight_claim_device_unlocked(drvdata->base);
+
+ /* Ensure that the outport is enabled. */
+ if (!rc) {
+ writel_relaxed(0x00, drvdata->base + reg);
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
}
CS_LOCK(drvdata->base);
- dev_info(drvdata->dev, "REPLICATOR enabled\n");
- return 0;
+ return rc;
}
static void replicator_disable(struct coresight_device *csdev, int inport,
int outport)
{
+ u32 reg;
struct replicator_state *drvdata = dev_get_drvdata(csdev->dev.parent);
+ switch (outport) {
+ case 0:
+ reg = REPLICATOR_IDFILTER0;
+ break;
+ case 1:
+ reg = REPLICATOR_IDFILTER1;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
CS_UNLOCK(drvdata->base);
/* disable the flow of ATB data through port */
- if (outport == 0)
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
- else
- writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
+ writel_relaxed(0xff, drvdata->base + reg);
+ if ((readl_relaxed(drvdata->base + REPLICATOR_IDFILTER0) == 0xff) &&
+ (readl_relaxed(drvdata->base + REPLICATOR_IDFILTER1) == 0xff))
+ coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
- dev_info(drvdata->dev, "REPLICATOR disabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
@@ -156,7 +195,11 @@ static int replicator_probe(struct amba_device *adev, const struct amba_id *id)
desc.groups = replicator_groups;
drvdata->csdev = coresight_register(&desc);
- return PTR_ERR_OR_ZERO(drvdata->csdev);
+ if (!IS_ERR(drvdata->csdev)) {
+ replicator_reset(drvdata);
+ return 0;
+ }
+ return PTR_ERR(drvdata->csdev);
}
#ifdef CONFIG_PM
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 306119eaf16a..824be0c5f592 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -5,7 +5,6 @@
* Description: CoreSight Embedded Trace Buffer driver
*/
-#include <asm/local.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -28,6 +27,7 @@
#include "coresight-priv.h"
+#include "coresight-etm-perf.h"
#define ETB_RAM_DEPTH_REG 0x004
#define ETB_STATUS_REG 0x00c
@@ -71,8 +71,8 @@
* @miscdev: specifics to handle "/dev/xyz.etb" entry.
* @spinlock: only one at a time pls.
* @reading: synchronise user space access to etb buffer.
- * @mode: this ETB is being used.
* @buf: area of memory where ETB buffer content gets sent.
+ * @mode: mode of operation for this ETB (CS_MODE_DISABLED/SYSFS/PERF).
* @buffer_depth: size of @buf.
* @trigger_cntr: amount of words to store after a trigger.
*/
@@ -84,12 +84,15 @@ struct etb_drvdata {
struct miscdevice miscdev;
spinlock_t spinlock;
local_t reading;
- local_t mode;
u8 *buf;
+ u32 mode;
u32 buffer_depth;
u32 trigger_cntr;
};
+static int etb_set_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle);
+
static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
{
u32 depth = 0;
@@ -103,7 +106,7 @@ static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
return depth;
}
-static void etb_enable_hw(struct etb_drvdata *drvdata)
+static void __etb_enable_hw(struct etb_drvdata *drvdata)
{
int i;
u32 depth;
@@ -131,32 +134,92 @@ static void etb_enable_hw(struct etb_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int etb_enable(struct coresight_device *csdev, u32 mode)
+static int etb_enable_hw(struct etb_drvdata *drvdata)
+{
+ __etb_enable_hw(drvdata);
+ return 0;
+}
+
+static int etb_enable_sysfs(struct coresight_device *csdev)
{
- u32 val;
+ int ret = 0;
unsigned long flags;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- val = local_cmpxchg(&drvdata->mode,
- CS_MODE_DISABLED, mode);
- /*
- * When accessing from Perf, a HW buffer can be handled
- * by a single trace entity. In sysFS mode many tracers
- * can be logging to the same HW buffer.
- */
- if (val == CS_MODE_PERF)
- return -EBUSY;
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* Don't mess with perf sessions. */
+ if (drvdata->mode == CS_MODE_PERF) {
+ ret = -EBUSY;
+ goto out;
+ }
/* Nothing to do, the tracer is already enabled. */
- if (val == CS_MODE_SYSFS)
+ if (drvdata->mode == CS_MODE_SYSFS)
goto out;
- spin_lock_irqsave(&drvdata->spinlock, flags);
- etb_enable_hw(drvdata);
+ ret = etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
+
+out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return ret;
+}
+
+static int etb_enable_perf(struct coresight_device *csdev, void *data)
+{
+ int ret = 0;
+ unsigned long flags;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+
+ /* No need to continue if the component is already in use. */
+ if (drvdata->mode != CS_MODE_DISABLED) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * We don't have an internal state to clean up if we fail to set up
+ * the perf buffer. So we can perform the step before we turn the
+ * ETB on and leave without cleaning up.
+ */
+ ret = etb_set_buffer(csdev, (struct perf_output_handle *)data);
+ if (ret)
+ goto out;
+
+ ret = etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_PERF;
out:
- dev_info(drvdata->dev, "ETB enabled\n");
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return ret;
+}
+
+static int etb_enable(struct coresight_device *csdev, u32 mode, void *data)
+{
+ int ret;
+ struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ switch (mode) {
+ case CS_MODE_SYSFS:
+ ret = etb_enable_sysfs(csdev);
+ break;
+ case CS_MODE_PERF:
+ ret = etb_enable_perf(csdev, data);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ dev_dbg(drvdata->dev, "ETB enabled\n");
return 0;
}
@@ -256,13 +319,16 @@ static void etb_disable(struct coresight_device *csdev)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- etb_disable_hw(drvdata);
- etb_dump_hw(drvdata);
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ /* Disable the ETB only if it needs to */
+ if (drvdata->mode != CS_MODE_DISABLED) {
+ etb_disable_hw(drvdata);
+ etb_dump_hw(drvdata);
+ drvdata->mode = CS_MODE_DISABLED;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "ETB disabled\n");
+ dev_dbg(drvdata->dev, "ETB disabled\n");
}
static void *etb_alloc_buffer(struct coresight_device *csdev, int cpu,
@@ -294,12 +360,14 @@ static void etb_free_buffer(void *config)
}
static int etb_set_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
+ struct perf_output_handle *handle)
{
int ret = 0;
unsigned long head;
- struct cs_buffers *buf = sink_config;
+ struct cs_buffers *buf = etm_perf_sink_config(handle);
+
+ if (!buf)
+ return -EINVAL;
/* wrap head around to the amount of space we have */
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -315,37 +383,7 @@ static int etb_set_buffer(struct coresight_device *csdev,
return ret;
}
-static unsigned long etb_reset_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
-{
- unsigned long size = 0;
- struct cs_buffers *buf = sink_config;
-
- if (buf) {
- /*
- * In snapshot mode ->data_size holds the new address of the
- * ring buffer's head. The size itself is the whole address
- * range since we want the latest information.
- */
- if (buf->snapshot)
- handle->head = local_xchg(&buf->data_size,
- buf->nr_pages << PAGE_SHIFT);
-
- /*
- * Tell the tracer PMU how much we got in this run and if
- * something went wrong along the way. Nobody else can use
- * this cs_buffers instance until we are done. As such
- * resetting parameters here and squaring off with the ring
- * buffer API in the tracer PMU is fine.
- */
- size = local_xchg(&buf->data_size, 0);
- }
-
- return size;
-}
-
-static void etb_update_buffer(struct coresight_device *csdev,
+static unsigned long etb_update_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
@@ -354,13 +392,13 @@ static void etb_update_buffer(struct coresight_device *csdev,
u8 *buf_ptr;
const u32 *barrier;
u32 read_ptr, write_ptr, capacity;
- u32 status, read_data, to_read;
- unsigned long offset;
+ u32 status, read_data;
+ unsigned long offset, to_read;
struct cs_buffers *buf = sink_config;
struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
- return;
+ return 0;
capacity = drvdata->buffer_depth * ETB_FRAME_SIZE_WORDS;
@@ -465,18 +503,17 @@ static void etb_update_buffer(struct coresight_device *csdev,
writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
/*
- * In snapshot mode all we have to do is communicate to
- * perf_aux_output_end() the address of the current head. In full
- * trace mode the same function expects a size to move rb->aux_head
- * forward.
+ * In snapshot mode we have to update the handle->head to point
+ * to the new location.
*/
- if (buf->snapshot)
- local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
- else
- local_add(to_read, &buf->data_size);
-
+ if (buf->snapshot) {
+ handle->head = (cur * PAGE_SIZE) + offset;
+ to_read = buf->nr_pages << PAGE_SHIFT;
+ }
etb_enable_hw(drvdata);
CS_LOCK(drvdata->base);
+
+ return to_read;
}
static const struct coresight_ops_sink etb_sink_ops = {
@@ -484,8 +521,6 @@ static const struct coresight_ops_sink etb_sink_ops = {
.disable = etb_disable,
.alloc_buffer = etb_alloc_buffer,
.free_buffer = etb_free_buffer,
- .set_buffer = etb_set_buffer,
- .reset_buffer = etb_reset_buffer,
.update_buffer = etb_update_buffer,
};
@@ -498,14 +533,14 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (drvdata->mode == CS_MODE_SYSFS) {
etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
etb_enable_hw(drvdata);
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "ETB dumped\n");
+ dev_dbg(drvdata->dev, "ETB dumped\n");
}
static int etb_open(struct inode *inode, struct file *file)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 677695635211..abe8249b893b 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
+#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>
@@ -22,20 +23,6 @@
static struct pmu etm_pmu;
static bool etm_perf_up;
-/**
- * struct etm_event_data - Coresight specifics associated to an event
- * @work: Handle to free allocated memory outside IRQ context.
- * @mask: Hold the CPU(s) this event was set for.
- * @snk_config: The sink configuration.
- * @path: An array of path, each slot for one CPU.
- */
-struct etm_event_data {
- struct work_struct work;
- cpumask_t mask;
- void *snk_config;
- struct list_head **path;
-};
-
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
@@ -61,6 +48,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
NULL,
};
+static inline struct list_head **
+etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
+{
+ return per_cpu_ptr(data->path, cpu);
+}
+
+static inline struct list_head *
+etm_event_cpu_path(struct etm_event_data *data, int cpu)
+{
+ return *etm_event_cpu_path_ptr(data, cpu);
+}
+
static void etm_event_read(struct perf_event *event) {}
static int etm_addr_filters_alloc(struct perf_event *event)
@@ -114,29 +113,30 @@ static void free_event_data(struct work_struct *work)
event_data = container_of(work, struct etm_event_data, work);
mask = &event_data->mask;
- /*
- * First deal with the sink configuration. See comment in
- * etm_setup_aux() about why we take the first available path.
- */
- if (event_data->snk_config) {
+
+ /* Free the sink buffers, if there are any */
+ if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
cpu = cpumask_first(mask);
- sink = coresight_get_sink(event_data->path[cpu]);
+ sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
if (sink_ops(sink)->free_buffer)
sink_ops(sink)->free_buffer(event_data->snk_config);
}
for_each_cpu(cpu, mask) {
- if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
- coresight_release_path(event_data->path[cpu]);
+ struct list_head **ppath;
+
+ ppath = etm_event_cpu_path_ptr(event_data, cpu);
+ if (!(IS_ERR_OR_NULL(*ppath)))
+ coresight_release_path(*ppath);
+ *ppath = NULL;
}
- kfree(event_data->path);
+ free_percpu(event_data->path);
kfree(event_data);
}
static void *alloc_event_data(int cpu)
{
- int size;
cpumask_t *mask;
struct etm_event_data *event_data;
@@ -145,16 +145,12 @@ static void *alloc_event_data(int cpu)
if (!event_data)
return NULL;
- /* Make sure nothing disappears under us */
- get_online_cpus();
- size = num_online_cpus();
mask = &event_data->mask;
if (cpu != -1)
cpumask_set_cpu(cpu, mask);
else
- cpumask_copy(mask, cpu_online_mask);
- put_online_cpus();
+ cpumask_copy(mask, cpu_present_mask);
/*
* Each CPU has a single path between source and destination. As such
@@ -164,8 +160,8 @@ static void *alloc_event_data(int cpu)
* unused memory when dealing with single CPU trace scenarios is small
* compared to the cost of searching through an optimized array.
*/
- event_data->path = kcalloc(size,
- sizeof(struct list_head *), GFP_KERNEL);
+ event_data->path = alloc_percpu(struct list_head *);
+
if (!event_data->path) {
kfree(event_data);
return NULL;
@@ -206,34 +202,53 @@ static void *etm_setup_aux(int event_cpu, void **pages,
* on the cmd line. As such the "enable_sink" flag in sysFS is reset.
*/
sink = coresight_get_enabled_sink(true);
- if (!sink)
+ if (!sink || !sink_ops(sink)->alloc_buffer)
goto err;
mask = &event_data->mask;
- /* Setup the path for each CPU in a trace session */
+ /*
+ * Set up the path for each CPU in a trace session. We try to build
+ * a trace path for each CPU in the mask. If we don't find an ETM
+ * for the CPU or fail to build a path, we clear the CPU from the
+ * mask and continue with the rest. If ever we try to trace on those
+ * CPUs, we can handle it and fail the session.
+ */
for_each_cpu(cpu, mask) {
+ struct list_head *path;
struct coresight_device *csdev;
csdev = per_cpu(csdev_src, cpu);
- if (!csdev)
- goto err;
+ /*
+ * If there is no ETM associated with this CPU clear it from
+ * the mask and continue with the rest. If ever we try to trace
+ * on this CPU, we handle it accordingly.
+ */
+ if (!csdev) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
/*
* Building a path doesn't enable it, it simply builds a
* list of devices from source to sink that can be
* referenced later when the path is actually needed.
*/
- event_data->path[cpu] = coresight_build_path(csdev, sink);
- if (IS_ERR(event_data->path[cpu]))
- goto err;
+ path = coresight_build_path(csdev, sink);
+ if (IS_ERR(path)) {
+ cpumask_clear_cpu(cpu, mask);
+ continue;
+ }
+
+ *etm_event_cpu_path_ptr(event_data, cpu) = path;
}
- if (!sink_ops(sink)->alloc_buffer)
+ /* If we don't have any CPUs ready for tracing, abort */
+ cpu = cpumask_first(mask);
+ if (cpu >= nr_cpu_ids)
goto err;
- cpu = cpumask_first(mask);
- /* Get the AUX specific data from the sink buffer */
+ /* Allocate the sink buffer for this session */
event_data->snk_config =
sink_ops(sink)->alloc_buffer(sink, cpu, pages,
nr_pages, overwrite);
@@ -255,6 +270,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct etm_event_data *event_data;
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+ struct list_head *path;
if (!csdev)
goto fail;
@@ -267,18 +283,14 @@ static void etm_event_start(struct perf_event *event, int flags)
if (!event_data)
goto fail;
+ path = etm_event_cpu_path(event_data, cpu);
/* We need a sink, no need to continue without one */
- sink = coresight_get_sink(event_data->path[cpu]);
- if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
- goto fail_end_stop;
-
- /* Configure the sink */
- if (sink_ops(sink)->set_buffer(sink, handle,
- event_data->snk_config))
+ sink = coresight_get_sink(path);
+ if (WARN_ON_ONCE(!sink))
goto fail_end_stop;
/* Nothing will happen without a path */
- if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+ if (coresight_enable_path(path, CS_MODE_PERF, handle))
goto fail_end_stop;
/* Tell the perf core the event is alive */
@@ -286,11 +298,13 @@ static void etm_event_start(struct perf_event *event, int flags)
/* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
- goto fail_end_stop;
+ goto fail_disable_path;
out:
return;
+fail_disable_path:
+ coresight_disable_path(path);
fail_end_stop:
perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
perf_aux_output_end(handle, 0);
@@ -306,6 +320,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
struct etm_event_data *event_data = perf_get_aux(handle);
+ struct list_head *path;
if (event->hw.state == PERF_HES_STOPPED)
return;
@@ -313,7 +328,11 @@ static void etm_event_stop(struct perf_event *event, int mode)
if (!csdev)
return;
- sink = coresight_get_sink(event_data->path[cpu]);
+ path = etm_event_cpu_path(event_data, cpu);
+ if (!path)
+ return;
+
+ sink = coresight_get_sink(path);
if (!sink)
return;
@@ -331,20 +350,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
if (!sink_ops(sink)->update_buffer)
return;
- sink_ops(sink)->update_buffer(sink, handle,
+ size = sink_ops(sink)->update_buffer(sink, handle,
event_data->snk_config);
-
- if (!sink_ops(sink)->reset_buffer)
- return;
-
- size = sink_ops(sink)->reset_buffer(sink, handle,
- event_data->snk_config);
-
perf_aux_output_end(handle, size);
}
/* Disabling the path make its elements available to other sessions */
- coresight_disable_path(event_data->path[cpu]);
+ coresight_disable_path(path);
}
static int etm_event_add(struct perf_event *event, int mode)
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h
index 4197df4faf5e..da7d9336a15c 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.h
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.h
@@ -7,6 +7,7 @@
#ifndef _CORESIGHT_ETM_PERF_H
#define _CORESIGHT_ETM_PERF_H
+#include <linux/percpu-defs.h>
#include "coresight-priv.h"
struct coresight_device;
@@ -42,14 +43,39 @@ struct etm_filters {
bool ssstatus;
};
+/**
+ * struct etm_event_data - Coresight specifics associated to an event
+ * @work: Handle to free allocated memory outside IRQ context.
+ * @mask: Hold the CPU(s) this event was set for.
+ * @snk_config: The sink configuration.
+ * @path: An array of path, each slot for one CPU.
+ */
+struct etm_event_data {
+ struct work_struct work;
+ cpumask_t mask;
+ void *snk_config;
+ struct list_head * __percpu *path;
+};
#ifdef CONFIG_CORESIGHT
int etm_perf_symlink(struct coresight_device *csdev, bool link);
+static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
+{
+ struct etm_event_data *data = perf_get_aux(handle);
+ if (data)
+ return data->snk_config;
+ return NULL;
+}
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
+static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
+{
+ return NULL;
+}
+
#endif /* CONFIG_CORESIGHT */
#endif
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 7c74263c333d..fd5c4cca7db5 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -355,11 +355,10 @@ static int etm_parse_event_config(struct etm_drvdata *drvdata,
return 0;
}
-static void etm_enable_hw(void *info)
+static int etm_enable_hw(struct etm_drvdata *drvdata)
{
- int i;
+ int i, rc;
u32 etmcr;
- struct etm_drvdata *drvdata = info;
struct etm_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
@@ -370,6 +369,9 @@ static void etm_enable_hw(void *info)
etm_set_pwrup(drvdata);
/* Make sure all registers are accessible */
etm_os_unlock(drvdata);
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
etm_set_prog(drvdata);
@@ -418,9 +420,29 @@ static void etm_enable_hw(void *info)
etm_writel(drvdata, 0x0, ETMVMIDCVR);
etm_clr_prog(drvdata);
+
+done:
+ if (rc)
+ etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ drvdata->cpu, rc);
+ return rc;
+}
+
+struct etm_enable_arg {
+ struct etm_drvdata *drvdata;
+ int rc;
+};
+
+static void etm_enable_hw_smp_call(void *info)
+{
+ struct etm_enable_arg *arg = info;
+
+ if (WARN_ON(!arg))
+ return;
+ arg->rc = etm_enable_hw(arg->drvdata);
}
static int etm_cpu_id(struct coresight_device *csdev)
@@ -475,14 +497,13 @@ static int etm_enable_perf(struct coresight_device *csdev,
/* Configure the tracer based on the session's specifics */
etm_parse_event_config(drvdata, event);
/* And enable it */
- etm_enable_hw(drvdata);
-
- return 0;
+ return etm_enable_hw(drvdata);
}
static int etm_enable_sysfs(struct coresight_device *csdev)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etm_enable_arg arg = { 0 };
int ret;
spin_lock(&drvdata->spinlock);
@@ -492,20 +513,21 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
* hw configuration will take place on the local CPU during bring up.
*/
if (cpu_online(drvdata->cpu)) {
+ arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm_enable_hw, drvdata, 1);
- if (ret)
- goto err;
+ etm_enable_hw_smp_call, &arg, 1);
+ if (!ret)
+ ret = arg.rc;
+ if (!ret)
+ drvdata->sticky_enable = true;
+ } else {
+ ret = -ENODEV;
}
- drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "ETM tracing enabled\n");
- return 0;
-
-err:
- spin_unlock(&drvdata->spinlock);
+ if (!ret)
+ dev_dbg(drvdata->dev, "ETM tracing enabled\n");
return ret;
}
@@ -555,6 +577,8 @@ static void etm_disable_hw(void *info)
for (i = 0; i < drvdata->nr_cntr; i++)
config->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+ coresight_disclaim_device_unlocked(drvdata->base);
+
etm_set_pwrdwn(drvdata);
CS_LOCK(drvdata->base);
@@ -604,7 +628,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_info(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(drvdata->dev, "ETM tracing disabled\n");
}
static void etm_disable(struct coresight_device *csdev,
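
smp_call_function_single() only reports whether the cross-call itself ran,
so the etm_enable_arg wrapper introduced above is how the hardware
programming result is carried back from the target CPU. The same pattern,
condensed, with hypothetical names standing in for the driver specifics:

	struct call_arg {
		struct my_drvdata *drvdata;
		int rc;
	};

	static void my_enable_on_cpu(void *info)
	{
		struct call_arg *arg = info;

		/* runs on the target CPU */
		arg->rc = my_enable_hw(arg->drvdata);
	}

	...
	struct call_arg arg = { .drvdata = drvdata };

	/* wait == 1, so arg.rc is stable once this returns */
	ret = smp_call_function_single(cpu, my_enable_on_cpu, &arg, 1);
	if (!ret)
		ret = arg.rc;
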
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1d94ebec027b..53e2fb6e86f6 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <asm/sections.h>
#include <asm/local.h>
+#include <asm/virt.h>
#include "coresight-etm4x.h"
#include "coresight-etm-perf.h"
@@ -77,16 +78,24 @@ static int etm4_trace_id(struct coresight_device *csdev)
return drvdata->trcid;
}
-static void etm4_enable_hw(void *info)
+struct etm4_enable_arg {
+ struct etmv4_drvdata *drvdata;
+ int rc;
+};
+
+static int etm4_enable_hw(struct etmv4_drvdata *drvdata)
{
- int i;
- struct etmv4_drvdata *drvdata = info;
+ int i, rc;
struct etmv4_config *config = &drvdata->config;
CS_UNLOCK(drvdata->base);
etm4_os_unlock(drvdata);
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
+
/* Disable the trace unit before programming trace registers */
writel_relaxed(0, drvdata->base + TRCPRGCTLR);
@@ -174,9 +183,21 @@ static void etm4_enable_hw(void *info)
dev_err(drvdata->dev,
"timeout while waiting for Idle Trace Status\n");
+done:
CS_LOCK(drvdata->base);
- dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+ dev_dbg(drvdata->dev, "cpu: %d enable smp call done: %d\n",
+ drvdata->cpu, rc);
+ return rc;
+}
+
+static void etm4_enable_hw_smp_call(void *info)
+{
+ struct etm4_enable_arg *arg = info;
+
+ if (WARN_ON(!arg))
+ return;
+ arg->rc = etm4_enable_hw(arg->drvdata);
}
static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
@@ -242,7 +263,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
if (ret)
goto out;
/* And enable it */
- etm4_enable_hw(drvdata);
+ ret = etm4_enable_hw(drvdata);
out:
return ret;
@@ -251,6 +272,7 @@ out:
static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etm4_enable_arg arg = { 0 };
int ret;
spin_lock(&drvdata->spinlock);
@@ -259,19 +281,17 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
* Executing etm4_enable_hw on the cpu whose ETM is being enabled
* ensures that register writes occur when cpu is powered.
*/
+ arg.drvdata = drvdata;
ret = smp_call_function_single(drvdata->cpu,
- etm4_enable_hw, drvdata, 1);
- if (ret)
- goto err;
-
- drvdata->sticky_enable = true;
+ etm4_enable_hw_smp_call, &arg, 1);
+ if (!ret)
+ ret = arg.rc;
+ if (!ret)
+ drvdata->sticky_enable = true;
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "ETM tracing enabled\n");
- return 0;
-
-err:
- spin_unlock(&drvdata->spinlock);
+ if (!ret)
+ dev_dbg(drvdata->dev, "ETM tracing enabled\n");
return ret;
}
@@ -328,6 +348,8 @@ static void etm4_disable_hw(void *info)
isb();
writel_relaxed(control, drvdata->base + TRCPRGCTLR);
+ coresight_disclaim_device_unlocked(drvdata->base);
+
CS_LOCK(drvdata->base);
dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
@@ -380,7 +402,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
spin_unlock(&drvdata->spinlock);
cpus_read_unlock();
- dev_info(drvdata->dev, "ETM tracing disabled\n");
+ dev_dbg(drvdata->dev, "ETM tracing disabled\n");
}
static void etm4_disable(struct coresight_device *csdev,
@@ -605,7 +627,7 @@ static void etm4_set_default_config(struct etmv4_config *config)
config->vinst_ctrl |= BIT(0);
}
-static u64 etm4_get_access_type(struct etmv4_config *config)
+static u64 etm4_get_ns_access_type(struct etmv4_config *config)
{
u64 access_type = 0;
@@ -616,17 +638,26 @@ static u64 etm4_get_access_type(struct etmv4_config *config)
* Bit[13] Exception level 1 - OS
* Bit[14] Exception level 2 - Hypervisor
* Bit[15] Never implemented
- *
- * Always stay away from hypervisor mode.
*/
- access_type = ETM_EXLEVEL_NS_HYP;
-
- if (config->mode & ETM_MODE_EXCL_KERN)
- access_type |= ETM_EXLEVEL_NS_OS;
+ if (!is_kernel_in_hyp_mode()) {
+ /* Stay away from hypervisor mode for non-VHE */
+ access_type = ETM_EXLEVEL_NS_HYP;
+ if (config->mode & ETM_MODE_EXCL_KERN)
+ access_type |= ETM_EXLEVEL_NS_OS;
+ } else if (config->mode & ETM_MODE_EXCL_KERN) {
+ access_type = ETM_EXLEVEL_NS_HYP;
+ }
if (config->mode & ETM_MODE_EXCL_USER)
access_type |= ETM_EXLEVEL_NS_APP;
+ return access_type;
+}
+
+static u64 etm4_get_access_type(struct etmv4_config *config)
+{
+ u64 access_type = etm4_get_ns_access_type(config);
+
/*
* EXLEVEL_S, bits[11:8], don't trace anything happening
* in secure state.
@@ -880,20 +911,10 @@ void etm4_config_trace_mode(struct etmv4_config *config)
addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
/* clear default config */
- addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
+ addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS |
+ ETM_EXLEVEL_NS_HYP);
- /*
- * EXLEVEL_NS, bits[15:12]
- * The Exception levels are:
- * Bit[12] Exception level 0 - Application
- * Bit[13] Exception level 1 - OS
- * Bit[14] Exception level 2 - Hypervisor
- * Bit[15] Never implemented
- */
- if (mode & ETM_MODE_EXCL_KERN)
- addr_acc |= ETM_EXLEVEL_NS_OS;
- else
- addr_acc |= ETM_EXLEVEL_NS_APP;
+ addr_acc |= etm4_get_ns_access_type(config);
config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 448145a36675..927925151509 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -25,6 +25,7 @@
#define FUNNEL_HOLDTIME_MASK 0xf00
#define FUNNEL_HOLDTIME_SHFT 0x8
#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
+#define FUNNEL_ENSx_MASK 0xff
/**
* struct funnel_drvdata - specifics associated to a funnel component
@@ -42,31 +43,42 @@ struct funnel_drvdata {
unsigned long priority;
};
-static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+static int funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
{
u32 functl;
+ int rc = 0;
CS_UNLOCK(drvdata->base);
functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+ /* Claim the device only when we enable the first slave */
+ if (!(functl & FUNNEL_ENSx_MASK)) {
+ rc = coresight_claim_device_unlocked(drvdata->base);
+ if (rc)
+ goto done;
+ }
+
functl &= ~FUNNEL_HOLDTIME_MASK;
functl |= FUNNEL_HOLDTIME;
functl |= (1 << port);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
-
+done:
CS_LOCK(drvdata->base);
+ return rc;
}
static int funnel_enable(struct coresight_device *csdev, int inport,
int outport)
{
+ int rc;
struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- funnel_enable_hw(drvdata, inport);
+ rc = funnel_enable_hw(drvdata, inport);
- dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
- return 0;
+ if (!rc)
+ dev_dbg(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
+ return rc;
}
static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
@@ -79,6 +91,10 @@ static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
functl &= ~(1 << inport);
writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+ /* Disclaim the device if none of the slaves are now active */
+ if (!(functl & FUNNEL_ENSx_MASK))
+ coresight_disclaim_device_unlocked(drvdata->base);
+
CS_LOCK(drvdata->base);
}
@@ -89,7 +105,7 @@ static void funnel_disable(struct coresight_device *csdev, int inport,
funnel_disable_hw(drvdata, inport);
- dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
+ dev_dbg(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
}
static const struct coresight_ops_link funnel_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 1a6cf3589866..579f34943bf1 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -25,6 +25,13 @@
#define CORESIGHT_DEVID 0xfc8
#define CORESIGHT_DEVTYPE 0xfcc
+
+/*
+ * Coresight device CLAIM protocol.
+ * See PSCI - ARM DEN 0022D, Section: 6.8.1 Debug and Trace save and restore.
+ */
+#define CORESIGHT_CLAIM_SELF_HOSTED BIT(1)
+
#define TIMEOUT_US 100
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
@@ -137,7 +144,7 @@ static inline void coresight_write_reg_pair(void __iomem *addr, u64 val,
}
void coresight_disable_path(struct list_head *path);
-int coresight_enable_path(struct list_head *path, u32 mode);
+int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
struct coresight_device *coresight_get_enabled_sink(bool reset);
struct list_head *coresight_build_path(struct coresight_device *csdev,
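
Taken together with the helpers added to coresight.c further below, the
intended calling convention for the CLAIM tags is: claim a component before
programming it, back off if an external debugger already holds it, and
disclaim on the teardown path. A minimal sketch under those assumptions
(names are illustrative, not from this patch):

	static int my_component_enable_hw(struct my_drvdata *drvdata)
	{
		int rc;

		CS_UNLOCK(drvdata->base);
		rc = coresight_claim_device_unlocked(drvdata->base);
		if (rc)		/* -EBUSY: an external agent owns the device */
			goto out;

		/* ... program the component's registers here ... */
	out:
		CS_LOCK(drvdata->base);
		return rc;
	}
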
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 8d2eaaab6c2f..feac98315471 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -35,7 +35,7 @@ static int replicator_enable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_info(drvdata->dev, "REPLICATOR enabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR enabled\n");
return 0;
}
@@ -44,7 +44,7 @@ static void replicator_disable(struct coresight_device *csdev, int inport,
{
struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- dev_info(drvdata->dev, "REPLICATOR disabled\n");
+ dev_dbg(drvdata->dev, "REPLICATOR disabled\n");
}
static const struct coresight_ops_link replicator_link_ops = {
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index c46c70aec1d5..35d6f9709274 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -211,7 +211,7 @@ static int stm_enable(struct coresight_device *csdev,
stm_enable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
- dev_info(drvdata->dev, "STM tracing enabled\n");
+ dev_dbg(drvdata->dev, "STM tracing enabled\n");
return 0;
}
@@ -274,7 +274,7 @@ static void stm_disable(struct coresight_device *csdev,
pm_runtime_put(drvdata->dev);
local_set(&drvdata->mode, CS_MODE_DISABLED);
- dev_info(drvdata->dev, "STM tracing disabled\n");
+ dev_dbg(drvdata->dev, "STM tracing disabled\n");
}
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 0549249f4b39..53fc83b72a49 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -10,8 +10,12 @@
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
+#include "coresight-etm-perf.h"
-static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+static int tmc_set_etf_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle);
+
+static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -30,33 +34,41 @@ static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+ int rc = coresight_claim_device(drvdata->base);
+
+ if (rc)
+ return rc;
+
+ __tmc_etb_enable_hw(drvdata);
+ return 0;
+}
+
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
char *bufp;
u32 read_data, lost;
- int i;
/* Check if the buffer wrapped around. */
lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
bufp = drvdata->buf;
drvdata->len = 0;
while (1) {
- for (i = 0; i < drvdata->memwidth; i++) {
- read_data = readl_relaxed(drvdata->base + TMC_RRD);
- if (read_data == 0xFFFFFFFF)
- goto done;
- memcpy(bufp, &read_data, 4);
- bufp += 4;
- drvdata->len += 4;
- }
+ read_data = readl_relaxed(drvdata->base + TMC_RRD);
+ if (read_data == 0xFFFFFFFF)
+ break;
+ memcpy(bufp, &read_data, 4);
+ bufp += 4;
+ drvdata->len += 4;
}
-done:
+
if (lost)
coresight_insert_barrier_packet(drvdata->buf);
return;
}
-static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -72,7 +84,13 @@ static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+ coresight_disclaim_device(drvdata);
+ __tmc_etb_disable_hw(drvdata);
+}
+
+static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -88,13 +106,24 @@ static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+ int rc = coresight_claim_device(drvdata->base);
+
+ if (rc)
+ return rc;
+
+ __tmc_etf_enable_hw(drvdata);
+ return 0;
+}
+
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
tmc_flush_and_stop(drvdata);
tmc_disable_hw(drvdata);
-
+ coresight_disclaim_device_unlocked(drvdata->base);
CS_LOCK(drvdata->base);
}
@@ -170,8 +199,12 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
drvdata->buf = buf;
}
- drvdata->mode = CS_MODE_SYSFS;
- tmc_etb_enable_hw(drvdata);
+ ret = tmc_etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
+ else
+ /* Free up the buffer if we failed to enable */
+ used = false;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -182,37 +215,40 @@ out:
return ret;
}
-static int tmc_enable_etf_sink_perf(struct coresight_device *csdev)
+static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
+ do {
ret = -EINVAL;
- goto out;
- }
-
- /*
- * In Perf mode there can be only one writer per sink. There
- * is also no need to continue if the ETB/ETR is already operated
- * from sysFS.
- */
- if (drvdata->mode != CS_MODE_DISABLED) {
- ret = -EINVAL;
- goto out;
- }
+ if (drvdata->reading)
+ break;
+ /*
+ * In Perf mode there can be only one writer per sink. There
+ * is also no need to continue if the ETB/ETF is already
+ * operated from sysFS.
+ */
+ if (drvdata->mode != CS_MODE_DISABLED)
+ break;
- drvdata->mode = CS_MODE_PERF;
- tmc_etb_enable_hw(drvdata);
-out:
+ ret = tmc_set_etf_buffer(csdev, handle);
+ if (ret)
+ break;
+ ret = tmc_etb_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_PERF;
+ } while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
}
-static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
+static int tmc_enable_etf_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
int ret;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -222,7 +258,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
ret = tmc_enable_etf_sink_sysfs(csdev);
break;
case CS_MODE_PERF:
- ret = tmc_enable_etf_sink_perf(csdev);
+ ret = tmc_enable_etf_sink_perf(csdev, data);
break;
/* We shouldn't be here */
default:
@@ -233,7 +269,7 @@ static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
if (ret)
return ret;
- dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETB/ETF enabled\n");
return 0;
}
@@ -256,12 +292,13 @@ static void tmc_disable_etf_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
static int tmc_enable_etf_link(struct coresight_device *csdev,
int inport, int outport)
{
+ int ret;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -271,12 +308,14 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- tmc_etf_enable_hw(drvdata);
- drvdata->mode = CS_MODE_SYSFS;
+ ret = tmc_etf_enable_hw(drvdata);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETF enabled\n");
- return 0;
+ if (!ret)
+ dev_dbg(drvdata->dev, "TMC-ETF enabled\n");
+ return ret;
}
static void tmc_disable_etf_link(struct coresight_device *csdev,
@@ -295,7 +334,7 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
drvdata->mode = CS_MODE_DISABLED;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETF disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETF disabled\n");
}
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
@@ -328,12 +367,14 @@ static void tmc_free_etf_buffer(void *config)
}
static int tmc_set_etf_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
+ struct perf_output_handle *handle)
{
int ret = 0;
unsigned long head;
- struct cs_buffers *buf = sink_config;
+ struct cs_buffers *buf = etm_perf_sink_config(handle);
+
+ if (!buf)
+ return -EINVAL;
/* wrap head around to the amount of space we have */
head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
@@ -349,36 +390,7 @@ static int tmc_set_etf_buffer(struct coresight_device *csdev,
return ret;
}
-static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config)
-{
- long size = 0;
- struct cs_buffers *buf = sink_config;
-
- if (buf) {
- /*
- * In snapshot mode ->data_size holds the new address of the
- * ring buffer's head. The size itself is the whole address
- * range since we want the latest information.
- */
- if (buf->snapshot)
- handle->head = local_xchg(&buf->data_size,
- buf->nr_pages << PAGE_SHIFT);
- /*
- * Tell the tracer PMU how much we got in this run and if
- * something went wrong along the way. Nobody else can use
- * this cs_buffers instance until we are done. As such
- * resetting parameters here and squaring off with the ring
- * buffer API in the tracer PMU is fine.
- */
- size = local_xchg(&buf->data_size, 0);
- }
-
- return size;
-}
-
-static void tmc_update_etf_buffer(struct coresight_device *csdev,
+static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config)
{
@@ -387,17 +399,17 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
const u32 *barrier;
u32 *buf_ptr;
u64 read_ptr, write_ptr;
- u32 status, to_read;
- unsigned long offset;
+ u32 status;
+ unsigned long offset, to_read;
struct cs_buffers *buf = sink_config;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (!buf)
- return;
+ return 0;
/* This shouldn't happen */
if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
- return;
+ return 0;
CS_UNLOCK(drvdata->base);
@@ -438,10 +450,10 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
case TMC_MEM_INTF_WIDTH_32BITS:
case TMC_MEM_INTF_WIDTH_64BITS:
case TMC_MEM_INTF_WIDTH_128BITS:
- mask = GENMASK(31, 5);
+ mask = GENMASK(31, 4);
break;
case TMC_MEM_INTF_WIDTH_256BITS:
- mask = GENMASK(31, 6);
+ mask = GENMASK(31, 5);
break;
}
@@ -486,18 +498,14 @@ static void tmc_update_etf_buffer(struct coresight_device *csdev,
}
}
- /*
- * In snapshot mode all we have to do is communicate to
- * perf_aux_output_end() the address of the current head. In full
- * trace mode the same function expects a size to move rb->aux_head
- * forward.
- */
- if (buf->snapshot)
- local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
- else
- local_add(to_read, &buf->data_size);
-
+ /* In snapshot mode we have to update the head */
+ if (buf->snapshot) {
+ handle->head = (cur * PAGE_SIZE) + offset;
+ to_read = buf->nr_pages << PAGE_SHIFT;
+ }
CS_LOCK(drvdata->base);
+
+ return to_read;
}
static const struct coresight_ops_sink tmc_etf_sink_ops = {
@@ -505,8 +513,6 @@ static const struct coresight_ops_sink tmc_etf_sink_ops = {
.disable = tmc_disable_etf_sink,
.alloc_buffer = tmc_alloc_etf_buffer,
.free_buffer = tmc_free_etf_buffer,
- .set_buffer = tmc_set_etf_buffer,
- .reset_buffer = tmc_reset_etf_buffer,
.update_buffer = tmc_update_etf_buffer,
};
@@ -563,7 +569,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etb_disable_hw(drvdata);
+ __tmc_etb_disable_hw(drvdata);
drvdata->reading = true;
out:
@@ -603,7 +609,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
* can't be NULL.
*/
memset(drvdata->buf, 0, drvdata->size);
- tmc_etb_enable_hw(drvdata);
+ __tmc_etb_enable_hw(drvdata);
} else {
/*
* The ETB/ETF is not tracing and the buffer was just read.
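
With .set_buffer and .reset_buffer removed from the sink ops, .update_buffer
now returns the number of bytes collected, and the perf glue on the ETM side
(outside this hunk) is expected to pass that straight to
perf_aux_output_end(). A sketch of the consuming side, with assumed
variable names:

	unsigned long size;

	size = sink_ops(sink)->update_buffer(sink, handle, sink_config);
	/* in snapshot mode update_buffer has already moved handle->head */
	perf_aux_output_end(handle, size);
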
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 2eda5de304c2..f684283890d3 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "coresight-catu.h"
+#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-tmc.h"
@@ -21,6 +22,28 @@ struct etr_flat_buf {
};
/*
+ * etr_perf_buffer - Perf buffer used for ETR
+ * @etr_buf - Actual buffer used by the ETR
+ * @snapshot	- Perf session mode
+ * @head	- handle->head at the beginning of the session.
+ * @nr_pages	- Number of pages in the ring buffer.
+ * @pages	- Array of pages in the ring buffer.
+ */
+struct etr_perf_buffer {
+ struct etr_buf *etr_buf;
+ bool snapshot;
+ unsigned long head;
+ int nr_pages;
+ void **pages;
+};
+
+/* Convert the perf index to an offset within the ETR buffer */
+#define PERF_IDX2OFF(idx, buf) ((idx) % ((buf)->nr_pages << PAGE_SHIFT))
+
+/* Lower limit for ETR hardware buffer */
+#define TMC_ETR_PERF_MIN_BUF_SIZE SZ_1M
+
+/*
* The TMC ETR SG has a page size of 4K. The SG table contains pointers
* to 4KB buffers. However, the OS may use a PAGE_SIZE different from
* 4K (i.e, 16KB or 64KB). This implies that a single OS page could
@@ -536,7 +559,7 @@ tmc_init_etr_sg_table(struct device *dev, int node,
sg_table = tmc_alloc_sg_table(dev, node, nr_tpages, nr_dpages, pages);
if (IS_ERR(sg_table)) {
kfree(etr_table);
- return ERR_PTR(PTR_ERR(sg_table));
+ return ERR_CAST(sg_table);
}
etr_table->sg_table = sg_table;
@@ -728,12 +751,14 @@ tmc_etr_get_catu_device(struct tmc_drvdata *drvdata)
return NULL;
}
-static inline void tmc_etr_enable_catu(struct tmc_drvdata *drvdata)
+static inline int tmc_etr_enable_catu(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf)
{
struct coresight_device *catu = tmc_etr_get_catu_device(drvdata);
if (catu && helper_ops(catu)->enable)
- helper_ops(catu)->enable(catu, drvdata->etr_buf);
+ return helper_ops(catu)->enable(catu, etr_buf);
+ return 0;
}
static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
@@ -895,17 +920,11 @@ static void tmc_sync_etr_buf(struct tmc_drvdata *drvdata)
tmc_etr_buf_insert_barrier_packet(etr_buf, etr_buf->offset);
}
-static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
{
u32 axictl, sts;
struct etr_buf *etr_buf = drvdata->etr_buf;
- /*
- * If this ETR is connected to a CATU, enable it before we turn
- * this on
- */
- tmc_etr_enable_catu(drvdata);
-
CS_UNLOCK(drvdata->base);
/* Wait for TMCSReady bit to be set */
@@ -924,11 +943,8 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
axictl |= TMC_AXICTL_ARCACHE_OS;
}
- if (etr_buf->mode == ETR_MODE_ETR_SG) {
- if (WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
- return;
+ if (etr_buf->mode == ETR_MODE_ETR_SG)
axictl |= TMC_AXICTL_SCT_GAT_MODE;
- }
writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
tmc_write_dba(drvdata, etr_buf->hwaddr);
@@ -954,19 +970,54 @@ static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
+static int tmc_etr_enable_hw(struct tmc_drvdata *drvdata,
+ struct etr_buf *etr_buf)
+{
+ int rc;
+
+ /* Callers should provide an appropriate buffer for use */
+ if (WARN_ON(!etr_buf))
+ return -EINVAL;
+
+ if ((etr_buf->mode == ETR_MODE_ETR_SG) &&
+ WARN_ON(!tmc_etr_has_cap(drvdata, TMC_ETR_SG)))
+ return -EINVAL;
+
+ if (WARN_ON(drvdata->etr_buf))
+ return -EBUSY;
+
+ /*
+ * If this ETR is connected to a CATU, enable it before we turn
+ * this on.
+ */
+ rc = tmc_etr_enable_catu(drvdata, etr_buf);
+ if (rc)
+ return rc;
+ rc = coresight_claim_device(drvdata->base);
+ if (!rc) {
+ drvdata->etr_buf = etr_buf;
+ __tmc_etr_enable_hw(drvdata);
+ }
+
+ return rc;
+}
+
/*
* Return the available trace data in the buffer (starts at etr_buf->offset,
* limited by etr_buf->len) from @pos, with a maximum limit of @len,
* also updating the @bufpp on where to find it. Since the trace data
* starts at anywhere in the buffer, depending on the RRP, we adjust the
* @len returned to handle buffer wrapping around.
+ *
+ * We are protected here by drvdata->reading != 0, which ensures the
+ * sysfs_buf stays alive.
*/
ssize_t tmc_etr_get_sysfs_trace(struct tmc_drvdata *drvdata,
loff_t pos, size_t len, char **bufpp)
{
s64 offset;
ssize_t actual = len;
- struct etr_buf *etr_buf = drvdata->etr_buf;
+ struct etr_buf *etr_buf = drvdata->sysfs_buf;
if (pos + actual > etr_buf->len)
actual = etr_buf->len - pos;
@@ -996,10 +1047,17 @@ tmc_etr_free_sysfs_buf(struct etr_buf *buf)
static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
{
- tmc_sync_etr_buf(drvdata);
+ struct etr_buf *etr_buf = drvdata->etr_buf;
+
+ if (WARN_ON(drvdata->sysfs_buf != etr_buf)) {
+ tmc_etr_free_sysfs_buf(drvdata->sysfs_buf);
+ drvdata->sysfs_buf = NULL;
+ } else {
+ tmc_sync_etr_buf(drvdata);
+ }
}
-static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
@@ -1015,8 +1073,16 @@ static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
+}
+
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+ __tmc_etr_disable_hw(drvdata);
/* Disable CATU device if this ETR is connected to one */
tmc_etr_disable_catu(drvdata);
+ coresight_disclaim_device(drvdata->base);
+ /* Reset the ETR buf used by hardware */
+ drvdata->etr_buf = NULL;
}
static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
@@ -1024,7 +1090,7 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
int ret = 0;
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- struct etr_buf *new_buf = NULL, *free_buf = NULL;
+ struct etr_buf *sysfs_buf = NULL, *new_buf = NULL, *free_buf = NULL;
/*
* If we are enabling the ETR from disabled state, we need to make
@@ -1035,7 +1101,8 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* with the lock released.
*/
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (!drvdata->etr_buf || (drvdata->etr_buf->size != drvdata->size)) {
+ sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
+ if (!sysfs_buf || (sysfs_buf->size != drvdata->size)) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Allocate memory with the locks released */
@@ -1064,14 +1131,15 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* If we don't have a buffer or it doesn't match the requested size,
* use the buffer allocated above. Otherwise reuse the existing buffer.
*/
- if (!drvdata->etr_buf ||
- (new_buf && drvdata->etr_buf->size != new_buf->size)) {
- free_buf = drvdata->etr_buf;
- drvdata->etr_buf = new_buf;
+ sysfs_buf = READ_ONCE(drvdata->sysfs_buf);
+ if (!sysfs_buf || (new_buf && sysfs_buf->size != new_buf->size)) {
+ free_buf = sysfs_buf;
+ drvdata->sysfs_buf = new_buf;
}
- drvdata->mode = CS_MODE_SYSFS;
- tmc_etr_enable_hw(drvdata);
+ ret = tmc_etr_enable_hw(drvdata, drvdata->sysfs_buf);
+ if (!ret)
+ drvdata->mode = CS_MODE_SYSFS;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -1080,24 +1148,244 @@ out:
tmc_etr_free_sysfs_buf(free_buf);
if (!ret)
- dev_info(drvdata->dev, "TMC-ETR enabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETR enabled\n");
return ret;
}
-static int tmc_enable_etr_sink_perf(struct coresight_device *csdev)
+/*
+ * tmc_etr_setup_perf_buf: Allocate ETR buffer for use by perf.
+ * The size of the hardware buffer is dependent on the size configured
+ * via sysfs and the perf ring buffer size. We prefer to allocate the
+ * largest possible size, scaling down the size by half until it
+ * reaches a minimum limit (1M), beyond which we give up.
+ */
+static struct etr_perf_buffer *
+tmc_etr_setup_perf_buf(struct tmc_drvdata *drvdata, int node, int nr_pages,
+ void **pages, bool snapshot)
{
- /* We don't support perf mode yet ! */
- return -EINVAL;
+ struct etr_buf *etr_buf;
+ struct etr_perf_buffer *etr_perf;
+ unsigned long size;
+
+ etr_perf = kzalloc_node(sizeof(*etr_perf), GFP_KERNEL, node);
+ if (!etr_perf)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * Try to match the perf ring buffer size if it is larger
+ * than the size requested via sysfs.
+ */
+ if ((nr_pages << PAGE_SHIFT) > drvdata->size) {
+ etr_buf = tmc_alloc_etr_buf(drvdata, (nr_pages << PAGE_SHIFT),
+ 0, node, NULL);
+ if (!IS_ERR(etr_buf))
+ goto done;
+ }
+
+ /*
+ * Else switch to configured size for this ETR
+ * and scale down until we hit the minimum limit.
+ */
+ size = drvdata->size;
+ do {
+ etr_buf = tmc_alloc_etr_buf(drvdata, size, 0, node, NULL);
+ if (!IS_ERR(etr_buf))
+ goto done;
+ size /= 2;
+ } while (size >= TMC_ETR_PERF_MIN_BUF_SIZE);
+
+ kfree(etr_perf);
+ return ERR_PTR(-ENOMEM);
+
+done:
+ etr_perf->etr_buf = etr_buf;
+ return etr_perf;
}
-static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
+
+static void *tmc_alloc_etr_buffer(struct coresight_device *csdev,
+ int cpu, void **pages, int nr_pages,
+ bool snapshot)
+{
+ struct etr_perf_buffer *etr_perf;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ if (cpu == -1)
+ cpu = smp_processor_id();
+
+ etr_perf = tmc_etr_setup_perf_buf(drvdata, cpu_to_node(cpu),
+ nr_pages, pages, snapshot);
+ if (IS_ERR(etr_perf)) {
+ dev_dbg(drvdata->dev, "Unable to allocate ETR buffer\n");
+ return NULL;
+ }
+
+ etr_perf->snapshot = snapshot;
+ etr_perf->nr_pages = nr_pages;
+ etr_perf->pages = pages;
+
+ return etr_perf;
+}
+
+static void tmc_free_etr_buffer(void *config)
+{
+ struct etr_perf_buffer *etr_perf = config;
+
+ if (etr_perf->etr_buf)
+ tmc_free_etr_buf(etr_perf->etr_buf);
+ kfree(etr_perf);
+}
+
+/*
+ * tmc_etr_sync_perf_buffer: Copy the actual trace data from the hardware
+ * buffer to the perf ring buffer.
+ */
+static void tmc_etr_sync_perf_buffer(struct etr_perf_buffer *etr_perf)
+{
+ long bytes, to_copy;
+ long pg_idx, pg_offset, src_offset;
+ unsigned long head = etr_perf->head;
+ char **dst_pages, *src_buf;
+ struct etr_buf *etr_buf = etr_perf->etr_buf;
+
+ head = etr_perf->head;
+ pg_idx = head >> PAGE_SHIFT;
+ pg_offset = head & (PAGE_SIZE - 1);
+ dst_pages = (char **)etr_perf->pages;
+ src_offset = etr_buf->offset;
+ to_copy = etr_buf->len;
+
+ while (to_copy > 0) {
+ /*
+		 * In one iteration, we can copy the minimum of:
+		 * 1) what is available in the source buffer,
+		 * 2) what is available in the source buffer before it
+		 *    wraps around,
+		 * 3) what is available in the destination page.
+ */
+ bytes = tmc_etr_buf_get_data(etr_buf, src_offset, to_copy,
+ &src_buf);
+ if (WARN_ON_ONCE(bytes <= 0))
+ break;
+ bytes = min(bytes, (long)(PAGE_SIZE - pg_offset));
+
+ memcpy(dst_pages[pg_idx] + pg_offset, src_buf, bytes);
+
+ to_copy -= bytes;
+
+ /* Move destination pointers */
+ pg_offset += bytes;
+ if (pg_offset == PAGE_SIZE) {
+ pg_offset = 0;
+ if (++pg_idx == etr_perf->nr_pages)
+ pg_idx = 0;
+ }
+
+ /* Move source pointers */
+ src_offset += bytes;
+ if (src_offset >= etr_buf->size)
+ src_offset -= etr_buf->size;
+ }
+}
+
+/*
+ * tmc_update_etr_buffer : Update the perf ring buffer with the
+ * available trace data. We use software double buffering at the moment.
+ *
+ * TODO: Add support for reusing the perf ring buffer.
+ */
+static unsigned long
+tmc_update_etr_buffer(struct coresight_device *csdev,
+ struct perf_output_handle *handle,
+ void *config)
+{
+ bool lost = false;
+ unsigned long flags, size = 0;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct etr_perf_buffer *etr_perf = config;
+ struct etr_buf *etr_buf = etr_perf->etr_buf;
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (WARN_ON(drvdata->perf_data != etr_perf)) {
+ lost = true;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ goto out;
+ }
+
+ CS_UNLOCK(drvdata->base);
+
+ tmc_flush_and_stop(drvdata);
+ tmc_sync_etr_buf(drvdata);
+
+ CS_LOCK(drvdata->base);
+ /* Reset perf specific data */
+ drvdata->perf_data = NULL;
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+ size = etr_buf->len;
+ tmc_etr_sync_perf_buffer(etr_perf);
+
+ /*
+ * Update handle->head in snapshot mode. Also update the size to the
+ * hardware buffer size if there was an overflow.
+ */
+ if (etr_perf->snapshot) {
+ handle->head += size;
+ if (etr_buf->full)
+ size = etr_buf->size;
+ }
+
+ lost |= etr_buf->full;
+out:
+ if (lost)
+ perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
+ return size;
+}
+
+static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
+{
+ int rc = 0;
+ unsigned long flags;
+ struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+ struct perf_output_handle *handle = data;
+ struct etr_perf_buffer *etr_perf = etm_perf_sink_config(handle);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ /*
+ * There can be only one writer per sink in perf mode. If the sink
+ * is already open in SYSFS mode, we can't use it.
+ */
+ if (drvdata->mode != CS_MODE_DISABLED || WARN_ON(drvdata->perf_data)) {
+ rc = -EBUSY;
+ goto unlock_out;
+ }
+
+ if (WARN_ON(!etr_perf || !etr_perf->etr_buf)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+
+ etr_perf->head = PERF_IDX2OFF(handle->head, etr_perf);
+ drvdata->perf_data = etr_perf;
+ rc = tmc_etr_enable_hw(drvdata, etr_perf->etr_buf);
+ if (!rc)
+ drvdata->mode = CS_MODE_PERF;
+
+unlock_out:
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ return rc;
+}
+
+static int tmc_enable_etr_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
switch (mode) {
case CS_MODE_SYSFS:
return tmc_enable_etr_sink_sysfs(csdev);
case CS_MODE_PERF:
- return tmc_enable_etr_sink_perf(csdev);
+ return tmc_enable_etr_sink_perf(csdev, data);
}
/* We shouldn't be here */
@@ -1123,12 +1411,15 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
spin_unlock_irqrestore(&drvdata->spinlock, flags);
- dev_info(drvdata->dev, "TMC-ETR disabled\n");
+ dev_dbg(drvdata->dev, "TMC-ETR disabled\n");
}
static const struct coresight_ops_sink tmc_etr_sink_ops = {
.enable = tmc_enable_etr_sink,
.disable = tmc_disable_etr_sink,
+ .alloc_buffer = tmc_alloc_etr_buffer,
+ .update_buffer = tmc_update_etr_buffer,
+ .free_buffer = tmc_free_etr_buffer,
};
const struct coresight_ops tmc_etr_cs_ops = {
@@ -1150,21 +1441,19 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
goto out;
}
- /* Don't interfere if operated from Perf */
- if (drvdata->mode == CS_MODE_PERF) {
- ret = -EINVAL;
- goto out;
- }
-
- /* If drvdata::etr_buf is NULL the trace data has been read already */
- if (drvdata->etr_buf == NULL) {
+ /*
+ * We can safely allow reads even if the ETR is operating in PERF mode,
+ * since the sysfs session is captured in mode specific data.
+	 * If drvdata::sysfs_buf is NULL the trace data has been read already.
+ */
+ if (!drvdata->sysfs_buf) {
ret = -EINVAL;
goto out;
}
- /* Disable the TMC if need be */
+ /* Disable the TMC if we are trying to read from a running session. */
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_disable_hw(drvdata);
+ __tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
out:
@@ -1176,7 +1465,7 @@ out:
int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
{
unsigned long flags;
- struct etr_buf *etr_buf = NULL;
+ struct etr_buf *sysfs_buf = NULL;
/* config types are set a boot time and never change */
if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
@@ -1191,22 +1480,22 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
* buffer. Since the tracer is still enabled drvdata::buf can't
* be NULL.
*/
- tmc_etr_enable_hw(drvdata);
+ __tmc_etr_enable_hw(drvdata);
} else {
/*
* The ETR is not tracing and the buffer was just read.
* As such prepare to free the trace buffer.
*/
- etr_buf = drvdata->etr_buf;
- drvdata->etr_buf = NULL;
+ sysfs_buf = drvdata->sysfs_buf;
+ drvdata->sysfs_buf = NULL;
}
drvdata->reading = false;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
/* Free allocated memory out side of the spinlock */
- if (etr_buf)
- tmc_free_etr_buf(etr_buf);
+ if (sysfs_buf)
+ tmc_etr_free_sysfs_buf(sysfs_buf);
return 0;
}
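
PERF_IDX2OFF() reduces perf's free-running handle->head to an offset inside
the ring buffer, which is what lets tmc_etr_sync_perf_buffer() treat both
source and destination as circular. A worked example with assumed sizes:

	/* hypothetical ring: 16 pages x 4K = 64K (0x10000 bytes) */
	etr_perf->nr_pages = 16;

	/* head 0x12345 wraps to 0x12345 % 0x10000 = 0x2345 */
	etr_perf->head = PERF_IDX2OFF(0x12345, etr_perf);

	/* destination cursor, as computed in tmc_etr_sync_perf_buffer() */
	pg_idx = etr_perf->head >> PAGE_SHIFT;		/* page 2 */
	pg_offset = etr_perf->head & (PAGE_SIZE - 1);	/* offset 0x345 */
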
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1b817ec1192c..ea249f0bcd73 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -81,7 +81,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_info(drvdata->dev, "TMC read start\n");
+ dev_dbg(drvdata->dev, "TMC read start\n");
return ret;
}
@@ -103,7 +103,7 @@ static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
}
if (!ret)
- dev_info(drvdata->dev, "TMC read end\n");
+ dev_dbg(drvdata->dev, "TMC read end\n");
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 7027bd60c4cc..487c53701e9c 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -170,6 +170,8 @@ struct etr_buf {
* @trigger_cntr: amount of words to store after a trigger.
* @etr_caps: Bitmask of capabilities of the TMC ETR, inferred from the
* device configuration register (DEVID)
+ * @perf_data: PERF buffer for ETR.
+ * @sysfs_buf:	SYSFS buffer for ETR.
*/
struct tmc_drvdata {
void __iomem *base;
@@ -189,6 +191,8 @@ struct tmc_drvdata {
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
u32 etr_caps;
+ struct etr_buf *sysfs_buf;
+ void *perf_data;
};
struct etr_buf_operations {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 459ef930d98c..b2f72a1fa402 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -68,13 +68,13 @@ static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
CS_LOCK(drvdata->base);
}
-static int tpiu_enable(struct coresight_device *csdev, u32 mode)
+static int tpiu_enable(struct coresight_device *csdev, u32 mode, void *__unused)
{
struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
tpiu_enable_hw(drvdata);
- dev_info(drvdata->dev, "TPIU enabled\n");
+ dev_dbg(drvdata->dev, "TPIU enabled\n");
return 0;
}
@@ -100,7 +100,7 @@ static void tpiu_disable(struct coresight_device *csdev)
tpiu_disable_hw(drvdata);
- dev_info(drvdata->dev, "TPIU disabled\n");
+ dev_dbg(drvdata->dev, "TPIU disabled\n");
}
static const struct coresight_ops_sink tpiu_sink_ops = {
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3e07fd335f8c..2b0df1a0a8df 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -128,16 +128,105 @@ static int coresight_find_link_outport(struct coresight_device *csdev,
return -ENODEV;
}
-static int coresight_enable_sink(struct coresight_device *csdev, u32 mode)
+static inline u32 coresight_read_claim_tags(void __iomem *base)
+{
+ return readl_relaxed(base + CORESIGHT_CLAIMCLR);
+}
+
+static inline bool coresight_is_claimed_self_hosted(void __iomem *base)
+{
+ return coresight_read_claim_tags(base) == CORESIGHT_CLAIM_SELF_HOSTED;
+}
+
+static inline bool coresight_is_claimed_any(void __iomem *base)
+{
+ return coresight_read_claim_tags(base) != 0;
+}
+
+static inline void coresight_set_claim_tags(void __iomem *base)
+{
+ writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMSET);
+ isb();
+}
+
+static inline void coresight_clear_claim_tags(void __iomem *base)
+{
+ writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, base + CORESIGHT_CLAIMCLR);
+ isb();
+}
+
+/*
+ * coresight_claim_device_unlocked : Claim the device for self-hosted usage
+ * to prevent an external tool from touching this device. As per PSCI
+ * standards, section "Preserving the execution context" => "Debug and Trace
+ * save and Restore", DBGCLAIM[1] is reserved for Self-hosted debug/trace and
+ * DBGCLAIM[0] is reserved for external tools.
+ *
+ * Called with CS_UNLOCKed for the component.
+ * Returns : 0 on success
+ */
+int coresight_claim_device_unlocked(void __iomem *base)
+{
+ if (coresight_is_claimed_any(base))
+ return -EBUSY;
+
+ coresight_set_claim_tags(base);
+ if (coresight_is_claimed_self_hosted(base))
+ return 0;
+ /* There was a race setting the tags, clean up and fail */
+ coresight_clear_claim_tags(base);
+ return -EBUSY;
+}
+
+int coresight_claim_device(void __iomem *base)
+{
+ int rc;
+
+ CS_UNLOCK(base);
+ rc = coresight_claim_device_unlocked(base);
+ CS_LOCK(base);
+
+ return rc;
+}
+
+/*
+ * coresight_disclaim_device_unlocked : Clear the claim tags for the device.
+ * Called with CS_UNLOCKed for the component.
+ */
+void coresight_disclaim_device_unlocked(void __iomem *base)
+{
+
+ if (coresight_is_claimed_self_hosted(base))
+ coresight_clear_claim_tags(base);
+ else
+ /*
+		 * The external agent may not have honoured our claim
+ * and has manipulated it. Or something else has seriously
+ * gone wrong in our driver.
+ */
+ WARN_ON_ONCE(1);
+}
+
+void coresight_disclaim_device(void __iomem *base)
+{
+ CS_UNLOCK(base);
+ coresight_disclaim_device_unlocked(base);
+ CS_LOCK(base);
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev,
+ u32 mode, void *data)
{
int ret;
- if (!csdev->enable) {
- if (sink_ops(csdev)->enable) {
- ret = sink_ops(csdev)->enable(csdev, mode);
- if (ret)
- return ret;
- }
+ /*
+ * We need to make sure the "new" session is compatible with the
+ * existing "mode" of operation.
+ */
+ if (sink_ops(csdev)->enable) {
+ ret = sink_ops(csdev)->enable(csdev, mode, data);
+ if (ret)
+ return ret;
csdev->enable = true;
}
@@ -184,8 +273,10 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
if (link_ops(csdev)->enable) {
ret = link_ops(csdev)->enable(csdev, inport, outport);
- if (ret)
+ if (ret) {
+ atomic_dec(&csdev->refcnt[refport]);
return ret;
+ }
}
}
@@ -274,13 +365,21 @@ static bool coresight_disable_source(struct coresight_device *csdev)
return !csdev->enable;
}
-void coresight_disable_path(struct list_head *path)
+/*
+ * coresight_disable_path_from : Disable components in the given path beyond
+ * @nd in the list. If @nd is NULL, all the components, except the SOURCE are
+ * disabled.
+ */
+static void coresight_disable_path_from(struct list_head *path,
+ struct coresight_node *nd)
{
u32 type;
- struct coresight_node *nd;
struct coresight_device *csdev, *parent, *child;
- list_for_each_entry(nd, path, link) {
+ if (!nd)
+ nd = list_first_entry(path, struct coresight_node, link);
+
+ list_for_each_entry_continue(nd, path, link) {
csdev = nd->csdev;
type = csdev->type;
@@ -300,7 +399,12 @@ void coresight_disable_path(struct list_head *path)
coresight_disable_sink(csdev);
break;
case CORESIGHT_DEV_TYPE_SOURCE:
- /* sources are disabled from either sysFS or Perf */
+ /*
+ * We skip the first node in the path assuming that it
+ * is the source. So we don't expect a source device in
+ * the middle of a path.
+ */
+ WARN_ON(1);
break;
case CORESIGHT_DEV_TYPE_LINK:
parent = list_prev_entry(nd, link)->csdev;
@@ -313,7 +417,12 @@ void coresight_disable_path(struct list_head *path)
}
}
-int coresight_enable_path(struct list_head *path, u32 mode)
+void coresight_disable_path(struct list_head *path)
+{
+ coresight_disable_path_from(path, NULL);
+}
+
+int coresight_enable_path(struct list_head *path, u32 mode, void *sink_data)
{
int ret = 0;
@@ -338,9 +447,15 @@ int coresight_enable_path(struct list_head *path, u32 mode)
switch (type) {
case CORESIGHT_DEV_TYPE_SINK:
- ret = coresight_enable_sink(csdev, mode);
+ ret = coresight_enable_sink(csdev, mode, sink_data);
+ /*
+ * Sink is the first component turned on. If we
+ * failed to enable the sink, there are no components
+ * that need disabling. Disabling the path here
+ * would mean we could disrupt an existing session.
+ */
if (ret)
- goto err;
+ goto out;
break;
case CORESIGHT_DEV_TYPE_SOURCE:
/* sources are enabled from either sysFS or Perf */
@@ -360,7 +475,7 @@ int coresight_enable_path(struct list_head *path, u32 mode)
out:
return ret;
err:
- coresight_disable_path(path);
+ coresight_disable_path_from(path, nd);
goto out;
}
@@ -635,7 +750,7 @@ int coresight_enable(struct coresight_device *csdev)
goto out;
}
- ret = coresight_enable_path(path, CS_MODE_SYSFS);
+ ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
if (ret)
goto err_path;
@@ -995,18 +1110,16 @@ postcore_initcall(coresight_init);
struct coresight_device *coresight_register(struct coresight_desc *desc)
{
- int i;
int ret;
int link_subtype;
int nr_refcnts = 1;
atomic_t *refcnts = NULL;
struct coresight_device *csdev;
- struct coresight_connection *conns = NULL;
csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
if (!csdev) {
ret = -ENOMEM;
- goto err_kzalloc_csdev;
+ goto err_out;
}
if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
@@ -1022,7 +1135,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
if (!refcnts) {
ret = -ENOMEM;
- goto err_kzalloc_refcnts;
+ goto err_free_csdev;
}
csdev->refcnt = refcnts;
@@ -1030,22 +1143,7 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->nr_inport = desc->pdata->nr_inport;
csdev->nr_outport = desc->pdata->nr_outport;
- /* Initialise connections if there is at least one outport */
- if (csdev->nr_outport) {
- conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
- if (!conns) {
- ret = -ENOMEM;
- goto err_kzalloc_conns;
- }
-
- for (i = 0; i < csdev->nr_outport; i++) {
- conns[i].outport = desc->pdata->outports[i];
- conns[i].child_name = desc->pdata->child_names[i];
- conns[i].child_port = desc->pdata->child_ports[i];
- }
- }
-
- csdev->conns = conns;
+ csdev->conns = desc->pdata->conns;
csdev->type = desc->type;
csdev->subtype = desc->subtype;
@@ -1062,7 +1160,11 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
ret = device_register(&csdev->dev);
if (ret) {
put_device(&csdev->dev);
- goto err_kzalloc_csdev;
+ /*
+ * All resources are free'd explicitly via
+ * coresight_device_release(), triggered from put_device().
+ */
+ goto err_out;
}
mutex_lock(&coresight_mutex);
@@ -1074,11 +1176,9 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
return csdev;
-err_kzalloc_conns:
- kfree(refcnts);
-err_kzalloc_refcnts:
+err_free_csdev:
kfree(csdev);
-err_kzalloc_csdev:
+err_out:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(coresight_register);
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 6880bee195c8..89092f83567e 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -45,8 +45,13 @@ of_coresight_get_endpoint_device(struct device_node *endpoint)
endpoint, of_dev_node_match);
}
-static void of_coresight_get_ports(const struct device_node *node,
- int *nr_inport, int *nr_outport)
+static inline bool of_coresight_legacy_ep_is_input(struct device_node *ep)
+{
+ return of_property_read_bool(ep, "slave-mode");
+}
+
+static void of_coresight_get_ports_legacy(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
{
struct device_node *ep = NULL;
int in = 0, out = 0;
@@ -56,7 +61,7 @@ static void of_coresight_get_ports(const struct device_node *node,
if (!ep)
break;
- if (of_property_read_bool(ep, "slave-mode"))
+ if (of_coresight_legacy_ep_is_input(ep))
in++;
else
out++;
@@ -67,32 +72,77 @@ static void of_coresight_get_ports(const struct device_node *node,
*nr_outport = out;
}
+static struct device_node *of_coresight_get_port_parent(struct device_node *ep)
+{
+ struct device_node *parent = of_graph_get_port_parent(ep);
+
+ /*
+	 * Skip one level up to the real device node if we
+ * are using the new bindings.
+ */
+ if (!of_node_cmp(parent->name, "in-ports") ||
+ !of_node_cmp(parent->name, "out-ports"))
+ parent = of_get_next_parent(parent);
+
+ return parent;
+}
+
+static inline struct device_node *
+of_coresight_get_input_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "in-ports");
+}
+
+static inline struct device_node *
+of_coresight_get_output_ports_node(const struct device_node *node)
+{
+ return of_get_child_by_name(node, "out-ports");
+}
+
+static inline int
+of_coresight_count_ports(struct device_node *port_parent)
+{
+ int i = 0;
+ struct device_node *ep = NULL;
+
+ while ((ep = of_graph_get_next_endpoint(port_parent, ep)))
+ i++;
+ return i;
+}
+
+static void of_coresight_get_ports(const struct device_node *node,
+ int *nr_inport, int *nr_outport)
+{
+ struct device_node *input_ports = NULL, *output_ports = NULL;
+
+ input_ports = of_coresight_get_input_ports_node(node);
+ output_ports = of_coresight_get_output_ports_node(node);
+
+ if (input_ports || output_ports) {
+ if (input_ports) {
+ *nr_inport = of_coresight_count_ports(input_ports);
+ of_node_put(input_ports);
+ }
+ if (output_ports) {
+ *nr_outport = of_coresight_count_ports(output_ports);
+ of_node_put(output_ports);
+ }
+ } else {
+ /* Fall back to legacy DT bindings parsing */
+ of_coresight_get_ports_legacy(node, nr_inport, nr_outport);
+ }
+}
+
static int of_coresight_alloc_memory(struct device *dev,
struct coresight_platform_data *pdata)
{
- /* List of output port on this component */
- pdata->outports = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->outports),
- GFP_KERNEL);
- if (!pdata->outports)
- return -ENOMEM;
-
- /* Children connected to this component via @outports */
- pdata->child_names = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->child_names),
- GFP_KERNEL);
- if (!pdata->child_names)
- return -ENOMEM;
-
- /* Port number on the child this component is connected to */
- pdata->child_ports = devm_kcalloc(dev,
- pdata->nr_outport,
- sizeof(*pdata->child_ports),
- GFP_KERNEL);
- if (!pdata->child_ports)
- return -ENOMEM;
+ if (pdata->nr_outport) {
+ pdata->conns = devm_kzalloc(dev, pdata->nr_outport *
+ sizeof(*pdata->conns),
+ GFP_KERNEL);
+ if (!pdata->conns)
+ return -ENOMEM;
+ }
return 0;
}
@@ -114,17 +164,78 @@ int of_coresight_get_cpu(const struct device_node *node)
}
EXPORT_SYMBOL_GPL(of_coresight_get_cpu);
+/*
+ * of_coresight_parse_endpoint : Parse the given output endpoint @ep
+ * and fill the connection information in @conn
+ *
+ * Parses the local port, remote device name and the remote port.
+ *
+ * Returns :
+ * 1 - If the parsing is successful and a connection record
+ * was created for an output connection.
+ * 0 - If the parsing completed without any fatal errors.
+ * -Errno - Fatal error, abort the scanning.
+ */
+static int of_coresight_parse_endpoint(struct device *dev,
+ struct device_node *ep,
+ struct coresight_connection *conn)
+{
+ int ret = 0;
+ struct of_endpoint endpoint, rendpoint;
+ struct device_node *rparent = NULL;
+ struct device_node *rep = NULL;
+ struct device *rdev = NULL;
+
+ do {
+ /* Parse the local port details */
+ if (of_graph_parse_endpoint(ep, &endpoint))
+ break;
+ /*
+ * Get a handle on the remote endpoint and the device it is
+ * attached to.
+ */
+ rep = of_graph_get_remote_endpoint(ep);
+ if (!rep)
+ break;
+ rparent = of_coresight_get_port_parent(rep);
+ if (!rparent)
+ break;
+ if (of_graph_parse_endpoint(rep, &rendpoint))
+ break;
+
+ /* If the remote device is not available, defer probing */
+ rdev = of_coresight_get_endpoint_device(rparent);
+ if (!rdev) {
+ ret = -EPROBE_DEFER;
+ break;
+ }
+
+ conn->outport = endpoint.port;
+ conn->child_name = devm_kstrdup(dev,
+ dev_name(rdev),
+ GFP_KERNEL);
+ conn->child_port = rendpoint.port;
+ /* Connection record updated */
+ ret = 1;
+ } while (0);
+
+ of_node_put(rparent);
+ of_node_put(rep);
+ put_device(rdev);
+
+ return ret;
+}
+
struct coresight_platform_data *
of_get_coresight_platform_data(struct device *dev,
const struct device_node *node)
{
- int i = 0, ret = 0;
+ int ret = 0;
struct coresight_platform_data *pdata;
- struct of_endpoint endpoint, rendpoint;
- struct device *rdev;
+ struct coresight_connection *conn;
struct device_node *ep = NULL;
- struct device_node *rparent = NULL;
- struct device_node *rport = NULL;
+ const struct device_node *parent = NULL;
+ bool legacy_binding = false;
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
@@ -132,63 +243,54 @@ of_get_coresight_platform_data(struct device *dev,
/* Use device name as sysfs handle */
pdata->name = dev_name(dev);
+ pdata->cpu = of_coresight_get_cpu(node);
/* Get the number of input and output port for this component */
of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
- if (pdata->nr_outport) {
- ret = of_coresight_alloc_memory(dev, pdata);
- if (ret)
- return ERR_PTR(ret);
-
- /* Iterate through each port to discover topology */
- do {
- /* Get a handle on a port */
- ep = of_graph_get_next_endpoint(node, ep);
- if (!ep)
- break;
-
- /*
- * No need to deal with input ports, processing for as
- * processing for output ports will deal with them.
- */
- if (of_find_property(ep, "slave-mode", NULL))
- continue;
-
- /* Get a handle on the local endpoint */
- ret = of_graph_parse_endpoint(ep, &endpoint);
-
- if (ret)
- continue;
-
- /* The local out port number */
- pdata->outports[i] = endpoint.port;
-
- /*
- * Get a handle on the remote port and parent
- * attached to it.
- */
- rparent = of_graph_get_remote_port_parent(ep);
- rport = of_graph_get_remote_port(ep);
-
- if (!rparent || !rport)
- continue;
+ /* If there are no output connections, we are done */
+ if (!pdata->nr_outport)
+ return pdata;
- if (of_graph_parse_endpoint(rport, &rendpoint))
- continue;
+ ret = of_coresight_alloc_memory(dev, pdata);
+ if (ret)
+ return ERR_PTR(ret);
- rdev = of_coresight_get_endpoint_device(rparent);
- if (!rdev)
- return ERR_PTR(-EPROBE_DEFER);
-
- pdata->child_names[i] = dev_name(rdev);
- pdata->child_ports[i] = rendpoint.id;
-
- i++;
- } while (ep);
+ parent = of_coresight_get_output_ports_node(node);
+ /*
+	 * If the DT uses obsolete bindings, the ports are listed
+ * under the device and we need to filter out the input
+ * ports.
+ */
+ if (!parent) {
+ legacy_binding = true;
+ parent = node;
+ dev_warn_once(dev, "Uses obsolete Coresight DT bindings\n");
}
- pdata->cpu = of_coresight_get_cpu(node);
+ conn = pdata->conns;
+
+ /* Iterate through each output port to discover topology */
+ while ((ep = of_graph_get_next_endpoint(parent, ep))) {
+ /*
+ * Legacy binding mixes input/output ports under the
+ * same parent. So, skip the input ports if we are dealing
+		 * with legacy binding, as they are processed with their
+ * connected output ports.
+ */
+ if (legacy_binding && of_coresight_legacy_ep_is_input(ep))
+ continue;
+
+ ret = of_coresight_parse_endpoint(dev, ep, conn);
+ switch (ret) {
+ case 1:
+ conn++; /* Fall through */
+ case 0:
+ break;
+ default:
+ return ERR_PTR(ret);
+ }
+ }
return pdata;
}
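
Since of_get_coresight_platform_data() now returns a ready-made array of
coresight_connection records in pdata->conns, a platform driver probe can
hand the result straight to coresight_register(). A minimal sketch of that
consuming side, assuming the usual AMBA probe shape (names here are
illustrative, not from this patch):

	static int my_probe(struct amba_device *adev, const struct amba_id *id)
	{
		struct coresight_platform_data *pdata;
		struct coresight_desc desc = { 0 };

		pdata = of_get_coresight_platform_data(&adev->dev,
						       adev->dev.of_node);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);	/* may be -EPROBE_DEFER */

		desc.pdata = pdata;	/* the conns array is consumed as-is */
		/* ... fill in desc.type, desc.ops, desc.dev, etc. ... */
		return PTR_ERR_OR_ZERO(coresight_register(&desc));
	}
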
diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
index 723e2d90083d..752dd66742bf 100644
--- a/drivers/hwtracing/stm/Kconfig
+++ b/drivers/hwtracing/stm/Kconfig
@@ -11,6 +11,35 @@ config STM
if STM
+config STM_PROTO_BASIC
+ tristate "Basic STM framing protocol driver"
+ default CONFIG_STM
+ help
+ This is a simple framing protocol for sending data over STM
+ devices. This was the protocol that the STM framework used
+ exclusively until the MIPI SyS-T support was added. Use this
+ driver for compatibility with your existing STM setup.
+
+ The receiving side only needs to be able to decode the MIPI
+ STP protocol in order to extract the data.
+
+ If you want to be able to use the basic protocol or want
+ backwards compatibility for your existing setup, say Y.
+
+config STM_PROTO_SYS_T
+ tristate "MIPI SyS-T STM framing protocol driver"
+ default CONFIG_STM
+ help
+ This is an implementation of the MIPI SyS-T protocol to be used
+ over the STP transport. In addition to the data payload, it
+ also carries additional metadata for time correlation, better
+ means of trace source identification, etc.
+
+ The receiving side must be able to decode this protocol in
+ addition to the MIPI STP, in order to extract the data.
+
+ If you don't know what this is, say N.
+
config STM_DUMMY
tristate "Dummy STM driver"
help
diff --git a/drivers/hwtracing/stm/Makefile b/drivers/hwtracing/stm/Makefile
index effc19e5190f..1692fcd29277 100644
--- a/drivers/hwtracing/stm/Makefile
+++ b/drivers/hwtracing/stm/Makefile
@@ -3,6 +3,12 @@ obj-$(CONFIG_STM) += stm_core.o
stm_core-y := core.o policy.o
+obj-$(CONFIG_STM_PROTO_BASIC) += stm_p_basic.o
+obj-$(CONFIG_STM_PROTO_SYS_T) += stm_p_sys-t.o
+
+stm_p_basic-y := p_basic.o
+stm_p_sys-t-y := p_sys-t.o
+
obj-$(CONFIG_STM_DUMMY) += dummy_stm.o
obj-$(CONFIG_STM_SOURCE_CONSOLE) += stm_console.o
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 10bcb5d73f90..93ce3aa740a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -293,15 +293,15 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
if (width > stm->data->sw_nchannels)
return -EINVAL;
- if (policy_node) {
- stp_policy_node_get_ranges(policy_node,
- &midx, &mend, &cidx, &cend);
- } else {
- midx = stm->data->sw_start;
- cidx = 0;
- mend = stm->data->sw_end;
- cend = stm->data->sw_nchannels - 1;
- }
+ /* We no longer accept policy_node==NULL here */
+ if (WARN_ON_ONCE(!policy_node))
+ return -EINVAL;
+
+ /*
+ * Also, the caller holds reference to policy_node, so it won't
+ * disappear on us.
+ */
+ stp_policy_node_get_ranges(policy_node, &midx, &mend, &cidx, &cend);
spin_lock(&stm->mc_lock);
spin_lock(&output->lock);
@@ -316,11 +316,26 @@ static int stm_output_assign(struct stm_device *stm, unsigned int width,
output->master = midx;
output->channel = cidx;
output->nr_chans = width;
+ if (stm->pdrv->output_open) {
+ void *priv = stp_policy_node_priv(policy_node);
+
+ if (WARN_ON_ONCE(!priv))
+ goto unlock;
+
+ /* configfs subsys mutex is held by the caller */
+ ret = stm->pdrv->output_open(priv, output);
+ if (ret)
+ goto unlock;
+ }
+
stm_output_claim(stm, output);
dev_dbg(&stm->dev, "assigned %u:%u (+%u)\n", midx, cidx, width);
ret = 0;
unlock:
+ if (ret)
+ output->nr_chans = 0;
+
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
@@ -333,6 +348,8 @@ static void stm_output_free(struct stm_device *stm, struct stm_output *output)
spin_lock(&output->lock);
if (output->nr_chans)
stm_output_disclaim(stm, output);
+ if (stm->pdrv && stm->pdrv->output_close)
+ stm->pdrv->output_close(output);
spin_unlock(&output->lock);
spin_unlock(&stm->mc_lock);
}
@@ -349,6 +366,127 @@ static int major_match(struct device *dev, const void *data)
return MAJOR(dev->devt) == major;
}
+/*
+ * Framing protocol management
+ * Modules can implement STM protocol drivers and (un-)register them
+ * with the STM class framework.
+ */
+static struct list_head stm_pdrv_head;
+static struct mutex stm_pdrv_mutex;
+
+struct stm_pdrv_entry {
+ struct list_head entry;
+ const struct stm_protocol_driver *pdrv;
+ const struct config_item_type *node_type;
+};
+
+static const struct stm_pdrv_entry *
+__stm_lookup_protocol(const char *name)
+{
+ struct stm_pdrv_entry *pe;
+
+ /*
+ * If no name is given (NULL or ""), fall back to "p_basic".
+ */
+ if (!name || !*name)
+ name = "p_basic";
+
+ list_for_each_entry(pe, &stm_pdrv_head, entry) {
+ if (!strcmp(name, pe->pdrv->name))
+ return pe;
+ }
+
+ return NULL;
+}
+
+int stm_register_protocol(const struct stm_protocol_driver *pdrv)
+{
+ struct stm_pdrv_entry *pe = NULL;
+ int ret = -ENOMEM;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ if (__stm_lookup_protocol(pdrv->name)) {
+ ret = -EEXIST;
+ goto unlock;
+ }
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ goto unlock;
+
+ if (pdrv->policy_attr) {
+ pe->node_type = get_policy_node_type(pdrv->policy_attr);
+ if (!pe->node_type)
+ goto unlock;
+ }
+
+ list_add_tail(&pe->entry, &stm_pdrv_head);
+ pe->pdrv = pdrv;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&stm_pdrv_mutex);
+
+ if (ret)
+ kfree(pe);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stm_register_protocol);
+
+void stm_unregister_protocol(const struct stm_protocol_driver *pdrv)
+{
+ struct stm_pdrv_entry *pe, *iter;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ list_for_each_entry_safe(pe, iter, &stm_pdrv_head, entry) {
+ if (pe->pdrv == pdrv) {
+ list_del(&pe->entry);
+
+ if (pe->node_type) {
+ kfree(pe->node_type->ct_attrs);
+ kfree(pe->node_type);
+ }
+ kfree(pe);
+ break;
+ }
+ }
+
+ mutex_unlock(&stm_pdrv_mutex);
+}
+EXPORT_SYMBOL_GPL(stm_unregister_protocol);
+
+static bool stm_get_protocol(const struct stm_protocol_driver *pdrv)
+{
+ return try_module_get(pdrv->owner);
+}
+
+void stm_put_protocol(const struct stm_protocol_driver *pdrv)
+{
+ module_put(pdrv->owner);
+}
+
+int stm_lookup_protocol(const char *name,
+ const struct stm_protocol_driver **pdrv,
+ const struct config_item_type **node_type)
+{
+ const struct stm_pdrv_entry *pe;
+
+ mutex_lock(&stm_pdrv_mutex);
+
+ pe = __stm_lookup_protocol(name);
+ if (pe && pe->pdrv && stm_get_protocol(pe->pdrv)) {
+ *pdrv = pe->pdrv;
+ *node_type = pe->node_type;
+ }
+
+ mutex_unlock(&stm_pdrv_mutex);
+
+ return pe ? 0 : -ENOENT;
+}
+
static int stm_char_open(struct inode *inode, struct file *file)
{
struct stm_file *stmf;
@@ -405,42 +543,81 @@ static int stm_char_release(struct inode *inode, struct file *file)
return 0;
}
-static int stm_file_assign(struct stm_file *stmf, char *id, unsigned int width)
+static int
+stm_assign_first_policy(struct stm_device *stm, struct stm_output *output,
+ char **ids, unsigned int width)
{
- struct stm_device *stm = stmf->stm;
- int ret;
+ struct stp_policy_node *pn;
+ int err, n;
- stmf->policy_node = stp_policy_node_lookup(stm, id);
+ /*
+ * On success, stp_policy_node_lookup() will return holding the
+ * configfs subsystem mutex, which is then released in
+ * stp_policy_node_put(). This allows the pdrv->output_open() in
+ * stm_output_assign() to serialize against the attribute accessors.
+ */
+ for (n = 0, pn = NULL; ids[n] && !pn; n++)
+ pn = stp_policy_node_lookup(stm, ids[n]);
- ret = stm_output_assign(stm, width, stmf->policy_node, &stmf->output);
+ if (!pn)
+ return -EINVAL;
- if (stmf->policy_node)
- stp_policy_node_put(stmf->policy_node);
+ err = stm_output_assign(stm, width, pn, output);
- return ret;
+ stp_policy_node_put(pn);
+
+ return err;
}
-static ssize_t notrace stm_write(struct stm_data *data, unsigned int master,
- unsigned int channel, const char *buf, size_t count)
+/**
+ * stm_data_write() - send the given payload as data packets
+ * @data: stm driver's data
+ * @m: STP master
+ * @c: STP channel
+ * @ts_first: timestamp the first packet
+ * @buf: data payload buffer
+ * @count: data payload size
+ */
+ssize_t notrace stm_data_write(struct stm_data *data, unsigned int m,
+ unsigned int c, bool ts_first, const void *buf,
+ size_t count)
{
- unsigned int flags = STP_PACKET_TIMESTAMPED;
- const unsigned char *p = buf, nil = 0;
- size_t pos;
+ unsigned int flags = ts_first ? STP_PACKET_TIMESTAMPED : 0;
ssize_t sz;
+ size_t pos;
- for (pos = 0, p = buf; count > pos; pos += sz, p += sz) {
+ for (pos = 0, sz = 0; pos < count; pos += sz) {
sz = min_t(unsigned int, count - pos, 8);
- sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
- sz, p);
- flags = 0;
-
- if (sz < 0)
+ sz = data->packet(data, m, c, STP_PACKET_DATA, flags, sz,
+ &((u8 *)buf)[pos]);
+ if (sz <= 0)
break;
+
+ if (ts_first) {
+ flags = 0;
+ ts_first = false;
+ }
}
- data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
+ return sz < 0 ? sz : pos;
+}
+EXPORT_SYMBOL_GPL(stm_data_write);
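
A worked example of the chunking: with ts_first set, a 20-byte payload becomes three STP DATA packets of 8, 8 and 4 bytes, and only the first one carries STP_PACKET_TIMESTAMPED. A hypothetical caller (the stm_data handle and the master/channel values are assumptions):

	/* Illustrative only: returns 20 on success, or the stm driver's
	 * negative error code if the first packet already fails.
	 */
	static ssize_t example_send(struct stm_data *data, unsigned int m,
				    unsigned int c, const void *buf)
	{
		/* emits DATA(ts, 8 bytes) + DATA(8 bytes) + DATA(4 bytes) */
		return stm_data_write(data, m, c, true, buf, 20);
	}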
+
+static ssize_t notrace
+stm_write(struct stm_device *stm, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ int err;
+
+ /* stm->pdrv is serialized against policy_mutex */
+ if (!stm->pdrv)
+ return -ENODEV;
+
+ err = stm->pdrv->write(stm->data, output, chan, buf, count);
+ if (err < 0)
+ return err;
- return pos;
+ return err;
}
static ssize_t stm_char_write(struct file *file, const char __user *buf,
@@ -455,16 +632,21 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
count = PAGE_SIZE - 1;
/*
- * if no m/c have been assigned to this writer up to this
- * point, use "default" policy entry
+ * If no m/c have been assigned to this writer up to this
+ * point, try to use the task name and "default" policy entries.
*/
if (!stmf->output.nr_chans) {
- err = stm_file_assign(stmf, "default", 1);
+ char comm[sizeof(current->comm)];
+ char *ids[] = { comm, "default", NULL };
+
+ get_task_comm(comm, current);
+
+ err = stm_assign_first_policy(stmf->stm, &stmf->output, ids, 1);
/*
* EBUSY means that somebody else just assigned this
* output, which is just fine for write()
*/
- if (err && err != -EBUSY)
+ if (err)
return err;
}
@@ -480,8 +662,7 @@ static ssize_t stm_char_write(struct file *file, const char __user *buf,
pm_runtime_get_sync(&stm->dev);
- count = stm_write(stm->data, stmf->output.master, stmf->output.channel,
- kbuf, count);
+ count = stm_write(stm, &stmf->output, 0, kbuf, count);
pm_runtime_mark_last_busy(&stm->dev);
pm_runtime_put_autosuspend(&stm->dev);
@@ -550,6 +731,7 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
{
struct stm_device *stm = stmf->stm;
struct stp_policy_id *id;
+ char *ids[] = { NULL, NULL };
int ret = -EINVAL;
u32 size;
@@ -582,7 +764,9 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
id->width > PAGE_SIZE / stm->data->sw_mmiosz)
goto err_free;
- ret = stm_file_assign(stmf, id->id, id->width);
+ ids[0] = id->id;
+ ret = stm_assign_first_policy(stmf->stm, &stmf->output, ids,
+ id->width);
if (ret)
goto err_free;
@@ -818,8 +1002,8 @@ EXPORT_SYMBOL_GPL(stm_unregister_device);
static int stm_source_link_add(struct stm_source_device *src,
struct stm_device *stm)
{
- char *id;
- int err;
+ char *ids[] = { NULL, "default", NULL };
+ int err = -ENOMEM;
mutex_lock(&stm->link_mutex);
spin_lock(&stm->link_lock);
@@ -833,19 +1017,13 @@ static int stm_source_link_add(struct stm_source_device *src,
spin_unlock(&stm->link_lock);
mutex_unlock(&stm->link_mutex);
- id = kstrdup(src->data->name, GFP_KERNEL);
- if (id) {
- src->policy_node =
- stp_policy_node_lookup(stm, id);
-
- kfree(id);
- }
-
- err = stm_output_assign(stm, src->data->nr_chans,
- src->policy_node, &src->output);
+ ids[0] = kstrdup(src->data->name, GFP_KERNEL);
+ if (!ids[0])
+ goto fail_detach;
- if (src->policy_node)
- stp_policy_node_put(src->policy_node);
+ err = stm_assign_first_policy(stm, &src->output, ids,
+ src->data->nr_chans);
+ kfree(ids[0]);
if (err)
goto fail_detach;
@@ -1134,9 +1312,7 @@ int notrace stm_source_write(struct stm_source_data *data,
stm = srcu_dereference(src->link, &stm_source_srcu);
if (stm)
- count = stm_write(stm->data, src->output.master,
- src->output.channel + chan,
- buf, count);
+ count = stm_write(stm, &src->output, chan, buf, count);
else
count = -ENODEV;
@@ -1163,7 +1339,15 @@ static int __init stm_core_init(void)
goto err_src;
init_srcu_struct(&stm_source_srcu);
+ INIT_LIST_HEAD(&stm_pdrv_head);
+ mutex_init(&stm_pdrv_mutex);
+ /*
+ * So as to not confuse existing users with a requirement
+ * to load yet another module, do it here.
+ */
+ if (IS_ENABLED(CONFIG_STM_PROTO_BASIC))
+ (void)request_module_nowait("stm_p_basic");
stm_core_up++;
return 0;
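
From user space, the new policy fallback in stm_char_write() can be exercised with a plain write(); a minimal sketch (the device node name is an assumption and depends on the registered stm device):

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* device node name assumed; matches the stm device's name */
		int fd = open("/dev/0-stm", O_WRONLY);

		if (fd < 0)
			return 1;
		/*
		 * The first write() auto-assigns master/channel: the policy
		 * node matching this task's comm is tried first, then the
		 * "default" node.
		 */
		(void)write(fd, "hello", 5);
		close(fd);
		return 0;
	}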
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 7db42395e131..3e7df1c0477f 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -76,7 +76,7 @@ static int stm_heartbeat_init(void)
goto fail_unregister;
stm_heartbeat[i].data.nr_chans = 1;
- stm_heartbeat[i].data.link = stm_heartbeat_link;
+ stm_heartbeat[i].data.link = stm_heartbeat_link;
stm_heartbeat[i].data.unlink = stm_heartbeat_unlink;
hrtimer_init(&stm_heartbeat[i].hrtimer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS);
diff --git a/drivers/hwtracing/stm/p_basic.c b/drivers/hwtracing/stm/p_basic.c
new file mode 100644
index 000000000000..8980a6a5fd6c
--- /dev/null
+++ b/drivers/hwtracing/stm/p_basic.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Basic framing protocol for STM devices.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/stm.h>
+#include "stm.h"
+
+static ssize_t basic_write(struct stm_data *data, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ unsigned int c = output->channel + chan;
+ unsigned int m = output->master;
+ const unsigned char nil = 0;
+ ssize_t sz;
+
+ sz = stm_data_write(data, m, c, true, buf, count);
+ if (sz > 0)
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sz;
+}
+
+static const struct stm_protocol_driver basic_pdrv = {
+ .owner = THIS_MODULE,
+ .name = "p_basic",
+ .write = basic_write,
+};
+
+static int basic_stm_init(void)
+{
+ return stm_register_protocol(&basic_pdrv);
+}
+
+static void basic_stm_exit(void)
+{
+ stm_unregister_protocol(&basic_pdrv);
+}
+
+module_init(basic_stm_init);
+module_exit(basic_stm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Basic STM framing protocol driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
diff --git a/drivers/hwtracing/stm/p_sys-t.c b/drivers/hwtracing/stm/p_sys-t.c
new file mode 100644
index 000000000000..b178a5495b67
--- /dev/null
+++ b/drivers/hwtracing/stm/p_sys-t.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPI SyS-T framing protocol for STM devices.
+ * Copyright (c) 2018, Intel Corporation.
+ */
+
+#include <linux/configfs.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/stm.h>
+#include "stm.h"
+
+enum sys_t_message_type {
+ MIPI_SYST_TYPE_BUILD = 0,
+ MIPI_SYST_TYPE_SHORT32,
+ MIPI_SYST_TYPE_STRING,
+ MIPI_SYST_TYPE_CATALOG,
+ MIPI_SYST_TYPE_RAW = 6,
+ MIPI_SYST_TYPE_SHORT64,
+ MIPI_SYST_TYPE_CLOCK,
+};
+
+enum sys_t_message_severity {
+ MIPI_SYST_SEVERITY_MAX = 0,
+ MIPI_SYST_SEVERITY_FATAL,
+ MIPI_SYST_SEVERITY_ERROR,
+ MIPI_SYST_SEVERITY_WARNING,
+ MIPI_SYST_SEVERITY_INFO,
+ MIPI_SYST_SEVERITY_USER1,
+ MIPI_SYST_SEVERITY_USER2,
+ MIPI_SYST_SEVERITY_DEBUG,
+};
+
+enum sys_t_message_build_subtype {
+ MIPI_SYST_BUILD_ID_COMPACT32 = 0,
+ MIPI_SYST_BUILD_ID_COMPACT64,
+ MIPI_SYST_BUILD_ID_LONG,
+};
+
+enum sys_t_message_clock_subtype {
+ MIPI_SYST_CLOCK_TRANSPORT_SYNC = 1,
+};
+
+enum sys_t_message_string_subtype {
+ MIPI_SYST_STRING_GENERIC = 1,
+ MIPI_SYST_STRING_FUNCTIONENTER,
+ MIPI_SYST_STRING_FUNCTIONEXIT,
+ MIPI_SYST_STRING_INVALIDPARAM = 5,
+ MIPI_SYST_STRING_ASSERT = 7,
+ MIPI_SYST_STRING_PRINTF_32 = 11,
+ MIPI_SYST_STRING_PRINTF_64 = 12,
+};
+
+#define MIPI_SYST_TYPE(t) ((u32)(MIPI_SYST_TYPE_ ## t))
+#define MIPI_SYST_SEVERITY(s) ((u32)(MIPI_SYST_SEVERITY_ ## s) << 4)
+#define MIPI_SYST_OPT_LOC BIT(8)
+#define MIPI_SYST_OPT_LEN BIT(9)
+#define MIPI_SYST_OPT_CHK BIT(10)
+#define MIPI_SYST_OPT_TS BIT(11)
+#define MIPI_SYST_UNIT(u) ((u32)(u) << 12)
+#define MIPI_SYST_ORIGIN(o) ((u32)(o) << 16)
+#define MIPI_SYST_OPT_GUID BIT(23)
+#define MIPI_SYST_SUBTYPE(s) ((u32)(MIPI_SYST_ ## s) << 24)
+#define MIPI_SYST_UNITLARGE(u) (MIPI_SYST_UNIT(u & 0xf) | \
+ MIPI_SYST_ORIGIN(u >> 4))
+#define MIPI_SYST_TYPES(t, s) (MIPI_SYST_TYPE(t) | \
+ MIPI_SYST_SUBTYPE(t ## _ ## s))
+
+#define DATA_HEADER (MIPI_SYST_TYPES(STRING, GENERIC) | \
+ MIPI_SYST_SEVERITY(INFO) | \
+ MIPI_SYST_OPT_GUID)
+
+#define CLOCK_SYNC_HEADER (MIPI_SYST_TYPES(CLOCK, TRANSPORT_SYNC) | \
+ MIPI_SYST_SEVERITY(MAX))
+
+struct sys_t_policy_node {
+ uuid_t uuid;
+ bool do_len;
+ unsigned long ts_interval;
+ unsigned long clocksync_interval;
+};
+
+struct sys_t_output {
+ struct sys_t_policy_node node;
+ unsigned long ts_jiffies;
+ unsigned long clocksync_jiffies;
+};
+
+static void sys_t_policy_node_init(void *priv)
+{
+ struct sys_t_policy_node *pn = priv;
+
+ generate_random_uuid(pn->uuid.b);
+}
+
+static int sys_t_output_open(void *priv, struct stm_output *output)
+{
+ struct sys_t_policy_node *pn = priv;
+ struct sys_t_output *opriv;
+
+ opriv = kzalloc(sizeof(*opriv), GFP_ATOMIC);
+ if (!opriv)
+ return -ENOMEM;
+
+ memcpy(&opriv->node, pn, sizeof(opriv->node));
+ output->pdrv_private = opriv;
+
+ return 0;
+}
+
+static void sys_t_output_close(struct stm_output *output)
+{
+ kfree(output->pdrv_private);
+}
+
+static ssize_t sys_t_policy_uuid_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%pU\n", &pn->uuid);
+}
+
+static ssize_t
+sys_t_policy_uuid_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = uuid_parse(page, &pn->uuid);
+ mutex_unlock(mutexp);
+
+ return ret < 0 ? ret : count;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, uuid);
+
+static ssize_t sys_t_policy_do_len_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%d\n", pn->do_len);
+}
+
+static ssize_t
+sys_t_policy_do_len_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtobool(page, &pn->do_len);
+ mutex_unlock(mutexp);
+
+ return ret ? ret : count;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, do_len);
+
+static ssize_t sys_t_policy_ts_interval_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%u\n", jiffies_to_msecs(pn->ts_interval));
+}
+
+static ssize_t
+sys_t_policy_ts_interval_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ unsigned int ms;
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtouint(page, 10, &ms);
+ mutex_unlock(mutexp);
+
+ if (!ret) {
+ pn->ts_interval = msecs_to_jiffies(ms);
+ return count;
+ }
+
+ return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, ts_interval);
+
+static ssize_t sys_t_policy_clocksync_interval_show(struct config_item *item,
+ char *page)
+{
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+
+ return sprintf(page, "%u\n", jiffies_to_msecs(pn->clocksync_interval));
+}
+
+static ssize_t
+sys_t_policy_clocksync_interval_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct mutex *mutexp = &item->ci_group->cg_subsys->su_mutex;
+ struct sys_t_policy_node *pn = to_pdrv_policy_node(item);
+ unsigned int ms;
+ int ret;
+
+ mutex_lock(mutexp);
+ ret = kstrtouint(page, 10, &ms);
+ mutex_unlock(mutexp);
+
+ if (!ret) {
+ pn->clocksync_interval = msecs_to_jiffies(ms);
+ return count;
+ }
+
+ return ret;
+}
+
+CONFIGFS_ATTR(sys_t_policy_, clocksync_interval);
+
+static struct configfs_attribute *sys_t_policy_attrs[] = {
+ &sys_t_policy_attr_uuid,
+ &sys_t_policy_attr_do_len,
+ &sys_t_policy_attr_ts_interval,
+ &sys_t_policy_attr_clocksync_interval,
+ NULL,
+};
+
+static inline bool sys_t_need_ts(struct sys_t_output *op)
+{
+ if (op->node.ts_interval &&
+ time_after(op->ts_jiffies + op->node.ts_interval, jiffies)) {
+ op->ts_jiffies = jiffies;
+
+ return true;
+ }
+
+ return false;
+}
+
+static bool sys_t_need_clock_sync(struct sys_t_output *op)
+{
+ if (op->node.clocksync_interval &&
+ time_after(op->clocksync_jiffies + op->node.clocksync_interval,
+ jiffies)) {
+ op->clocksync_jiffies = jiffies;
+
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t
+sys_t_clock_sync(struct stm_data *data, unsigned int m, unsigned int c)
+{
+ u32 header = CLOCK_SYNC_HEADER;
+ const unsigned char nil = 0;
+ u64 payload[2]; /* Clock value and frequency */
+ ssize_t sz;
+
+ sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
+ 4, (u8 *)&header);
+ if (sz <= 0)
+ return sz;
+
+ payload[0] = ktime_get_real_ns();
+ payload[1] = NSEC_PER_SEC;
+ sz = stm_data_write(data, m, c, false, &payload, sizeof(payload));
+ if (sz <= 0)
+ return sz;
+
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sizeof(header) + sizeof(payload);
+}
+
+static ssize_t sys_t_write(struct stm_data *data, struct stm_output *output,
+ unsigned int chan, const char *buf, size_t count)
+{
+ struct sys_t_output *op = output->pdrv_private;
+ unsigned int c = output->channel + chan;
+ unsigned int m = output->master;
+ const unsigned char nil = 0;
+ u32 header = DATA_HEADER;
+ ssize_t sz;
+
+ /* We require an existing policy node to proceed */
+ if (!op)
+ return -EINVAL;
+
+ if (sys_t_need_clock_sync(op)) {
+ sz = sys_t_clock_sync(data, m, c);
+ if (sz <= 0)
+ return sz;
+ }
+
+ if (op->node.do_len)
+ header |= MIPI_SYST_OPT_LEN;
+ if (sys_t_need_ts(op))
+ header |= MIPI_SYST_OPT_TS;
+
+ /*
+ * STP framing rules for SyS-T frames:
+ * * the first packet of the SyS-T frame is timestamped;
+ * * the last packet is a FLAG.
+ */
+ /* Message layout: HEADER / GUID / [LENGTH /][TIMESTAMP /] DATA */
+ /* HEADER */
+ sz = data->packet(data, m, c, STP_PACKET_DATA, STP_PACKET_TIMESTAMPED,
+ 4, (u8 *)&header);
+ if (sz <= 0)
+ return sz;
+
+ /* GUID */
+ sz = stm_data_write(data, m, c, false, op->node.uuid.b, UUID_SIZE);
+ if (sz <= 0)
+ return sz;
+
+ /* [LENGTH] */
+ if (op->node.do_len) {
+ u16 length = count;
+
+ sz = data->packet(data, m, c, STP_PACKET_DATA, 0, 2,
+ (u8 *)&length);
+ if (sz <= 0)
+ return sz;
+ }
+
+ /* [TIMESTAMP] */
+ if (header & MIPI_SYST_OPT_TS) {
+ u64 ts = ktime_get_real_ns();
+
+ sz = stm_data_write(data, m, c, false, &ts, sizeof(ts));
+ if (sz <= 0)
+ return sz;
+ }
+
+ /* DATA */
+ sz = stm_data_write(data, m, c, false, buf, count);
+ if (sz > 0)
+ data->packet(data, m, c, STP_PACKET_FLAG, 0, 0, &nil);
+
+ return sz;
+}
+
+static const struct stm_protocol_driver sys_t_pdrv = {
+ .owner = THIS_MODULE,
+ .name = "p_sys-t",
+ .priv_sz = sizeof(struct sys_t_policy_node),
+ .write = sys_t_write,
+ .policy_attr = sys_t_policy_attrs,
+ .policy_node_init = sys_t_policy_node_init,
+ .output_open = sys_t_output_open,
+ .output_close = sys_t_output_close,
+};
+
+static int sys_t_stm_init(void)
+{
+ return stm_register_protocol(&sys_t_pdrv);
+}
+
+static void sys_t_stm_exit(void)
+{
+ stm_unregister_protocol(&sys_t_pdrv);
+}
+
+module_init(sys_t_stm_init);
+module_exit(sys_t_stm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MIPI SyS-T STM framing protocol driver");
+MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");
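
As a sanity check on the header bit layout, the two constants defined in this file decompose as follows (a stand-alone user-space sketch; the values follow directly from the macros above):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		/* DATA_HEADER = TYPE(STRING)=2 | SUBTYPE(GENERIC)=1<<24 |
		 *               SEVERITY(INFO)=4<<4 | OPT_GUID=1<<23
		 */
		uint32_t data_header = 2u | (1u << 24) | (4u << 4) | (1u << 23);

		/* CLOCK_SYNC_HEADER = TYPE(CLOCK)=8 |
		 *                     SUBTYPE(TRANSPORT_SYNC)=1<<24 |
		 *                     SEVERITY(MAX)=0
		 */
		uint32_t clock_sync_header = 8u | (1u << 24);

		assert(data_header == 0x01800042);
		assert(clock_sync_header == 0x01000008);
		return 0;
	}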
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 3fd07e275b34..0910ec807187 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -33,8 +33,18 @@ struct stp_policy_node {
unsigned int last_master;
unsigned int first_channel;
unsigned int last_channel;
+ /* this is the one that's exposed to the attributes */
+ unsigned char priv[0];
};
+void *stp_policy_node_priv(struct stp_policy_node *pn)
+{
+ if (!pn)
+ return NULL;
+
+ return pn->priv;
+}
+
static struct configfs_subsystem stp_policy_subsys;
void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
@@ -68,6 +78,14 @@ to_stp_policy_node(struct config_item *item)
NULL;
}
+void *to_pdrv_policy_node(struct config_item *item)
+{
+ struct stp_policy_node *node = to_stp_policy_node(item);
+
+ return stp_policy_node_priv(node);
+}
+EXPORT_SYMBOL_GPL(to_pdrv_policy_node);
+
static ssize_t
stp_policy_node_masters_show(struct config_item *item, char *page)
{
@@ -163,7 +181,9 @@ unlock:
static void stp_policy_node_release(struct config_item *item)
{
- kfree(to_stp_policy_node(item));
+ struct stp_policy_node *node = to_stp_policy_node(item);
+
+ kfree(node);
}
static struct configfs_item_operations stp_policy_node_item_ops = {
@@ -182,10 +202,34 @@ static struct configfs_attribute *stp_policy_node_attrs[] = {
static const struct config_item_type stp_policy_type;
static const struct config_item_type stp_policy_node_type;
+const struct config_item_type *
+get_policy_node_type(struct configfs_attribute **attrs)
+{
+ struct config_item_type *type;
+ struct configfs_attribute **merged;
+
+ type = kmemdup(&stp_policy_node_type, sizeof(stp_policy_node_type),
+ GFP_KERNEL);
+ if (!type)
+ return NULL;
+
+ merged = memcat_p(stp_policy_node_attrs, attrs);
+ if (!merged) {
+ kfree(type);
+ return NULL;
+ }
+
+ type->ct_attrs = merged;
+
+ return type;
+}
+
static struct config_group *
stp_policy_node_make(struct config_group *group, const char *name)
{
+ const struct config_item_type *type = &stp_policy_node_type;
struct stp_policy_node *policy_node, *parent_node;
+ const struct stm_protocol_driver *pdrv;
struct stp_policy *policy;
if (group->cg_item.ci_type == &stp_policy_type) {
@@ -199,12 +243,20 @@ stp_policy_node_make(struct config_group *group, const char *name)
if (!policy->stm)
return ERR_PTR(-ENODEV);
- policy_node = kzalloc(sizeof(struct stp_policy_node), GFP_KERNEL);
+ pdrv = policy->stm->pdrv;
+ policy_node =
+ kzalloc(offsetof(struct stp_policy_node, priv[pdrv->priv_sz]),
+ GFP_KERNEL);
if (!policy_node)
return ERR_PTR(-ENOMEM);
- config_group_init_type_name(&policy_node->group, name,
- &stp_policy_node_type);
+ if (pdrv->policy_node_init)
+ pdrv->policy_node_init((void *)policy_node->priv);
+
+ if (policy->stm->pdrv_node_type)
+ type = policy->stm->pdrv_node_type;
+
+ config_group_init_type_name(&policy_node->group, name, type);
policy_node->policy = policy;
@@ -254,8 +306,25 @@ static ssize_t stp_policy_device_show(struct config_item *item,
CONFIGFS_ATTR_RO(stp_policy_, device);
+static ssize_t stp_policy_protocol_show(struct config_item *item,
+ char *page)
+{
+ struct stp_policy *policy = to_stp_policy(item);
+ ssize_t count;
+
+ count = sprintf(page, "%s\n",
+ (policy && policy->stm) ?
+ policy->stm->pdrv->name :
+ "<none>");
+
+ return count;
+}
+
+CONFIGFS_ATTR_RO(stp_policy_, protocol);
+
static struct configfs_attribute *stp_policy_attrs[] = {
&stp_policy_attr_device,
+ &stp_policy_attr_protocol,
NULL,
};
@@ -276,6 +345,7 @@ void stp_policy_unbind(struct stp_policy *policy)
stm->policy = NULL;
policy->stm = NULL;
+ stm_put_protocol(stm->pdrv);
stm_put_device(stm);
}
@@ -311,11 +381,14 @@ static const struct config_item_type stp_policy_type = {
};
static struct config_group *
-stp_policies_make(struct config_group *group, const char *name)
+stp_policy_make(struct config_group *group, const char *name)
{
+ const struct config_item_type *pdrv_node_type;
+ const struct stm_protocol_driver *pdrv;
+ char *devname, *proto, *p;
struct config_group *ret;
struct stm_device *stm;
- char *devname, *p;
+ int err;
devname = kasprintf(GFP_KERNEL, "%s", name);
if (!devname)
@@ -326,6 +399,7 @@ stp_policies_make(struct config_group *group, const char *name)
* <device_name> is the name of an existing stm device; may
* contain dots;
* <policy_name> is an arbitrary string; may not contain dots
+ * <device_name>:<protocol_name>.<policy_name>
*/
p = strrchr(devname, '.');
if (!p) {
@@ -335,11 +409,28 @@ stp_policies_make(struct config_group *group, const char *name)
*p = '\0';
+ /*
+ * look for ":<protocol_name>":
+ * + no protocol suffix: fall back to whatever is available;
+ * + unknown protocol: fail the whole thing
+ */
+ proto = strrchr(devname, ':');
+ if (proto)
+ *proto++ = '\0';
+
stm = stm_find_device(devname);
+ if (!stm) {
+ kfree(devname);
+ return ERR_PTR(-ENODEV);
+ }
+
+ err = stm_lookup_protocol(proto, &pdrv, &pdrv_node_type);
kfree(devname);
- if (!stm)
+ if (err) {
+ stm_put_device(stm);
return ERR_PTR(-ENODEV);
+ }
mutex_lock(&stm->policy_mutex);
if (stm->policy) {
@@ -349,31 +440,37 @@ stp_policies_make(struct config_group *group, const char *name)
stm->policy = kzalloc(sizeof(*stm->policy), GFP_KERNEL);
if (!stm->policy) {
- ret = ERR_PTR(-ENOMEM);
- goto unlock_policy;
+ mutex_unlock(&stm->policy_mutex);
+ stm_put_protocol(pdrv);
+ stm_put_device(stm);
+ return ERR_PTR(-ENOMEM);
}
config_group_init_type_name(&stm->policy->group, name,
&stp_policy_type);
- stm->policy->stm = stm;
+ stm->pdrv = pdrv;
+ stm->pdrv_node_type = pdrv_node_type;
+ stm->policy->stm = stm;
ret = &stm->policy->group;
unlock_policy:
mutex_unlock(&stm->policy_mutex);
- if (IS_ERR(ret))
+ if (IS_ERR(ret)) {
+ stm_put_protocol(stm->pdrv);
stm_put_device(stm);
+ }
return ret;
}
-static struct configfs_group_operations stp_policies_group_ops = {
- .make_group = stp_policies_make,
+static struct configfs_group_operations stp_policy_root_group_ops = {
+ .make_group = stp_policy_make,
};
-static const struct config_item_type stp_policies_type = {
- .ct_group_ops = &stp_policies_group_ops,
+static const struct config_item_type stp_policy_root_type = {
+ .ct_group_ops = &stp_policy_root_group_ops,
.ct_owner = THIS_MODULE,
};
@@ -381,7 +478,7 @@ static struct configfs_subsystem stp_policy_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "stp-policy",
- .ci_type = &stp_policies_type,
+ .ci_type = &stp_policy_root_type,
},
},
};
@@ -392,7 +489,7 @@ static struct configfs_subsystem stp_policy_subsys = {
static struct stp_policy_node *
__stp_policy_node_lookup(struct stp_policy *policy, char *s)
{
- struct stp_policy_node *policy_node, *ret;
+ struct stp_policy_node *policy_node, *ret = NULL;
struct list_head *head = &policy->group.cg_children;
struct config_item *item;
char *start, *end = s;
@@ -400,10 +497,6 @@ __stp_policy_node_lookup(struct stp_policy *policy, char *s)
if (list_empty(head))
return NULL;
- /* return the first entry if everything else fails */
- item = list_entry(head->next, struct config_item, ci_entry);
- ret = to_stp_policy_node(item);
-
next:
for (;;) {
start = strsep(&end, "/");
@@ -449,25 +542,25 @@ stp_policy_node_lookup(struct stm_device *stm, char *s)
if (policy_node)
config_item_get(&policy_node->group.cg_item);
- mutex_unlock(&stp_policy_subsys.su_mutex);
+ else
+ mutex_unlock(&stp_policy_subsys.su_mutex);
return policy_node;
}
void stp_policy_node_put(struct stp_policy_node *policy_node)
{
+ lockdep_assert_held(&stp_policy_subsys.su_mutex);
+
+ mutex_unlock(&stp_policy_subsys.su_mutex);
config_item_put(&policy_node->group.cg_item);
}
int __init stp_configfs_init(void)
{
- int err;
-
config_group_init(&stp_policy_subsys.su_group);
mutex_init(&stp_policy_subsys.su_mutex);
- err = configfs_register_subsystem(&stp_policy_subsys);
-
- return err;
+ return configfs_register_subsystem(&stp_policy_subsys);
}
void __exit stp_configfs_exit(void)
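
With the new root group operations, the protocol is chosen as part of the policy directory name; a minimal user-space sketch (the device, protocol and policy names here are assumptions):

	#include <sys/stat.h>
	#include <sys/types.h>

	int main(void)
	{
		/* <device_name>:<protocol_name>.<policy_name> */
		return mkdir("/sys/kernel/config/stp-policy/"
			     "dummy_stm.0:p_sys-t.my-policy", 0755);
	}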
diff --git a/drivers/hwtracing/stm/stm.h b/drivers/hwtracing/stm/stm.h
index 923571adc6f4..3569439d53bb 100644
--- a/drivers/hwtracing/stm/stm.h
+++ b/drivers/hwtracing/stm/stm.h
@@ -10,20 +10,17 @@
#ifndef _STM_STM_H_
#define _STM_STM_H_
+#include <linux/configfs.h>
+
struct stp_policy;
struct stp_policy_node;
+struct stm_protocol_driver;
-struct stp_policy_node *
-stp_policy_node_lookup(struct stm_device *stm, char *s);
-void stp_policy_node_put(struct stp_policy_node *policy_node);
-void stp_policy_unbind(struct stp_policy *policy);
-
-void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
- unsigned int *mstart, unsigned int *mend,
- unsigned int *cstart, unsigned int *cend);
int stp_configfs_init(void);
void stp_configfs_exit(void);
+void *stp_policy_node_priv(struct stp_policy_node *pn);
+
struct stp_master {
unsigned int nr_free;
unsigned long chan_map[0];
@@ -40,6 +37,9 @@ struct stm_device {
struct mutex link_mutex;
spinlock_t link_lock;
struct list_head link_list;
+ /* framing protocol in use */
+ const struct stm_protocol_driver *pdrv;
+ const struct config_item_type *pdrv_node_type;
/* master allocation */
spinlock_t mc_lock;
struct stp_master *masters[0];
@@ -48,16 +48,28 @@ struct stm_device {
#define to_stm_device(_d) \
container_of((_d), struct stm_device, dev)
+struct stp_policy_node *
+stp_policy_node_lookup(struct stm_device *stm, char *s);
+void stp_policy_node_put(struct stp_policy_node *policy_node);
+void stp_policy_unbind(struct stp_policy *policy);
+
+void stp_policy_node_get_ranges(struct stp_policy_node *policy_node,
+ unsigned int *mstart, unsigned int *mend,
+ unsigned int *cstart, unsigned int *cend);
+
+const struct config_item_type *
+get_policy_node_type(struct configfs_attribute **attrs);
+
struct stm_output {
spinlock_t lock;
unsigned int master;
unsigned int channel;
unsigned int nr_chans;
+ void *pdrv_private;
};
struct stm_file {
struct stm_device *stm;
- struct stp_policy_node *policy_node;
struct stm_output output;
};
@@ -71,11 +83,35 @@ struct stm_source_device {
struct stm_device __rcu *link;
struct list_head link_entry;
/* one output per stm_source device */
- struct stp_policy_node *policy_node;
struct stm_output output;
};
#define to_stm_source_device(_d) \
container_of((_d), struct stm_source_device, dev)
+void *to_pdrv_policy_node(struct config_item *item);
+
+struct stm_protocol_driver {
+ struct module *owner;
+ const char *name;
+ ssize_t (*write)(struct stm_data *data,
+ struct stm_output *output, unsigned int chan,
+ const char *buf, size_t count);
+ void (*policy_node_init)(void *arg);
+ int (*output_open)(void *priv, struct stm_output *output);
+ void (*output_close)(struct stm_output *output);
+ ssize_t priv_sz;
+ struct configfs_attribute **policy_attr;
+};
+
+int stm_register_protocol(const struct stm_protocol_driver *pdrv);
+void stm_unregister_protocol(const struct stm_protocol_driver *pdrv);
+int stm_lookup_protocol(const char *name,
+ const struct stm_protocol_driver **pdrv,
+ const struct config_item_type **type);
+void stm_put_protocol(const struct stm_protocol_driver *pdrv);
+ssize_t stm_data_write(struct stm_data *data, unsigned int m,
+ unsigned int c, bool ts_first, const void *buf,
+ size_t count);
+
#endif /* _STM_STM_H_ */
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 80df5a377d30..cafb1dcadc48 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -693,16 +693,12 @@ static int __maybe_unused tiadc_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- struct ti_tscadc_dev *tscadc_dev;
unsigned int idle;
- tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (!device_may_wakeup(tscadc_dev->dev)) {
- idle = tiadc_readl(adc_dev, REG_CTRL);
- idle &= ~(CNTRLREG_TSCSSENB);
- tiadc_writel(adc_dev, REG_CTRL, (idle |
- CNTRLREG_POWERDOWN));
- }
+ idle = tiadc_readl(adc_dev, REG_CTRL);
+ idle &= ~(CNTRLREG_TSCSSENB);
+ tiadc_writel(adc_dev, REG_CTRL, (idle |
+ CNTRLREG_POWERDOWN));
return 0;
}
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index abb6660c099c..0a3ec7c726ec 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -26,6 +26,7 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
select ANON_INODES
+ depends on MMU
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 46b855a42884..0dce94e3c495 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -45,6 +45,7 @@
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>
+#include <rdma/ib_sa.h>
#include <rdma/ib.h>
#include <rdma/rdma_netlink.h>
#include <net/netlink.h>
@@ -61,6 +62,7 @@ struct addr_req {
struct rdma_dev_addr *addr, void *context);
unsigned long timeout;
struct delayed_work work;
+ bool resolve_by_gid_attr; /* Consider gid attr in resolve phase */
int status;
u32 seq;
};
@@ -219,60 +221,75 @@ int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr)
}
EXPORT_SYMBOL(rdma_addr_size_kss);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr)
+/**
+ * rdma_copy_src_l2_addr - Copy netdevice source addresses
+ * @dev_addr: Destination address pointer where to copy the addresses
+ * @dev: Netdevice whose source addresses to copy
+ *
+ * rdma_copy_src_l2_addr() copies source addresses from the specified netdevice.
+ * This includes unicast address, broadcast address, device type and
+ * interface index.
+ */
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
- if (dst_dev_addr)
- memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
}
-EXPORT_SYMBOL(rdma_copy_addr);
+EXPORT_SYMBOL(rdma_copy_src_l2_addr);
-int rdma_translate_ip(const struct sockaddr *addr,
- struct rdma_dev_addr *dev_addr)
+static struct net_device *
+rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
{
- struct net_device *dev;
+ struct net_device *dev = NULL;
+ int ret = -EADDRNOTAVAIL;
- if (dev_addr->bound_dev_if) {
- dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
- if (!dev)
- return -ENODEV;
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
- return 0;
- }
-
- switch (addr->sa_family) {
+ switch (src_in->sa_family) {
case AF_INET:
- dev = ip_dev_find(dev_addr->net,
- ((const struct sockaddr_in *)addr)->sin_addr.s_addr);
-
- if (!dev)
- return -EADDRNOTAVAIL;
-
- rdma_copy_addr(dev_addr, dev, NULL);
- dev_put(dev);
+ dev = __ip_dev_find(net,
+ ((const struct sockaddr_in *)src_in)->sin_addr.s_addr,
+ false);
+ if (dev)
+ ret = 0;
break;
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- rcu_read_lock();
- for_each_netdev_rcu(dev_addr->net, dev) {
- if (ipv6_chk_addr(dev_addr->net,
- &((const struct sockaddr_in6 *)addr)->sin6_addr,
+ for_each_netdev_rcu(net, dev) {
+ if (ipv6_chk_addr(net,
+ &((const struct sockaddr_in6 *)src_in)->sin6_addr,
dev, 1)) {
- rdma_copy_addr(dev_addr, dev, NULL);
+ ret = 0;
break;
}
}
- rcu_read_unlock();
break;
#endif
}
- return 0;
+ return ret ? ERR_PTR(ret) : dev;
+}
+
+int rdma_translate_ip(const struct sockaddr *addr,
+ struct rdma_dev_addr *dev_addr)
+{
+ struct net_device *dev;
+
+ if (dev_addr->bound_dev_if) {
+ dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
+ if (!dev)
+ return -ENODEV;
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ dev_put(dev);
+ return 0;
+ }
+
+ rcu_read_lock();
+ dev = rdma_find_ndev_for_src_ip_rcu(dev_addr->net, addr);
+ if (!IS_ERR(dev))
+ rdma_copy_src_l2_addr(dev_addr, dev);
+ rcu_read_unlock();
+ return PTR_ERR_OR_ZERO(dev);
}
EXPORT_SYMBOL(rdma_translate_ip);
@@ -295,15 +312,12 @@ static void queue_req(struct addr_req *req)
spin_unlock_bh(&lock);
}
-static int ib_nl_fetch_ha(const struct dst_entry *dst,
- struct rdma_dev_addr *dev_addr,
+static int ib_nl_fetch_ha(struct rdma_dev_addr *dev_addr,
const void *daddr, u32 seq, u16 family)
{
- if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
+ if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
return -EADDRNOTAVAIL;
- /* We fill in what we can, the response will fill the rest */
- rdma_copy_addr(dev_addr, dst->dev, NULL);
return ib_nl_ip_send_msg(dev_addr, daddr, seq, family);
}
@@ -322,7 +336,7 @@ static int dst_fetch_ha(const struct dst_entry *dst,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
- rdma_copy_addr(dev_addr, dst->dev, n->ha);
+ memcpy(dev_addr->dst_dev_addr, n->ha, MAX_ADDR_LEN);
}
neigh_release(n);
@@ -356,18 +370,22 @@ static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
(const void *)&dst_in6->sin6_addr;
sa_family_t family = dst_in->sa_family;
- /* Gateway + ARPHRD_INFINIBAND -> IB router */
- if (has_gateway(dst, family) && dst->dev->type == ARPHRD_INFINIBAND)
- return ib_nl_fetch_ha(dst, dev_addr, daddr, seq, family);
+ /* If we have a gateway in IB mode then it must be an IB network */
+ if (has_gateway(dst, family) && dev_addr->network == RDMA_NETWORK_IB)
+ return ib_nl_fetch_ha(dev_addr, daddr, seq, family);
else
return dst_fetch_ha(dst, dev_addr, daddr);
}
-static int addr4_resolve(struct sockaddr_in *src_in,
- const struct sockaddr_in *dst_in,
+static int addr4_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct rtable **prt)
{
+ struct sockaddr_in *src_in = (struct sockaddr_in *)src_sock;
+ const struct sockaddr_in *dst_in =
+ (const struct sockaddr_in *)dst_sock;
+
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
@@ -383,16 +401,8 @@ static int addr4_resolve(struct sockaddr_in *src_in,
if (ret)
return ret;
- src_in->sin_family = AF_INET;
src_in->sin_addr.s_addr = fl4.saddr;
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt_uses_gateway && rt->dst.dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV4;
-
addr->hoplimit = ip4_dst_hoplimit(&rt->dst);
*prt = rt;
@@ -400,14 +410,16 @@ static int addr4_resolve(struct sockaddr_in *src_in,
}
#if IS_ENABLED(CONFIG_IPV6)
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
+ struct sockaddr_in6 *src_in = (struct sockaddr_in6 *)src_sock;
+ const struct sockaddr_in6 *dst_in =
+ (const struct sockaddr_in6 *)dst_sock;
struct flowi6 fl6;
struct dst_entry *dst;
- struct rt6_info *rt;
int ret;
memset(&fl6, 0, sizeof fl6);
@@ -419,19 +431,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
if (ret < 0)
return ret;
- rt = (struct rt6_info *)dst;
- if (ipv6_addr_any(&src_in->sin6_addr)) {
- src_in->sin6_family = AF_INET6;
+ if (ipv6_addr_any(&src_in->sin6_addr))
src_in->sin6_addr = fl6.saddr;
- }
-
- /* If there's a gateway and type of device not ARPHRD_INFINIBAND, we're
- * definitely in RoCE v2 (as RoCE v1 isn't routable) set the network
- * type accordingly.
- */
- if (rt->rt6i_flags & RTF_GATEWAY &&
- ip6_dst_idev(dst)->dev->type != ARPHRD_INFINIBAND)
- addr->network = RDMA_NETWORK_IPV6;
addr->hoplimit = ip6_dst_hoplimit(dst);
@@ -439,8 +440,8 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return 0;
}
#else
-static int addr6_resolve(struct sockaddr_in6 *src_in,
- const struct sockaddr_in6 *dst_in,
+static int addr6_resolve(struct sockaddr *src_sock,
+ const struct sockaddr *dst_sock,
struct rdma_dev_addr *addr,
struct dst_entry **pdst)
{
@@ -451,36 +452,110 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
static int addr_resolve_neigh(const struct dst_entry *dst,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
+ unsigned int ndev_flags,
u32 seq)
{
- if (dst->dev->flags & IFF_LOOPBACK) {
- int ret;
+ int ret = 0;
- ret = rdma_translate_ip(dst_in, addr);
- if (!ret)
- memcpy(addr->dst_dev_addr, addr->src_dev_addr,
- MAX_ADDR_LEN);
+ if (ndev_flags & IFF_LOOPBACK) {
+ memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
+ } else {
+ if (!(ndev_flags & IFF_NOARP)) {
+ /* If the device doesn't do ARP internally */
+ ret = fetch_ha(dst, addr, dst_in, seq);
+ }
+ }
+ return ret;
+}
- return ret;
+static int copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst,
+ const struct net_device *ndev)
+{
+ int ret = 0;
+
+ if (dst->dev->flags & IFF_LOOPBACK)
+ ret = rdma_translate_ip(dst_in, dev_addr);
+ else
+ rdma_copy_src_l2_addr(dev_addr, dst->dev);
+
+ /*
+ * If there's a gateway and the device type is not ARPHRD_INFINIBAND,
+ * we're definitely in RoCE v2 (as RoCE v1 isn't routable), so set
+ * the network type accordingly.
+ */
+ if (has_gateway(dst, dst_in->sa_family) &&
+ ndev->type != ARPHRD_INFINIBAND)
+ dev_addr->network = dst_in->sa_family == AF_INET ?
+ RDMA_NETWORK_IPV4 :
+ RDMA_NETWORK_IPV6;
+ else
+ dev_addr->network = RDMA_NETWORK_IB;
+
+ return ret;
+}
+
+static int rdma_set_src_addr_rcu(struct rdma_dev_addr *dev_addr,
+ unsigned int *ndev_flags,
+ const struct sockaddr *dst_in,
+ const struct dst_entry *dst)
+{
+ struct net_device *ndev = READ_ONCE(dst->dev);
+
+ *ndev_flags = ndev->flags;
+ /* A physical device must be the RDMA device to use */
+ if (ndev->flags & IFF_LOOPBACK) {
+ /*
+ * RDMA (IB/RoCE, iWARP) doesn't run on the lo interface or a
+ * loopback IP address. So if the route resolves to the loopback
+ * interface, translate that to a real ndev based on the
+ * non-loopback IP address.
+ */
+ ndev = rdma_find_ndev_for_src_ip_rcu(dev_net(ndev), dst_in);
+ if (IS_ERR(ndev))
+ return -ENODEV;
}
- /* If the device doesn't do ARP internally */
- if (!(dst->dev->flags & IFF_NOARP))
- return fetch_ha(dst, addr, dst_in, seq);
+ return copy_src_l2_addr(dev_addr, dst_in, dst, ndev);
+}
+
+static int set_addr_netns_by_gid_rcu(struct rdma_dev_addr *addr)
+{
+ struct net_device *ndev;
- rdma_copy_addr(addr, dst->dev, NULL);
+ ndev = rdma_read_gid_attr_ndev_rcu(addr->sgid_attr);
+ if (IS_ERR(ndev))
+ return PTR_ERR(ndev);
+ /*
+ * Since we are holding the RCU read lock, reading net and
+ * ifindex is safe without any additional reference, because
+ * change_net_namespace() in net/core/dev.c does an RCU sync
+ * after it changes the state to IFF_DOWN and before updating
+ * the netdev fields {net, ifindex}.
+ */
+ addr->net = dev_net(ndev);
+ addr->bound_dev_if = ndev->ifindex;
return 0;
}
+static void rdma_addr_set_net_defaults(struct rdma_dev_addr *addr)
+{
+ addr->net = &init_net;
+ addr->bound_dev_if = 0;
+}
+
static int addr_resolve(struct sockaddr *src_in,
const struct sockaddr *dst_in,
struct rdma_dev_addr *addr,
bool resolve_neigh,
+ bool resolve_by_gid_attr,
u32 seq)
{
- struct net_device *ndev;
- struct dst_entry *dst;
+ struct dst_entry *dst = NULL;
+ unsigned int ndev_flags = 0;
+ struct rtable *rt = NULL;
int ret;
if (!addr->net) {
@@ -488,58 +563,55 @@ static int addr_resolve(struct sockaddr *src_in,
return -EINVAL;
}
- if (src_in->sa_family == AF_INET) {
- struct rtable *rt = NULL;
- const struct sockaddr_in *dst_in4 =
- (const struct sockaddr_in *)dst_in;
-
- ret = addr4_resolve((struct sockaddr_in *)src_in,
- dst_in4, addr, &rt);
- if (ret)
- return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = rt->dst.dev;
- dev_hold(ndev);
+ rcu_read_lock();
+ if (resolve_by_gid_attr) {
+ if (!addr->sgid_attr) {
+ rcu_read_unlock();
+ pr_warn_ratelimited("%s: missing gid_attr\n", __func__);
+ return -EINVAL;
}
-
- ip_rt_put(rt);
- } else {
- const struct sockaddr_in6 *dst_in6 =
- (const struct sockaddr_in6 *)dst_in;
-
- ret = addr6_resolve((struct sockaddr_in6 *)src_in,
- dst_in6, addr,
- &dst);
- if (ret)
+ /*
+ * If the request is for a specific gid attribute of the
+ * rdma_dev_addr, derive net from the netdevice of the
+ * GID attribute.
+ */
+ ret = set_addr_netns_by_gid_rcu(addr);
+ if (ret) {
+ rcu_read_unlock();
return ret;
-
- if (resolve_neigh)
- ret = addr_resolve_neigh(dst, dst_in, addr, seq);
-
- if (addr->bound_dev_if) {
- ndev = dev_get_by_index(addr->net, addr->bound_dev_if);
- } else {
- ndev = dst->dev;
- dev_hold(ndev);
}
-
- dst_release(dst);
}
-
- if (ndev) {
- if (ndev->flags & IFF_LOOPBACK)
- ret = rdma_translate_ip(dst_in, addr);
- else
- addr->bound_dev_if = ndev->ifindex;
- dev_put(ndev);
+ if (src_in->sa_family == AF_INET) {
+ ret = addr4_resolve(src_in, dst_in, addr, &rt);
+ dst = &rt->dst;
+ } else {
+ ret = addr6_resolve(src_in, dst_in, addr, &dst);
}
+ if (ret) {
+ rcu_read_unlock();
+ goto done;
+ }
+ ret = rdma_set_src_addr_rcu(addr, &ndev_flags, dst_in, dst);
+ rcu_read_unlock();
+
+ /*
+ * Resolve neighbor destination address if requested and
+ * only if src addr translation didn't fail.
+ */
+ if (!ret && resolve_neigh)
+ ret = addr_resolve_neigh(dst, dst_in, addr, ndev_flags, seq);
+ if (src_in->sa_family == AF_INET)
+ ip_rt_put(rt);
+ else
+ dst_release(dst);
+done:
+ /*
+ * Clear the addr net to restore its original state, but only if
+ * it was derived from the GID attribute in this context.
+ */
+ if (resolve_by_gid_attr)
+ rdma_addr_set_net_defaults(addr);
return ret;
}
@@ -554,7 +626,8 @@ static void process_one_req(struct work_struct *_work)
src_in = (struct sockaddr *)&req->src_addr;
dst_in = (struct sockaddr *)&req->dst_addr;
req->status = addr_resolve(src_in, dst_in, req->addr,
- true, req->seq);
+ true, req->resolve_by_gid_attr,
+ req->seq);
if (req->status && time_after_eq(jiffies, req->timeout)) {
req->status = -ETIMEDOUT;
} else if (req->status == -ENODATA) {
@@ -586,10 +659,10 @@ static void process_one_req(struct work_struct *_work)
}
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context)
+ bool resolve_by_gid_attr, void *context)
{
struct sockaddr *src_in, *dst_in;
struct addr_req *req;
@@ -617,10 +690,12 @@ int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
req->addr = addr;
req->callback = callback;
req->context = context;
+ req->resolve_by_gid_attr = resolve_by_gid_attr;
INIT_DELAYED_WORK(&req->work, process_one_req);
req->seq = (u32)atomic_inc_return(&ib_nl_addr_request_seq);
- req->status = addr_resolve(src_in, dst_in, addr, true, req->seq);
+ req->status = addr_resolve(src_in, dst_in, addr, true,
+ req->resolve_by_gid_attr, req->seq);
switch (req->status) {
case 0:
req->timeout = jiffies;
@@ -641,25 +716,53 @@ err:
}
EXPORT_SYMBOL(rdma_resolve_ip);
-int rdma_resolve_ip_route(struct sockaddr *src_addr,
- const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr)
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr)
{
- struct sockaddr_storage ssrc_addr = {};
- struct sockaddr *src_in = (struct sockaddr *)&ssrc_addr;
+ union {
+ struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid, dgid;
+ struct rdma_dev_addr dev_addr = {};
+ int ret;
- if (src_addr) {
- if (src_addr->sa_family != dst_addr->sa_family)
- return -EINVAL;
+ if (rec->roce.route_resolved)
+ return 0;
- memcpy(src_in, src_addr, rdma_addr_size(src_addr));
- } else {
- src_in->sa_family = dst_addr->sa_family;
- }
+ rdma_gid2ip(&sgid._sockaddr, &rec->sgid);
+ rdma_gid2ip(&dgid._sockaddr, &rec->dgid);
+
+ if (sgid._sockaddr.sa_family != dgid._sockaddr.sa_family)
+ return -EINVAL;
+
+ if (!attr || !attr->ndev)
+ return -EINVAL;
+
+ dev_addr.net = &init_net;
+ dev_addr.sgid_attr = attr;
+
+ ret = addr_resolve(&sgid._sockaddr, &dgid._sockaddr,
+ &dev_addr, false, true, 0);
+ if (ret)
+ return ret;
+
+ if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
+ dev_addr.network == RDMA_NETWORK_IPV6) &&
+ rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
+ return -EINVAL;
- return addr_resolve(src_in, dst_addr, addr, false, 0);
+ rec->roce.route_resolved = true;
+ return 0;
}
+/**
+ * rdma_addr_cancel - Cancel a pending resolve IP request
+ * @addr: Pointer to the address structure given previously
+ * during rdma_resolve_ip().
+ * rdma_addr_cancel() is a synchronous function that cancels any
+ * pending request, if there is one.
+ */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
struct addr_req *req, *temp_req;
@@ -687,11 +790,6 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 * guarantees no work is running and none will be started.
*/
cancel_delayed_work_sync(&found->work);
-
- if (found->callback)
- found->callback(-ECANCELED, (struct sockaddr *)&found->src_addr,
- found->addr, found->context);
-
kfree(found);
}
EXPORT_SYMBOL(rdma_addr_cancel);
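
A sketch of how rdma_addr_cancel() pairs with the updated rdma_resolve_ip() signature (the callback and context here are hypothetical placeholders):

	/* Illustrative pairing of resolve and cancel */
	static void example_resolve_then_cancel(struct sockaddr *src,
						const struct sockaddr *dst,
						struct rdma_dev_addr *dev_addr,
						void (*cb)(int, struct sockaddr *,
							   struct rdma_dev_addr *,
							   void *),
						void *ctx)
	{
		if (!rdma_resolve_ip(src, dst, dev_addr, 1000, cb, false, ctx)) {
			/*
			 * On a tear-down path, rdma_addr_cancel() synchronously
			 * removes the pending request; note that after this
			 * patch the callback is no longer invoked with
			 * -ECANCELED on cancellation.
			 */
			rdma_addr_cancel(dev_addr);
		}
	}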
@@ -710,7 +808,7 @@ static void resolve_cb(int status, struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit)
{
struct rdma_dev_addr dev_addr;
@@ -726,12 +824,12 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
- dev_addr.bound_dev_if = ndev->ifindex;
dev_addr.net = &init_net;
+ dev_addr.sgid_attr = sgid_attr;
init_completion(&ctx.comp);
ret = rdma_resolve_ip(&sgid_addr._sockaddr, &dgid_addr._sockaddr,
- &dev_addr, 1000, resolve_cb, &ctx);
+ &dev_addr, 1000, resolve_cb, true, &ctx);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 3208ad6ad540..5b2fce4a7091 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -212,9 +212,8 @@ static void free_gid_entry_locked(struct ib_gid_table_entry *entry)
u8 port_num = entry->attr.port_num;
struct ib_gid_table *table = rdma_gid_table(device, port_num);
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- device->name, port_num, entry->attr.index,
- entry->attr.gid.raw);
+ dev_dbg(&device->dev, "%s port=%d index=%d gid %pI6\n", __func__,
+ port_num, entry->attr.index, entry->attr.gid.raw);
if (rdma_cap_roce_gid_table(device, port_num) &&
entry->state != GID_TABLE_ENTRY_INVALID)
@@ -289,9 +288,9 @@ static void store_gid_entry(struct ib_gid_table *table,
{
entry->state = GID_TABLE_ENTRY_VALID;
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- entry->attr.device->name, entry->attr.port_num,
- entry->attr.index, entry->attr.gid.raw);
+ dev_dbg(&entry->attr.device->dev, "%s port=%d index=%d gid %pI6\n",
+ __func__, entry->attr.port_num, entry->attr.index,
+ entry->attr.gid.raw);
lockdep_assert_held(&table->lock);
write_lock_irq(&table->rwlock);
@@ -320,17 +319,16 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
int ret;
if (!attr->ndev) {
- pr_err("%s NULL netdev device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev, "%s NULL netdev port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return -EINVAL;
}
if (rdma_cap_roce_gid_table(attr->device, attr->port_num)) {
ret = attr->device->add_gid(attr, &entry->context);
if (ret) {
- pr_err("%s GID add failed device=%s port=%d index=%d\n",
- __func__, attr->device->name, attr->port_num,
- attr->index);
+ dev_err(&attr->device->dev,
+ "%s GID add failed port=%d index=%d\n",
+ __func__, attr->port_num, attr->index);
return ret;
}
}
@@ -353,9 +351,8 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
lockdep_assert_held(&table->lock);
- pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
- ib_dev->name, port, ix,
- table->data_vec[ix]->attr.gid.raw);
+ dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
+ ix, table->data_vec[ix]->attr.gid.raw);
write_lock_irq(&table->rwlock);
entry = table->data_vec[ix];
@@ -782,9 +779,9 @@ static void release_gid_table(struct ib_device *device, u8 port,
if (is_gid_entry_free(table->data_vec[i]))
continue;
if (kref_read(&table->data_vec[i]->kref) > 1) {
- pr_err("GID entry ref leak for %s (index %d) ref=%d\n",
- device->name, i,
- kref_read(&table->data_vec[i]->kref));
+ dev_err(&device->dev,
+ "GID entry ref leak for index %d ref=%d\n", i,
+ kref_read(&table->data_vec[i]->kref));
leak = true;
}
}
@@ -1252,6 +1249,39 @@ void rdma_hold_gid_attr(const struct ib_gid_attr *attr)
}
EXPORT_SYMBOL(rdma_hold_gid_attr);
+/**
+ * rdma_read_gid_attr_ndev_rcu - Read the netdevice of a GID attribute,
+ * which must be in UP state.
+ *
+ * @attr: Pointer to the GID attribute
+ *
+ * Returns a pointer to the netdevice if a netdevice is attached to the
+ * GID and that netdevice is in UP state. The caller must hold the RCU
+ * read lock, as this API reads netdev flags that can change while the
+ * netdevice migrates to a different net namespace. Returns an ERR_PTR
+ * with an error code otherwise.
+ */
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr)
+{
+ struct ib_gid_table_entry *entry =
+ container_of(attr, struct ib_gid_table_entry, attr);
+ struct ib_device *device = entry->attr.device;
+ struct net_device *ndev = ERR_PTR(-ENODEV);
+ u8 port_num = entry->attr.port_num;
+ struct ib_gid_table *table;
+ unsigned long flags;
+ bool valid;
+
+ table = rdma_gid_table(device, port_num);
+
+ read_lock_irqsave(&table->rwlock, flags);
+ valid = is_gid_entry_valid(table->data_vec[attr->index]);
+ if (valid && attr->ndev && (READ_ONCE(attr->ndev->flags) & IFF_UP))
+ ndev = attr->ndev;
+ read_unlock_irqrestore(&table->rwlock, flags);
+ return ndev;
+}
+
static int config_non_roce_gid_cache(struct ib_device *device,
u8 port, int gid_tbl_len)
{
@@ -1270,8 +1300,9 @@ static int config_non_roce_gid_cache(struct ib_device *device,
continue;
ret = device->query_gid(device, port, i, &gid_attr.gid);
if (ret) {
- pr_warn("query_gid failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "query_gid failed (%d) for index %d\n", ret,
+ i);
goto err;
}
gid_attr.index = i;
@@ -1300,8 +1331,7 @@ static void ib_cache_update(struct ib_device *device,
ret = ib_query_port(device, port, tprops);
if (ret) {
- pr_warn("ib_query_port failed (%d) for %s\n",
- ret, device->name);
+ dev_warn(&device->dev, "ib_query_port failed (%d)\n", ret);
goto err;
}
@@ -1323,8 +1353,9 @@ static void ib_cache_update(struct ib_device *device,
for (i = 0; i < pkey_cache->table_len; ++i) {
ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
if (ret) {
- pr_warn("ib_query_pkey failed (%d) for %s (index %d)\n",
- ret, device->name, i);
+ dev_warn(&device->dev,
+ "ib_query_pkey failed (%d) for index %d\n",
+ ret, i);
goto err;
}
}
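A minimal caller sketch for the new accessor added above (the function
name and the MTU read are illustrative, not part of the patch).
rdma_read_gid_attr_ndev_rcu() must run under rcu_read_lock(), since it
reads netdev flags that can change while the netdevice moves between
net namespaces:

static int example_gid_ndev_mtu(const struct ib_gid_attr *attr)
{
	struct net_device *ndev;
	int mtu = -ENODEV;

	rcu_read_lock();
	ndev = rdma_read_gid_attr_ndev_rcu(attr);
	if (!IS_ERR(ndev))
		mtu = READ_ONCE(ndev->mtu);
	rcu_read_unlock();
	return mtu;
}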
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 6e39c27dca8e..edb2cb758be7 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3292,8 +3292,11 @@ static int cm_lap_handler(struct cm_work *work)
if (ret)
goto unlock;
- cm_init_av_by_path(param->alternate_path, NULL, &cm_id_priv->alt_av,
- cm_id_priv);
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+ goto unlock;
+
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
@@ -4367,7 +4370,7 @@ static void cm_add_one(struct ib_device *ib_device)
cm_dev->going_down = 0;
cm_dev->device = device_create(&cm_class, &ib_device->dev,
MKDEV(0, 0), NULL,
- "%s", ib_device->name);
+ "%s", dev_name(&ib_device->dev));
if (IS_ERR(cm_dev->device)) {
kfree(cm_dev);
return;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index a36c94930c31..15d5bb7bf6bb 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -639,13 +639,21 @@ static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
}
-static int cma_acquire_dev(struct rdma_id_private *id_priv,
- const struct rdma_id_private *listen_id_priv)
+/**
+ * cma_acquire_dev_by_src_ip - Acquire cma device, port and GID attribute
+ * based on the source IP address.
+ * @id_priv: cm_id which should be bound to a cma device
+ *
+ * cma_acquire_dev_by_src_ip() binds the cm id to a cma device, port and
+ * GID attribute based on the source IP address. It returns 0 on success
+ * or an error code otherwise. It is applicable to both active and
+ * passive side cm_ids.
+ */
+static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
const struct ib_gid_attr *sgid_attr;
- struct cma_device *cma_dev;
union ib_gid gid, iboe_gid, *gidp;
+ struct cma_device *cma_dev;
enum ib_gid_type gid_type;
int ret = -ENODEV;
u8 port;
@@ -654,41 +662,125 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
id_priv->id.ps == RDMA_PS_IPOIB)
return -EINVAL;
- mutex_lock(&lock);
rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
&iboe_gid);
memcpy(&gid, dev_addr->src_dev_addr +
- rdma_addr_gid_offset(dev_addr), sizeof gid);
-
- if (listen_id_priv) {
- cma_dev = listen_id_priv->cma_dev;
- port = listen_id_priv->id.port_num;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
- gid_type = listen_id_priv->gid_type;
- sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
- if (!IS_ERR(sgid_attr)) {
- id_priv->id.port_num = port;
- cma_bind_sgid_attr(id_priv, sgid_attr);
- ret = 0;
- goto out;
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+ list_for_each_entry(cma_dev, &dev_list, list) {
+ for (port = rdma_start_port(cma_dev->device);
+ port <= rdma_end_port(cma_dev->device); port++) {
+ gidp = rdma_protocol_roce(cma_dev->device, port) ?
+ &iboe_gid : &gid;
+ gid_type = cma_dev->default_gid_type[port - 1];
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, gidp, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ cma_attach_to_dev(id_priv, cma_dev);
+ ret = 0;
+ goto out;
+ }
}
}
+out:
+ mutex_unlock(&lock);
+ return ret;
+}
+
+/**
+ * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
+ * @id_priv: cm id to bind to a cma device
+ * @listen_id_priv: listener cm id to match against
+ * @req: Pointer to req structure containing incoming request information
+ *
+ * cma_ib_acquire_dev() acquires the cma device, port and SGID attribute
+ * when the rdma device matches the listen_id and incoming request. It
+ * also verifies that a GID table entry is present for the source
+ * address. Returns 0 on success, or an error code otherwise.
+ */
+static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv,
+ struct cma_req_info *req)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ enum ib_gid_type gid_type;
+ union ib_gid gid;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ if (rdma_protocol_roce(req->device, req->port))
+ rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
+ &gid);
+ else
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
+ sgid_attr = cma_validate_port(req->device, req->port,
+ gid_type, &gid, id_priv);
+ if (IS_ERR(sgid_attr))
+ return PTR_ERR(sgid_attr);
+
+ id_priv->id.port_num = req->port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+	/* Need to acquire the lock to protect against readers of
+	 * cma_dev->id_list, such as cma_netdev_callback() and
+	 * cma_process_remove().
+	 */
+ mutex_lock(&lock);
+ cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
+ mutex_unlock(&lock);
+ return 0;
+}
+
+static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
+ const struct rdma_id_private *listen_id_priv)
+{
+ struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
+ const struct ib_gid_attr *sgid_attr;
+ struct cma_device *cma_dev;
+ enum ib_gid_type gid_type;
+ int ret = -ENODEV;
+ union ib_gid gid;
+ u8 port;
+
+ if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
+ id_priv->id.ps == RDMA_PS_IPOIB)
+ return -EINVAL;
+
+ memcpy(&gid, dev_addr->src_dev_addr +
+ rdma_addr_gid_offset(dev_addr), sizeof(gid));
+
+ mutex_lock(&lock);
+
+ cma_dev = listen_id_priv->cma_dev;
+ port = listen_id_priv->id.port_num;
+ gid_type = listen_id_priv->gid_type;
+ sgid_attr = cma_validate_port(cma_dev->device, port,
+ gid_type, &gid, id_priv);
+ if (!IS_ERR(sgid_attr)) {
+ id_priv->id.port_num = port;
+ cma_bind_sgid_attr(id_priv, sgid_attr);
+ ret = 0;
+ goto out;
+ }
list_for_each_entry(cma_dev, &dev_list, list) {
for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
- if (listen_id_priv &&
- listen_id_priv->cma_dev == cma_dev &&
+ if (listen_id_priv->cma_dev == cma_dev &&
listen_id_priv->id.port_num == port)
continue;
- gidp = rdma_protocol_roce(cma_dev->device, port) ?
- &iboe_gid : &gid;
gid_type = cma_dev->default_gid_type[port - 1];
sgid_attr = cma_validate_port(cma_dev->device, port,
- gid_type, gidp, id_priv);
+ gid_type, &gid, id_priv);
if (!IS_ERR(sgid_attr)) {
id_priv->id.port_num = port;
cma_bind_sgid_attr(id_priv, sgid_attr);
@@ -785,10 +877,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
if (!id_priv)
return ERR_PTR(-ENOMEM);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
id_priv->res.type = RDMA_RESTRACK_CM_ID;
id_priv->state = RDMA_CM_IDLE;
id_priv->id.context = context;
@@ -1462,18 +1551,35 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return rdma_protocol_roce(device, port_num);
}
+static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
+{
+ const struct sockaddr *daddr =
+ (const struct sockaddr *)&req->listen_addr_storage;
+ const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
+
+ /* Returns true if the req is for IPv6 link local */
+ return (daddr->sa_family == AF_INET6 &&
+ (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
+}
+
static bool cma_match_net_dev(const struct rdma_cm_id *id,
const struct net_device *net_dev,
- u8 port_num)
+ const struct cma_req_info *req)
{
const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request */
- return (!id->port_num || id->port_num == port_num) &&
+ return (!id->port_num || id->port_num == req->port) &&
(addr->src_addr.ss_family == AF_IB);
/*
+	 * If the request is not for IPv6 link local, allow matching the
+	 * request to any netdevice of a single- or multi-port rdma device.
+ */
+ if (!cma_is_req_ipv6_ll(req))
+ return true;
+ /*
	 * Net namespaces must match, and if the listener is listening
	 * on a specific netdevice then the netdevice must match as well.
*/
@@ -1500,13 +1606,14 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv->id, net_dev, req))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
+ cma_match_net_dev(&id_priv_dev->id,
+ net_dev, req))
return id_priv_dev;
}
}
@@ -1518,18 +1625,18 @@ static struct rdma_id_private *cma_find_listener(
static struct rdma_id_private *
cma_ib_id_from_event(struct ib_cm_id *cm_id,
const struct ib_cm_event *ib_event,
+ struct cma_req_info *req,
struct net_device **net_dev)
{
- struct cma_req_info req;
struct rdma_bind_list *bind_list;
struct rdma_id_private *id_priv;
int err;
- err = cma_save_req_info(ib_event, &req);
+ err = cma_save_req_info(ib_event, req);
if (err)
return ERR_PTR(err);
- *net_dev = cma_get_net_dev(ib_event, &req);
+ *net_dev = cma_get_net_dev(ib_event, req);
if (IS_ERR(*net_dev)) {
if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
/* Assuming the protocol is AF_IB */
@@ -1567,17 +1674,17 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
if (!validate_net_dev(*net_dev,
- (struct sockaddr *)&req.listen_addr_storage,
- (struct sockaddr *)&req.src_addr_storage)) {
+ (struct sockaddr *)&req->listen_addr_storage,
+ (struct sockaddr *)&req->src_addr_storage)) {
id_priv = ERR_PTR(-EHOSTUNREACH);
goto err;
}
}
bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
- rdma_ps_from_service_id(req.service_id),
- cma_port_from_service_id(req.service_id));
- id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+ rdma_ps_from_service_id(req->service_id),
+ cma_port_from_service_id(req->service_id));
+ id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
if (IS_ERR(id_priv) && *net_dev) {
@@ -1710,8 +1817,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
mutex_lock(&id_priv->handler_mutex);
mutex_unlock(&id_priv->handler_mutex);
+ rdma_restrack_del(&id_priv->res);
if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
if (id_priv->cm_id.ib)
ib_destroy_cm_id(id_priv->cm_id.ib);
@@ -1902,7 +2009,7 @@ cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
if (net_dev) {
- rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1952,7 +2059,7 @@ cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
goto err;
if (net_dev) {
- rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
+ rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1999,11 +2106,12 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
{
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event = {};
+ struct cma_req_info req = {};
struct net_device *net_dev;
u8 offset;
int ret;
- listen_id = cma_ib_id_from_event(cm_id, ib_event, &net_dev);
+ listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
if (IS_ERR(listen_id))
return PTR_ERR(listen_id);
@@ -2036,7 +2144,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id,
}
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
if (ret)
goto err2;
@@ -2232,7 +2340,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
goto out;
}
- ret = cma_acquire_dev(conn_id, listen_id);
+ ret = cma_iw_acquire_dev(conn_id, listen_id);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -2354,8 +2462,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(id, id_priv->backlog);
if (ret)
- pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
- ret, cma_dev->device->name);
+ dev_warn(&cma_dev->device->dev,
+ "RDMA CMA: cma_listen_on_dev, error %d\n", ret);
}
static void cma_listen_on_all(struct rdma_id_private *id_priv)
@@ -2402,8 +2510,8 @@ static void cma_query_handler(int status, struct sa_path_rec *path_rec,
queue_work(cma_wq, &work->work);
}
-static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
- struct cma_work *work)
+static int cma_query_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms, struct cma_work *work)
{
struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
struct sa_path_rec path_rec;
@@ -2521,7 +2629,8 @@ static void cma_init_resolve_addr_work(struct cma_work *work,
work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
}
-static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
+ unsigned long timeout_ms)
{
struct rdma_route *route = &id_priv->id.route;
struct cma_work *work;
@@ -2643,7 +2752,7 @@ err:
}
EXPORT_SYMBOL(rdma_set_ib_path);
-static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
+static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
{
struct cma_work *work;
@@ -2744,7 +2853,7 @@ err1:
return ret;
}
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2759,7 +2868,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
else if (rdma_protocol_roce(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv);
else if (rdma_protocol_iwarp(id->device, id->port_num))
- ret = cma_resolve_iw_route(id_priv, timeout_ms);
+ ret = cma_resolve_iw_route(id_priv);
else
ret = -ENOSYS;
@@ -2862,7 +2971,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
if (!status && !id_priv->cma_dev) {
- status = cma_acquire_dev(id_priv, NULL);
+ status = cma_acquire_dev_by_src_ip(id_priv);
if (status)
pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
status);
@@ -2882,13 +2991,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
if (id_priv->id.event_handler(&id_priv->id, &event)) {
cma_exch(id_priv, RDMA_CM_DESTROYING);
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
rdma_destroy_id(&id_priv->id);
return;
}
out:
mutex_unlock(&id_priv->handler_mutex);
- cma_deref_id(id_priv);
}
static int cma_resolve_loopback(struct rdma_id_private *id_priv)
@@ -2966,7 +3073,7 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
}
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms)
+ const struct sockaddr *dst_addr, unsigned long timeout_ms)
{
struct rdma_id_private *id_priv;
int ret;
@@ -2985,16 +3092,16 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return -EINVAL;
memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
- atomic_inc(&id_priv->refcount);
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
if (dst_addr->sa_family == AF_IB) {
ret = cma_resolve_ib_addr(id_priv);
} else {
- ret = rdma_resolve_ip(cma_src_addr(id_priv),
- dst_addr, &id->route.addr.dev_addr,
- timeout_ms, addr_handler, id_priv);
+ ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
+ &id->route.addr.dev_addr,
+ timeout_ms, addr_handler,
+ false, id_priv);
}
}
if (ret)
@@ -3003,7 +3110,6 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
return 0;
err:
cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
- cma_deref_id(id_priv);
return ret;
}
EXPORT_SYMBOL(rdma_resolve_addr);
@@ -3414,7 +3520,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
if (ret)
goto err1;
- ret = cma_acquire_dev(id_priv, NULL);
+ ret = cma_acquire_dev_by_src_ip(id_priv);
if (ret)
goto err1;
}
@@ -3439,10 +3545,9 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
return 0;
err2:
- if (id_priv->cma_dev) {
- rdma_restrack_del(&id_priv->res);
+ rdma_restrack_del(&id_priv->res);
+ if (id_priv->cma_dev)
cma_release_dev(id_priv);
- }
err1:
cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
return ret;
@@ -3839,10 +3944,7 @@ int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
id_priv = container_of(id, struct rdma_id_private, id);
- if (caller)
- id_priv->res.kern_name = caller;
- else
- rdma_restrack_set_task(&id_priv->res, current);
+ rdma_restrack_set_task(&id_priv->res, caller);
if (!cma_comp(id_priv, RDMA_CM_CONNECT))
return -EINVAL;
@@ -4087,9 +4189,10 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
(!ib_sa_sendonly_fullmem_support(&sa_client,
id_priv->id.device,
id_priv->id.port_num))) {
- pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
- "RDMA CM: SM doesn't support Send Only Full Member option\n",
- id_priv->id.device->name, id_priv->id.port_num);
+ dev_warn(
+ &id_priv->id.device->dev,
+ "RDMA CM: port %u Unable to multicast join: SM doesn't support Send Only Full Member option\n",
+ id_priv->id.port_num);
return -EOPNOTSUPP;
}
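Note that the cma.c changes above also widen the resolution timeout
from int to unsigned long throughout. A hedged ULP-side sketch of the
new signatures (the timeout value, function name, and inline flow are
illustrative only; real ULPs typically resolve the route from the
ADDR_RESOLVED event handler rather than inline):

static int example_resolve(struct rdma_cm_id *id, struct sockaddr *dst)
{
	unsigned long timeout_ms = 2000;	/* arbitrary example value */
	int ret;

	ret = rdma_resolve_addr(id, NULL, dst, timeout_ms);
	if (ret)
		return ret;
	return rdma_resolve_route(id, timeout_ms);
}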
diff --git a/drivers/infiniband/core/cma_configfs.c b/drivers/infiniband/core/cma_configfs.c
index eee38b40be99..8c2dfb3e294e 100644
--- a/drivers/infiniband/core/cma_configfs.c
+++ b/drivers/infiniband/core/cma_configfs.c
@@ -65,7 +65,7 @@ static struct cma_dev_port_group *to_dev_port_group(struct config_item *item)
static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
{
- return !strcmp(ib_dev->name, cookie);
+ return !strcmp(dev_name(&ib_dev->dev), cookie);
}
static int cma_configfs_params_get(struct config_item *item,
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index 77c7005c396c..bb9007a0cca7 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -44,7 +44,7 @@
#include "mad_priv.h"
 /* Total number of ports combined across all struct ib_device instances */
-#define RDMA_MAX_PORTS 1024
+#define RDMA_MAX_PORTS 8192
struct pkey_index_qp_list {
struct list_head pkey_index_list;
@@ -87,6 +87,7 @@ int ib_device_register_sysfs(struct ib_device *device,
int (*port_callback)(struct ib_device *,
u8, struct kobject *));
void ib_device_unregister_sysfs(struct ib_device *device);
+int ib_device_rename(struct ib_device *ibdev, const char *name);
typedef void (*roce_netdev_callback)(struct ib_device *device, u8 port,
struct net_device *idev, void *cookie);
@@ -338,7 +339,14 @@ int rdma_resolve_ip_route(struct sockaddr *src_addr,
int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
const union ib_gid *dgid,
- u8 *dmac, const struct net_device *ndev,
+ u8 *dmac, const struct ib_gid_attr *sgid_attr,
int *hoplimit);
+void rdma_copy_src_l2_addr(struct rdma_dev_addr *dev_addr,
+ const struct net_device *dev);
+struct sa_path_rec;
+int roce_resolve_route_from_path(struct sa_path_rec *rec,
+ const struct ib_gid_attr *attr);
+
+struct net_device *rdma_read_gid_attr_ndev_rcu(const struct ib_gid_attr *attr);
#endif /* _CORE_PRIV_H */
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
index af5ad6a56ae4..b1e5365ddafa 100644
--- a/drivers/infiniband/core/cq.c
+++ b/drivers/infiniband/core/cq.c
@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
IB_POLL_BATCH);
if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
{
- queue_work(ib_comp_wq, &cq->work);
+ queue_work(cq->comp_wq, &cq->work);
}
/**
@@ -161,7 +161,7 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
goto out_destroy_cq;
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
switch (cq->poll_ctx) {
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cq->comp_handler = ib_cq_completion_workqueue;
INIT_WORK(&cq->work, ib_cq_poll_work);
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+ ib_comp_wq : ib_comp_unbound_wq;
break;
default:
ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
irq_poll_disable(&cq->iop);
break;
case IB_POLL_WORKQUEUE:
+ case IB_POLL_UNBOUND_WORKQUEUE:
cancel_work_sync(&cq->work);
break;
default:
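The new IB_POLL_UNBOUND_WORKQUEUE context routes completion work to
ib_comp_unbound_wq instead of the bound ib_comp_wq. A minimal consumer
sketch (the caller, CQ size, and comp_vector are hypothetical):

static struct ib_cq *example_alloc_unbound_cq(struct ib_device *dev,
					      void *priv)
{
	/* unbound: long-running completion handlers won't pin a CPU */
	return ib_alloc_cq(dev, priv, 128, 0, IB_POLL_UNBOUND_WORKQUEUE);
}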
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index db3b6271f09d..87eb4f2cdd7d 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -61,6 +61,7 @@ struct ib_client_data {
};
struct workqueue_struct *ib_comp_wq;
+struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
@@ -122,8 +123,9 @@ static int ib_device_check_mandatory(struct ib_device *device)
for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
- pr_warn("Device %s is missing mandatory function %s\n",
- device->name, mandatory_table[i].name);
+ dev_warn(&device->dev,
+ "Device is missing mandatory function %s\n",
+ mandatory_table[i].name);
return -EINVAL;
}
}
@@ -163,16 +165,40 @@ static struct ib_device *__ib_device_get_by_name(const char *name)
struct ib_device *device;
list_for_each_entry(device, &device_list, core_list)
- if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(name, dev_name(&device->dev)))
return device;
return NULL;
}
-static int alloc_name(char *name)
+int ib_device_rename(struct ib_device *ibdev, const char *name)
+{
+ struct ib_device *device;
+ int ret = 0;
+
+ if (!strcmp(name, dev_name(&ibdev->dev)))
+ return ret;
+
+ mutex_lock(&device_mutex);
+ list_for_each_entry(device, &device_list, core_list) {
+ if (!strcmp(name, dev_name(&device->dev))) {
+ ret = -EEXIST;
+ goto out;
+ }
+ }
+
+ ret = device_rename(&ibdev->dev, name);
+ if (ret)
+ goto out;
+ strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+out:
+ mutex_unlock(&device_mutex);
+ return ret;
+}
+
+static int alloc_name(struct ib_device *ibdev, const char *name)
{
unsigned long *inuse;
- char buf[IB_DEVICE_NAME_MAX];
struct ib_device *device;
int i;
@@ -181,24 +207,21 @@ static int alloc_name(char *name)
return -ENOMEM;
list_for_each_entry(device, &device_list, core_list) {
- if (!sscanf(device->name, name, &i))
+ char buf[IB_DEVICE_NAME_MAX];
+
+ if (sscanf(dev_name(&device->dev), name, &i) != 1)
continue;
if (i < 0 || i >= PAGE_SIZE * 8)
continue;
snprintf(buf, sizeof buf, name, i);
- if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
+ if (!strcmp(buf, dev_name(&device->dev)))
set_bit(i, inuse);
}
i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
free_page((unsigned long) inuse);
- snprintf(buf, sizeof buf, name, i);
- if (__ib_device_get_by_name(buf))
- return -ENFILE;
-
- strlcpy(name, buf, IB_DEVICE_NAME_MAX);
- return 0;
+ return dev_set_name(&ibdev->dev, name, i);
}
static void ib_device_release(struct device *device)
@@ -221,9 +244,7 @@ static void ib_device_release(struct device *device)
static int ib_device_uevent(struct device *device,
struct kobj_uevent_env *env)
{
- struct ib_device *dev = container_of(device, struct ib_device, dev);
-
- if (add_uevent_var(env, "NAME=%s", dev->name))
+ if (add_uevent_var(env, "NAME=%s", dev_name(device)))
return -ENOMEM;
/*
@@ -269,7 +290,7 @@ struct ib_device *ib_alloc_device(size_t size)
INIT_LIST_HEAD(&device->event_handler_list);
spin_lock_init(&device->event_handler_lock);
- spin_lock_init(&device->client_data_lock);
+ rwlock_init(&device->client_data_lock);
INIT_LIST_HEAD(&device->client_data_list);
INIT_LIST_HEAD(&device->port_list);
@@ -285,6 +306,7 @@ EXPORT_SYMBOL(ib_alloc_device);
*/
void ib_dealloc_device(struct ib_device *device)
{
+ WARN_ON(!list_empty(&device->client_data_list));
WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
device->reg_state != IB_DEV_UNINITIALIZED);
rdma_restrack_clean(&device->res);
@@ -295,9 +317,8 @@ EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
struct ib_client_data *context;
- unsigned long flags;
- context = kmalloc(sizeof *context, GFP_KERNEL);
+ context = kmalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
@@ -306,9 +327,9 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
context->going_down = false;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_add(&context->list, &device->client_data_list);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
return 0;
@@ -444,22 +465,8 @@ static u32 __dev_new_index(void)
}
}
-/**
- * ib_register_device - Register an IB device with IB core
- * @device:Device to register
- *
- * Low-level drivers use ib_register_device() to register their
- * devices with the IB core. All registered clients will receive a
- * callback for each device that is added. @device must be allocated
- * with ib_alloc_device().
- */
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *))
+static void setup_dma_device(struct ib_device *device)
{
- int ret;
- struct ib_client *client;
- struct ib_udata uhw = {.outlen = 0, .inlen = 0};
struct device *parent = device->dev.parent;
WARN_ON_ONCE(device->dma_device);
@@ -491,56 +498,113 @@ int ib_register_device(struct ib_device *device,
WARN_ON_ONCE(!parent);
device->dma_device = parent;
}
+}
- mutex_lock(&device_mutex);
+static void cleanup_device(struct ib_device *device)
+{
+ ib_cache_cleanup_one(device);
+ ib_cache_release_one(device);
+ kfree(device->port_pkey_list);
+ kfree(device->port_immutable);
+}
- if (strchr(device->name, '%')) {
- ret = alloc_name(device->name);
- if (ret)
- goto out;
- }
+static int setup_device(struct ib_device *device)
+{
+ struct ib_udata uhw = {.outlen = 0, .inlen = 0};
+ int ret;
- if (ib_device_check_mandatory(device)) {
- ret = -EINVAL;
- goto out;
- }
+ ret = ib_device_check_mandatory(device);
+ if (ret)
+ return ret;
ret = read_port_immutable(device);
if (ret) {
- pr_warn("Couldn't create per port immutable data %s\n",
- device->name);
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't create per port immutable data\n");
+ return ret;
}
- ret = setup_port_pkey_list(device);
+ memset(&device->attrs, 0, sizeof(device->attrs));
+ ret = device->query_device(device, &device->attrs, &uhw);
if (ret) {
- pr_warn("Couldn't create per port_pkey_list\n");
- goto out;
+ dev_warn(&device->dev,
+ "Couldn't query the device attributes\n");
+ goto port_cleanup;
}
- ret = ib_cache_setup_one(device);
+ ret = setup_port_pkey_list(device);
if (ret) {
- pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
+ dev_warn(&device->dev, "Couldn't create per port_pkey_list\n");
goto port_cleanup;
}
- ret = ib_device_register_rdmacg(device);
+ ret = ib_cache_setup_one(device);
if (ret) {
- pr_warn("Couldn't register device with rdma cgroup\n");
- goto cache_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't set up InfiniBand P_Key/GID cache\n");
+ goto pkey_cleanup;
+ }
+ return 0;
+
+pkey_cleanup:
+ kfree(device->port_pkey_list);
+port_cleanup:
+ kfree(device->port_immutable);
+ return ret;
+}
+
+/**
+ * ib_register_device - Register an IB device with IB core
+ * @device:Device to register
+ *
+ * Low-level drivers use ib_register_device() to register their
+ * devices with the IB core. All registered clients will receive a
+ * callback for each device that is added. @device must be allocated
+ * with ib_alloc_device().
+ */
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *))
+{
+ int ret;
+ struct ib_client *client;
+
+ setup_dma_device(device);
+
+ mutex_lock(&device_mutex);
+
+ if (strchr(name, '%')) {
+ ret = alloc_name(device, name);
+ if (ret)
+ goto out;
+ } else {
+ ret = dev_set_name(&device->dev, name);
+ if (ret)
+ goto out;
+ }
+ if (__ib_device_get_by_name(dev_name(&device->dev))) {
+ ret = -ENFILE;
+ goto out;
}
+ strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);
- memset(&device->attrs, 0, sizeof(device->attrs));
- ret = device->query_device(device, &device->attrs, &uhw);
+ ret = setup_device(device);
+ if (ret)
+ goto out;
+
+ device->index = __dev_new_index();
+
+ ret = ib_device_register_rdmacg(device);
if (ret) {
- pr_warn("Couldn't query the device attributes\n");
- goto cg_cleanup;
+ dev_warn(&device->dev,
+ "Couldn't register device with rdma cgroup\n");
+ goto dev_cleanup;
}
ret = ib_device_register_sysfs(device, port_callback);
if (ret) {
- pr_warn("Couldn't register device %s with driver model\n",
- device->name);
+ dev_warn(&device->dev,
+ "Couldn't register device with driver model\n");
goto cg_cleanup;
}
@@ -550,7 +614,6 @@ int ib_register_device(struct ib_device *device,
if (!add_client_context(device, client) && client->add)
client->add(device);
- device->index = __dev_new_index();
down_write(&lists_rwsem);
list_add_tail(&device->core_list, &device_list);
up_write(&lists_rwsem);
@@ -559,11 +622,8 @@ int ib_register_device(struct ib_device *device,
cg_cleanup:
ib_device_unregister_rdmacg(device);
-cache_cleanup:
- ib_cache_cleanup_one(device);
- ib_cache_release_one(device);
-port_cleanup:
- kfree(device->port_immutable);
+dev_cleanup:
+ cleanup_device(device);
out:
mutex_unlock(&device_mutex);
return ret;
@@ -585,21 +645,20 @@ void ib_unregister_device(struct ib_device *device)
down_write(&lists_rwsem);
list_del(&device->core_list);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
context->going_down = true;
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
downgrade_write(&lists_rwsem);
- list_for_each_entry_safe(context, tmp, &device->client_data_list,
- list) {
+ list_for_each_entry(context, &device->client_data_list, list) {
if (context->client->remove)
context->client->remove(device, context->data);
}
up_read(&lists_rwsem);
- ib_device_unregister_rdmacg(device);
ib_device_unregister_sysfs(device);
+ ib_device_unregister_rdmacg(device);
mutex_unlock(&device_mutex);
@@ -609,10 +668,13 @@ void ib_unregister_device(struct ib_device *device)
kfree(device->port_pkey_list);
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irqsave(&device->client_data_lock, flags);
+ list_for_each_entry_safe(context, tmp, &device->client_data_list,
+ list) {
+ list_del(&context->list);
kfree(context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ }
+ write_unlock_irqrestore(&device->client_data_lock, flags);
up_write(&lists_rwsem);
device->reg_state = IB_DEV_UNREGISTERED;
@@ -662,9 +724,8 @@ EXPORT_SYMBOL(ib_register_client);
*/
void ib_unregister_client(struct ib_client *client)
{
- struct ib_client_data *context, *tmp;
+ struct ib_client_data *context;
struct ib_device *device;
- unsigned long flags;
mutex_lock(&device_mutex);
@@ -676,14 +737,14 @@ void ib_unregister_client(struct ib_client *client)
struct ib_client_data *found_context = NULL;
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
- list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
+ write_lock_irq(&device->client_data_lock);
+ list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->going_down = true;
found_context = context;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
if (client->remove)
@@ -691,17 +752,18 @@ void ib_unregister_client(struct ib_client *client)
found_context->data : NULL);
if (!found_context) {
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev,
+ "No client context found for %s\n",
+ client->name);
continue;
}
down_write(&lists_rwsem);
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irq(&device->client_data_lock);
list_del(&found_context->list);
- kfree(found_context);
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irq(&device->client_data_lock);
up_write(&lists_rwsem);
+ kfree(found_context);
}
mutex_unlock(&device_mutex);
@@ -722,13 +784,13 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
void *ret = NULL;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ read_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
ret = context->data;
break;
}
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ read_unlock_irqrestore(&device->client_data_lock, flags);
return ret;
}
@@ -749,18 +811,18 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
struct ib_client_data *context;
unsigned long flags;
- spin_lock_irqsave(&device->client_data_lock, flags);
+ write_lock_irqsave(&device->client_data_lock, flags);
list_for_each_entry(context, &device->client_data_list, list)
if (context->client == client) {
context->data = data;
goto out;
}
- pr_warn("No client context found for %s/%s\n",
- device->name, client->name);
+ dev_warn(&device->dev, "No client context found for %s\n",
+ client->name);
out:
- spin_unlock_irqrestore(&device->client_data_lock, flags);
+ write_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
@@ -1166,10 +1228,19 @@ static int __init ib_core_init(void)
goto err;
}
+ ib_comp_unbound_wq =
+ alloc_workqueue("ib-comp-unb-wq",
+ WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
+ WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
+ if (!ib_comp_unbound_wq) {
+ ret = -ENOMEM;
+ goto err_comp;
+ }
+
ret = class_register(&ib_class);
if (ret) {
pr_warn("Couldn't create InfiniBand device class\n");
- goto err_comp;
+ goto err_comp_unbound;
}
ret = rdma_nl_init();
@@ -1218,6 +1289,8 @@ err_ibnl:
rdma_nl_exit();
err_sysfs:
class_unregister(&ib_class);
+err_comp_unbound:
+ destroy_workqueue(ib_comp_unbound_wq);
err_comp:
destroy_workqueue(ib_comp_wq);
err:
@@ -1236,6 +1309,7 @@ static void __exit ib_core_cleanup(void)
addr_cleanup();
rdma_nl_exit();
class_unregister(&ib_class);
+ destroy_workqueue(ib_comp_unbound_wq);
destroy_workqueue(ib_comp_wq);
/* Make sure that any pending umem accounting work is done. */
destroy_workqueue(ib_wq);
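With the registration rework above, drivers pass the device name
(optionally with a "%d" pattern) directly to ib_register_device()
instead of pre-filling device->name. A hedged driver-side sketch
(the caller and name string are illustrative):

static int example_register(struct ib_device *ibdev)
{
	/* "%d" is expanded by alloc_name() to the first free index */
	return ib_register_device(ibdev, "example_%d", NULL);
}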
diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
index a077500f7f32..83ba0068e8bb 100644
--- a/drivers/infiniband/core/fmr_pool.c
+++ b/drivers/infiniband/core/fmr_pool.c
@@ -213,7 +213,7 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
device = pd->device;
if (!device->alloc_fmr || !device->dealloc_fmr ||
!device->map_phys_fmr || !device->unmap_fmr) {
- pr_info(PFX "Device %s does not support FMRs\n", device->name);
+ dev_info(&device->dev, "Device does not support FMRs\n");
return ERR_PTR(-ENOSYS);
}
@@ -257,7 +257,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
atomic_set(&pool->flush_ser, 0);
init_waitqueue_head(&pool->force_wait);
- pool->worker = kthread_create_worker(0, "ib_fmr(%s)", device->name);
+ pool->worker =
+ kthread_create_worker(0, "ib_fmr(%s)", dev_name(&device->dev));
if (IS_ERR(pool->worker)) {
pr_warn(PFX "couldn't start cleanup kthread worker\n");
ret = PTR_ERR(pool->worker);
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 5d676cff41f4..ba668d49c751 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -509,7 +509,7 @@ static int iw_cm_map(struct iw_cm_id *cm_id, bool active)
cm_id->m_local_addr = cm_id->local_addr;
cm_id->m_remote_addr = cm_id->remote_addr;
- memcpy(pm_reg_msg.dev_name, cm_id->device->name,
+ memcpy(pm_reg_msg.dev_name, dev_name(&cm_id->device->dev),
sizeof(pm_reg_msg.dev_name));
memcpy(pm_reg_msg.if_name, cm_id->device->iwcm->ifname,
sizeof(pm_reg_msg.if_name));
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2f2eeb..d7025cd5be28 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -220,33 +220,37 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
int ret2, qpn;
u8 mgmt_class, vclass;
+ if ((qp_type == IB_QPT_SMI && !rdma_cap_ib_smi(device, port_num)) ||
+ (qp_type == IB_QPT_GSI && !rdma_cap_ib_cm(device, port_num)))
+ return ERR_PTR(-EPROTONOSUPPORT);
+
/* Validate parameters */
qpn = get_spl_qp_index(qp_type);
if (qpn == -1) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid QP Type %d\n",
- qp_type);
+ dev_dbg_ratelimited(&device->dev, "%s: invalid QP Type %d\n",
+ __func__, qp_type);
goto error1;
}
if (rmpp_version && rmpp_version != IB_MGMT_RMPP_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid RMPP Version %u\n",
- rmpp_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid RMPP Version %u\n",
+ __func__, rmpp_version);
goto error1;
}
/* Validate MAD registration request if supplied */
if (mad_reg_req) {
if (mad_reg_req->mgmt_class_version >= MAX_MGMT_VERSION) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: invalid Class Version %u\n",
- mad_reg_req->mgmt_class_version);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: invalid Class Version %u\n",
+ __func__,
+ mad_reg_req->mgmt_class_version);
goto error1;
}
if (!recv_handler) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: no recv_handler\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: no recv_handler\n", __func__);
goto error1;
}
if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) {
@@ -256,9 +260,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
*/
if (mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else if (mad_reg_req->mgmt_class == 0) {
@@ -266,8 +270,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* Class 0 is reserved in IBA and is used for
* aliasing of IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
*/
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid Mgmt Class 0\n");
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid Mgmt Class 0\n",
+ __func__);
goto error1;
} else if (is_vendor_class(mad_reg_req->mgmt_class)) {
/*
@@ -275,18 +280,19 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
* ensure supplied OUI is not zero
*/
if (!is_vendor_oui(mad_reg_req->oui)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: No OUI specified for class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: No OUI specified for class 0x%x\n",
+ __func__,
+ mad_reg_req->mgmt_class);
goto error1;
}
}
/* Make sure class supplied is consistent with RMPP */
if (!ib_is_mad_class_rmpp(mad_reg_req->mgmt_class)) {
if (rmpp_version) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: RMPP version for non-RMPP class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: RMPP version for non-RMPP class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -297,9 +303,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) &&
(mad_reg_req->mgmt_class !=
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid SM QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid SM QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
} else {
@@ -307,9 +313,9 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
IB_MGMT_CLASS_SUBN_LID_ROUTED) ||
(mad_reg_req->mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid GS QP type: class 0x%x\n",
- mad_reg_req->mgmt_class);
+ dev_dbg_ratelimited(&device->dev,
+ "%s: Invalid GS QP type: class 0x%x\n",
+ __func__, mad_reg_req->mgmt_class);
goto error1;
}
}
@@ -324,18 +330,18 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
/* Validate device and port */
port_priv = ib_get_mad_port(device, port_num);
if (!port_priv) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: Invalid port %d\n",
- port_num);
+ dev_dbg_ratelimited(&device->dev, "%s: Invalid port %d\n",
+ __func__, port_num);
ret = ERR_PTR(-ENODEV);
goto error1;
}
- /* Verify the QP requested is supported. For example, Ethernet devices
- * will not have QP0 */
+ /* Verify the QP requested is supported. For example, Ethernet devices
+ * will not have QP0.
+ */
if (!port_priv->qp_info[qpn].qp) {
- dev_notice(&device->dev,
- "ib_register_mad_agent: QP %d not supported\n", qpn);
+ dev_dbg_ratelimited(&device->dev, "%s: QP %d not supported\n",
+ __func__, qpn);
ret = ERR_PTR(-EPROTONOSUPPORT);
goto error1;
}
@@ -2408,7 +2414,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)
}
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms)
+ unsigned long timeout_ms)
{
mad_send_wr->timeout = msecs_to_jiffies(timeout_ms);
wait_for_response(mad_send_wr);
@@ -3183,7 +3189,7 @@ static int ib_mad_port_open(struct ib_device *device,
cq_size *= 2;
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
- IB_POLL_WORKQUEUE);
+ IB_POLL_UNBOUND_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h
index d84ae1671898..216509036aa8 100644
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -221,6 +221,6 @@ void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr,
void ib_mark_mad_done(struct ib_mad_send_wr_private *mad_send_wr);
void ib_reset_mad_timeout(struct ib_mad_send_wr_private *mad_send_wr,
- int timeout_ms);
+ unsigned long timeout_ms);
#endif /* __IB_MAD_PRIV_H__ */
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 3ccaae18ad75..724f5a62e82f 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -47,9 +47,9 @@ static struct {
const struct rdma_nl_cbs *cb_table;
} rdma_nl_types[RDMA_NL_NUM_CLIENTS];
-int rdma_nl_chk_listeners(unsigned int group)
+bool rdma_nl_chk_listeners(unsigned int group)
{
- return (netlink_has_listeners(nls, group)) ? 0 : -1;
+ return netlink_has_listeners(nls, group);
}
EXPORT_SYMBOL(rdma_nl_chk_listeners);
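rdma_nl_chk_listeners() now returns bool with the natural sense (true
when the netlink group has listeners), replacing the inverted 0/-1
convention; the sa_query.c hunk further down flips its call site to
match. A sketch of the corrected idiom, modeled on send_mad() (the
wrapper function is hypothetical):

static int example_resolve_path(struct ib_sa_query *query, gfp_t gfp)
{
	if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS))
		return ib_nl_make_request(query, gfp); /* userspace first */
	return -ENOENT;	/* fall back to the SA MAD path */
}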
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 0385ab438320..573399e3ccc1 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -179,7 +179,8 @@ static int fill_nldev_handle(struct sk_buff *msg, struct ib_device *device)
{
if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
return -EMSGSIZE;
- if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
+ if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME,
+ dev_name(&device->dev)))
return -EMSGSIZE;
return 0;
@@ -645,6 +646,36 @@ err:
return err;
}
+static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
+ struct ib_device *device;
+ u32 index;
+ int err;
+
+ err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
+ return -EINVAL;
+
+ index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(index);
+ if (!device)
+ return -EINVAL;
+
+ if (tb[RDMA_NLDEV_ATTR_DEV_NAME]) {
+ char name[IB_DEVICE_NAME_MAX] = {};
+
+ nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
+ IB_DEVICE_NAME_MAX);
+ err = ib_device_rename(device, name);
+ }
+
+ put_device(&device->dev);
+ return err;
+}
+
static int _nldev_get_dumpit(struct ib_device *device,
struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1077,6 +1108,10 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.doit = nldev_get_doit,
.dump = nldev_get_dumpit,
},
+ [RDMA_NLDEV_CMD_SET] = {
+ .doit = nldev_set_doit,
+ .flags = RDMA_NL_ADMIN_PERM,
+ },
[RDMA_NLDEV_CMD_PORT_GET] = {
.doit = nldev_port_get_doit,
.dump = nldev_port_get_dumpit,
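The new RDMA_NLDEV_CMD_SET op lets privileged userspace rename an IB
device; the handler above funnels into ib_device_rename() from
device.c. A minimal in-kernel sketch of that path (the caller and the
new name are hypothetical):

static int example_rename(struct ib_device *ibdev)
{
	return ib_device_rename(ibdev, "roce0");
}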
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index c4118bcd5103..752a55c6bdce 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -794,44 +794,6 @@ void uverbs_close_fd(struct file *f)
uverbs_uobject_put(uobj);
}
-static void ufile_disassociate_ucontext(struct ib_ucontext *ibcontext)
-{
- struct ib_device *ib_dev = ibcontext->device;
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID);
- if (!owning_process)
- return;
-
- owning_mm = get_task_mm(owning_process);
- if (!owning_mm) {
- pr_info("no mm, disassociate ucontext is pending task termination\n");
- while (1) {
- put_task_struct(owning_process);
- usleep_range(1000, 2000);
- owning_process = get_pid_task(ibcontext->tgid,
- PIDTYPE_PID);
- if (!owning_process ||
- owning_process->state == TASK_DEAD) {
- pr_info("disassociate ucontext done, task was terminated\n");
- /* in case task was dead need to release the
- * task struct.
- */
- if (owning_process)
- put_task_struct(owning_process);
- return;
- }
- }
- }
-
- down_write(&owning_mm->mmap_sem);
- ib_dev->disassociate_ucontext(ibcontext);
- up_write(&owning_mm->mmap_sem);
- mmput(owning_mm);
- put_task_struct(owning_process);
-}
-
/*
* Drop the ucontext off the ufile and completely disconnect it from the
* ib_device
@@ -840,20 +802,28 @@ static void ufile_destroy_ucontext(struct ib_uverbs_file *ufile,
enum rdma_remove_reason reason)
{
struct ib_ucontext *ucontext = ufile->ucontext;
+ struct ib_device *ib_dev = ucontext->device;
int ret;
- if (reason == RDMA_REMOVE_DRIVER_REMOVE)
- ufile_disassociate_ucontext(ucontext);
+	/*
+	 * If we are closing the FD then the user mmap VMAs must have
+	 * already been destroyed, as they hold on to the filep; otherwise
+	 * they need to be zapped.
+	 */
+ if (reason == RDMA_REMOVE_DRIVER_REMOVE) {
+ uverbs_user_mmap_disassociate(ufile);
+ if (ib_dev->disassociate_ucontext)
+ ib_dev->disassociate_ucontext(ucontext);
+ }
- put_pid(ucontext->tgid);
- ib_rdmacg_uncharge(&ucontext->cg_obj, ucontext->device,
+ ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
RDMACG_RESOURCE_HCA_HANDLE);
/*
* FIXME: Drivers are not permitted to fail dealloc_ucontext, remove
* the error return.
*/
- ret = ucontext->device->dealloc_ucontext(ucontext);
+ ret = ib_dev->dealloc_ucontext(ucontext);
WARN_ON(ret);
ufile->ucontext = NULL;
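With the core now zapping user VMAs via uverbs_user_mmap_disassociate(),
a driver's disassociate_ucontext() hook no longer needs the
mmap_sem/task juggling that the deleted helper performed. A hedged
driver-side stub (hypothetical, for illustration only):

static void example_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
	/* tear down HW state tied to this context; VMA zapping is
	 * already done by the uverbs core before this hook runs */
}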
diff --git a/drivers/infiniband/core/rdma_core.h b/drivers/infiniband/core/rdma_core.h
index f962f2a593ba..4886d2bba7c7 100644
--- a/drivers/infiniband/core/rdma_core.h
+++ b/drivers/infiniband/core/rdma_core.h
@@ -160,5 +160,6 @@ void uverbs_disassociate_api(struct uverbs_api *uapi);
void uverbs_destroy_api(struct uverbs_api *uapi);
void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs);
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile);
#endif /* RDMA_CORE_H */
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 3b7fa0ccaa08..06d8657ce583 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -50,8 +50,7 @@ void rdma_restrack_clean(struct rdma_restrack_root *res)
dev = container_of(res, struct ib_device, res);
pr_err("restrack: %s", CUT_HERE);
- pr_err("restrack: BUG: RESTRACK detected leak of resources on %s\n",
- dev->name);
+ dev_err(&dev->dev, "BUG: RESTRACK detected leak of resources\n");
hash_for_each(res->hash, bkt, e, node) {
if (rdma_is_kernel_res(e)) {
owner = e->kern_name;
@@ -156,6 +155,21 @@ static bool res_is_user(struct rdma_restrack_entry *res)
}
}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller)
+{
+ if (caller) {
+ res->kern_name = caller;
+ return;
+ }
+
+ if (res->task)
+ put_task_struct(res->task);
+ get_task_struct(current);
+ res->task = current;
+}
+EXPORT_SYMBOL(rdma_restrack_set_task);
+
void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
@@ -168,7 +182,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
if (res_is_user(res)) {
if (!res->task)
- rdma_restrack_set_task(res, current);
+ rdma_restrack_set_task(res, NULL);
res->kern_name = NULL;
} else {
set_kern_name(res);
@@ -209,7 +223,7 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
struct ib_device *dev;
if (!res->valid)
- return;
+ goto out;
dev = res_to_dev(res);
if (!dev)
@@ -222,8 +236,12 @@ void rdma_restrack_del(struct rdma_restrack_entry *res)
down_write(&dev->res.rwsem);
hash_del(&res->node);
res->valid = false;
- if (res->task)
- put_task_struct(res->task);
up_write(&dev->res.rwsem);
+
+out:
+ if (res->task) {
+ put_task_struct(res->task);
+ res->task = NULL;
+ }
}
EXPORT_SYMBOL(rdma_restrack_del);
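rdma_restrack_set_task() now takes the kernel caller name directly: a
non-NULL string marks a kernel owner, while NULL (re)pins the current
task. An illustrative sketch (the wrapper and the KBUILD_MODNAME choice
are hypothetical):

static void example_track_owner(struct rdma_restrack_entry *res,
				bool kernel_owner)
{
	if (kernel_owner)
		rdma_restrack_set_task(res, KBUILD_MODNAME);
	else
		rdma_restrack_set_task(res, NULL);	/* track current */
}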
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 683e6d11a564..d22c4a2ebac6 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -12,6 +12,7 @@
*/
#include <linux/moduleparam.h>
#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
#include <rdma/mr_pool.h>
#include <rdma/rw.h>
@@ -280,7 +281,11 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_device *dev = qp->pd->device;
int ret;
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ else
+ ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
+
if (!ret)
return -ENOMEM;
sg_cnt = ret;
@@ -602,7 +607,9 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
break;
}
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ /* P2PDMA contexts do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(sg)))
+ ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
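PCI P2PDMA pages are mapped with pci_p2pdma_map_sg() and, as the hunk
above notes, need no unmap. A hedged producer sketch, assuming a PCI
device that has published p2pmem and that the pci_p2pmem_alloc_sgl()
helper from pci-p2pdma.h is available (the wrapper is hypothetical):

static struct scatterlist *example_p2p_sgl(struct pci_dev *pdev,
					   unsigned int *nents, u32 length)
{
	/* pages in this SGL steer rdma_rw_ctx_init() into the
	 * pci_p2pdma_map_sg() branch added above */
	return pci_p2pmem_alloc_sgl(pdev, nents, length);
}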
diff --git a/drivers/infiniband/core/sa.h b/drivers/infiniband/core/sa.h
index b1d4bbf4ce5c..cbaaaa92fff3 100644
--- a/drivers/infiniband/core/sa.h
+++ b/drivers/infiniband/core/sa.h
@@ -49,16 +49,14 @@ static inline void ib_sa_client_put(struct ib_sa_client *client)
}
int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
+ struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
int mcast_init(void);
void mcast_cleanup(void);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7b794a14d6e8..be5ba5e15496 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -761,7 +761,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, query->port->agent->device->name,
+ memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
@@ -835,7 +835,6 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
- int ret = 0;
struct ib_sa_mad *mad;
int len;
@@ -862,13 +861,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- ret = rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
- if (!ret)
- ret = len;
- else
- ret = 0;
-
- return ret;
+ return rdma_nl_multicast(skb, RDMA_NL_GROUP_LS, gfp_mask);
}
static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
@@ -891,14 +884,12 @@ static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
ret = ib_nl_send_msg(query, gfp_mask);
- if (ret <= 0) {
+ if (ret) {
ret = -EIO;
/* Remove the request */
spin_lock_irqsave(&ib_nl_request_lock, flags);
list_del(&query->list);
spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- } else {
- ret = 0;
}
return ret;
@@ -1227,46 +1218,6 @@ static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
return src_path_mask;
}
-static int roce_resolve_route_from_path(struct sa_path_rec *rec,
- const struct ib_gid_attr *attr)
-{
- struct rdma_dev_addr dev_addr = {};
- union {
- struct sockaddr _sockaddr;
- struct sockaddr_in _sockaddr_in;
- struct sockaddr_in6 _sockaddr_in6;
- } sgid_addr, dgid_addr;
- int ret;
-
- if (rec->roce.route_resolved)
- return 0;
- if (!attr || !attr->ndev)
- return -EINVAL;
-
- dev_addr.bound_dev_if = attr->ndev->ifindex;
- /* TODO: Use net from the ib_gid_attr once it is added to it,
- * until than, limit itself to init_net.
- */
- dev_addr.net = &init_net;
-
- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
-
- /* validate the route */
- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
- &dgid_addr._sockaddr, &dev_addr);
- if (ret)
- return ret;
-
- if ((dev_addr.network == RDMA_NETWORK_IPV4 ||
- dev_addr.network == RDMA_NETWORK_IPV6) &&
- rec->rec_type != SA_PATH_REC_TYPE_ROCE_V2)
- return -EINVAL;
-
- rec->roce.route_resolved = true;
- return 0;
-}
-
static int init_ah_attr_grh_fields(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
struct rdma_ah_attr *ah_attr,
@@ -1409,7 +1360,8 @@ static void init_mad(struct ib_sa_query *query, struct ib_mad_agent *agent)
spin_unlock_irqrestore(&tid_lock, flags);
}
-static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
+static int send_mad(struct ib_sa_query *query, unsigned long timeout_ms,
+ gfp_t gfp_mask)
{
bool preload = gfpflags_allow_blocking(gfp_mask);
unsigned long flags;
@@ -1433,7 +1385,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms, gfp_t gfp_mask)
if ((query->flags & IB_SA_ENABLE_LOCAL_SERVICE) &&
(!(query->flags & IB_SA_QUERY_OPA))) {
- if (!rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
+ if (rdma_nl_chk_listeners(RDMA_NL_GROUP_LS)) {
if (!ib_nl_make_request(query, gfp_mask))
return id;
}
@@ -1599,7 +1551,7 @@ int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct sa_path_rec *resp,
void *context),
@@ -1753,7 +1705,7 @@ int ib_sa_service_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num, u8 method,
struct ib_sa_service_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_service_rec *resp,
void *context),
@@ -1850,7 +1802,7 @@ int ib_sa_mcmember_rec_query(struct ib_sa_client *client,
u8 method,
struct ib_sa_mcmember_rec *rec,
ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_mcmember_rec *resp,
void *context),
@@ -1941,7 +1893,7 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
@@ -2108,7 +2060,7 @@ static void ib_sa_classport_info_rec_release(struct ib_sa_query *sa_query)
}
static int ib_sa_classport_info_rec_query(struct ib_sa_port *port,
- int timeout_ms,
+ unsigned long timeout_ms,
void (*callback)(void *context),
void *context,
struct ib_sa_query **sa_query)
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 9b0bea8303e0..1143c0448666 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -685,9 +685,8 @@ static int ib_mad_agent_security_change(struct notifier_block *nb,
if (event != LSM_POLICY_CHANGE)
return NOTIFY_DONE;
- ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security,
- ag->device->name,
- ag->port_num);
+ ag->smp_allowed = !security_ib_endport_manage_subnet(
+ ag->security, dev_name(&ag->device->dev), ag->port_num);
return NOTIFY_OK;
}
@@ -708,7 +707,7 @@ int ib_mad_agent_security_setup(struct ib_mad_agent *agent,
return 0;
ret = security_ib_endport_manage_subnet(agent->security,
- agent->device->name,
+ dev_name(&agent->device->dev),
agent->port_num);
if (ret)
return ret;
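The name-handling hunks here and in the other files follow one pattern: once struct ib_device exposes its name through the embedded struct device, callers use dev_name() instead of a duplicated name field. A minimal sketch of the consuming side, assuming only that embedded dev member (the helper itself is hypothetical):

    /* Hypothetical helper; assumes only the embedded 'struct device dev'
     * member this series standardizes on. */
    static void demo_log_ibdev(struct ib_device *ibdev)
    {
            pr_info("ib device: %s\n", dev_name(&ibdev->dev));
    }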
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 7fd14ead7b37..6fcce2c206c6 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -512,7 +512,7 @@ static ssize_t show_pma_counter(struct ib_port *p, struct port_attribute *attr,
ret = get_perf_mad(p->ibdev, p->port_num, tab_attr->attr_id, &data,
40 + offset / 8, sizeof(data));
if (ret < 0)
- return sprintf(buf, "N/A (no PMA)\n");
+ return ret;
switch (width) {
case 4:
@@ -1036,7 +1036,7 @@ static int add_port(struct ib_device *device, int port_num,
p->port_num = port_num;
ret = kobject_init_and_add(&p->kobj, &port_type,
- device->ports_parent,
+ device->ports_kobj,
"%d", port_num);
if (ret) {
kfree(p);
@@ -1057,10 +1057,12 @@ static int add_port(struct ib_device *device, int port_num,
goto err_put;
}
- p->pma_table = get_counter_table(device, port_num);
- ret = sysfs_create_group(&p->kobj, p->pma_table);
- if (ret)
- goto err_put_gid_attrs;
+ if (device->process_mad) {
+ p->pma_table = get_counter_table(device, port_num);
+ ret = sysfs_create_group(&p->kobj, p->pma_table);
+ if (ret)
+ goto err_put_gid_attrs;
+ }
p->gid_group.name = "gids";
p->gid_group.attrs = alloc_group_attrs(show_port_gid, attr.gid_tbl_len);
@@ -1118,9 +1120,9 @@ static int add_port(struct ib_device *device, int port_num,
}
/*
- * If port == 0, it means we have only one port and the parent
- * device, not this port device, should be the holder of the
- * hw_counters
+	 * If port == 0, it means hw_counters are per device and not per
+	 * port, so the holder should be the device. Therefore skip the
+	 * per-port counter initialization.
*/
if (device->alloc_hw_stats && port_num)
setup_hw_stats(device, p, port_num);
@@ -1173,7 +1175,8 @@ err_free_gid:
p->gid_group.attrs = NULL;
err_remove_pma:
- sysfs_remove_group(&p->kobj, p->pma_table);
+ if (p->pma_table)
+ sysfs_remove_group(&p->kobj, p->pma_table);
err_put_gid_attrs:
kobject_put(&p->gid_attr_group->kobj);
@@ -1183,7 +1186,7 @@ err_put:
return ret;
}
-static ssize_t show_node_type(struct device *device,
+static ssize_t node_type_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1198,8 +1201,9 @@ static ssize_t show_node_type(struct device *device,
default: return sprintf(buf, "%d: <unknown>\n", dev->node_type);
}
}
+static DEVICE_ATTR_RO(node_type);
-static ssize_t show_sys_image_guid(struct device *device,
+static ssize_t sys_image_guid_show(struct device *device,
struct device_attribute *dev_attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1210,8 +1214,9 @@ static ssize_t show_sys_image_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[2]),
be16_to_cpu(((__be16 *) &dev->attrs.sys_image_guid)[3]));
}
+static DEVICE_ATTR_RO(sys_image_guid);
-static ssize_t show_node_guid(struct device *device,
+static ssize_t node_guid_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1222,8 +1227,9 @@ static ssize_t show_node_guid(struct device *device,
be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
}
+static DEVICE_ATTR_RO(node_guid);
-static ssize_t show_node_desc(struct device *device,
+static ssize_t node_desc_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1231,9 +1237,9 @@ static ssize_t show_node_desc(struct device *device,
return sprintf(buf, "%.64s\n", dev->node_desc);
}
-static ssize_t set_node_desc(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t node_desc_store(struct device *device,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
struct ib_device_modify desc = {};
@@ -1249,8 +1255,9 @@ static ssize_t set_node_desc(struct device *device,
return count;
}
+static DEVICE_ATTR_RW(node_desc);
-static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
+static ssize_t fw_ver_show(struct device *device, struct device_attribute *attr,
char *buf)
{
struct ib_device *dev = container_of(device, struct ib_device, dev);
@@ -1259,19 +1266,19 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
strlcat(buf, "\n", IB_FW_VERSION_NAME_MAX);
return strlen(buf);
}
+static DEVICE_ATTR_RO(fw_ver);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_node_type.attr,
+ &dev_attr_node_guid.attr,
+ &dev_attr_sys_image_guid.attr,
+ &dev_attr_fw_ver.attr,
+ &dev_attr_node_desc.attr,
+ NULL,
+};
-static DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
-static DEVICE_ATTR(sys_image_guid, S_IRUGO, show_sys_image_guid, NULL);
-static DEVICE_ATTR(node_guid, S_IRUGO, show_node_guid, NULL);
-static DEVICE_ATTR(node_desc, S_IRUGO | S_IWUSR, show_node_desc, set_node_desc);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-
-static struct device_attribute *ib_class_attributes[] = {
- &dev_attr_node_type,
- &dev_attr_sys_image_guid,
- &dev_attr_node_guid,
- &dev_attr_node_desc,
- &dev_attr_fw_ver,
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
};
static void free_port_list_attributes(struct ib_device *device)
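The attribute rework above follows the standard sysfs recipe: a foo_show() function, DEVICE_ATTR_RO(foo) to generate dev_attr_foo, a NULL-terminated attribute array, and a single attribute_group. A hedged sketch with hypothetical 'demo' names:

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "example\n");
    }
    static DEVICE_ATTR_RO(demo);            /* generates dev_attr_demo */

    static struct attribute *demo_attrs[] = {
            &dev_attr_demo.attr,
            NULL,                           /* must be NULL terminated */
    };
    static const struct attribute_group demo_attr_group = {
            .attrs = demo_attrs,
    };
    /* Point dev->groups at a NULL-terminated array of groups before
     * device_add(); the files are then created and removed with the
     * device, leaving no device_create_file() loop to unwind. */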
@@ -1285,7 +1292,9 @@ static void free_port_list_attributes(struct ib_device *device)
kfree(port->hw_stats);
free_hsag(&port->kobj, port->hw_stats_ag);
}
- sysfs_remove_group(p, port->pma_table);
+
+ if (port->pma_table)
+ sysfs_remove_group(p, port->pma_table);
sysfs_remove_group(p, &port->pkey_group);
sysfs_remove_group(p, &port->gid_group);
sysfs_remove_group(&port->gid_attr_group->kobj,
@@ -1296,7 +1305,7 @@ static void free_port_list_attributes(struct ib_device *device)
kobject_put(p);
}
- kobject_put(device->ports_parent);
+ kobject_put(device->ports_kobj);
}
int ib_device_register_sysfs(struct ib_device *device,
@@ -1307,23 +1316,15 @@ int ib_device_register_sysfs(struct ib_device *device,
int ret;
int i;
- ret = dev_set_name(class_dev, "%s", device->name);
- if (ret)
- return ret;
+ device->groups[0] = &dev_attr_group;
+ class_dev->groups = device->groups;
ret = device_add(class_dev);
if (ret)
goto err;
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i) {
- ret = device_create_file(class_dev, ib_class_attributes[i]);
- if (ret)
- goto err_unregister;
- }
-
- device->ports_parent = kobject_create_and_add("ports",
- &class_dev->kobj);
- if (!device->ports_parent) {
+ device->ports_kobj = kobject_create_and_add("ports", &class_dev->kobj);
+ if (!device->ports_kobj) {
ret = -ENOMEM;
goto err_put;
}
@@ -1347,20 +1348,15 @@ int ib_device_register_sysfs(struct ib_device *device,
err_put:
free_port_list_attributes(device);
-
-err_unregister:
device_del(class_dev);
-
err:
return ret;
}
void ib_device_unregister_sysfs(struct ib_device *device)
{
- int i;
-
- /* Hold kobject until ib_dealloc_device() */
- kobject_get(&device->dev.kobj);
+ /* Hold device until ib_dealloc_device() */
+ get_device(&device->dev);
free_port_list_attributes(device);
@@ -1369,8 +1365,5 @@ void ib_device_unregister_sysfs(struct ib_device *device)
free_hsag(&device->dev.kobj, device->hw_stats_ag);
}
- for (i = 0; i < ARRAY_SIZE(ib_class_attributes); ++i)
- device_remove_file(&device->dev, ib_class_attributes[i]);
-
device_unregister(&device->dev);
}
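The unregister path now pins the device with get_device() instead of grabbing the kobject directly; both take the same underlying reference, the driver-model helper just keeps the code at the right layer. A sketch of the pairing:

    get_device(&device->dev);       /* hold across teardown... */
    /* ... */
    put_device(&device->dev);       /* ...dropped from ib_dealloc_device() */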
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index a41792dbae1f..c6144df47ea4 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -85,7 +85,9 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
struct page **page_list;
struct vm_area_struct **vma_list;
unsigned long lock_limit;
+ unsigned long new_pinned;
unsigned long cur_base;
+ struct mm_struct *mm;
unsigned long npages;
int ret;
int i;
@@ -107,25 +109,32 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (!can_do_mlock())
return ERR_PTR(-EPERM);
- umem = kzalloc(sizeof *umem, GFP_KERNEL);
- if (!umem)
- return ERR_PTR(-ENOMEM);
+ if (access & IB_ACCESS_ON_DEMAND) {
+ umem = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ umem->is_odp = 1;
+ } else {
+ umem = kzalloc(sizeof(*umem), GFP_KERNEL);
+ if (!umem)
+ return ERR_PTR(-ENOMEM);
+ }
umem->context = context;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = ib_access_writable(access);
+ umem->owning_mm = mm = current->mm;
+ mmgrab(mm);
if (access & IB_ACCESS_ON_DEMAND) {
- ret = ib_umem_odp_get(context, umem, access);
+ ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
if (ret)
goto umem_kfree;
return umem;
}
- umem->odp_data = NULL;
-
/* We assume the memory is from hugetlb until proved otherwise */
umem->hugetlb = 1;
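ib_umem_get() now records the owning mm and takes an mmgrab() reference on it. A brief sketch of the semantics relied on here: mmgrab() keeps the struct mm_struct allocation alive (unlike mmget(), it does not keep the address space alive), so the reference is safe to hold for the umem's whole lifetime and is dropped with mmdrop():

    #include <linux/sched/mm.h>

    struct mm_struct *mm = current->mm;

    mmgrab(mm);             /* pin the mm_struct itself, not the mappings */
    /* ... stash mm in the long-lived object ... */
    mmdrop(mm);             /* on the object's teardown path */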
@@ -144,25 +153,25 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem->hugetlb = 0;
npages = ib_umem_num_pages(umem);
+ if (npages == 0 || npages > UINT_MAX) {
+ ret = -EINVAL;
+ goto out;
+ }
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm += npages;
- if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ if (check_add_overflow(mm->pinned_vm, npages, &new_pinned) ||
+ (new_pinned > lock_limit && !capable(CAP_IPC_LOCK))) {
+ up_write(&mm->mmap_sem);
ret = -ENOMEM;
- goto vma;
+ goto out;
}
- up_write(&current->mm->mmap_sem);
+ mm->pinned_vm = new_pinned;
+ up_write(&mm->mmap_sem);
cur_base = addr & PAGE_MASK;
- if (npages == 0 || npages > UINT_MAX) {
- ret = -EINVAL;
- goto vma;
- }
-
ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
if (ret)
goto vma;
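The pinned-pages accounting now goes through check_add_overflow(), so a huge npages cannot wrap pinned_vm around the rlimit check. A self-contained sketch of the idea (the helper name is hypothetical, and the real code additionally exempts CAP_IPC_LOCK):

    #include <linux/overflow.h>

    /* True when 'cur + extra' neither wraps nor exceeds 'limit';
     * on success *out holds the new total. */
    static bool demo_can_pin(unsigned long cur, unsigned long extra,
                             unsigned long limit, unsigned long *out)
    {
            if (check_add_overflow(cur, extra, out))
                    return false;           /* the addition wrapped */
            return *out <= limit;
    }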
@@ -172,14 +181,14 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
sg_list_start = umem->sg_head.sgl;
- down_read(&current->mm->mmap_sem);
while (npages) {
+ down_read(&mm->mmap_sem);
ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ up_read(&mm->mmap_sem);
goto umem_release;
}
@@ -187,17 +196,20 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
cur_base += ret * PAGE_SIZE;
npages -= ret;
+ /* Continue to hold the mmap_sem as vma_list access
+ * needs to be protected.
+ */
for_each_sg(sg_list_start, sg, ret, i) {
if (vma_list && !is_vm_hugetlb_page(vma_list[i]))
umem->hugetlb = 0;
sg_set_page(sg, page_list[i], PAGE_SIZE, 0);
}
+ up_read(&mm->mmap_sem);
/* preparing for next loop */
sg_list_start = sg;
}
- up_read(&current->mm->mmap_sem);
umem->nmap = ib_dma_map_sg_attrs(context->device,
umem->sg_head.sgl,
@@ -216,29 +228,40 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
umem_release:
__ib_umem_release(context->device, umem, 0);
vma:
- down_write(&current->mm->mmap_sem);
- current->mm->pinned_vm -= ib_umem_num_pages(umem);
- up_write(&current->mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+ mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&mm->mmap_sem);
out:
if (vma_list)
free_page((unsigned long) vma_list);
free_page((unsigned long) page_list);
umem_kfree:
- if (ret)
+ if (ret) {
+ mmdrop(umem->owning_mm);
kfree(umem);
+ }
return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
-static void ib_umem_account(struct work_struct *work)
+static void __ib_umem_release_tail(struct ib_umem *umem)
+{
+ mmdrop(umem->owning_mm);
+ if (umem->is_odp)
+ kfree(to_ib_umem_odp(umem));
+ else
+ kfree(umem);
+}
+
+static void ib_umem_release_defer(struct work_struct *work)
{
struct ib_umem *umem = container_of(work, struct ib_umem, work);
- down_write(&umem->mm->mmap_sem);
- umem->mm->pinned_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
+ down_write(&umem->owning_mm->mmap_sem);
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
+
+ __ib_umem_release_tail(umem);
}
/**
@@ -248,52 +271,36 @@ static void ib_umem_account(struct work_struct *work)
void ib_umem_release(struct ib_umem *umem)
{
struct ib_ucontext *context = umem->context;
- struct mm_struct *mm;
- struct task_struct *task;
- unsigned long diff;
- if (umem->odp_data) {
- ib_umem_odp_release(umem);
+ if (umem->is_odp) {
+ ib_umem_odp_release(to_ib_umem_odp(umem));
+ __ib_umem_release_tail(umem);
return;
}
__ib_umem_release(umem->context->device, umem, 1);
- task = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
-
- diff = ib_umem_num_pages(umem);
-
/*
* We may be called with the mm's mmap_sem already held. This
* can happen when a userspace munmap() is the call that drops
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+	 * we defer the vm_locked accounting to a workqueue.
*/
if (context->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&umem->work, ib_umem_account);
- umem->mm = mm;
- umem->diff = diff;
-
+ if (!down_write_trylock(&umem->owning_mm->mmap_sem)) {
+ INIT_WORK(&umem->work, ib_umem_release_defer);
queue_work(ib_wq, &umem->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&umem->owning_mm->mmap_sem);
+ }
+ umem->owning_mm->pinned_vm -= ib_umem_num_pages(umem);
+ up_write(&umem->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(umem);
+ __ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
@@ -303,7 +310,7 @@ int ib_umem_page_count(struct ib_umem *umem)
int n;
struct scatterlist *sg;
- if (umem->odp_data)
+ if (umem->is_odp)
return ib_umem_num_pages(umem);
n = 0;
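Most of the churn in this file comes from one structural change: the ODP state no longer hangs off umem->odp_data; instead the ib_umem is embedded in ib_umem_odp and to_ib_umem_odp() recovers the container. An illustrative sketch of the shape (field layout trimmed):

    struct demo_umem_odp {
            struct ib_umem umem;            /* embedded, one allocation */
            /* ... ODP-only state ... */
    };

    static inline struct demo_umem_odp *to_demo_umem_odp(struct ib_umem *umem)
    {
            /* container_of() walks from the member back to the wrapper */
            return container_of(umem, struct demo_umem_odp, umem);
    }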
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 6ec748eccff7..2b4c5e7dd5a1 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -58,7 +58,7 @@ static u64 node_start(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_start(umem_odp->umem);
+ return ib_umem_start(&umem_odp->umem);
}
/* Note that the representation of the intervals in the interval tree
@@ -71,140 +71,86 @@ static u64 node_last(struct umem_odp_node *n)
struct ib_umem_odp *umem_odp =
container_of(n, struct ib_umem_odp, interval_tree);
- return ib_umem_end(umem_odp->umem) - 1;
+ return ib_umem_end(&umem_odp->umem) - 1;
}
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
node_start, node_last, static, rbt_ib_umem)
-static void ib_umem_notifier_start_account(struct ib_umem *item)
+static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
- int notifiers_count = item->odp_data->notifiers_count++;
-
- if (notifiers_count == 0)
- /* Initialize the completion object for waiting on
- * notifiers. Since notifier_count is zero, no one
- * should be waiting right now. */
- reinit_completion(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
-}
-
-static void ib_umem_notifier_end_account(struct ib_umem *item)
-{
- mutex_lock(&item->odp_data->umem_mutex);
-
- /* Only update private counters for this umem if it has them.
- * Otherwise skip it. All page faults will be delayed for this umem. */
- if (item->odp_data->mn_counters_active) {
+ mutex_lock(&umem_odp->umem_mutex);
+ if (umem_odp->notifiers_count++ == 0)
/*
- * This sequence increase will notify the QP page fault that
- * the page that is going to be mapped in the spte could have
- * been freed.
+ * Initialize the completion object for waiting on
+		 * notifiers. Since notifiers_count is zero, no one should be
+ * waiting right now.
*/
- ++item->odp_data->notifiers_seq;
- if (--item->odp_data->notifiers_count == 0)
- complete_all(&item->odp_data->notifier_completion);
- }
- mutex_unlock(&item->odp_data->umem_mutex);
+ reinit_completion(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a new mmu notifier in an ib_ucontext. */
-static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
+static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
{
- atomic_inc(&context->notifier_count);
+ mutex_lock(&umem_odp->umem_mutex);
+ /*
+ * This sequence increase will notify the QP page fault that the page
+ * that is going to be mapped in the spte could have been freed.
+ */
+ ++umem_odp->notifiers_seq;
+ if (--umem_odp->notifiers_count == 0)
+ complete_all(&umem_odp->notifier_completion);
+ mutex_unlock(&umem_odp->umem_mutex);
}
-/* Account for a terminating mmu notifier in an ib_ucontext.
- *
- * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
- * the function takes the semaphore itself. */
-static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
+static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
+ u64 start, u64 end, void *cookie)
{
- int zero_notifiers = atomic_dec_and_test(&context->notifier_count);
-
- if (zero_notifiers &&
- !list_empty(&context->no_private_counters)) {
- /* No currently running mmu notifiers. Now is the chance to
- * add private accounting to all previously added umems. */
- struct ib_umem_odp *odp_data, *next;
-
- /* Prevent concurrent mmu notifiers from working on the
- * no_private_counters list. */
- down_write(&context->umem_rwsem);
-
- /* Read the notifier_count again, with the umem_rwsem
- * semaphore taken for write. */
- if (!atomic_read(&context->notifier_count)) {
- list_for_each_entry_safe(odp_data, next,
- &context->no_private_counters,
- no_private_counters) {
- mutex_lock(&odp_data->umem_mutex);
- odp_data->mn_counters_active = true;
- list_del(&odp_data->no_private_counters);
- complete_all(&odp_data->notifier_completion);
- mutex_unlock(&odp_data->umem_mutex);
- }
- }
-
- up_write(&context->umem_rwsem);
- }
-}
+ struct ib_umem *umem = &umem_odp->umem;
-static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie) {
/*
* Increase the number of notifiers running, to
* prevent any further fault handling on this MR.
*/
- ib_umem_notifier_start_account(item);
- item->odp_data->dying = 1;
+ ib_umem_notifier_start_account(umem_odp);
+ umem_odp->dying = 1;
/* Make sure that the fact the umem is dying is visible before we
* release all pending page faults. */
smp_wmb();
- complete_all(&item->odp_data->notifier_completion);
- item->context->invalidate_range(item, ib_umem_start(item),
- ib_umem_end(item));
+ complete_all(&umem_odp->notifier_completion);
+ umem->context->invalidate_range(umem_odp, ib_umem_start(umem),
+ ib_umem_end(umem));
return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
-
- if (!context->invalidate_range)
- return;
-
- ib_ucontext_notifier_start_account(context);
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
- ULLONG_MAX,
- ib_umem_notifier_release_trampoline,
- true,
- NULL);
- up_read(&context->umem_rwsem);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
+
+ down_read(&per_mm->umem_rwsem);
+ if (per_mm->active)
+ rbt_ib_umem_for_each_in_range(
+ &per_mm->umem_tree, 0, ULLONG_MAX,
+ ib_umem_notifier_release_trampoline, true, NULL);
+ up_read(&per_mm->umem_rwsem);
}
-static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, start + PAGE_SIZE);
+ item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
ib_umem_notifier_end_account(item);
return 0;
}
-static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
- u64 end, void *cookie)
+static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
+ u64 start, u64 end, void *cookie)
{
ib_umem_notifier_start_account(item);
- item->context->invalidate_range(item, start, end);
+ item->umem.context->invalidate_range(item, start, end);
return 0;
}
@@ -214,28 +160,30 @@ static int ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
unsigned long end,
bool blockable)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
- int ret;
-
- if (!context->invalidate_range)
- return 0;
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
if (blockable)
- down_read(&context->umem_rwsem);
- else if (!down_read_trylock(&context->umem_rwsem))
+ down_read(&per_mm->umem_rwsem);
+ else if (!down_read_trylock(&per_mm->umem_rwsem))
return -EAGAIN;
- ib_ucontext_notifier_start_account(context);
- ret = rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
- end,
- invalidate_range_start_trampoline,
- blockable, NULL);
- up_read(&context->umem_rwsem);
+ if (!per_mm->active) {
+ up_read(&per_mm->umem_rwsem);
+ /*
+		 * At this point active is permanently set and visible to this
+		 * CPU without a lock; that fact is relied on to skip the unlock
+ * in range_end.
+ */
+ return 0;
+ }
- return ret;
+ return rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+ invalidate_range_start_trampoline,
+ blockable, NULL);
}
-static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
+static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,
u64 end, void *cookie)
{
ib_umem_notifier_end_account(item);
@@ -247,22 +195,16 @@ static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);
+ struct ib_ucontext_per_mm *per_mm =
+ container_of(mn, struct ib_ucontext_per_mm, mn);
- if (!context->invalidate_range)
+ if (unlikely(!per_mm->active))
return;
- /*
- * TODO: we currently bail out if there is any sleepable work to be done
- * in ib_umem_notifier_invalidate_range_start so we shouldn't really block
- * here. But this is ugly and fragile.
- */
- down_read(&context->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start,
end,
invalidate_range_end_trampoline, true, NULL);
- up_read(&context->umem_rwsem);
- ib_ucontext_notifier_end_account(context);
+ up_read(&per_mm->umem_rwsem);
}
static const struct mmu_notifier_ops ib_umem_notifiers = {
@@ -271,31 +213,158 @@ static const struct mmu_notifier_ops ib_umem_notifiers = {
.invalidate_range_end = ib_umem_notifier_invalidate_range_end,
};
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static void add_umem_to_per_mm(struct ib_umem_odp *umem_odp)
{
- struct ib_umem *umem;
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_insert(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ up_write(&per_mm->umem_rwsem);
+}
+
+static void remove_umem_from_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_umem *umem = &umem_odp->umem;
+
+ down_write(&per_mm->umem_rwsem);
+ if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
+ rbt_ib_umem_remove(&umem_odp->interval_tree,
+ &per_mm->umem_tree);
+ complete_all(&umem_odp->notifier_completion);
+
+ up_write(&per_mm->umem_rwsem);
+}
+
+static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
+ struct mm_struct *mm)
+{
+ struct ib_ucontext_per_mm *per_mm;
+ int ret;
+
+ per_mm = kzalloc(sizeof(*per_mm), GFP_KERNEL);
+ if (!per_mm)
+ return ERR_PTR(-ENOMEM);
+
+ per_mm->context = ctx;
+ per_mm->mm = mm;
+ per_mm->umem_tree = RB_ROOT_CACHED;
+ init_rwsem(&per_mm->umem_rwsem);
+ per_mm->active = ctx->invalidate_range;
+
+ rcu_read_lock();
+ per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
+ rcu_read_unlock();
+
+ WARN_ON(mm != current->mm);
+
+ per_mm->mn.ops = &ib_umem_notifiers;
+ ret = mmu_notifier_register(&per_mm->mn, per_mm->mm);
+ if (ret) {
+ dev_err(&ctx->device->dev,
+ "Failed to register mmu_notifier %d\n", ret);
+ goto out_pid;
+ }
+
+ list_add(&per_mm->ucontext_list, &ctx->per_mm_list);
+ return per_mm;
+
+out_pid:
+ put_pid(per_mm->tgid);
+ kfree(per_mm);
+ return ERR_PTR(ret);
+}
+
+static int get_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ struct ib_ucontext_per_mm *per_mm;
+
+ /*
+ * Generally speaking we expect only one or two per_mm in this list,
+ * so no reason to optimize this search today.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
+ if (per_mm->mm == umem_odp->umem.owning_mm)
+ goto found;
+ }
+
+ per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
+ if (IS_ERR(per_mm)) {
+ mutex_unlock(&ctx->per_mm_list_lock);
+ return PTR_ERR(per_mm);
+ }
+
+found:
+ umem_odp->per_mm = per_mm;
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ return 0;
+}
+
+static void free_per_mm(struct rcu_head *rcu)
+{
+ kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
+void put_per_mm(struct ib_umem_odp *umem_odp)
+{
+ struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
+ struct ib_ucontext *ctx = umem_odp->umem.context;
+ bool need_free;
+
+ mutex_lock(&ctx->per_mm_list_lock);
+ umem_odp->per_mm = NULL;
+ per_mm->odp_mrs_count--;
+ need_free = per_mm->odp_mrs_count == 0;
+ if (need_free)
+ list_del(&per_mm->ucontext_list);
+ mutex_unlock(&ctx->per_mm_list_lock);
+
+ if (!need_free)
+ return;
+
+ /*
+	 * NOTE! mmu_notifier_unregister() can happen between a start/end
+	 * callback, resulting in a start without a matching end, and thus
+	 * an unbalanced lock. This doesn't really matter to us since we are
+	 * about to kfree the memory that holds the lock, however LOCKDEP
+	 * doesn't like this.
+ */
+ down_write(&per_mm->umem_rwsem);
+ per_mm->active = false;
+ up_write(&per_mm->umem_rwsem);
+
+ WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
+ mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
+ put_pid(per_mm->tgid);
+ mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
+}
+
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size)
+{
+ struct ib_ucontext *ctx = per_mm->context;
struct ib_umem_odp *odp_data;
+ struct ib_umem *umem;
int pages = size >> PAGE_SHIFT;
int ret;
- umem = kzalloc(sizeof(*umem), GFP_KERNEL);
- if (!umem)
+ odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
+ if (!odp_data)
return ERR_PTR(-ENOMEM);
-
- umem->context = context;
+ umem = &odp_data->umem;
+ umem->context = ctx;
umem->length = size;
umem->address = addr;
umem->page_shift = PAGE_SHIFT;
umem->writable = 1;
-
- odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
- if (!odp_data) {
- ret = -ENOMEM;
- goto out_umem;
- }
- odp_data->umem = umem;
+ umem->is_odp = 1;
+ odp_data->per_mm = per_mm;
mutex_init(&odp_data->umem_mutex);
init_completion(&odp_data->notifier_completion);
@@ -314,39 +383,34 @@ struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
goto out_page_list;
}
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)))
- odp_data->mn_counters_active = true;
- else
- list_add(&odp_data->no_private_counters,
- &context->no_private_counters);
- up_write(&context->umem_rwsem);
-
- umem->odp_data = odp_data;
+ /*
+ * Caller must ensure that the umem_odp that the per_mm came from
+ * cannot be freed during the call to ib_alloc_odp_umem.
+ */
+ mutex_lock(&ctx->per_mm_list_lock);
+ per_mm->odp_mrs_count++;
+ mutex_unlock(&ctx->per_mm_list_lock);
+ add_umem_to_per_mm(odp_data);
- return umem;
+ return odp_data;
out_page_list:
vfree(odp_data->page_list);
out_odp_data:
kfree(odp_data);
-out_umem:
- kfree(umem);
return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access)
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
+ struct ib_umem *umem = &umem_odp->umem;
+ /*
+	 * NOTE: This must be called in a process context where umem->owning_mm
+ * == current->mm
+ */
+ struct mm_struct *mm = umem->owning_mm;
int ret_val;
- struct pid *our_pid;
- struct mm_struct *mm = get_task_mm(current);
-
- if (!mm)
- return -EINVAL;
if (access & IB_ACCESS_HUGETLB) {
struct vm_area_struct *vma;
@@ -366,111 +430,43 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
umem->hugetlb = 0;
}
- /* Prevent creating ODP MRs in child processes */
- rcu_read_lock();
- our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- put_pid(our_pid);
- if (context->tgid != our_pid) {
- ret_val = -EINVAL;
- goto out_mm;
- }
-
- umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
- if (!umem->odp_data) {
- ret_val = -ENOMEM;
- goto out_mm;
- }
- umem->odp_data->umem = umem;
-
- mutex_init(&umem->odp_data->umem_mutex);
+ mutex_init(&umem_odp->umem_mutex);
- init_completion(&umem->odp_data->notifier_completion);
+ init_completion(&umem_odp->notifier_completion);
if (ib_umem_num_pages(umem)) {
- umem->odp_data->page_list =
- vzalloc(array_size(sizeof(*umem->odp_data->page_list),
+ umem_odp->page_list =
+ vzalloc(array_size(sizeof(*umem_odp->page_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->page_list) {
- ret_val = -ENOMEM;
- goto out_odp_data;
- }
+ if (!umem_odp->page_list)
+ return -ENOMEM;
- umem->odp_data->dma_list =
- vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
+ umem_odp->dma_list =
+ vzalloc(array_size(sizeof(*umem_odp->dma_list),
ib_umem_num_pages(umem)));
- if (!umem->odp_data->dma_list) {
+ if (!umem_odp->dma_list) {
ret_val = -ENOMEM;
goto out_page_list;
}
}
- /*
- * When using MMU notifiers, we will get a
- * notification before the "current" task (and MM) is
- * destroyed. We use the umem_rwsem semaphore to synchronize.
- */
- down_write(&context->umem_rwsem);
- context->odp_mrs_count++;
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_insert(&umem->odp_data->interval_tree,
- &context->umem_tree);
- if (likely(!atomic_read(&context->notifier_count)) ||
- context->odp_mrs_count == 1)
- umem->odp_data->mn_counters_active = true;
- else
- list_add(&umem->odp_data->no_private_counters,
- &context->no_private_counters);
- downgrade_write(&context->umem_rwsem);
-
- if (context->odp_mrs_count == 1) {
- /*
- * Note that at this point, no MMU notifier is running
- * for this context!
- */
- atomic_set(&context->notifier_count, 0);
- INIT_HLIST_NODE(&context->mn.hlist);
- context->mn.ops = &ib_umem_notifiers;
- /*
- * Lock-dep detects a false positive for mmap_sem vs.
- * umem_rwsem, due to not grasping downgrade_write correctly.
- */
- lockdep_off();
- ret_val = mmu_notifier_register(&context->mn, mm);
- lockdep_on();
- if (ret_val) {
- pr_err("Failed to register mmu_notifier %d\n", ret_val);
- ret_val = -EBUSY;
- goto out_mutex;
- }
- }
-
- up_read(&context->umem_rwsem);
+ ret_val = get_per_mm(umem_odp);
+ if (ret_val)
+ goto out_dma_list;
+ add_umem_to_per_mm(umem_odp);
- /*
- * Note that doing an mmput can cause a notifier for the relevant mm.
- * If the notifier is called while we hold the umem_rwsem, this will
- * cause a deadlock. Therefore, we release the reference only after we
- * released the semaphore.
- */
- mmput(mm);
return 0;
-out_mutex:
- up_read(&context->umem_rwsem);
- vfree(umem->odp_data->dma_list);
+out_dma_list:
+ vfree(umem_odp->dma_list);
out_page_list:
- vfree(umem->odp_data->page_list);
-out_odp_data:
- kfree(umem->odp_data);
-out_mm:
- mmput(mm);
+ vfree(umem_odp->page_list);
return ret_val;
}
-void ib_umem_odp_release(struct ib_umem *umem)
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
- struct ib_ucontext *context = umem->context;
+ struct ib_umem *umem = &umem_odp->umem;
/*
* Ensure that no more pages are mapped in the umem.
@@ -478,61 +474,13 @@ void ib_umem_odp_release(struct ib_umem *umem)
* It is the driver's responsibility to ensure, before calling us,
* that the hardware will not attempt to access the MR any more.
*/
- ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- down_write(&context->umem_rwsem);
- if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
- rbt_ib_umem_remove(&umem->odp_data->interval_tree,
- &context->umem_tree);
- context->odp_mrs_count--;
- if (!umem->odp_data->mn_counters_active) {
- list_del(&umem->odp_data->no_private_counters);
- complete_all(&umem->odp_data->notifier_completion);
- }
-
- /*
- * Downgrade the lock to a read lock. This ensures that the notifiers
- * (who lock the mutex for reading) will be able to finish, and we
- * will be able to enventually obtain the mmu notifiers SRCU. Note
- * that since we are doing it atomically, no other user could register
- * and unregister while we do the check.
- */
- downgrade_write(&context->umem_rwsem);
- if (!context->odp_mrs_count) {
- struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
-
- owning_process = get_pid_task(context->tgid,
- PIDTYPE_PID);
- if (owning_process == NULL)
- /*
- * The process is already dead, notifier were removed
- * already.
- */
- goto out;
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL)
- /*
- * The process' mm is already dead, notifier were
- * removed already.
- */
- goto out_put_task;
- mmu_notifier_unregister(&context->mn, owning_mm);
-
- mmput(owning_mm);
-
-out_put_task:
- put_task_struct(owning_process);
- }
-out:
- up_read(&context->umem_rwsem);
-
- vfree(umem->odp_data->dma_list);
- vfree(umem->odp_data->page_list);
- kfree(umem->odp_data);
- kfree(umem);
+ remove_umem_from_per_mm(umem_odp);
+ put_per_mm(umem_odp);
+ vfree(umem_odp->dma_list);
+ vfree(umem_odp->page_list);
}
/*
@@ -544,7 +492,7 @@ out:
* @access_mask: access permissions needed for this page.
* @current_seq: sequence number for synchronization with invalidations.
* the sequence number is taken from
- * umem->odp_data->notifiers_seq.
+ * umem_odp->notifiers_seq.
*
* The function returns -EFAULT if the DMA mapping operation fails. It returns
* -EAGAIN if a concurrent invalidation prevents us from updating the page.
@@ -554,12 +502,13 @@ out:
* umem.
*/
static int ib_umem_odp_map_dma_single_page(
- struct ib_umem *umem,
+ struct ib_umem_odp *umem_odp,
int page_index,
struct page *page,
u64 access_mask,
unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct ib_device *dev = umem->context->device;
dma_addr_t dma_addr;
int stored_page = 0;
@@ -571,11 +520,11 @@ static int ib_umem_odp_map_dma_single_page(
* handle case of a racing notifier. This check also allows us to bail
* early if we have a notifier running in parallel with us.
*/
- if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
+ if (ib_umem_mmu_notifier_retry(umem_odp, current_seq)) {
ret = -EAGAIN;
goto out;
}
- if (!(umem->odp_data->dma_list[page_index])) {
+ if (!(umem_odp->dma_list[page_index])) {
dma_addr = ib_dma_map_page(dev,
page,
0, BIT(umem->page_shift),
@@ -584,15 +533,15 @@ static int ib_umem_odp_map_dma_single_page(
ret = -EFAULT;
goto out;
}
- umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
- umem->odp_data->page_list[page_index] = page;
+ umem_odp->dma_list[page_index] = dma_addr | access_mask;
+ umem_odp->page_list[page_index] = page;
umem->npages++;
stored_page = 1;
- } else if (umem->odp_data->page_list[page_index] == page) {
- umem->odp_data->dma_list[page_index] |= access_mask;
+ } else if (umem_odp->page_list[page_index] == page) {
+ umem_odp->dma_list[page_index] |= access_mask;
} else {
pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
- umem->odp_data->page_list[page_index], page);
+ umem_odp->page_list[page_index], page);
/* Better remove the mapping now, to prevent any further
* damage. */
remove_existing_mapping = 1;
@@ -605,7 +554,7 @@ out:
if (remove_existing_mapping && umem->context->invalidate_range) {
invalidate_page_trampoline(
- umem,
+ umem_odp,
ib_umem_start(umem) + (page_index >> umem->page_shift),
ib_umem_start(umem) + ((page_index + 1) >>
umem->page_shift),
@@ -621,7 +570,7 @@ out:
*
* Pins the range of pages passed in the argument, and maps them to
* DMA addresses. The DMA addresses of the mapped pages is updated in
- * umem->odp_data->dma_list.
+ * umem_odp->dma_list.
*
* Returns the number of pages mapped in success, negative error code
* for failure.
@@ -629,7 +578,7 @@ out:
* the function from completing its task.
* An -ENOENT error code indicates that the userspace process is being
* terminated and the mm was already destroyed.
- * @umem: the umem to map and pin
+ * @umem_odp: the umem to map and pin
* @user_virt: the address from which we need to map.
* @bcnt: the minimal number of bytes to pin and map. The mapping might be
* bigger due to alignment, and may also be smaller in case of an error
@@ -639,13 +588,15 @@ out:
* range.
* @current_seq: the MMU notifiers sequence value for synchronization with
* invalidations. The sequence number is read from
- * umem->odp_data->notifiers_seq before calling this function
+ * umem_odp->notifiers_seq before calling this function
*/
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
- u64 access_mask, unsigned long current_seq)
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq)
{
+ struct ib_umem *umem = &umem_odp->umem;
struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = NULL;
+ struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
struct page **local_page_list = NULL;
u64 page_mask, off;
int j, k, ret = 0, start_idx, npages = 0, page_shift;
@@ -669,15 +620,14 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
user_virt = user_virt & page_mask;
bcnt += off; /* Charge for the first page offset as well. */
- owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
- if (owning_process == NULL) {
+ /*
+	 * owning_process is allowed to be NULL; this means the mm somehow
+	 * outlived the originating process. Presumably mmget_not_zero()
+	 * will fail in this case.
+ */
+ owning_process = get_pid_task(umem_odp->per_mm->tgid, PIDTYPE_PID);
+ if (WARN_ON(!mmget_not_zero(umem_odp->umem.owning_mm))) {
ret = -EINVAL;
- goto out_no_task;
- }
-
- owning_mm = get_task_mm(owning_process);
- if (owning_mm == NULL) {
- ret = -ENOENT;
goto out_put_task;
}
@@ -709,7 +659,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
if (user_virt & ~page_mask) {
p += PAGE_SIZE;
@@ -722,7 +672,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
}
ret = ib_umem_odp_map_dma_single_page(
- umem, k, local_page_list[j],
+ umem_odp, k, local_page_list[j],
access_mask, current_seq);
if (ret < 0)
break;
@@ -730,7 +680,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
p = page_to_phys(local_page_list[j]);
k++;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -749,16 +699,17 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
mmput(owning_mm);
out_put_task:
- put_task_struct(owning_process);
-out_no_task:
+ if (owning_process)
+ put_task_struct(owning_process);
free_page((unsigned long)local_page_list);
return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
u64 bound)
{
+ struct ib_umem *umem = &umem_odp->umem;
int idx;
u64 addr;
struct ib_device *dev = umem->context->device;
@@ -770,12 +721,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
- mutex_lock(&umem->odp_data->umem_mutex);
+ mutex_lock(&umem_odp->umem_mutex);
for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
- if (umem->odp_data->page_list[idx]) {
- struct page *page = umem->odp_data->page_list[idx];
- dma_addr_t dma = umem->odp_data->dma_list[idx];
+ if (umem_odp->page_list[idx]) {
+ struct page *page = umem_odp->page_list[idx];
+ dma_addr_t dma = umem_odp->dma_list[idx];
dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
WARN_ON(!dma_addr);
@@ -798,12 +749,12 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
/* on demand pinning support */
if (!umem->context->invalidate_range)
put_page(page);
- umem->odp_data->page_list[idx] = NULL;
- umem->odp_data->dma_list[idx] = 0;
+ umem_odp->page_list[idx] = NULL;
+ umem_odp->dma_list[idx] = 0;
umem->npages--;
}
}
- mutex_unlock(&umem->odp_data->umem_mutex);
+ mutex_unlock(&umem_odp->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
@@ -830,7 +781,7 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
return -EAGAIN;
next = rbt_ib_umem_iter_next(node, start, last - 1);
umem = container_of(node, struct ib_umem_odp, interval_tree);
- ret_val = cb(umem->umem, start, last, cookie) || ret_val;
+ ret_val = cb(umem, start, last, cookie) || ret_val;
}
return ret_val;
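With this change rbt_ib_umem_for_each_in_range() hands callbacks the ib_umem_odp itself rather than the inner ib_umem. A hedged caller sketch (the callback body is illustrative):

    static int demo_cb(struct ib_umem_odp *umem_odp, u64 start, u64 end,
                       void *cookie)
    {
            /* per-range work */
            return 0;
    }

    ret = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
                                        demo_cb, true /* blockable */,
                                        NULL);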
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a6852d691..f55f48f6b272 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -138,7 +138,7 @@ static const dev_t base_issm_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE) +
static dev_t dynamic_umad_dev;
static dev_t dynamic_issm_dev;
-static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
+static DEFINE_IDA(umad_ida);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device, void *client_data);
@@ -1132,7 +1132,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
if (!port)
return -ENODEV;
- return sprintf(buf, "%s\n", port->ib_dev->name);
+ return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -1159,11 +1159,10 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
dev_t base_umad;
dev_t base_issm;
- devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (devnum >= IB_UMAD_MAX_PORTS)
+ devnum = ida_alloc_max(&umad_ida, IB_UMAD_MAX_PORTS - 1, GFP_KERNEL);
+ if (devnum < 0)
return -1;
port->dev_num = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UMAD_NUM_FIXED_MINOR) {
base_umad = dynamic_umad_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
base_issm = dynamic_issm_dev + devnum - IB_UMAD_NUM_FIXED_MINOR;
@@ -1227,7 +1226,7 @@ err_dev:
err_cdev:
cdev_del(&port->cdev);
- clear_bit(devnum, dev_map);
+ ida_free(&umad_ida, devnum);
return -1;
}
@@ -1261,7 +1260,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
}
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ ida_free(&umad_ida, port->dev_num);
}
static void ib_umad_add_one(struct ib_device *device)
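The minor-number bitmap becomes an IDA, which hands out the lowest free id and keeps the bookkeeping internal. A minimal sketch of the pattern (DEMO_MAX_PORTS is hypothetical):

    #include <linux/idr.h>

    static DEFINE_IDA(demo_ida);

    static int demo_get_minor(void)
    {
            /* returns an id in [0, DEMO_MAX_PORTS - 1], or < 0 on error */
            return ida_alloc_max(&demo_ida, DEMO_MAX_PORTS - 1, GFP_KERNEL);
    }
    /* each successful allocation is paired with ida_free(&demo_ida, id) */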
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5df8e548cc14..c97935a0c7c6 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -100,13 +100,14 @@ struct ib_uverbs_device {
atomic_t refcount;
int num_comp_vectors;
struct completion comp;
- struct device *dev;
+ struct device dev;
+ /* First group for device attributes, NULL terminated array */
+ const struct attribute_group *groups[2];
struct ib_device __rcu *ib_dev;
int devnum;
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
- struct kobject kobj;
struct srcu_struct disassociate_srcu;
struct mutex lists_mutex; /* protect lists */
struct list_head uverbs_file_list;
@@ -146,7 +147,6 @@ struct ib_uverbs_file {
struct ib_event_handler event_handler;
struct ib_uverbs_async_event_file *async_file;
struct list_head list;
- int is_closed;
/*
* To access the uobjects list hw_destroy_rwsem must be held for write
@@ -158,6 +158,9 @@ struct ib_uverbs_file {
spinlock_t uobjects_lock;
struct list_head uobjects;
+ struct mutex umap_lock;
+ struct list_head umaps;
+
u64 uverbs_cmd_mask;
u64 uverbs_ex_cmd_mask;
@@ -218,12 +221,6 @@ struct ib_ucq_object {
u32 async_events_reported;
};
-struct ib_uflow_resources;
-struct ib_uflow_object {
- struct ib_uobject uobject;
- struct ib_uflow_resources *resources;
-};
-
extern const struct file_operations uverbs_event_fops;
void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue);
struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
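Turning the dev pointer into an embedded struct device lets the release callback recover and free the whole container, which is what the uverbs_main.c hunks below switch to. A sketch of the idiom (the demo type is illustrative):

    struct demo_uverbs_device {
            struct device dev;      /* embedded; owns the object lifetime */
            /* ... */
    };

    static void demo_release(struct device *device)
    {
            kfree(container_of(device, struct demo_uverbs_device, dev));
    }

    /* at init: device_initialize(&d->dev); d->dev.release = demo_release; */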
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index e012ca80f9d1..a93853770e3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -117,18 +117,12 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
/* ufile is required when some objects are released */
ucontext->ufile = file;
- rcu_read_lock();
- ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
- rcu_read_unlock();
- ucontext->closing = 0;
+ ucontext->closing = false;
ucontext->cleanup_retryable = false;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- ucontext->umem_tree = RB_ROOT_CACHED;
- init_rwsem(&ucontext->umem_rwsem);
- ucontext->odp_mrs_count = 0;
- INIT_LIST_HEAD(&ucontext->no_private_counters);
-
+ mutex_init(&ucontext->per_mm_list_lock);
+ INIT_LIST_HEAD(&ucontext->per_mm_list);
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
ucontext->invalidate_range = NULL;
@@ -172,7 +166,6 @@ err_fd:
put_unused_fd(resp.async_fd);
err_free:
- put_pid(ucontext->tgid);
ib_dev->dealloc_ucontext(ucontext);
err_alloc:
@@ -2769,16 +2762,7 @@ out_put:
return ret ? ret : in_len;
}
-struct ib_uflow_resources {
- size_t max;
- size_t num;
- size_t collection_num;
- size_t counters_num;
- struct ib_counters **counters;
- struct ib_flow_action **collection;
-};
-
-static struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs)
{
struct ib_uflow_resources *resources;
@@ -2808,6 +2792,7 @@ err:
return NULL;
}
+EXPORT_SYMBOL(flow_resources_alloc);
void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
{
@@ -2826,10 +2811,11 @@ void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res)
kfree(uflow_res->counters);
kfree(uflow_res);
}
+EXPORT_SYMBOL(ib_uverbs_flow_resources_free);
-static void flow_resources_add(struct ib_uflow_resources *uflow_res,
- enum ib_flow_spec_type type,
- void *ibobj)
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj)
{
WARN_ON(uflow_res->num >= uflow_res->max);
@@ -2850,6 +2836,7 @@ static void flow_resources_add(struct ib_uflow_resources *uflow_res,
uflow_res->num++;
}
+EXPORT_SYMBOL(flow_resources_add);
static int kern_spec_to_ib_spec_action(struct ib_uverbs_file *ufile,
struct ib_uverbs_flow_spec *kern_spec,
@@ -3484,7 +3471,6 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_uverbs_create_flow cmd;
struct ib_uverbs_create_flow_resp resp;
struct ib_uobject *uobj;
- struct ib_uflow_object *uflow;
struct ib_flow *flow_id;
struct ib_uverbs_flow_attr *kern_flow_attr;
struct ib_flow_attr *flow_attr;
@@ -3623,13 +3609,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
err = PTR_ERR(flow_id);
goto err_free;
}
- atomic_inc(&qp->usecnt);
- flow_id->qp = qp;
- flow_id->device = qp->device;
- flow_id->uobject = uobj;
- uobj->object = flow_id;
- uflow = container_of(uobj, typeof(*uflow), uobject);
- uflow->resources = uflow_res;
+
+ ib_set_flow(uobj, flow_id, qp, qp->device, uflow_res);
memset(&resp, 0, sizeof(resp));
resp.flow_handle = uobj->id;
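flow_resources_alloc(), flow_resources_add() and ib_uverbs_flow_resources_free() are now exported so drivers that build flows can track the referenced objects themselves. A hedged usage sketch:

    struct ib_uflow_resources *res = flow_resources_alloc(num_specs);

    if (!res)
            return -ENOMEM;
    flow_resources_add(res, IB_FLOW_SPEC_ACTION_HANDLE, action);
    /* ... on flow teardown ... */
    ib_uverbs_flow_resources_free(res);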
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index 1a6b229e3db3..b0e493e8d860 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -57,6 +57,7 @@ struct bundle_priv {
struct ib_uverbs_attr *uattrs;
DECLARE_BITMAP(uobj_finalize, UVERBS_API_ATTR_BKEY_LEN);
+ DECLARE_BITMAP(spec_finalize, UVERBS_API_ATTR_BKEY_LEN);
/*
* Must be last. bundle ends in a flex array which overlaps
@@ -143,6 +144,86 @@ static bool uverbs_is_attr_cleared(const struct ib_uverbs_attr *uattr,
0, uattr->len - len);
}
+static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
+ const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ struct ib_uverbs_attr *uattr,
+ u32 attr_bkey)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ size_t array_len;
+ u32 *idr_vals;
+ int ret = 0;
+ size_t i;
+
+ if (uattr->attr_data.reserved)
+ return -EINVAL;
+
+ if (uattr->len % sizeof(u32))
+ return -EINVAL;
+
+ array_len = uattr->len / sizeof(u32);
+ if (array_len < spec->u2.objs_arr.min_len ||
+ array_len > spec->u2.objs_arr.max_len)
+ return -EINVAL;
+
+ attr->uobjects =
+ uverbs_alloc(&pbundle->bundle,
+ array_size(array_len, sizeof(*attr->uobjects)));
+ if (IS_ERR(attr->uobjects))
+ return PTR_ERR(attr->uobjects);
+
+ /*
+	 * Since an idr value is 4B and each *uobjects entry is >= 4B, we can
+	 * use attr->uobjects to store the idr array and avoid an additional
+	 * memory allocation. The idr values are packed at the tail of the
+	 * uobjects array, so each idr can be read before its slot is
+	 * overwritten with the looked-up pointer.
+ */
+ idr_vals = (u32 *)(attr->uobjects + array_len) - array_len;
+
+ if (uattr->len > sizeof(uattr->data)) {
+ ret = copy_from_user(idr_vals, u64_to_user_ptr(uattr->data),
+ uattr->len);
+ if (ret)
+ return -EFAULT;
+ } else {
+ memcpy(idr_vals, &uattr->data, uattr->len);
+ }
+
+ for (i = 0; i != array_len; i++) {
+ attr->uobjects[i] = uverbs_get_uobject_from_file(
+ spec->u2.objs_arr.obj_type, pbundle->bundle.ufile,
+ spec->u2.objs_arr.access, idr_vals[i]);
+ if (IS_ERR(attr->uobjects[i])) {
+ ret = PTR_ERR(attr->uobjects[i]);
+ break;
+ }
+ }
+
+ attr->len = i;
+ __set_bit(attr_bkey, pbundle->spec_finalize);
+ return ret;
+}
+
+static int uverbs_free_idrs_array(const struct uverbs_api_attr *attr_uapi,
+ struct uverbs_objs_arr_attr *attr,
+ bool commit)
+{
+ const struct uverbs_attr_spec *spec = &attr_uapi->spec;
+ int current_ret;
+ int ret = 0;
+ size_t i;
+
+ for (i = 0; i != attr->len; i++) {
+ current_ret = uverbs_finalize_object(
+ attr->uobjects[i], spec->u2.objs_arr.access, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+
+ return ret;
+}
+
static int uverbs_process_attr(struct bundle_priv *pbundle,
const struct uverbs_api_attr *attr_uapi,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
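The aliasing trick in uverbs_process_idrs_array() deserves a spelled-out sketch: one allocation holds the pointer array, and the n u32 ids are packed into its tail, so each id is consumed before its bytes can be overwritten (helper names are hypothetical):

    struct demo_obj **ptrs = demo_alloc(array_size(n, sizeof(*ptrs)));
    u32 *ids = (u32 *)(ptrs + n) - n;       /* tail of the same buffer */

    demo_copy_ids_from_user(ids, n);        /* fill the ids first */
    for (i = 0; i != n; i++)
            ptrs[i] = demo_lookup(ids[i]);  /* writing ptrs[i] only clobbers
                                             * id slots already consumed */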
@@ -246,6 +327,11 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
}
break;
+
+ case UVERBS_ATTR_TYPE_IDRS_ARRAY:
+ return uverbs_process_idrs_array(pbundle, attr_uapi,
+ &e->objs_arr_attr, uattr,
+ attr_bkey);
default:
return -EOPNOTSUPP;
}
@@ -300,8 +386,7 @@ static int uverbs_set_attr(struct bundle_priv *pbundle,
return -EPROTONOSUPPORT;
return 0;
}
- attr = srcu_dereference(
- *slot, &pbundle->bundle.ufile->device->disassociate_srcu);
+ attr = rcu_dereference_protected(*slot, true);
/* Reject duplicate attributes from user-space */
if (test_bit(attr_bkey, pbundle->bundle.attr_present))
@@ -384,6 +469,7 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
unsigned int i;
int ret = 0;
+ /* fast path for simple uobjects */
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
@@ -397,6 +483,30 @@ static int bundle_destroy(struct bundle_priv *pbundle, bool commit)
ret = current_ret;
}
+ i = -1;
+ while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
+ i + 1)) < key_bitmap_len) {
+ struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ const struct uverbs_api_attr *attr_uapi;
+ void __rcu **slot;
+ int current_ret;
+
+ slot = uapi_get_attr_for_method(
+ pbundle,
+ pbundle->method_key | uapi_bkey_to_key_attr(i));
+ if (WARN_ON(!slot))
+ continue;
+
+ attr_uapi = rcu_dereference_protected(*slot, true);
+
+ if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ current_ret = uverbs_free_idrs_array(
+ attr_uapi, &attr->objs_arr_attr, commit);
+ if (!ret)
+ ret = current_ret;
+ }
+ }
+
for (memblock = pbundle->allocated_mem; memblock;) {
struct bundle_alloc_head *tmp = memblock;
@@ -429,7 +539,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
uapi_key_ioctl_method(hdr->method_id));
if (unlikely(!slot))
return -EPROTONOSUPPORT;
- method_elm = srcu_dereference(*slot, &ufile->device->disassociate_srcu);
+ method_elm = rcu_dereference_protected(*slot, true);
if (!method_elm->use_stack) {
pbundle = kmalloc(method_elm->bundle_size, GFP_KERNEL);
@@ -461,6 +571,7 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
+ memset(pbundle->spec_finalize, 0, sizeof(pbundle->spec_finalize));
ret = ib_uverbs_run_method(pbundle, hdr->num_attrs);
destroy_ret = bundle_destroy(pbundle, ret == 0);
@@ -611,3 +722,26 @@ int uverbs_copy_to(const struct uverbs_attr_bundle *bundle, size_t idx,
return 0;
}
EXPORT_SYMBOL(uverbs_copy_to);
+
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ const struct uverbs_attr *attr;
+
+ attr = uverbs_attr_get(attrs_bundle, idx);
+ if (IS_ERR(attr)) {
+ if ((PTR_ERR(attr) != -ENOENT) || !def_val)
+ return PTR_ERR(attr);
+
+ *to = *def_val;
+ } else {
+ *to = attr->ptr_attr.data;
+ }
+
+ if (*to < lower_bound || (*to > 0 && (u64)*to > upper_bound))
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL(_uverbs_get_const);
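
For context, a hedged sketch of a caller; EXAMPLE_ATTR_MODE and the 0..7 bounds are invented for illustration, and in-tree users are expected to go through a wrapper macro rather than calling the underscored helper directly:

	static int example_handler(struct uverbs_attr_bundle *attrs)
	{
		s64 mode;
		s64 def_mode = 0;	/* used when the attr is absent */
		int ret;

		/* a missing attr (-ENOENT) falls back to def_mode; a value
		 * outside [0, 7] yields -EINVAL */
		ret = _uverbs_get_const(&mode, attrs, EXAMPLE_ATTR_MODE,
					0, 7, &def_mode);
		if (ret)
			return ret;

		return 0;
	}
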
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 50152c1b1004..6d373f5515b7 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -45,6 +45,7 @@
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
+#include <linux/sched/mm.h>
#include <linux/uaccess.h>
@@ -72,7 +73,7 @@ enum {
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;
-static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
+static DEFINE_IDA(uverbs_ida);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
@@ -169,20 +170,16 @@ int uverbs_dealloc_mw(struct ib_mw *mw)
return ret;
}
-static void ib_uverbs_release_dev(struct kobject *kobj)
+static void ib_uverbs_release_dev(struct device *device)
{
struct ib_uverbs_device *dev =
- container_of(kobj, struct ib_uverbs_device, kobj);
+ container_of(device, struct ib_uverbs_device, dev);
uverbs_destroy_api(dev->uapi);
cleanup_srcu_struct(&dev->disassociate_srcu);
kfree(dev);
}
-static struct kobj_type ib_uverbs_dev_ktype = {
- .release = ib_uverbs_release_dev,
-};
-
static void ib_uverbs_release_async_event_file(struct kref *ref)
{
struct ib_uverbs_async_event_file *file =
@@ -265,7 +262,7 @@ void ib_uverbs_release_file(struct kref *ref)
if (atomic_dec_and_test(&file->device->refcount))
ib_uverbs_comp_dev(file->device);
- kobject_put(&file->device->kobj);
+ put_device(&file->device->dev);
kfree(file);
}
@@ -817,6 +814,226 @@ out:
}
/*
+ * Each time we map IO memory into user space this keeps track of the mapping.
+ * When the device is hot-unplugged we 'zap' the mmaps in user space to point
+ * to the zero page and allow the hot unplug to proceed.
+ *
+ * This is necessary for cases like PCI physical hot unplug as the actual BAR
+ * memory may vanish after this and a userspace access to it could
+ * trigger an MCE (machine check).
+ *
+ * RDMA drivers supporting disassociation must have their user space designed
+ * to cope in some way with their IO pages going to the zero page.
+ */
+struct rdma_umap_priv {
+ struct vm_area_struct *vma;
+ struct list_head list;
+};
+
+static const struct vm_operations_struct rdma_umap_ops;
+
+static void rdma_umap_priv_init(struct rdma_umap_priv *priv,
+ struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+
+ priv->vma = vma;
+ vma->vm_private_data = priv;
+ vma->vm_ops = &rdma_umap_ops;
+
+ mutex_lock(&ufile->umap_lock);
+ list_add(&priv->list, &ufile->umaps);
+ mutex_unlock(&ufile->umap_lock);
+}
+
+/*
+ * The VMA has been dup'd, initialize the vm_private_data with a new tracking
+ * struct
+ */
+static void rdma_umap_open(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *opriv = vma->vm_private_data;
+ struct rdma_umap_priv *priv;
+
+ if (!opriv)
+ return;
+
+ /* We are racing with disassociation */
+ if (!down_read_trylock(&ufile->hw_destroy_rwsem))
+ goto out_zap;
+ /*
+ * Disassociation already completed, the VMA should already be zapped.
+ */
+ if (!ufile->ucontext)
+ goto out_unlock;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ goto out_unlock;
+ rdma_umap_priv_init(priv, vma);
+
+ up_read(&ufile->hw_destroy_rwsem);
+ return;
+
+out_unlock:
+ up_read(&ufile->hw_destroy_rwsem);
+out_zap:
+ /*
+ * We can't allow the VMA to be created with the actual IO pages, that
+ * would break our API contract, and it can't be stopped at this
+ * point, so zap it.
+ */
+ vma->vm_private_data = NULL;
+ zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+}
+
+static void rdma_umap_close(struct vm_area_struct *vma)
+{
+ struct ib_uverbs_file *ufile = vma->vm_file->private_data;
+ struct rdma_umap_priv *priv = vma->vm_private_data;
+
+ if (!priv)
+ return;
+
+ /*
+ * The vma holds a reference on the struct file that created it, which
+ * in turn means that the ib_uverbs_file is guaranteed to exist at
+ * this point.
+ */
+ mutex_lock(&ufile->umap_lock);
+ list_del(&priv->list);
+ mutex_unlock(&ufile->umap_lock);
+ kfree(priv);
+}
+
+static const struct vm_operations_struct rdma_umap_ops = {
+ .open = rdma_umap_open,
+ .close = rdma_umap_close,
+};
+
+static struct rdma_umap_priv *rdma_user_mmap_pre(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long size)
+{
+ struct ib_uverbs_file *ufile = ucontext->ufile;
+ struct rdma_umap_priv *priv;
+
+ if (vma->vm_end - vma->vm_start != size)
+ return ERR_PTR(-EINVAL);
+
+ /* Driver is using this wrong, must be called by ib_uverbs_mmap */
+ if (WARN_ON(!vma->vm_file ||
+ vma->vm_file->private_data != ufile))
+ return ERR_PTR(-EINVAL);
+ lockdep_assert_held(&ufile->device->disassociate_srcu);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+ return priv;
+}
+
+/*
+ * Map IO memory into a process. This is to be called by drivers as part of
+ * their mmap() functions if they wish to send something like PCI-E BAR memory
+ * to userspace.
+ */
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ vma->vm_page_prot = prot;
+ if (io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_io);
+
+/*
+ * The page case is here for a slightly different reason: the driver expects
+ * to be able to free the page it is sharing to user space when it destroys
+ * its ucontext, which means we need to zap the user space references.
+ *
+ * We could handle this differently by providing an API to allocate a shared
+ * page and then only freeing the shared page when the last ufile is
+ * destroyed.
+ */
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ struct rdma_umap_priv *priv = rdma_user_mmap_pre(ucontext, vma, size);
+
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ if (remap_pfn_range(vma, vma->vm_start, page_to_pfn(page), size,
+ vma->vm_page_prot)) {
+ kfree(priv);
+ return -EAGAIN;
+ }
+
+ rdma_umap_priv_init(priv, vma);
+ return 0;
+}
+EXPORT_SYMBOL(rdma_user_mmap_page);
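
To show the intended call sites, a sketch of a driver ->mmap() forwarding a doorbell page through rdma_user_mmap_io(); example_db_pfn() and the non-cached prot are illustrative assumptions, not any particular driver's code:

	static int example_mmap(struct ib_ucontext *uctx,
				struct vm_area_struct *vma)
	{
		/* hypothetical helper returning the doorbell page's pfn */
		unsigned long pfn = example_db_pfn(uctx);

		return rdma_user_mmap_io(uctx, vma, pfn, PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot));
	}
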
+
+void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
+{
+ struct rdma_umap_priv *priv, *next_priv;
+
+ lockdep_assert_held(&ufile->hw_destroy_rwsem);
+
+ while (1) {
+ struct mm_struct *mm = NULL;
+
+ /* Get an arbitrary mm pointer that hasn't been cleaned yet */
+ mutex_lock(&ufile->umap_lock);
+ if (!list_empty(&ufile->umaps)) {
+ mm = list_first_entry(&ufile->umaps,
+ struct rdma_umap_priv, list)
+ ->vma->vm_mm;
+ mmget(mm);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ if (!mm)
+ return;
+
+ /*
+ * The umap_lock is nested under mmap_sem since it is used within
+ * the vma_ops callbacks, so we have to clean the list one mm
+ * at a time to get the lock ordering right. Typically there
+ * will only be one mm, so no big deal.
+ */
+ down_write(&mm->mmap_sem);
+ mutex_lock(&ufile->umap_lock);
+ list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
+ list) {
+ struct vm_area_struct *vma = priv->vma;
+
+ if (vma->vm_mm != mm)
+ continue;
+ list_del_init(&priv->list);
+
+ zap_vma_ptes(vma, vma->vm_start,
+ vma->vm_end - vma->vm_start);
+ vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
+ }
+ mutex_unlock(&ufile->umap_lock);
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+}
+
+/*
* ib_uverbs_open() does not need the BKL:
*
* - the ib_uverbs_device structures are properly reference counted and
@@ -839,6 +1056,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;
+ get_device(&dev->dev);
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
mutex_lock(&dev->lists_mutex);
ib_dev = srcu_dereference(dev->ib_dev,
@@ -876,9 +1094,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
spin_lock_init(&file->uobjects_lock);
INIT_LIST_HEAD(&file->uobjects);
init_rwsem(&file->hw_destroy_rwsem);
+ mutex_init(&file->umap_lock);
+ INIT_LIST_HEAD(&file->umaps);
filp->private_data = file;
- kobject_get(&dev->kobj);
list_add_tail(&file->list, &dev->uverbs_file_list);
mutex_unlock(&dev->lists_mutex);
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
@@ -899,6 +1118,7 @@ err:
if (atomic_dec_and_test(&dev->refcount))
ib_uverbs_comp_dev(dev);
+ put_device(&dev->dev);
return ret;
}
@@ -909,10 +1129,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);
mutex_lock(&file->device->lists_mutex);
- if (!file->is_closed) {
- list_del(&file->list);
- file->is_closed = 1;
- }
+ list_del_init(&file->list);
mutex_unlock(&file->device->lists_mutex);
if (file->async_file)
@@ -951,37 +1168,34 @@ static struct ib_client uverbs_client = {
.remove = ib_uverbs_remove_one
};
-static ssize_t show_ibdev(struct device *device, struct device_attribute *attr,
+static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
char *buf)
{
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
-
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
- ret = sprintf(buf, "%s\n", ib_dev->name);
+ ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
return ret;
}
-static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static DEVICE_ATTR_RO(ibdev);
-static ssize_t show_dev_abi_version(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t abi_version_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
- struct ib_uverbs_device *dev = dev_get_drvdata(device);
+ struct ib_uverbs_device *dev =
+ container_of(device, struct ib_uverbs_device, dev);
int ret = -ENODEV;
int srcu_key;
struct ib_device *ib_dev;
- if (!dev)
- return -ENODEV;
srcu_key = srcu_read_lock(&dev->disassociate_srcu);
ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
if (ib_dev)
@@ -990,7 +1204,17 @@ static ssize_t show_dev_abi_version(struct device *device,
return ret;
}
-static DEVICE_ATTR(abi_version, S_IRUGO, show_dev_abi_version, NULL);
+static DEVICE_ATTR_RO(abi_version);
+
+static struct attribute *ib_dev_attrs[] = {
+ &dev_attr_abi_version.attr,
+ &dev_attr_ibdev.attr,
+ NULL,
+};
+
+static const struct attribute_group dev_attr_group = {
+ .attrs = ib_dev_attrs,
+};
static CLASS_ATTR_STRING(abi_version, S_IRUGO,
__stringify(IB_USER_VERBS_ABI_VERSION));
@@ -1028,65 +1252,56 @@ static void ib_uverbs_add_one(struct ib_device *device)
return;
}
+ device_initialize(&uverbs_dev->dev);
+ uverbs_dev->dev.class = uverbs_class;
+ uverbs_dev->dev.parent = device->dev.parent;
+ uverbs_dev->dev.release = ib_uverbs_release_dev;
+ uverbs_dev->groups[0] = &dev_attr_group;
+ uverbs_dev->dev.groups = uverbs_dev->groups;
atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
- kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);
mutex_init(&uverbs_dev->lists_mutex);
INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
+ rcu_assign_pointer(uverbs_dev->ib_dev, device);
+ uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (devnum >= IB_UVERBS_MAX_DEVICES)
+ devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
+ GFP_KERNEL);
+ if (devnum < 0)
goto err;
uverbs_dev->devnum = devnum;
- set_bit(devnum, dev_map);
if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
else
base = IB_UVERBS_BASE_DEV + devnum;
- rcu_assign_pointer(uverbs_dev->ib_dev, device);
- uverbs_dev->num_comp_vectors = device->num_comp_vectors;
-
if (ib_uverbs_create_uapi(device, uverbs_dev))
goto err_uapi;
- cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->dev.devt = base;
+ dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);
+
+ cdev_init(&uverbs_dev->cdev,
+ device->mmap ? &uverbs_mmap_fops : &uverbs_fops);
uverbs_dev->cdev.owner = THIS_MODULE;
- uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- cdev_set_parent(&uverbs_dev->cdev, &uverbs_dev->kobj);
- kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(&uverbs_dev->cdev, base, 1))
- goto err_cdev;
-
- uverbs_dev->dev = device_create(uverbs_class, device->dev.parent,
- uverbs_dev->cdev.dev, uverbs_dev,
- "uverbs%d", uverbs_dev->devnum);
- if (IS_ERR(uverbs_dev->dev))
- goto err_cdev;
-
- if (device_create_file(uverbs_dev->dev, &dev_attr_ibdev))
- goto err_class;
- if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
- goto err_class;
- ib_set_client_data(device, &uverbs_client, uverbs_dev);
+ ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
+ if (ret)
+ goto err_uapi;
+ ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
-err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
-err_cdev:
- cdev_del(&uverbs_dev->cdev);
err_uapi:
- clear_bit(devnum, dev_map);
+ ida_free(&uverbs_ida, devnum);
err:
if (atomic_dec_and_test(&uverbs_dev->refcount))
ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
return;
}
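
The bitmap-to-IDA conversion above boils down to the following pattern (a sketch with invented names; ida_alloc_max() returns the smallest free id no greater than the max, or a negative errno):

	static DEFINE_IDA(example_ida);

	static int example_get_devnum(void)
	{
		/* ids 0..MAX_DEVS-1, matching the old bitmap's range */
		return ida_alloc_max(&example_ida, MAX_DEVS - 1, GFP_KERNEL);
	}

	static void example_put_devnum(int devnum)
	{
		ida_free(&example_ida, devnum);
	}
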
@@ -1107,8 +1322,7 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
while (!list_empty(&uverbs_dev->uverbs_file_list)) {
file = list_first_entry(&uverbs_dev->uverbs_file_list,
struct ib_uverbs_file, list);
- file->is_closed = 1;
- list_del(&file->list);
+ list_del_init(&file->list);
kref_get(&file->ref);
/* We must release the mutex before going ahead and calling
@@ -1156,10 +1370,8 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (!uverbs_dev)
return;
- dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev.dev);
- cdev_del(&uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
+ ida_free(&uverbs_ida, uverbs_dev->devnum);
if (device->disassociate_ucontext) {
/* We disassociate HW resources and immediately return.
@@ -1182,7 +1394,7 @@ static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
if (wait_clients)
wait_for_completion(&uverbs_dev->comp);
- kobject_put(&uverbs_dev->kobj);
+ put_device(&uverbs_dev->dev);
}
static char *uverbs_devnode(struct device *dev, umode_t *mode)
diff --git a/drivers/infiniband/core/uverbs_std_types_flow_action.c b/drivers/infiniband/core/uverbs_std_types_flow_action.c
index d8cfafe23bd9..cb9486ad5c67 100644
--- a/drivers/infiniband/core/uverbs_std_types_flow_action.c
+++ b/drivers/infiniband/core/uverbs_std_types_flow_action.c
@@ -326,11 +326,8 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_CREATE)(
if (IS_ERR(action))
return PTR_ERR(action);
- atomic_set(&action->usecnt, 0);
- action->device = ib_dev;
- action->type = IB_FLOW_ACTION_ESP;
- action->uobject = uobj;
- uobj->object = action;
+ uverbs_flow_action_fill_action(action, uobj, ib_dev,
+ IB_FLOW_ACTION_ESP);
return 0;
}
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index be854628a7c6..86f3fc5e04b4 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -73,6 +73,18 @@ static int uapi_merge_method(struct uverbs_api *uapi,
if (attr->attr.type == UVERBS_ATTR_TYPE_ENUM_IN)
method_elm->driver_method |= is_driver;
+ /*
+ * Like other uobject based things we only support a single
+ * uobject being NEW'd or DESTROY'd
+ */
+ if (attr->attr.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
+ u8 access = attr->attr.u2.objs_arr.access;
+
+ if (WARN_ON(access == UVERBS_ACCESS_NEW ||
+ access == UVERBS_ACCESS_DESTROY))
+ return -EINVAL;
+ }
+
attr_slot =
uapi_add_elm(uapi, method_key | uapi_key_attr(attr->id),
sizeof(*attr_slot));
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 8ec7418e99f0..178899e3ce73 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -264,7 +264,7 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
}
pd->res.type = RDMA_RESTRACK_PD;
- pd->res.kern_name = caller;
+ rdma_restrack_set_task(&pd->res, caller);
rdma_restrack_add(&pd->res);
if (mr_access_flags) {
@@ -710,7 +710,7 @@ static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
ah_attr->roce.dmac,
- sgid_attr->ndev, &hop_limit);
+ sgid_attr, &hop_limit);
grh->hop_limit = hop_limit;
return ret;
@@ -1509,8 +1509,7 @@ static const struct {
};
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll)
+ enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
enum ib_qp_attr_mask req_param, opt_param;
@@ -1629,14 +1628,16 @@ static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
if (rdma_ib_or_roce(qp->device, port)) {
if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
- pr_warn("%s: %s rq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ "%s rq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->rq_psn &= 0xffffff;
}
if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
- pr_warn("%s: %s sq_psn overflow, masking to 24 bits\n",
- __func__, qp->device->name);
+ dev_warn(&qp->device->dev,
+ " %s sq_psn overflow, masking to 24 bits\n",
+ __func__);
attr->sq_psn &= 0xffffff;
}
}
@@ -1888,7 +1889,7 @@ struct ib_cq *__ib_create_cq(struct ib_device *device,
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
cq->res.type = RDMA_RESTRACK_CQ;
- cq->res.kern_name = caller;
+ rdma_restrack_set_task(&cq->res, caller);
rdma_restrack_add(&cq->res);
}
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 96f76896488d..31baa8939a4f 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -40,7 +40,6 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
#define ROCE_DRV_MODULE_NAME "bnxt_re"
-#define ROCE_DRV_MODULE_VERSION "1.0.0"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
#define BNXT_RE_PAGE_SHIFT_4K (12)
@@ -120,6 +119,8 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
+#define BNXT_RE_FLAG_RESOURCES_ALLOCATED 7
+#define BNXT_RE_FLAG_RESOURCES_INITIALIZED 8
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 77416bc61e6e..604b71875f5f 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -68,6 +68,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_TX_PKTS] = "tx_pkts",
[BNXT_RE_TX_BYTES] = "tx_bytes",
[BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
+ [BNXT_RE_RX_DROPS] = "rx_roce_drops",
+ [BNXT_RE_RX_DISCARDS] = "rx_roce_discards",
[BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
[BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
[BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
@@ -106,7 +108,8 @@ static const char * const bnxt_re_stat_name[] = {
[BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
[BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
[BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
- [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err"
+ [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count"
};
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
@@ -128,6 +131,10 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
if (bnxt_re_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_DROPS] =
+ le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ stats->value[BNXT_RE_RX_DISCARDS] =
+ le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
stats->value[BNXT_RE_RX_BYTES] =
@@ -220,6 +227,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
rdev->stats.res_tx_pci_err;
stats->value[BNXT_RE_RES_RX_PCI_ERR] =
rdev->stats.res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ rdev->stats.res_oos_drop_count;
}
return ARRAY_SIZE(bnxt_re_stat_name);
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index a01a922717d5..76399f477e5c 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -51,6 +51,8 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_RX_DROPS,
+ BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
BNXT_RE_MAX_RETRY_EXCEEDED,
@@ -90,6 +92,7 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_SRQ_LOAD_ERR,
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_OUT_OF_SEQ_ERR,
BNXT_RE_NUM_COUNTERS
};
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index bc2b9e038439..54fdd4cf5288 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1598,8 +1598,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
new_qp_state = qp_attr->qp_state;
if (!ib_modify_qp_is_ok(curr_qp_state, new_qp_state,
- ib_qp->qp_type, qp_attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ib_qp->qp_type, qp_attr_mask)) {
dev_err(rdev_to_dev(rdev),
"Invalid attribute mask: %#x specified ",
qp_attr_mask);
@@ -2664,6 +2663,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
nq->budget++;
atomic_inc(&rdev->cq_count);
+ spin_lock_init(&cq->cq_lock);
if (context) {
struct bnxt_re_cq_resp resp;
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 85cd1a3593d6..cf2282654210 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -67,7 +67,7 @@
#include "hw_counters.h"
static char version[] =
- BNXT_RE_DESC " v" ROCE_DRV_MODULE_VERSION "\n";
+ BNXT_RE_DESC "\n";
MODULE_AUTHOR("Eddie Wai <eddie.wai@broadcom.com>");
MODULE_DESCRIPTION(BNXT_RE_DESC " Driver");
@@ -535,6 +535,34 @@ static struct bnxt_en_dev *bnxt_re_dev_probe(struct net_device *netdev)
return en_dev;
}
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *bnxt_re_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group bnxt_re_dev_attr_group = {
+ .attrs = bnxt_re_attributes,
+};
+
static void bnxt_re_unregister_ib(struct bnxt_re_dev *rdev)
{
ib_unregister_device(&rdev->ibdev);
@@ -547,7 +575,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
/* ib device init */
ibdev->owner = THIS_MODULE;
ibdev->node_type = RDMA_NODE_IB_CA;
- strlcpy(ibdev->name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
@@ -639,34 +666,11 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->get_hw_stats = bnxt_re_ib_get_hw_stats;
ibdev->alloc_hw_stats = bnxt_re_ib_alloc_hw_stats;
+ rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
- return ib_register_device(ibdev, NULL);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+ return ib_register_device(ibdev, "bnxt_re%d", NULL);
}
-static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
-static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
-
-static struct device_attribute *bnxt_re_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
{
dev_put(rdev->netdev);
@@ -864,10 +868,8 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
{
int i;
- if (rdev->nq[0].hwq.max_elements) {
- for (i = 1; i < rdev->num_msix; i++)
- bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
- }
+ for (i = 1; i < rdev->num_msix; i++)
+ bnxt_qplib_disable_nq(&rdev->nq[i - 1]);
if (rdev->qplib_res.rcfw)
bnxt_qplib_cleanup_res(&rdev->qplib_res);
@@ -876,6 +878,7 @@ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_enabled = 0;
bnxt_qplib_init_res(&rdev->qplib_res);
@@ -891,9 +894,13 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
"Failed to enable NQ with rc = 0x%x", rc);
goto fail;
}
+ num_vec_enabled++;
}
return 0;
fail:
+ for (i = num_vec_enabled; i >= 0; i--)
+ bnxt_qplib_disable_nq(&rdev->nq[i]);
+
return rc;
}
@@ -925,6 +932,7 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
{
int rc = 0, i;
+ int num_vec_created = 0;
/* Configure and allocate resources for qplib */
rdev->qplib_res.rcfw = &rdev->rcfw;
@@ -951,7 +959,7 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
if (rc) {
dev_err(rdev_to_dev(rdev), "Alloc Failed NQ%d rc:%#x",
i, rc);
- goto dealloc_dpi;
+ goto free_nq;
}
rc = bnxt_re_net_ring_alloc
(rdev, rdev->nq[i].hwq.pbl[PBL_LVL_0].pg_map_arr,
@@ -964,14 +972,17 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
dev_err(rdev_to_dev(rdev),
"Failed to allocate NQ fw id with rc = 0x%x",
rc);
+ bnxt_qplib_free_nq(&rdev->nq[i]);
goto free_nq;
}
+ num_vec_created++;
}
return 0;
free_nq:
- for (i = 0; i < rdev->num_msix - 1; i++)
+ for (i = num_vec_created; i >= 0; i--) {
+ bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
bnxt_qplib_free_nq(&rdev->nq[i]);
-dealloc_dpi:
+ }
bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
&rdev->qplib_res.dpi_tbl,
&rdev->dpi_privileged);
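
The reworked error paths above follow the usual count-then-rewind idiom: remember how many units were fully set up and undo exactly those, in reverse. A generic sketch with invented names:

	static int example_alloc_all(int n)
	{
		int created = 0;
		int i, rc;

		for (i = 0; i < n; i++) {
			rc = example_alloc_one(i);
			if (rc)
				goto undo;
			created++;
		}
		return 0;

	undo:
		while (created--)
			example_free_one(created);
		return rc;
	}
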
@@ -989,12 +1000,17 @@ static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
struct ib_event ib_event;
ib_event.device = ibdev;
- if (qp)
+ if (qp) {
ib_event.element.qp = qp;
- else
+ ib_event.event = event;
+ if (qp->event_handler)
+ qp->event_handler(&ib_event, qp->qp_context);
+
+ } else {
ib_event.element.port_num = port_num;
- ib_event.event = event;
- ib_dispatch_event(&ib_event);
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+ }
}
#define HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN 0x02
@@ -1189,20 +1205,20 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
{
- int i, rc;
+ int rc;
if (test_and_clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
/* Cleanup ib dev */
bnxt_re_unregister_ib(rdev);
}
if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
- cancel_delayed_work(&rdev->worker);
+ cancel_delayed_work_sync(&rdev->worker);
- bnxt_re_cleanup_res(rdev);
- bnxt_re_free_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags))
+ bnxt_re_free_res(rdev);
if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
@@ -1241,7 +1257,7 @@ static void bnxt_re_worker(struct work_struct *work)
static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
{
- int i, j, rc;
+ int rc;
bool locked;
@@ -1331,12 +1347,15 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
pr_err("Failed to allocate resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_ALLOCATED, &rdev->flags);
rc = bnxt_re_init_res(rdev);
if (rc) {
pr_err("Failed to initialize resources: %#x\n", rc);
goto fail;
}
+ set_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED, &rdev->flags);
+
if (!rdev->is_virtfn) {
rc = bnxt_re_setup_qos(rdev);
if (rc)
@@ -1358,20 +1377,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
}
set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
dev_info(rdev_to_dev(rdev), "Device registered successfully");
- for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
- rc = device_create_file(&rdev->ibdev.dev,
- bnxt_re_attributes[i]);
- if (rc) {
- dev_err(rdev_to_dev(rdev),
- "Failed to create IB sysfs: %#x", rc);
- /* Must clean up all created device files */
- for (j = 0; j < i; j++)
- device_remove_file(&rdev->ibdev.dev,
- bnxt_re_attributes[j]);
- bnxt_re_unregister_ib(rdev);
- goto fail;
- }
- }
ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
&rdev->active_width);
set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 6ad0d46ab879..b98b054148cd 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -36,6 +36,8 @@
* Description: Fast Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
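
The dev_fmt() define above is what lets the hunks below drop the literal "QPLIB: " prefix from each message: when a driver defines dev_fmt() before its includes, every dev_err()/dev_warn()/dev_dbg() format string in the file is passed through it. In sketch form:

	#define dev_fmt(fmt) "QPLIB: " fmt	/* must precede the includes */

	/* later in the file ... */
	dev_err(&pdev->dev, "Stats DMA allocation failed\n");
	/* ... logs with the prefix applied:
	 * "QPLIB: Stats DMA allocation failed" */
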
@@ -71,8 +73,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->sq.flushed) {
dev_dbg(&scq->hwq.pdev->dev,
- "QPLIB: FP: Adding to SQ Flush list = %p",
- qp);
+ "FP: Adding to SQ Flush list = %p\n", qp);
bnxt_qplib_cancel_phantom_processing(qp);
list_add_tail(&qp->sq_flush, &scq->sqf_head);
qp->sq.flushed = true;
@@ -80,8 +81,7 @@ static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
if (!qp->srq) {
if (!qp->rq.flushed) {
dev_dbg(&rcq->hwq.pdev->dev,
- "QPLIB: FP: Adding to RQ Flush list = %p",
- qp);
+ "FP: Adding to RQ Flush list = %p\n", qp);
list_add_tail(&qp->rq_flush, &rcq->rqf_head);
qp->rq.flushed = true;
}
@@ -207,7 +207,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->sq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create sq_hdr_buf");
+ "Failed to create sq_hdr_buf\n");
goto fail;
}
}
@@ -221,7 +221,7 @@ static int bnxt_qplib_alloc_qp_hdr_buf(struct bnxt_qplib_res *res,
if (!qp->rq_hdr_buf) {
rc = -ENOMEM;
dev_err(&res->pdev->dev,
- "QPLIB: Failed to create rq_hdr_buf");
+ "Failed to create rq_hdr_buf\n");
goto fail;
}
}
@@ -277,8 +277,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_cqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: cqn - type 0x%x not handled",
- type);
+ "cqn - type 0x%x not handled\n", type);
spin_unlock_bh(&cq->compl_lock);
break;
}
@@ -298,7 +297,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
num_srqne_processed++;
else
dev_warn(&nq->pdev->dev,
- "QPLIB: SRQ event 0x%x not handled",
+ "SRQ event 0x%x not handled\n",
nqsrqe->event);
break;
}
@@ -306,8 +305,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
break;
default:
dev_warn(&nq->pdev->dev,
- "QPLIB: nqe with type = 0x%x not handled",
- type);
+ "nqe with type = 0x%x not handled\n", type);
break;
}
raw_cons++;
@@ -360,7 +358,8 @@ void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
}
/* Make sure the HW is stopped! */
- bnxt_qplib_nq_stop_irq(nq, true);
+ if (nq->requested)
+ bnxt_qplib_nq_stop_irq(nq, true);
if (nq->bar_reg_iomem)
iounmap(nq->bar_reg_iomem);
@@ -396,7 +395,7 @@ int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
rc = irq_set_affinity_hint(nq->vector, &nq->mask);
if (rc) {
dev_warn(&nq->pdev->dev,
- "QPLIB: set affinity failed; vector: %d nq_idx: %d\n",
+ "set affinity failed; vector: %d nq_idx: %d\n",
nq->vector, nq_indx);
}
nq->requested = true;
@@ -443,7 +442,7 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
if (rc) {
dev_err(&nq->pdev->dev,
- "QPLIB: Failed to request irq for nq-idx %d", nq_idx);
+ "Failed to request irq for nq-idx %d\n", nq_idx);
goto fail;
}
@@ -662,8 +661,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
spin_lock(&srq_hwq->lock);
if (srq->start_idx == srq->last_idx) {
- dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!",
- srq->id);
+ dev_err(&srq_hwq->pdev->dev,
+ "FP: SRQ (0x%x) is full!\n", srq->id);
rc = -EINVAL;
spin_unlock(&srq_hwq->lock);
goto done;
@@ -1324,7 +1323,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
}
}
if (i == res->sgid_tbl.max)
- dev_warn(&res->pdev->dev, "QPLIB: SGID not found??");
+ dev_warn(&res->pdev->dev, "SGID not found??\n");
qp->ah.hop_limit = sb->hop_limit;
qp->ah.traffic_class = sb->traffic_class;
@@ -1536,7 +1535,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
if (bnxt_qplib_queue_full(sq)) {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
+ "prod = %#x cons = %#x qdepth = %#x delta = %#x\n",
sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq->q_full_delta);
rc = -ENOMEM;
@@ -1561,7 +1560,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
/* Copy the inline data */
if (wqe->inline_len > BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH) {
dev_warn(&sq->hwq.pdev->dev,
- "QPLIB: Inline data length > 96 detected");
+ "Inline data length > 96 detected\n");
data_len = BNXT_QPLIB_SWQE_MAX_INLINE_LENGTH;
} else {
data_len = wqe->inline_len;
@@ -1776,7 +1775,7 @@ done:
queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&sq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate SQ nq_work!");
+ "FP: Failed to allocate SQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1815,13 +1814,12 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
sch_handler = true;
dev_dbg(&rq->hwq.pdev->dev,
- "%s Error QP. Scheduling for poll_cq\n",
- __func__);
+ "%s: Error QP. Scheduling for poll_cq\n", __func__);
goto queue_err;
}
if (bnxt_qplib_queue_full(rq)) {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
+ "FP: QP (0x%x) RQ is full!\n", qp->id);
rc = -EINVAL;
goto done;
}
@@ -1870,7 +1868,7 @@ queue_err:
queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
} else {
dev_err(&rq->hwq.pdev->dev,
- "QPLIB: FP: Failed to allocate RQ nq_work!");
+ "FP: Failed to allocate RQ nq_work!\n");
rc = -ENOMEM;
}
}
@@ -1932,7 +1930,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
if (!cq->dpi) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: FP: CREATE_CQ failed due to NULL DPI");
+ "FP: CREATE_CQ failed due to NULL DPI\n");
return -EINVAL;
}
req.dpi = cpu_to_le32(cq->dpi->dpi);
@@ -1969,6 +1967,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
INIT_LIST_HEAD(&cq->sqf_head);
INIT_LIST_HEAD(&cq->rqf_head);
spin_lock_init(&cq->compl_lock);
+ spin_lock_init(&cq->flush_lock);
bnxt_qplib_arm_cq_enable(cq);
return 0;
@@ -2172,7 +2171,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
* comes back
*/
dev_dbg(&cq->hwq.pdev->dev,
- "FP:Got Phantom CQE");
+ "FP: Got Phantom CQE\n");
sq->condition = false;
sq->single = true;
rc = 0;
@@ -2189,7 +2188,7 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
peek_raw_cq_cons++;
}
dev_err(&cq->hwq.pdev->dev,
- "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
rc = -EINVAL;
}
@@ -2213,7 +2212,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: Process Req qp is NULL");
+ "FP: Process Req qp is NULL\n");
return -EINVAL;
}
sq = &qp->sq;
@@ -2221,16 +2220,14 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process req reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process req reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
/* Require to walk the sq's swq to fabricate CQEs for all previously
@@ -2262,9 +2259,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed Req ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
+ "FP: CQ Processed Req wr_id[%d] = 0x%llx with status 0x%x\n",
sw_sq_cons, cqe->wr_id, cqe->status);
cqe++;
(*budget)--;
@@ -2330,12 +2325,12 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq RC qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
@@ -2356,9 +2351,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
return -EINVAL;
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2371,9 +2364,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process RC ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process RC wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2409,12 +2400,12 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq UD qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2439,9 +2430,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2454,9 +2443,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process UD ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process UD wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2508,13 +2495,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq Raw/QP1 qp is NULL");
+ dev_err(&cq->hwq.pdev->dev, "process_cq Raw/QP1 qp is NULL\n");
return -EINVAL;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto done;
}
cqe = *pcqe;
@@ -2543,14 +2529,12 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
srq = qp->srq;
if (!srq) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: SRQ used but not defined??");
+ "FP: SRQ used but not defined??\n");
return -EINVAL;
}
if (wr_id_idx >= srq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x",
+ "FP: CQ Process Raw/QP1 wr_id idx 0x%x exceeded SRQ max 0x%x\n",
wr_id_idx, srq->hwq.max_elements);
return -EINVAL;
}
@@ -2563,9 +2547,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
rq = &qp->rq;
if (wr_id_idx >= rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: ix 0x%x exceeded RQ max 0x%x",
+ "FP: CQ Process Raw/QP1 RQ wr_id idx 0x%x exceeded RQ max 0x%x\n",
wr_id_idx, rq->hwq.max_elements);
return -EINVAL;
}
@@ -2600,14 +2582,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
dev_warn(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Terminal Error status = 0x%x",
+ "FP: CQ Process Terminal Error status = 0x%x\n",
hwcqe->status);
qp = (struct bnxt_qplib_qp *)((unsigned long)
le64_to_cpu(hwcqe->qp_handle));
if (!qp) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal qp is NULL");
+ "FP: CQ Process terminal qp is NULL\n");
return -EINVAL;
}
@@ -2623,16 +2605,14 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
if (cqe_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process terminal reported ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
+ "FP: CQ Process terminal reported sq_cons_idx 0x%x which exceeded max 0x%x\n",
cqe_cons, sq->hwq.max_elements);
goto do_rq;
}
if (qp->sq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
goto sq_done;
}
@@ -2673,16 +2653,14 @@ do_rq:
goto done;
} else if (cqe_cons > rq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Processed terminal ");
- dev_err(&cq->hwq.pdev->dev,
- "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x",
+ "FP: CQ Processed terminal reported rq_cons_idx 0x%x exceeds max 0x%x\n",
cqe_cons, rq->hwq.max_elements);
goto done;
}
if (qp->rq.flushed) {
dev_dbg(&cq->hwq.pdev->dev,
- "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ "%s: QP in Flush QP = %p\n", __func__, qp);
rc = 0;
goto done;
}
@@ -2704,7 +2682,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
/* Check the Status */
if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: FP: CQ Process Cutoff Error status = 0x%x",
+ "FP: CQ Process Cutoff Error status = 0x%x\n",
hwcqe->status);
return -EINVAL;
}
@@ -2724,16 +2702,12 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
spin_lock_irqsave(&cq->flush_lock, flags);
list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing SQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing SQ QP= %p\n", qp);
__flush_sq(&qp->sq, qp, &cqe, &budget);
}
list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
- dev_dbg(&cq->hwq.pdev->dev,
- "QPLIB: FP: Flushing RQ QP= %p",
- qp);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Flushing RQ QP= %p\n", qp);
__flush_rq(&qp->rq, qp, &cqe, &budget);
}
spin_unlock_irqrestore(&cq->flush_lock, flags);
@@ -2801,7 +2775,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
goto exit;
default:
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cq unknown type 0x%lx",
+ "process_cq unknown type 0x%lx\n",
hw_cqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK);
rc = -EINVAL;
@@ -2814,7 +2788,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
* next one
*/
dev_err(&cq->hwq.pdev->dev,
- "QPLIB: process_cqe error rc = 0x%x", rc);
+ "process_cqe error rc = 0x%x\n", rc);
}
raw_cons++;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 2852d350ada1..be4e33e9f962 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -35,6 +35,9 @@
*
* Description: RDMA Controller HW interface
*/
+
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
@@ -96,14 +99,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW not initialized, reject opcode 0x%x",
- opcode);
+ "RCFW not initialized, reject opcode 0x%x\n", opcode);
return -EINVAL;
}
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
+ dev_err(&rcfw->pdev->dev, "RCFW already initialized!\n");
return -EINVAL;
}
@@ -115,7 +117,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
*/
spin_lock_irqsave(&cmdq->lock, flags);
if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
- dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
+ dev_err(&rcfw->pdev->dev, "RCFW: CMDQ is full!\n");
spin_unlock_irqrestore(&cmdq->lock, flags);
return -EAGAIN;
}
@@ -154,7 +156,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
if (!cmdqe) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: RCFW request failed with no cmdqe!");
+ "RCFW request failed with no cmdqe!\n");
goto done;
}
/* Copy a segment of the req cmd to the cmdq */
@@ -210,7 +212,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
/* send failed */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x send failed\n",
cookie, opcode);
return rc;
}
@@ -224,7 +226,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
rc = __wait_for_resp(rcfw, cookie);
if (rc) {
/* timed out */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x timedout (%d)msec\n",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
return rc;
@@ -232,7 +234,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
if (evnt->status) {
/* failed with status */
- dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
cookie, opcode, evnt->status);
rc = -EFAULT;
}
@@ -298,9 +300,9 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
qp_id = le32_to_cpu(err_event->xid);
qp = rcfw->qp_tbl[qp_id].qp_handle;
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: Received QP error notification");
+ "Received QP error notification\n");
dev_dbg(&rcfw->pdev->dev,
- "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ "qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
qp_id, err_event->req_err_state_reason,
err_event->res_err_state_reason);
if (!qp)
@@ -309,8 +311,17 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
rcfw->aeq_handler(rcfw, qp_event, qp);
break;
default:
- /* Command Response */
- spin_lock_irqsave(&cmdq->lock, flags);
+ /*
+ * Command Response
+ * cmdq->lock needs to be acquired to synchronize
+ * the command send and completion reaping. This function
+ * is always called with creq->lock held. Using
+ * the nested variant of spin_lock.
+ *
+ */
+
+ spin_lock_irqsave_nested(&cmdq->lock, flags,
+ SINGLE_DEPTH_NESTING);
cookie = le16_to_cpu(qp_event->cookie);
mcookie = qp_event->cookie;
blocked = cookie & RCFW_CMD_IS_BLOCKING;
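
On the nested locking above: the command queue and completion queue locks likely share a lockdep class (both are locks of the same queue type), and this path already holds creq's lock, so a plain spin_lock_irqsave() on cmdq->lock would look like recursive locking to lockdep. The _nested variant declares the intentional one-level nesting. A generic sketch with illustrative names:

	static DEFINE_SPINLOCK(example_outer);	/* stands in for creq's lock */
	static DEFINE_SPINLOCK(example_inner);	/* stands in for cmdq->lock  */

	static void example_reap(void)
	{
		unsigned long oflags, iflags;

		spin_lock_irqsave(&example_outer, oflags);
		/* tell lockdep this acquisition nests one level deep */
		spin_lock_irqsave_nested(&example_inner, iflags,
					 SINGLE_DEPTH_NESTING);
		/* reap one command completion here */
		spin_unlock_irqrestore(&example_inner, iflags);
		spin_unlock_irqrestore(&example_outer, oflags);
	}
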
@@ -322,14 +333,16 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
crsqe->resp = NULL;
} else {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
- crsqe->resp ? "mismatch" : "collision",
- crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
+ if (crsqe->resp && crsqe->resp->cookie)
+ dev_err(&rcfw->pdev->dev,
+ "CMD %s cookie sent=%#x, recd=%#x\n",
+ crsqe->resp ? "mismatch" : "collision",
+ crsqe->resp ? crsqe->resp->cookie : 0,
+ mcookie);
}
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
- "QPLIB: CMD bit %d was not requested", cbit);
+ "CMD bit %d was not requested\n", cbit);
cmdq->cons += crsqe->req_size;
crsqe->req_size = 0;
@@ -376,14 +389,14 @@ static void bnxt_qplib_service_creq(unsigned long data)
(rcfw, (struct creq_func_event *)creqe))
rcfw->creq_func_event_processed++;
else
- dev_warn
- (&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
- type);
+ dev_warn(&rcfw->pdev->dev,
+ "aeqe:%#x Not handled\n", type);
break;
default:
- dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
- dev_warn(&rcfw->pdev->dev,
- "QPLIB: op_event = 0x%x not handled", type);
+ if (type != ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT)
+ dev_warn(&rcfw->pdev->dev,
+ "creqe with event 0x%x not handled\n",
+ type);
break;
}
raw_cons++;
@@ -551,7 +564,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_L2_CMPL)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CREQ allocation failed");
+ "HW channel CREQ allocation failed\n");
goto fail;
}
rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
@@ -560,7 +573,7 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: HW channel CMDQ allocation failed");
+ "HW channel CMDQ allocation failed\n");
goto fail;
}
@@ -605,21 +618,18 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
bnxt_qplib_rcfw_stop_irq(rcfw, true);
- if (rcfw->cmdq_bar_reg_iomem)
- iounmap(rcfw->cmdq_bar_reg_iomem);
- rcfw->cmdq_bar_reg_iomem = NULL;
-
- if (rcfw->creq_bar_reg_iomem)
- iounmap(rcfw->creq_bar_reg_iomem);
- rcfw->creq_bar_reg_iomem = NULL;
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ iounmap(rcfw->creq_bar_reg_iomem);
indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
if (indx != rcfw->bmap_size)
dev_err(&rcfw->pdev->dev,
- "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
+ "disabling RCFW with pending cmd-bit %lx\n", indx);
kfree(rcfw->cmdq_bitmap);
rcfw->bmap_size = 0;
+ rcfw->cmdq_bar_reg_iomem = NULL;
+ rcfw->creq_bar_reg_iomem = NULL;
rcfw->aeq_handler = NULL;
rcfw->vector = 0;
}
@@ -681,8 +691,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
RCFW_COMM_BASE_OFFSET,
RCFW_COMM_SIZE);
if (!rcfw->cmdq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CMDQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CMDQ BAR region %d mapping failed\n",
rcfw->cmdq_bar_reg);
return -ENOMEM;
}
@@ -697,14 +706,15 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
if (!res_base)
dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d resc start is 0!",
+ "CREQ BAR region %d resc start is 0!\n",
rcfw->creq_bar_reg);
rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
4);
if (!rcfw->creq_bar_reg_iomem) {
- dev_err(&rcfw->pdev->dev,
- "QPLIB: CREQ BAR region %d mapping failed",
+ dev_err(&rcfw->pdev->dev, "CREQ BAR region %d mapping failed\n",
rcfw->creq_bar_reg);
+ iounmap(rcfw->cmdq_bar_reg_iomem);
+ rcfw->cmdq_bar_reg_iomem = NULL;
return -ENOMEM;
}
rcfw->creq_qp_event_processed = 0;
@@ -717,7 +727,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
if (rc) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
+ "Failed to request IRQ for CREQ rc = 0x%x\n", rc);
bnxt_qplib_disable_rcfw_channel(rcfw);
return rc;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 46416dfe8830..9a8687dc0a79 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -154,6 +154,8 @@ struct bnxt_qplib_qp_node {
void *qp_handle; /* ptr to qplib_qp */
};
+#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
struct pci_dev *pdev;
@@ -190,6 +192,8 @@ struct bnxt_qplib_rcfw {
struct bnxt_qplib_crsq *crsqe_tbl;
int qp_tbl_size;
struct bnxt_qplib_qp_node *qp_tbl;
+ u64 oos_prev;
+ u32 init_oos_stats;
};
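
The new oos_prev/init_oos_stats fields, together with BNXT_QPLIB_OOS_COUNT_MASK, point at the usual wrap-safe accounting for a 32-bit firmware counter; the consumer is not in this hunk, so the following is only a hedged sketch of the likely pattern:

	/* accumulate a 32-bit wrapping HW counter into a 64-bit total */
	static void example_accumulate_oos(u64 *total, u64 *prev, u64 cur)
	{
		*total += (cur - *prev) & BNXT_QPLIB_OOS_COUNT_MASK;
		*prev = cur;
	}
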
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 539a5d44e6db..59eeac55626f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -36,6 +36,8 @@
* Description: QPLib resource manager
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -68,8 +70,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
pbl->pg_map_arr[i]);
else
dev_warn(&pdev->dev,
- "QPLIB: PBL free pg_arr[%d] empty?!",
- i);
+ "PBL free pg_arr[%d] empty?!\n", i);
pbl->pg_arr[i] = NULL;
}
}
@@ -537,7 +538,7 @@ static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl)
{
if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "QPLIB: PKEY tbl not present");
+ dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
else
kfree(pkey_tbl->tbl);
@@ -578,7 +579,7 @@ int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
struct bnxt_qplib_pd *pd)
{
if (test_and_set_bit(pd->id, pdt->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
pd->id);
return -EINVAL;
}
@@ -639,11 +640,11 @@ int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
struct bnxt_qplib_dpi *dpi)
{
if (dpi->dpi >= dpit->max) {
- dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d", dpi->dpi);
+ dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
return -EINVAL;
}
if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
- dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d",
+ dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
dpi->dpi);
return -EINVAL;
}
@@ -673,22 +674,21 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
u32 dbr_len, bytes;
if (dpit->dbr_bar_reg_iomem) {
- dev_err(&res->pdev->dev,
- "QPLIB: DBR BAR region %d already mapped", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
+ dbr_bar_reg);
return -EALREADY;
}
bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
if (!bar_reg_base) {
- dev_err(&res->pdev->dev,
- "QPLIB: BAR region %d resc start failed", dbr_bar_reg);
+ dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
+ dbr_bar_reg);
return -ENOMEM;
}
dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
- dev_err(&res->pdev->dev, "QPLIB: Invalid DBR length %d",
- dbr_len);
+ dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
return -ENOMEM;
}
@@ -696,8 +696,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
dbr_len);
if (!dpit->dbr_bar_reg_iomem) {
dev_err(&res->pdev->dev,
- "QPLIB: FP: DBR BAR region %d mapping failed",
- dbr_bar_reg);
+ "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
return -ENOMEM;
}
@@ -767,7 +766,7 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
&stats->dma_map, GFP_KERNEL);
if (!stats->dma) {
- dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
+ dev_err(&pdev->dev, "Stats DMA allocation failed\n");
return -ENOMEM;
}
return 0;
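
The files in this series drop the literal "QPLIB: " prefix from each message because the dev_fmt() define added at the top of each file now supplies it. A minimal sketch of the mechanism, assuming a translation unit that defines dev_fmt before its first include of <linux/device.h> (the example function is hypothetical):

    /*
     * dev_err()/dev_warn()/dev_dbg() expand their format string through
     * dev_fmt(), so defining it before the includes prefixes every
     * device-log message in this file with "QPLIB: ".
     */
    #define dev_fmt(fmt) "QPLIB: " fmt

    #include <linux/device.h>

    static void report_failure(struct device *dev)
    {
            /* logged as "... <device>: QPLIB: Stats DMA allocation failed" */
            dev_err(dev, "Stats DMA allocation failed\n");
    }
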
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 4097f3fa25c5..5216b5f844cc 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -36,6 +36,8 @@
* Description: Slow Path Operators
*/
+#define dev_fmt(fmt) "QPLIB: " fmt
+
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
@@ -89,7 +91,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_FUNC alloc side buffer failed");
+ "SP: QUERY_FUNC alloc side buffer failed\n");
return -ENOMEM;
}
@@ -135,8 +137,16 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- /* Bono only reports 1 PKEY for now, but it can support > 1 */
attr->max_pkey = le32_to_cpu(sb->max_pkeys);
+ /*
+ * Some versions of the FW report more than 0xFFFF.
+ * Restrict it to 0xFFFF for now to avoid
+ * reporting a truncated value
+ */
+ if (attr->max_pkey > 0xFFFF) {
+ /* ib_port_attr::pkey_tbl_len is u16 */
+ attr->max_pkey = 0xFFFF;
+ }
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
@@ -186,8 +196,7 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
(void *)&resp,
NULL, 0);
if (rc) {
- dev_err(&res->pdev->dev,
- "QPLIB: Failed to set function resources");
+ dev_err(&res->pdev->dev, "Failed to set function resources\n");
}
return rc;
}
@@ -199,7 +208,7 @@ int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
{
if (index >= sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded SGID table max (%d)",
+ "Index %d exceeded SGID table max (%d)\n",
index, sgid_tbl->max);
return -EINVAL;
}
@@ -217,13 +226,12 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int index;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (!sgid_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: SGID table has no active entries");
+ dev_err(&res->pdev->dev, "SGID table has no active entries\n");
return -ENOMEM;
}
for (index = 0; index < sgid_tbl->max; index++) {
@@ -231,7 +239,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
break;
}
if (index == sgid_tbl->max) {
- dev_warn(&res->pdev->dev, "GID not found in the SGID table");
+ dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
return 0;
}
/* Remove GID from the SGID table */
@@ -244,7 +252,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
RCFW_CMD_PREP(req, DELETE_GID, cmd_flags);
if (sgid_tbl->hw_id[index] == 0xFFFF) {
dev_err(&res->pdev->dev,
- "QPLIB: GID entry contains an invalid HW id");
+ "GID entry contains an invalid HW id\n");
return -EINVAL;
}
req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
@@ -258,7 +266,7 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[index] = 0;
sgid_tbl->active--;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
index, sgid_tbl->hw_id[index], sgid_tbl->active);
sgid_tbl->hw_id[index] = (u16)-1;
@@ -277,20 +285,19 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int i, free_idx;
if (!sgid_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated");
+ dev_err(&res->pdev->dev, "SGID table not allocated\n");
return -EINVAL;
}
/* Do we need a sgid_lock here? */
if (sgid_tbl->active == sgid_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: SGID table is full");
+ dev_err(&res->pdev->dev, "SGID table is full\n");
return -ENOMEM;
}
free_idx = sgid_tbl->max;
for (i = 0; i < sgid_tbl->max; i++) {
if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid))) {
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID entry already exist in entry %d!",
- i);
+ "SGID entry already exist in entry %d!\n", i);
*index = i;
return -EALREADY;
} else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
@@ -301,7 +308,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
if (free_idx == sgid_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: SGID table is FULL but count is not MAX??");
+ "SGID table is FULL but count is not MAX??\n");
return -ENOMEM;
}
if (update) {
@@ -348,7 +355,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
sgid_tbl->vlan[free_idx] = 1;
dev_dbg(&res->pdev->dev,
- "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x",
+ "SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
*index = free_idx;
@@ -404,7 +411,7 @@ int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
}
if (index >= pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: Index %d exceeded PKEY table max (%d)",
+ "Index %d exceeded PKEY table max (%d)\n",
index, pkey_tbl->max);
return -EINVAL;
}
@@ -419,14 +426,13 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
int i, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev,
- "QPLIB: PKEY table has no active entries");
+ dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
return -ENOMEM;
}
for (i = 0; i < pkey_tbl->max; i++) {
@@ -435,8 +441,7 @@ int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
}
if (i == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY 0x%04x not found in the pkey table",
- *pkey);
+ "PKEY 0x%04x not found in the pkey table\n", *pkey);
return -ENOMEM;
}
memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
@@ -453,13 +458,13 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
int i, free_idx, rc = 0;
if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table not allocated");
+ dev_err(&res->pdev->dev, "PKEY table not allocated\n");
return -EINVAL;
}
/* Do we need a pkey_lock here? */
if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "QPLIB: PKEY table is full");
+ dev_err(&res->pdev->dev, "PKEY table is full\n");
return -ENOMEM;
}
free_idx = pkey_tbl->max;
@@ -471,7 +476,7 @@ int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
}
if (free_idx == pkey_tbl->max) {
dev_err(&res->pdev->dev,
- "QPLIB: PKEY table is FULL but count is not MAX??");
+ "PKEY table is FULL but count is not MAX??\n");
return -ENOMEM;
}
/* Add PKEY to the pkey_tbl */
@@ -555,8 +560,7 @@ int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
int rc;
if (mrw->lkey == 0xFFFFFFFF) {
- dev_info(&res->pdev->dev,
- "QPLIB: SP: Free a reserved lkey MRW");
+ dev_info(&res->pdev->dev, "SP: Free a reserved lkey MRW\n");
return 0;
}
@@ -666,9 +670,8 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
pages++;
if (pages > MAX_PBL_LVL_1_PGS) {
- dev_err(&res->pdev->dev, "QPLIB: SP: Reg MR pages ");
dev_err(&res->pdev->dev,
- "requested (0x%x) exceeded max (0x%x)",
+ "SP: Reg MR pages requested (0x%x) exceeded max (0x%x)\n",
pages, MAX_PBL_LVL_1_PGS);
return -ENOMEM;
}
@@ -684,7 +687,7 @@ int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr,
HWQ_TYPE_CTX);
if (rc) {
dev_err(&res->pdev->dev,
- "SP: Reg MR memory allocation failed");
+ "SP: Reg MR memory allocation failed\n");
return -ENOMEM;
}
/* Write to the hwq */
@@ -795,7 +798,7 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf) {
dev_err(&rcfw->pdev->dev,
- "QPLIB: SP: QUERY_ROCE_STATS alloc side buffer failed");
+ "SP: QUERY_ROCE_STATS alloc side buffer failed\n");
return -ENOMEM;
}
@@ -845,6 +848,16 @@ int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+ if (!rcfw->init_oos_stats) {
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ rcfw->init_oos_stats = 1;
+ } else {
+ stats->res_oos_drop_count +=
+ (le64_to_cpu(sb->res_oos_drop_count) -
+ rcfw->oos_prev) & BNXT_QPLIB_OOS_COUNT_MASK;
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ }
+
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
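
The accumulation above survives firmware-counter wrap because the delta is masked to the counter's width before being added to the 64-bit running total. A standalone sketch of the same pattern, with the mask standing in for the 32-bit hardware width as BNXT_QPLIB_OOS_COUNT_MASK does above (helper name hypothetical):

    #include <stdint.h>

    #define OOS_COUNT_MASK 0xFFFFFFFFull        /* HW counter is 32 bits wide */

    /*
     * Fold a free-running 32-bit counter sample into a 64-bit total.
     * Masking the difference keeps the delta correct across a wrap:
     * prev = 0xFFFFFFF0, cur = 0x00000010
     *   => (cur - prev) & MASK == 0x20, the true increment.
     */
    static void accumulate_oos(uint64_t *total, uint64_t *prev, uint64_t cur)
    {
            *total += (cur - *prev) & OOS_COUNT_MASK;
            *prev = cur;
    }
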
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 9d3e8b994945..8079d7f5a008 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -205,6 +205,16 @@ struct bnxt_qplib_roce_stats {
/* res_tx_pci_err is 64 b */
u64 res_rx_pci_err;
/* res_rx_pci_err is 64 b */
+ u64 res_oos_drop_count;
+ /* res_oos_drop_count is 64 b */
+ u64 active_qp_count_p0;
+ /* port 0 active qps */
+ u64 active_qp_count_p1;
+ /* port 1 active qps */
+ u64 active_qp_count_p2;
+ /* port 2 active qps */
+ u64 active_qp_count_p3;
+ /* port 3 active qps */
};
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 3e5a4f760d0e..8a9ead419ac2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -2929,6 +2929,11 @@ struct creq_query_roce_stats_resp_sb {
__le64 res_srq_load_err;
__le64 res_tx_pci_err;
__le64 res_rx_pci_err;
+ __le64 res_oos_drop_count;
+ __le64 active_qp_count_p0;
+ __le64 active_qp_count_p1;
+ __le64 active_qp_count_p2;
+ __le64 active_qp_count_p3;
};
/* QP error notification event (16 bytes) */
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 1b9ff21aa1d5..ebbec02cebe0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1127,17 +1127,18 @@ static int iwch_query_port(struct ib_device *ibdev,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
pr_debug("%s dev 0x%p\n", __func__, dev);
return sprintf(buf, "%d\n", iwch_dev->rdev.t3cdev_p->type);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1148,9 +1149,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct iwch_dev *iwch_dev = container_of(dev, struct iwch_dev,
ibdev.dev);
@@ -1158,6 +1160,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", iwch_dev->rdev.rnic_info.pdev->vendor,
iwch_dev->rdev.rnic_info.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IPINRECEIVES,
@@ -1274,14 +1277,15 @@ static int iwch_get_mib(struct ib_device *ibdev, struct rdma_hw_stats *stats,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *iwch_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *iwch_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group iwch_attr_group = {
+ .attrs = iwch_class_attributes,
};
static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -1316,10 +1320,8 @@ static void get_dev_fw_ver_str(struct ib_device *ibdev, char *str)
int iwch_register_device(struct iwch_dev *dev)
{
int ret;
- int i;
pr_debug("%s iwch_dev %p\n", __func__, dev);
- strlcpy(dev->ibdev.name, "cxgb3_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.t3cdev_p->lldev->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -1402,33 +1404,16 @@ int iwch_register_device(struct iwch_dev *dev)
sizeof(dev->ibdev.iwcm->ifname));
dev->ibdev.driver_id = RDMA_DRIVER_CXGB3;
- ret = ib_register_device(&dev->ibdev, NULL);
+ rdma_set_device_sysfs_group(&dev->ibdev, &iwch_attr_group);
+ ret = ib_register_device(&dev->ibdev, "cxgb3_%d", NULL);
if (ret)
- goto bail1;
-
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
- if (ret) {
- goto bail2;
- }
- }
- return 0;
-bail2:
- ib_unregister_device(&dev->ibdev);
-bail1:
- kfree(dev->ibdev.iwcm);
+ kfree(dev->ibdev.iwcm);
return ret;
}
void iwch_unregister_device(struct iwch_dev *dev)
{
- int i;
-
pr_debug("%s iwch_dev %p\n", __func__, dev);
- for (i = 0; i < ARRAY_SIZE(iwch_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- iwch_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
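
The conversion above (repeated for cxgb4 further down) replaces per-attribute device_create_file() loops and their unwind labels with a static attribute group that the RDMA core creates atomically at registration time, closing the window where userspace could see the device before its attributes exist. A condensed sketch of the pattern, with placeholder names for everything except the core APIs:

    static ssize_t hw_rev_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "%d\n", 0 /* placeholder revision */);
    }
    static DEVICE_ATTR_RO(hw_rev);          /* defines dev_attr_hw_rev */

    static struct attribute *drv_attrs[] = {
            &dev_attr_hw_rev.attr,
            NULL,                           /* array must be NULL-terminated */
    };

    static const struct attribute_group drv_attr_group = {
            .attrs = drv_attrs,
    };

    static int drv_register(struct ib_device *ibdev)
    {
            /* The group is created with the device; no unwind needed. */
            rdma_set_device_sysfs_group(ibdev, &drv_attr_group);
            return ib_register_device(ibdev, "drv_%d", NULL);
    }
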
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0f83cbec33f3..615413bd3e8d 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -403,8 +403,7 @@ void _c4iw_free_ep(struct kref *kref)
ep->com.local_addr.ss_family);
dst_release(ep->dst);
cxgb4_l2t_release(ep->l2t);
- if (ep->mpa_skb)
- kfree_skb(ep->mpa_skb);
+ kfree_skb(ep->mpa_skb);
}
if (!skb_queue_empty(&ep->com.ep_skb_list))
skb_queue_purge(&ep->com.ep_skb_list);
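
The dropped guard is safe because kfree_skb(), like kfree(), is a no-op when handed NULL; the two forms below are equivalent (the same simplification appears again in qp.c later in this patch):

    if (ep->mpa_skb)                /* redundant guard, removed above */
            kfree_skb(ep->mpa_skb);

    kfree_skb(ep->mpa_skb);         /* equivalent: kfree_skb(NULL) returns early */
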
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 6d3042794094..1fd8798d91a7 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -161,7 +161,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
- cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
+ cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, CXGB4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
if (user && !cq->bar2_pa) {
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 4eda6872e617..cbb3c0ddd990 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -373,8 +373,8 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
return 0;
}
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -382,9 +382,10 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n",
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -395,9 +396,10 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
lldev->ethtool_ops->get_drvinfo(lldev, &info);
return sprintf(buf, "%s\n", info.driver);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
ibdev.dev);
@@ -405,6 +407,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
c4iw_dev->rdev.lldi.pdev->device);
}
+static DEVICE_ATTR_RO(board_id);
enum counters {
IP4INSEGS,
@@ -461,14 +464,15 @@ static int c4iw_get_mib(struct ib_device *ibdev,
return stats->num_counters;
}
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *c4iw_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *c4iw_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
+static const struct attribute_group c4iw_attr_group = {
+ .attrs = c4iw_class_attributes,
};
static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num,
@@ -530,12 +534,10 @@ static int fill_res_entry(struct sk_buff *msg, struct rdma_restrack_entry *res)
void c4iw_register_device(struct work_struct *work)
{
int ret;
- int i;
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
dev->ibdev.owner = THIS_MODULE;
@@ -626,20 +628,13 @@ void c4iw_register_device(struct work_struct *work)
memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
sizeof(dev->ibdev.iwcm->ifname));
+ rdma_set_device_sysfs_group(&dev->ibdev, &c4iw_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_CXGB4;
- ret = ib_register_device(&dev->ibdev, NULL);
+ ret = ib_register_device(&dev->ibdev, "cxgb4_%d", NULL);
if (ret)
goto err_kfree_iwcm;
-
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
- ret = device_create_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
- if (ret)
- goto err_unregister_device;
- }
return;
-err_unregister_device:
- ib_unregister_device(&dev->ibdev);
+
err_kfree_iwcm:
kfree(dev->ibdev.iwcm);
err_dealloc_ctx:
@@ -651,12 +646,7 @@ err_dealloc_ctx:
void c4iw_unregister_device(struct c4iw_dev *dev)
{
- int i;
-
pr_debug("c4iw_dev %p\n", dev);
- for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
- device_remove_file(&dev->ibdev.dev,
- c4iw_class_attributes[i]);
ib_unregister_device(&dev->ibdev);
kfree(dev->ibdev.iwcm);
return;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 347fe18b1a41..13478f3b7057 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -99,7 +99,7 @@ static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
- pci_unmap_addr(sq, mapping));
+ dma_unmap_addr(sq, mapping));
}
static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
@@ -132,7 +132,7 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
if (!sq->queue)
return -ENOMEM;
sq->phys_addr = virt_to_phys(sq->queue);
- pci_unmap_addr_set(sq, mapping, sq->dma_addr);
+ dma_unmap_addr_set(sq, mapping, sq->dma_addr);
return 0;
}
@@ -279,12 +279,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq->db = rdev->lldi.db_reg;
- wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS,
+ wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->sq.bar2_qid,
user ? &wq->sq.bar2_pa : NULL);
if (need_rq)
wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
- T4_BAR2_QTYPE_EGRESS,
+ CXGB4_BAR2_QTYPE_EGRESS,
&wq->rq.bar2_qid,
user ? &wq->rq.bar2_pa : NULL);
@@ -2521,7 +2522,7 @@ static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
kfree(wq->sw_rq);
c4iw_put_qpid(rdev, wq->qid, uctx);
@@ -2570,9 +2571,9 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
goto err_free_rqtpool;
memset(wq->queue, 0, wq->memsize);
- pci_unmap_addr_set(wq, mapping, wq->dma_addr);
+ dma_unmap_addr_set(wq, mapping, wq->dma_addr);
- wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, T4_BAR2_QTYPE_EGRESS,
+ wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
&wq->bar2_qid,
user ? &wq->bar2_pa : NULL);
@@ -2649,7 +2650,7 @@ static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
err_free_queue:
dma_free_coherent(&rdev->lldi.pdev->dev,
wq->memsize, wq->queue,
- pci_unmap_addr(wq, mapping));
+ dma_unmap_addr(wq, mapping));
err_free_rqtpool:
c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
err_free_pending_wrs:
@@ -2813,8 +2814,7 @@ err_free_queue:
free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
srq->wr_waitp);
err_free_skb:
- if (srq->destroy_skb)
- kfree_skb(srq->destroy_skb);
+ kfree_skb(srq->destroy_skb);
err_free_srq_idx:
c4iw_free_srq_idx(&rhp->rdev, srq->idx);
err_free_wr_wait:
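
The pci_unmap_addr*() helpers were legacy aliases of the generic DMA API; the qp.c and t4.h changes switch to the dma_unmap_addr*() family one-for-one. A sketch of the idiom, assuming a configuration where CONFIG_NEED_DMA_MAP_STATE makes the stored address real (struct and function names illustrative):

    struct ring {
            void *queue;
            dma_addr_t dma_addr;
            DEFINE_DMA_UNMAP_ADDR(mapping); /* may compile to nothing if the
                                             * arch needs no unmap state */
    };

    static void ring_save_handle(struct ring *r)
    {
            /* at allocation time: remember the handle for teardown */
            dma_unmap_addr_set(r, mapping, r->dma_addr);
    }

    static void ring_free(struct device *dev, struct ring *r, size_t size)
    {
            /* at teardown: retrieve it again */
            dma_free_coherent(dev, size, r->queue,
                              dma_unmap_addr(r, mapping));
    }
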
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index e42021fd6fd6..fff6d48d262f 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -397,7 +397,7 @@ struct t4_srq_pending_wr {
struct t4_srq {
union t4_recv_wr *queue;
dma_addr_t dma_addr;
- DECLARE_PCI_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_ADDR(mapping);
struct t4_swrqe *sw_rq;
void __iomem *bar2_va;
u64 bar2_pa;
diff --git a/drivers/infiniband/hw/hfi1/Makefile b/drivers/infiniband/hw/hfi1/Makefile
index f451ba912f47..ff790390c91a 100644
--- a/drivers/infiniband/hw/hfi1/Makefile
+++ b/drivers/infiniband/hw/hfi1/Makefile
@@ -8,12 +8,42 @@
#
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1.o
-hfi1-y := affinity.o chip.o device.o driver.o efivar.o \
- eprom.o exp_rcv.o file_ops.o firmware.o \
- init.o intr.o mad.o mmu_rb.o pcie.o pio.o pio_copy.o platform.o \
- qp.o qsfp.o rc.o ruc.o sdma.o sysfs.o trace.o \
- uc.o ud.o user_exp_rcv.o user_pages.o user_sdma.o verbs.o \
- verbs_txreq.o vnic_main.o vnic_sdma.o
+hfi1-y := \
+ affinity.o \
+ chip.o \
+ device.o \
+ driver.o \
+ efivar.o \
+ eprom.o \
+ exp_rcv.o \
+ file_ops.o \
+ firmware.o \
+ init.o \
+ intr.o \
+ iowait.o \
+ mad.o \
+ mmu_rb.o \
+ msix.o \
+ pcie.o \
+ pio.o \
+ pio_copy.o \
+ platform.o \
+ qp.o \
+ qsfp.o \
+ rc.o \
+ ruc.o \
+ sdma.o \
+ sysfs.o \
+ trace.o \
+ uc.o \
+ ud.o \
+ user_exp_rcv.o \
+ user_pages.o \
+ user_sdma.o \
+ verbs.o \
+ verbs_txreq.o \
+ vnic_main.o \
+ vnic_sdma.o
ifdef CONFIG_DEBUG_FS
hfi1-y += debugfs.o
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index bedd5fba33b0..2baf38cc1e23 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -817,10 +817,10 @@ static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
set = &entry->def_intr;
cpumask_set_cpu(cpu, &set->mask);
cpumask_set_cpu(cpu, &set->used);
- for (i = 0; i < dd->num_msix_entries; i++) {
+ for (i = 0; i < dd->msix_info.max_requested; i++) {
struct hfi1_msix_entry *other_msix;
- other_msix = &dd->msix_entries[i];
+ other_msix = &dd->msix_info.msix_entries[i];
if (other_msix->type != IRQ_SDMA || other_msix == msix)
continue;
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index e1668bcc2d13..9b20479dc710 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -67,8 +67,6 @@
#include "debugfs.h"
#include "fault.h"
-#define NUM_IB_PORTS 1
-
uint kdeth_qp;
module_param_named(kdeth_qp, kdeth_qp, uint, S_IRUGO);
MODULE_PARM_DESC(kdeth_qp, "Set the KDETH queue pair prefix");
@@ -1100,9 +1098,9 @@ struct err_reg_info {
const char *desc;
};
-#define NUM_MISC_ERRS (IS_GENERAL_ERR_END - IS_GENERAL_ERR_START)
-#define NUM_DC_ERRS (IS_DC_END - IS_DC_START)
-#define NUM_VARIOUS (IS_VARIOUS_END - IS_VARIOUS_START)
+#define NUM_MISC_ERRS (IS_GENERAL_ERR_END + 1 - IS_GENERAL_ERR_START)
+#define NUM_DC_ERRS (IS_DC_END + 1 - IS_DC_START)
+#define NUM_VARIOUS (IS_VARIOUS_END + 1 - IS_VARIOUS_START)
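
The +1 here tracks the redefinition of the *_END macros (see the chip.h hunk later in this patch) from one-past-the-end to last-source-inclusive; the computed counts themselves are unchanged. Worked example with the DC range:

    /* old: exclusive end                 new: inclusive end               */
    /* IS_DC_START = 248                  IS_DC_START = 248                */
    /* IS_DC_END   = 256 (= RCVAVAIL)     IS_DC_END   = 255                */
    /* NUM_DC_ERRS = 256 - 248 = 8        NUM_DC_ERRS = 255 + 1 - 248 = 8  */
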
/*
* Helpers for building HFI and DC error interrupt table entries. Different
@@ -8181,7 +8179,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
/**
* is_rcv_urgent_int() - User receive context urgent IRQ handler
* @dd: valid dd
- * @source: logical IRQ source (ofse from IS_RCVURGENT_START)
+ * @source: logical IRQ source (offset from IS_RCVURGENT_START)
*
* RX block receive urgent interrupt. Source is < 160.
*
@@ -8231,7 +8229,7 @@ static const struct is_table is_table[] = {
is_sdma_eng_err_name, is_sdma_eng_err_int },
{ IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END,
is_sendctxt_err_name, is_sendctxt_err_int },
-{ IS_SDMA_START, IS_SDMA_END,
+{ IS_SDMA_START, IS_SDMA_IDLE_END,
is_sdma_eng_name, is_sdma_eng_int },
{ IS_VARIOUS_START, IS_VARIOUS_END,
is_various_name, is_various_int },
@@ -8257,7 +8255,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
/* avoids a double compare by walking the table in-order */
for (entry = &is_table[0]; entry->is_name; entry++) {
- if (source < entry->end) {
+ if (source <= entry->end) {
trace_hfi1_interrupt(dd, entry, source);
entry->is_int(dd, source - entry->start);
return;
@@ -8276,7 +8274,7 @@ static void is_interrupt(struct hfi1_devdata *dd, unsigned int source)
* context DATA IRQs are threaded and are not supported by this handler.
*
*/
-static irqreturn_t general_interrupt(int irq, void *data)
+irqreturn_t general_interrupt(int irq, void *data)
{
struct hfi1_devdata *dd = data;
u64 regs[CCE_NUM_INT_CSRS];
@@ -8309,7 +8307,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
return handled;
}
-static irqreturn_t sdma_interrupt(int irq, void *data)
+irqreturn_t sdma_interrupt(int irq, void *data)
{
struct sdma_engine *sde = data;
struct hfi1_devdata *dd = sde->dd;
@@ -8401,7 +8399,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
* invoked) is finished. The intent is to avoid extra interrupts while we
* are processing packets anyway.
*/
-static irqreturn_t receive_context_interrupt(int irq, void *data)
+irqreturn_t receive_context_interrupt(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
struct hfi1_devdata *dd = rcd->dd;
@@ -8441,7 +8439,7 @@ static irqreturn_t receive_context_interrupt(int irq, void *data)
* Receive packet thread handler. This expects to be invoked with the
* receive interrupt still blocked.
*/
-static irqreturn_t receive_context_thread(int irq, void *data)
+irqreturn_t receive_context_thread(int irq, void *data)
{
struct hfi1_ctxtdata *rcd = data;
int present;
@@ -9651,30 +9649,10 @@ void qsfp_event(struct work_struct *work)
}
}
-static void init_qsfp_int(struct hfi1_devdata *dd)
+void init_qsfp_int(struct hfi1_devdata *dd)
{
struct hfi1_pportdata *ppd = dd->pport;
- u64 qsfp_mask, cce_int_mask;
- const int qsfp1_int_smask = QSFP1_INT % 64;
- const int qsfp2_int_smask = QSFP2_INT % 64;
-
- /*
- * disable QSFP1 interrupts for HFI1, QSFP2 interrupts for HFI0
- * Qsfp1Int and Qsfp2Int are adjacent bits in the same CSR,
- * therefore just one of QSFP1_INT/QSFP2_INT can be used to find
- * the index of the appropriate CSR in the CCEIntMask CSR array
- */
- cce_int_mask = read_csr(dd, CCE_INT_MASK +
- (8 * (QSFP1_INT / 64)));
- if (dd->hfi1_id) {
- cce_int_mask &= ~((u64)1 << qsfp1_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP1_INT / 64)),
- cce_int_mask);
- } else {
- cce_int_mask &= ~((u64)1 << qsfp2_int_smask);
- write_csr(dd, CCE_INT_MASK + (8 * (QSFP2_INT / 64)),
- cce_int_mask);
- }
+ u64 qsfp_mask;
qsfp_mask = (u64)(QSFP_HFI0_INT_N | QSFP_HFI0_MODPRST_N);
/* Clear current status to avoid spurious interrupts */
@@ -9691,6 +9669,12 @@ static void init_qsfp_int(struct hfi1_devdata *dd)
write_csr(dd,
dd->hfi1_id ? ASIC_QSFP2_INVERT : ASIC_QSFP1_INVERT,
qsfp_mask);
+
+ /* Enable the appropriate QSFP IRQ source */
+ if (!dd->hfi1_id)
+ set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);
+ else
+ set_intr_bits(dd, QSFP2_INT, QSFP2_INT, true);
}
/*
@@ -10577,12 +10561,29 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
}
}
-/*
- * Verify if BCT for data VLs is non-zero.
+/**
+ * data_vls_operational() - Verify if data VL BCT credits and MTU
+ * are both set.
+ * @ppd: pointer to hfi1_pportdata structure
+ *
+ * Return: true - OK, false - otherwise.
*/
static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
{
- return !!ppd->actual_vls_operational;
+ int i;
+ u64 reg;
+
+ if (!ppd->actual_vls_operational)
+ return false;
+
+ for (i = 0; i < ppd->vls_supported; i++) {
+ reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
+ if ((reg && !ppd->dd->vld[i].mtu) ||
+ (!reg && ppd->dd->vld[i].mtu))
+ return false;
+ }
+
+ return true;
}
/*
@@ -10695,7 +10696,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
if (!data_vls_operational(ppd)) {
dd_dev_err(dd,
- "%s: data VLs not operational\n", __func__);
+ "%s: Invalid data VL credits or mtu\n",
+ __func__);
ret = -EINVAL;
break;
}
@@ -11932,10 +11934,16 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl &= ~RCV_CTXT_CTRL_ENABLE_SMASK;
}
- if (op & HFI1_RCVCTRL_INTRAVAIL_ENB)
+ if (op & HFI1_RCVCTRL_INTRAVAIL_ENB) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, true);
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
+ }
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) {
+ set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
+ IS_RCVAVAIL_START + rcd->ctxt, false);
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ }
if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
@@ -11965,6 +11973,13 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
rcvctrl |= RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
if (op & HFI1_RCVCTRL_NO_EGR_DROP_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_DONT_DROP_EGR_FULL_SMASK;
+ if (op & HFI1_RCVCTRL_URGENT_ENB)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, true);
+ if (op & HFI1_RCVCTRL_URGENT_DIS)
+ set_intr_bits(dd, IS_RCVURGENT_START + rcd->ctxt,
+ IS_RCVURGENT_START + rcd->ctxt, false);
+
hfi1_cdbg(RCVCTRL, "ctxt %d rcvctrl 0x%llx\n", ctxt, rcvctrl);
write_kctxt_csr(dd, ctxt, RCV_CTXT_CTRL, rcvctrl);
@@ -12963,63 +12978,71 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
return ret;
}
+/* ========================================================================= */
+
/**
- * get_int_mask - get 64 bit int mask
- * @dd - the devdata
- * @i - the csr (relative to CCE_INT_MASK)
+ * read_mod_write() - Calculate the IRQ register index and set/clear the bits
+ * @dd: valid devdata
+ * @src: IRQ source to determine register index from
+ * @bits: the bits to set or clear
+ * @set: true == set the bits, false == clear the bits
*
- * Returns the mask with the urgent interrupt mask
- * bit clear for kernel receive contexts.
*/
-static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
+static void read_mod_write(struct hfi1_devdata *dd, u16 src, u64 bits,
+ bool set)
{
- u64 mask = U64_MAX; /* default to no change */
-
- if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
- int j = (i - (IS_RCVURGENT_START / 64)) * 64;
- int k = !j ? IS_RCVURGENT_START % 64 : 0;
+ u64 reg;
+ u16 idx = src / BITS_PER_REGISTER;
- if (j)
- j -= IS_RCVURGENT_START % 64;
- /* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
- for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
- /* convert to bit in mask and clear */
- mask &= ~BIT_ULL(k);
- }
- return mask;
+ spin_lock(&dd->irq_src_lock);
+ reg = read_csr(dd, CCE_INT_MASK + (8 * idx));
+ if (set)
+ reg |= bits;
+ else
+ reg &= ~bits;
+ write_csr(dd, CCE_INT_MASK + (8 * idx), reg);
+ spin_unlock(&dd->irq_src_lock);
}
-/* ========================================================================= */
-
-/*
- * Enable/disable chip from delivering interrupts.
+/**
+ * set_intr_bits() - Enable/disable a range (one or more) of IRQ sources
+ * @dd: valid devdata
+ * @first: first IRQ source to set/clear
+ * @last: last IRQ source (inclusive) to set/clear
+ * @set: true == set the bits, false == clear the bits
+ *
+ * If first == last, set the exact source.
*/
-void set_intr_state(struct hfi1_devdata *dd, u32 enable)
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set)
{
- int i;
+ u64 bits = 0;
+ u64 bit;
+ u16 src;
- /*
- * In HFI, the mask needs to be 1 to allow interrupts.
- */
- if (enable) {
- /* enable all interrupts but urgent on kernel contexts */
- for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
- u64 mask = get_int_mask(dd, i);
+ if (first > NUM_INTERRUPT_SOURCES || last > NUM_INTERRUPT_SOURCES)
+ return -EINVAL;
- write_csr(dd, CCE_INT_MASK + (8 * i), mask);
- }
+ if (last < first)
+ return -ERANGE;
- init_qsfp_int(dd);
- } else {
- for (i = 0; i < CCE_NUM_INT_CSRS; i++)
- write_csr(dd, CCE_INT_MASK + (8 * i), 0ull);
+ for (src = first; src <= last; src++) {
+ bit = src % BITS_PER_REGISTER;
+ /* wrapped to next register? */
+ if (!bit && bits) {
+ read_mod_write(dd, src - 1, bits, set);
+ bits = 0;
+ }
+ bits |= BIT_ULL(bit);
}
+ read_mod_write(dd, last, bits, set);
+
+ return 0;
}
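
Callers address either a single source (first == last) or a contiguous range; the loop batches bits per 64-bit CCE_INT_MASK register and flushes through read_mod_write() whenever the source index crosses a register boundary. Usage examples taken from elsewhere in this patch:

    /* single source: enable just the QSFP1 module IRQ */
    set_intr_bits(dd, QSFP1_INT, QSFP1_INT, true);

    /* full range: mask every interrupt source on the chip */
    set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);

    /* per-context: enable the receive-available IRQ for one context */
    set_intr_bits(dd, IS_RCVAVAIL_START + rcd->ctxt,
                  IS_RCVAVAIL_START + rcd->ctxt, true);
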
/*
* Clear all interrupt sources on the chip.
*/
-static void clear_all_interrupts(struct hfi1_devdata *dd)
+void clear_all_interrupts(struct hfi1_devdata *dd)
{
int i;
@@ -13043,38 +13066,11 @@ static void clear_all_interrupts(struct hfi1_devdata *dd)
write_csr(dd, DC_DC8051_ERR_CLR, ~(u64)0);
}
-/**
- * hfi1_clean_up_interrupts() - Free all IRQ resources
- * @dd: valid device data data structure
- *
- * Free the MSIx and assoicated PCI resources, if they have been allocated.
- */
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
-{
- int i;
- struct hfi1_msix_entry *me = dd->msix_entries;
-
- /* remove irqs - must happen before disabling/turning off */
- for (i = 0; i < dd->num_msix_entries; i++, me++) {
- if (!me->arg) /* => no irq, no affinity */
- continue;
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, i, me->arg);
- }
-
- /* clean structures */
- kfree(dd->msix_entries);
- dd->msix_entries = NULL;
- dd->num_msix_entries = 0;
-
- pci_free_irq_vectors(dd->pcidev);
-}
-
/*
* Remap the interrupt source from the general handler to the given MSI-X
* interrupt.
*/
-static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
{
u64 reg;
int m, n;
@@ -13098,8 +13094,7 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
write_csr(dd, CCE_INT_MAP + (8 * m), reg);
}
-static void remap_sdma_interrupts(struct hfi1_devdata *dd,
- int engine, int msix_intr)
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr)
{
/*
* SDMA engine interrupt sources grouped by type, rather than
@@ -13108,204 +13103,16 @@ static void remap_sdma_interrupts(struct hfi1_devdata *dd,
* SDMAProgress
* SDMAIdle
*/
- remap_intr(dd, IS_SDMA_START + 0 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 1 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
- remap_intr(dd, IS_SDMA_START + 2 * TXE_NUM_SDMA_ENGINES + engine,
- msix_intr);
-}
-
-static int request_msix_irqs(struct hfi1_devdata *dd)
-{
- int first_general, last_general;
- int first_sdma, last_sdma;
- int first_rx, last_rx;
- int i, ret = 0;
-
- /* calculate the ranges we are going to use */
- first_general = 0;
- last_general = first_general + 1;
- first_sdma = last_general;
- last_sdma = first_sdma + dd->num_sdma;
- first_rx = last_sdma;
- last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* VNIC MSIx interrupts get mapped when VNIC contexts are created */
- dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
-
- /*
- * Sanity check - the code expects all SDMA chip source
- * interrupts to be in the same CSR, starting at bit 0. Verify
- * that this is true by checking the bit location of the start.
- */
- BUILD_BUG_ON(IS_SDMA_START % 64);
-
- for (i = 0; i < dd->num_msix_entries; i++) {
- struct hfi1_msix_entry *me = &dd->msix_entries[i];
- const char *err_info;
- irq_handler_t handler;
- irq_handler_t thread = NULL;
- void *arg = NULL;
- int idx;
- struct hfi1_ctxtdata *rcd = NULL;
- struct sdma_engine *sde = NULL;
- char name[MAX_NAME_SIZE];
-
- /* obtain the arguments to pci_request_irq */
- if (first_general <= i && i < last_general) {
- idx = i - first_general;
- handler = general_interrupt;
- arg = dd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d", dd->unit);
- err_info = "general";
- me->type = IRQ_GENERAL;
- } else if (first_sdma <= i && i < last_sdma) {
- idx = i - first_sdma;
- sde = &dd->per_sdma[idx];
- handler = sdma_interrupt;
- arg = sde;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d sdma%d", dd->unit, idx);
- err_info = "sdma";
- remap_sdma_interrupts(dd, idx, i);
- me->type = IRQ_SDMA;
- } else if (first_rx <= i && i < last_rx) {
- idx = i - first_rx;
- rcd = hfi1_rcd_get_by_index_safe(dd, idx);
- if (rcd) {
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- handler = receive_context_interrupt;
- thread = receive_context_thread;
- arg = rcd;
- snprintf(name, sizeof(name),
- DRIVER_NAME "_%d kctxt%d",
- dd->unit, idx);
- err_info = "receive context";
- remap_intr(dd, IS_RCVAVAIL_START + idx, i);
- me->type = IRQ_RCVCTXT;
- rcd->msix_intr = i;
- hfi1_rcd_put(rcd);
- }
- } else {
- /* not in our expected range - complain, then
- * ignore it
- */
- dd_dev_err(dd,
- "Unexpected extra MSI-X interrupt %d\n", i);
- continue;
- }
- /* no argument, no interrupt */
- if (!arg)
- continue;
- /* make sure the name is terminated */
- name[sizeof(name) - 1] = 0;
- me->irq = pci_irq_vector(dd->pcidev, i);
- ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
- name);
- if (ret) {
- dd_dev_err(dd,
- "unable to allocate %s interrupt, irq %d, index %d, err %d\n",
- err_info, me->irq, idx, ret);
- return ret;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret)
- dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
- }
-
- return ret;
-}
-
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
-{
- int i;
-
- for (i = 0; i < dd->vnic.num_ctxt; i++) {
- struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- synchronize_irq(me->irq);
- }
-}
-
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me = &dd->msix_entries[rcd->msix_intr];
-
- if (!me->arg) /* => no irq, no affinity */
- return;
-
- hfi1_put_irq_affinity(dd, me);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
-
- me->arg = NULL;
-}
-
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
-{
- struct hfi1_devdata *dd = rcd->dd;
- struct hfi1_msix_entry *me;
- int idx = rcd->ctxt;
- void *arg = rcd;
- int ret;
-
- rcd->msix_intr = dd->vnic.msix_idx++;
- me = &dd->msix_entries[rcd->msix_intr];
-
- /*
- * Set the interrupt register and mask for this
- * context's interrupt.
- */
- rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
- rcd->imask = ((u64)1) <<
- ((IS_RCVAVAIL_START + idx) % 64);
- me->type = IRQ_RCVCTXT;
- me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
- remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
-
- ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
- receive_context_interrupt,
- receive_context_thread, arg,
- DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
- if (ret) {
- dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
- me->irq, idx, ret);
- return;
- }
- /*
- * assign arg after pci_request_irq call, so it will be
- * cleaned up
- */
- me->arg = arg;
-
- ret = hfi1_get_irq_affinity(dd, me);
- if (ret) {
- dd_dev_err(dd,
- "unable to pin IRQ %d\n", ret);
- pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
- }
+ remap_intr(dd, IS_SDMA_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_PROGRESS_START + engine, msix_intr);
+ remap_intr(dd, IS_SDMA_IDLE_START + engine, msix_intr);
}
/*
* Set the general handler to accept all interrupts, remap all
* chip interrupts back to MSI-X 0.
*/
-static void reset_interrupts(struct hfi1_devdata *dd)
+void reset_interrupts(struct hfi1_devdata *dd)
{
int i;
@@ -13318,54 +13125,33 @@ static void reset_interrupts(struct hfi1_devdata *dd)
write_csr(dd, CCE_INT_MAP + (8 * i), 0);
}
+/**
+ * set_up_interrupts() - Initialize the IRQ resources and state
+ * @dd: valid devdata
+ *
+ */
static int set_up_interrupts(struct hfi1_devdata *dd)
{
- u32 total;
- int ret, request;
-
- /*
- * Interrupt count:
- * 1 general, "slow path" interrupt (includes the SDMA engines
- * slow source, SDMACleanupDone)
- * N interrupts - one per used SDMA engine
- * M interrupt - one per kernel receive context
- * V interrupt - one for each VNIC context
- */
- total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
-
- /* ask for MSI-X interrupts */
- request = request_msix(dd, total);
- if (request < 0) {
- ret = request;
- goto fail;
- } else {
- dd->msix_entries = kcalloc(total, sizeof(*dd->msix_entries),
- GFP_KERNEL);
- if (!dd->msix_entries) {
- ret = -ENOMEM;
- goto fail;
- }
- /* using MSI-X */
- dd->num_msix_entries = total;
- dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
- }
+ int ret;
/* mask all interrupts */
- set_intr_state(dd, 0);
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+
/* clear all pending interrupts */
clear_all_interrupts(dd);
/* reset general handler mask, chip MSI-X mappings */
reset_interrupts(dd);
- ret = request_msix_irqs(dd);
+ /* ask for MSI-X interrupts */
+ ret = msix_initialize(dd);
if (ret)
- goto fail;
+ return ret;
- return 0;
+ ret = msix_request_irqs(dd);
+ if (ret)
+ msix_clean_up_interrupts(dd);
-fail:
- hfi1_clean_up_interrupts(dd);
return ret;
}
@@ -14918,20 +14704,16 @@ err_exit:
}
/**
- * Allocate and initialize the device structure for the hfi.
+ * hfi1_init_dd() - Initialize most of the dd structure.
* @dev: the pci_dev for hfi1_ib device
* @ent: pci_device_id struct for this dev
*
- * Also allocates, initializes, and returns the devdata struct for this
- * device instance
- *
* This is global, and is called directly at init to set up the
* chip-specific function pointers for later use.
*/
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+int hfi1_init_dd(struct hfi1_devdata *dd)
{
- struct hfi1_devdata *dd;
+ struct pci_dev *pdev = dd->pcidev;
struct hfi1_pportdata *ppd;
u64 reg;
int i, ret;
@@ -14942,13 +14724,8 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
"Functional simulator"
};
struct pci_dev *parent = pdev->bus->self;
- u32 sdma_engines;
+ u32 sdma_engines = chip_sdma_engines(dd);
- dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
- sizeof(struct hfi1_pportdata));
- if (IS_ERR(dd))
- goto bail;
- sdma_engines = chip_sdma_engines(dd);
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++) {
int vl;
@@ -15127,6 +14904,12 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
if (ret)
goto bail_cleanup;
+ /*
+ * This should probably occur in hfi1_pcie_init(), but historically
+ * occurs after the do_pcie_gen3_transition() code.
+ */
+ tune_pcie_caps(dd);
+
/* start setting dd values and adjusting CSRs */
init_early_variables(dd);
@@ -15239,14 +15022,13 @@ bail_free_cntrs:
free_cntrs(dd);
bail_clear_intr:
hfi1_comp_vectors_clean_up(dd);
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
bail_cleanup:
hfi1_pcie_ddcleanup(dd);
bail_free:
hfi1_free_devdata(dd);
- dd = ERR_PTR(ret);
bail:
- return dd;
+ return ret;
}
static u16 delay_cycles(struct hfi1_pportdata *ppd, u32 desired_egress_rate,
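
With allocation split out, hfi1_init_dd() now operates on a devdata the caller already built; the matching hfi1_pcie_init(dd) prototype and the now-static hfi1_alloc_devdata() appear in the hfi.h and init.c hunks below. A sketch of the probe ordering those signatures imply (error handling elided; not code from this patch):

    dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS * sizeof(struct hfi1_pportdata));
    if (IS_ERR(dd))
            return PTR_ERR(dd);

    ret = hfi1_pcie_init(dd);       /* was hfi1_pcie_init(pdev, ent) */
    if (ret)
            goto free_dd;

    ret = hfi1_init_dd(dd);         /* chip setup on the existing dd */
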
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index 36b04d6300e5..6b9c8f12dff8 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -52,9 +52,7 @@
*/
/* sizes */
-#define CCE_NUM_MSIX_VECTORS 256
-#define CCE_NUM_INT_CSRS 12
-#define CCE_NUM_INT_MAP_CSRS 96
+#define BITS_PER_REGISTER (BITS_PER_BYTE * sizeof(u64))
#define NUM_INTERRUPT_SOURCES 768
#define RXE_NUM_CONTEXTS 160
#define RXE_PER_CONTEXT_SIZE 0x1000 /* 4k */
@@ -161,34 +159,49 @@
(CR_CREDIT_RETURN_DUE_TO_FORCE_MASK << \
CR_CREDIT_RETURN_DUE_TO_FORCE_SHIFT)
-/* interrupt source numbers */
-#define IS_GENERAL_ERR_START 0
-#define IS_SDMAENG_ERR_START 16
-#define IS_SENDCTXT_ERR_START 32
-#define IS_SDMA_START 192 /* includes SDmaProgress,SDmaIdle */
+/* Specific IRQ sources */
+#define CCE_ERR_INT 0
+#define RXE_ERR_INT 1
+#define MISC_ERR_INT 2
+#define PIO_ERR_INT 4
+#define SDMA_ERR_INT 5
+#define EGRESS_ERR_INT 6
+#define TXE_ERR_INT 7
+#define PBC_INT 240
+#define GPIO_ASSERT_INT 241
+#define QSFP1_INT 242
+#define QSFP2_INT 243
+#define TCRIT_INT 244
+
+/* interrupt source ranges */
+#define IS_FIRST_SOURCE CCE_ERR_INT
+#define IS_GENERAL_ERR_START 0
+#define IS_SDMAENG_ERR_START 16
+#define IS_SENDCTXT_ERR_START 32
+#define IS_SDMA_START 192
+#define IS_SDMA_PROGRESS_START 208
+#define IS_SDMA_IDLE_START 224
#define IS_VARIOUS_START 240
#define IS_DC_START 248
#define IS_RCVAVAIL_START 256
#define IS_RCVURGENT_START 416
#define IS_SENDCREDIT_START 576
#define IS_RESERVED_START 736
-#define IS_MAX_SOURCES 768
+#define IS_LAST_SOURCE 767
/* derived interrupt source values */
-#define IS_GENERAL_ERR_END IS_SDMAENG_ERR_START
-#define IS_SDMAENG_ERR_END IS_SENDCTXT_ERR_START
-#define IS_SENDCTXT_ERR_END IS_SDMA_START
-#define IS_SDMA_END IS_VARIOUS_START
-#define IS_VARIOUS_END IS_DC_START
-#define IS_DC_END IS_RCVAVAIL_START
-#define IS_RCVAVAIL_END IS_RCVURGENT_START
-#define IS_RCVURGENT_END IS_SENDCREDIT_START
-#define IS_SENDCREDIT_END IS_RESERVED_START
-#define IS_RESERVED_END IS_MAX_SOURCES
-
-/* absolute interrupt numbers for QSFP1Int and QSFP2Int */
-#define QSFP1_INT 242
-#define QSFP2_INT 243
+#define IS_GENERAL_ERR_END 7
+#define IS_SDMAENG_ERR_END 31
+#define IS_SENDCTXT_ERR_END 191
+#define IS_SDMA_END 207
+#define IS_SDMA_PROGRESS_END 223
+#define IS_SDMA_IDLE_END 239
+#define IS_VARIOUS_END 244
+#define IS_DC_END 255
+#define IS_RCVAVAIL_END 415
+#define IS_RCVURGENT_END 575
+#define IS_SENDCREDIT_END 735
+#define IS_RESERVED_END IS_LAST_SOURCE
/* DCC_CFG_PORT_CONFIG logical link states */
#define LSTATE_DOWN 0x1
@@ -1416,6 +1429,18 @@ void hfi1_read_link_quality(struct hfi1_devdata *dd, u8 *link_quality);
void hfi1_init_vnic_rsm(struct hfi1_devdata *dd);
void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd);
+irqreturn_t general_interrupt(int irq, void *data);
+irqreturn_t sdma_interrupt(int irq, void *data);
+irqreturn_t receive_context_interrupt(int irq, void *data);
+irqreturn_t receive_context_thread(int irq, void *data);
+
+int set_intr_bits(struct hfi1_devdata *dd, u16 first, u16 last, bool set);
+void init_qsfp_int(struct hfi1_devdata *dd);
+void clear_all_interrupts(struct hfi1_devdata *dd);
+void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr);
+void remap_sdma_interrupts(struct hfi1_devdata *dd, int engine, int msix_intr);
+void reset_interrupts(struct hfi1_devdata *dd);
+
/*
* Interrupt source table.
*
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h
index ee6dca5e2a2f..c6163a347e93 100644
--- a/drivers/infiniband/hw/hfi1/chip_registers.h
+++ b/drivers/infiniband/hw/hfi1/chip_registers.h
@@ -878,6 +878,10 @@
#define SEND_CTRL (TXE + 0x000000000000)
#define SEND_CTRL_CM_RESET_SMASK 0x4ull
#define SEND_CTRL_SEND_ENABLE_SMASK 0x1ull
+#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
+#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xFFull
+#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
+ << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
#define SEND_CTRL_VL_ARBITER_ENABLE_SMASK 0x2ull
#define SEND_CTXT_CHECK_ENABLE (TXE + 0x000000100080)
#define SEND_CTXT_CHECK_ENABLE_CHECK_BYPASS_VL_MAPPING_SMASK 0x80ull
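
The new SEND_CTRL field follows the driver's usual MASK/SHIFT/SMASK triple: SMASK is the mask pre-shifted into register position. A hypothetical read-modify-write marking every VL at or above num_vls as unsupported (illustrative only, not code from this patch):

    u64 reg = read_csr(dd, SEND_CTRL);

    reg &= ~SEND_CTRL_UNSUPPORTED_VL_SMASK;
    reg |= (((u64)0xFF << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK)
                    << SEND_CTRL_UNSUPPORTED_VL_SHIFT;
    write_csr(dd, SEND_CTRL, reg);
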
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 1fc75647e47b..c22ebc774a6a 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -681,7 +681,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
HFI1_RCVCTRL_TAILUPD_DIS |
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
- HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+ HFI1_RCVCTRL_NO_EGR_DROP_DIS |
+ HFI1_RCVCTRL_URGENT_DIS, uctxt);
/* Clear the context's J_KEY */
hfi1_clear_ctxt_jkey(dd, uctxt);
/*
@@ -1096,6 +1097,7 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_set_ctxt_jkey(uctxt->dd, uctxt, uctxt->jkey);
rcvctrl_ops = HFI1_RCVCTRL_CTXT_ENB;
+ rcvctrl_ops |= HFI1_RCVCTRL_URGENT_ENB;
if (HFI1_CAP_UGET_MASK(uctxt->flags, HDRSUPP))
rcvctrl_ops |= HFI1_RCVCTRL_TIDFLOW_ENB;
/*
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index d9470317983f..1401b6ea4a28 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -80,6 +80,7 @@
#include "qsfp.h"
#include "platform.h"
#include "affinity.h"
+#include "msix.h"
/* bumped 1 from s/w major version of TrueScale */
#define HFI1_CHIP_VERS_MAJ 3U
@@ -620,6 +621,8 @@ struct rvt_sge_state;
#define HFI1_RCVCTRL_NO_RHQ_DROP_DIS 0x8000
#define HFI1_RCVCTRL_NO_EGR_DROP_ENB 0x10000
#define HFI1_RCVCTRL_NO_EGR_DROP_DIS 0x20000
+#define HFI1_RCVCTRL_URGENT_ENB 0x40000
+#define HFI1_RCVCTRL_URGENT_DIS 0x80000
/* partition enforcement flags */
#define HFI1_PART_ENFORCE_IN 0x1
@@ -667,6 +670,14 @@ struct hfi1_msix_entry {
struct irq_affinity_notify notify;
};
+struct hfi1_msix_info {
+ /* lock to synchronize in_use_msix access */
+ spinlock_t msix_lock;
+ DECLARE_BITMAP(in_use_msix, CCE_NUM_MSIX_VECTORS);
+ struct hfi1_msix_entry *msix_entries;
+ u16 max_requested;
+};
+
/* per-SL CCA information */
struct cca_timer {
struct hrtimer hrtimer;
@@ -992,7 +1003,6 @@ struct hfi1_vnic_data {
struct idr vesw_idr;
u8 rmt_start;
u8 num_ctxt;
- u32 msix_idx;
};
struct hfi1_vnic_vport_info;
@@ -1205,11 +1215,6 @@ struct hfi1_devdata {
struct diag_client *diag_client;
- /* MSI-X information */
- struct hfi1_msix_entry *msix_entries;
- u32 num_msix_entries;
- u32 first_dyn_msix_idx;
-
/* general interrupt: mask of handled interrupts */
u64 gi_mask[CCE_NUM_INT_CSRS];
@@ -1223,6 +1228,9 @@ struct hfi1_devdata {
*/
struct timer_list synth_stats_timer;
+ /* MSI-X information */
+ struct hfi1_msix_info msix_info;
+
/*
* device counters
*/
@@ -1349,6 +1357,8 @@ struct hfi1_devdata {
/* vnic data */
struct hfi1_vnic_data vnic;
+ /* Lock to protect IRQ SRC register access */
+ spinlock_t irq_src_lock;
};
static inline bool hfi1_vnic_is_rsm_full(struct hfi1_devdata *dd, int spare)
@@ -1431,9 +1441,6 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *rcd, int thread);
void set_all_slowpath(struct hfi1_devdata *dd);
-void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd);
-void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd);
-void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd);
extern const struct pci_device_id hfi1_pci_tbl[];
void hfi1_make_ud_req_9B(struct rvt_qp *qp,
@@ -1887,10 +1894,8 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
#define HFI1_CTXT_WAITING_URG 4
/* free up any allocated data at closes */
-struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
- const struct pci_device_id *ent);
+int hfi1_init_dd(struct hfi1_devdata *dd);
void hfi1_free_devdata(struct hfi1_devdata *dd);
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra);
/* LED beaconing functions */
void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
@@ -1963,6 +1968,7 @@ static inline u32 get_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
*/
extern const char ib_hfi1_version[];
+extern const struct attribute_group ib_hfi1_attr_group;
int hfi1_device_create(struct hfi1_devdata *dd);
void hfi1_device_remove(struct hfi1_devdata *dd);
@@ -1974,16 +1980,15 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
/* Hook for sysfs read of QSFP */
int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
-void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
+int hfi1_pcie_init(struct hfi1_devdata *dd);
void hfi1_pcie_cleanup(struct pci_dev *pdev);
int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
int pcie_speeds(struct hfi1_devdata *dd);
-int request_msix(struct hfi1_devdata *dd, u32 msireq);
int restore_pci_variables(struct hfi1_devdata *dd);
int save_pci_variables(struct hfi1_devdata *dd);
int do_pcie_gen3_transition(struct hfi1_devdata *dd);
+void tune_pcie_caps(struct hfi1_devdata *dd);
int parse_platform_config(struct hfi1_devdata *dd);
int get_platform_config_field(struct hfi1_devdata *dd,
enum platform_config_table_type_encoding
@@ -2124,19 +2129,6 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd)
return base_sdma_integrity;
}
-/*
- * hfi1_early_err is used (only!) to print early errors before devdata is
- * allocated, or when dd->pcidev may not be valid, and at the tail end of
- * cleanup when devdata may have been freed, etc. hfi1_dev_porterr is
- * the same as dd_dev_err, but is used when the message really needs
- * the IB port# to be definitive as to what's happening..
- */
-#define hfi1_early_err(dev, fmt, ...) \
- dev_err(dev, fmt, ##__VA_ARGS__)
-
-#define hfi1_early_info(dev, fmt, ...) \
- dev_info(dev, fmt, ##__VA_ARGS__)
-
#define dd_dev_emerg(dd, fmt, ...) \
dev_emerg(&(dd)->pcidev->dev, "%s: " fmt, \
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), ##__VA_ARGS__)
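
struct hfi1_msix_info centralizes what used to be loose devdata fields: a bitmap of in-use vectors guarded by msix_lock. The new msix.c itself is not part of these hunks; a minimal sketch of how a slot might be claimed under the stated locking rule (helper name hypothetical, fields as declared above):

    static int msix_claim_slot(struct hfi1_msix_info *info)
    {
            int nr;

            spin_lock(&info->msix_lock);
            nr = find_first_zero_bit(info->in_use_msix, info->max_requested);
            if (nr < info->max_requested)
                    set_bit(nr, info->in_use_msix);
            else
                    nr = -ENOSPC;
            spin_unlock(&info->msix_lock);

            return nr;
    }
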
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 758d273c32cf..09044905284f 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -83,6 +83,8 @@
#define HFI1_MIN_EAGER_BUFFER_SIZE (4 * 1024) /* 4KB */
#define HFI1_MAX_EAGER_BUFFER_SIZE (256 * 1024) /* 256KB */
+#define NUM_IB_PORTS 1
+
/*
* Number of user receive contexts we are configured to use (to allow for more
* pio buffers per ctxt, etc.) Zero means use one user context per CPU.
@@ -654,9 +656,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
ppd->part_enforce |= HFI1_PART_ENFORCE_IN;
if (loopback) {
- hfi1_early_err(&pdev->dev,
- "Faking data partition 0x8001 in idx %u\n",
- !default_pkey_idx);
+ dd_dev_err(dd, "Faking data partition 0x8001 in idx %u\n",
+ !default_pkey_idx);
ppd->pkeys[!default_pkey_idx] = 0x8001;
}
@@ -702,9 +703,7 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
return;
bail:
-
- hfi1_early_err(&pdev->dev,
- "Congestion Control Agent disabled for port %d\n", port);
+ dd_dev_err(dd, "Congestion Control Agent disabled for port %d\n", port);
}
/*
@@ -833,6 +832,23 @@ wq_error:
}
/**
+ * enable_general_intr() - Enable the IRQs that will be handled by the
+ * general interrupt handler.
+ * @dd: valid devdata
+ *
+ */
+static void enable_general_intr(struct hfi1_devdata *dd)
+{
+ set_intr_bits(dd, CCE_ERR_INT, MISC_ERR_INT, true);
+ set_intr_bits(dd, PIO_ERR_INT, TXE_ERR_INT, true);
+ set_intr_bits(dd, IS_SENDCTXT_ERR_START, IS_SENDCTXT_ERR_END, true);
+ set_intr_bits(dd, PBC_INT, GPIO_ASSERT_INT, true);
+ set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);
+ set_intr_bits(dd, IS_DC_START, IS_DC_END, true);
+ set_intr_bits(dd, IS_SENDCREDIT_START, IS_SENDCREDIT_END, true);
+}
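
/*
 * Illustration only: enable_general_intr() is built entirely from
 * set_intr_bits(dd, first, last, set), which unmasks (true) or masks
 * (false) an inclusive range of interrupt sources. Assuming that
 * signature, a single source toggles like so:
 */
static void example_toggle_tcrit(struct hfi1_devdata *dd)
{
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, true);	/* unmask */
	set_intr_bits(dd, TCRIT_INT, TCRIT_INT, false);	/* mask again */
}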
+
+/**
* hfi1_init - do the actual initialization sequence on the chip
* @dd: the hfi1_ib device
* @reinit: re-initializing, so don't allocate new memory
@@ -916,6 +932,7 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit)
"failed to allocate kernel ctxt's rcvhdrq and/or egr bufs\n");
ret = lastfail;
}
+ /* enable IRQ */
hfi1_rcd_put(rcd);
}
@@ -954,7 +971,8 @@ done:
HFI1_STATUS_INITTED;
if (!ret) {
/* enable all interrupts from the chip */
- set_intr_state(dd, 1);
+ enable_general_intr(dd);
+ init_qsfp_int(dd);
/* chip is OK for user apps; mark it as initialized */
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
@@ -1051,9 +1069,9 @@ static void shutdown_device(struct hfi1_devdata *dd)
}
dd->flags &= ~HFI1_INITTED;
- /* mask and clean up interrupts, but not errors */
- set_intr_state(dd, 0);
- hfi1_clean_up_interrupts(dd);
+ /* mask and clean up interrupts */
+ set_intr_bits(dd, IS_FIRST_SOURCE, IS_LAST_SOURCE, false);
+ msix_clean_up_interrupts(dd);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
ppd = dd->pport + pidx;
@@ -1246,15 +1264,19 @@ void hfi1_free_devdata(struct hfi1_devdata *dd)
kobject_put(&dd->kobj);
}
-/*
- * Allocate our primary per-unit data structure. Must be done via verbs
- * allocator, because the verbs cleanup process both does cleanup and
- * free of the data structure.
+/**
+ * hfi1_alloc_devdata - Allocate our primary per-unit data structure.
+ * @pdev: Valid PCI device
+ * @extra: How many bytes to alloc past the default
+ *
+ * Must be done via verbs allocator, because the verbs cleanup process
+ * both does cleanup and free of the data structure.
* "extra" is for chip-specific data.
*
* Use the idr mechanism to get a unit number for this unit.
*/
-struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
+static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev,
+ size_t extra)
{
unsigned long flags;
struct hfi1_devdata *dd;
@@ -1287,8 +1309,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
idr_preload_end();
if (ret < 0) {
- hfi1_early_err(&pdev->dev,
- "Could not allocate unit ID: error %d\n", -ret);
+ dev_err(&pdev->dev,
+ "Could not allocate unit ID: error %d\n", -ret);
goto bail;
}
rvt_set_ibdev_name(&dd->verbs_dev.rdi, "%s_%d", class_name(), dd->unit);
@@ -1309,6 +1331,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
spin_lock_init(&dd->pio_map_lock);
mutex_init(&dd->dc8051_lock);
init_waitqueue_head(&dd->event_queue);
+ spin_lock_init(&dd->irq_src_lock);
dd->int_counter = alloc_percpu(u64);
if (!dd->int_counter) {
@@ -1481,9 +1504,6 @@ static int __init hfi1_mod_init(void)
idr_init(&hfi1_unit_table);
hfi1_dbg_init();
- ret = hfi1_wss_init();
- if (ret < 0)
- goto bail_wss;
ret = pci_register_driver(&hfi1_pci_driver);
if (ret < 0) {
pr_err("Unable to register driver: error %d\n", -ret);
@@ -1492,8 +1512,6 @@ static int __init hfi1_mod_init(void)
goto bail; /* all OK */
bail_dev:
- hfi1_wss_exit();
-bail_wss:
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
dev_cleanup();
@@ -1510,7 +1528,6 @@ static void __exit hfi1_mod_cleanup(void)
{
pci_unregister_driver(&hfi1_pci_driver);
node_affinity_destroy_all();
- hfi1_wss_exit();
hfi1_dbg_exit();
idr_destroy(&hfi1_unit_table);
@@ -1604,23 +1621,23 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
hfi1_free_devdata(dd);
}
-static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt)
+static int init_validate_rcvhdrcnt(struct hfi1_devdata *dd, uint thecnt)
{
if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev, "Receive header queue count too small\n");
+ dd_dev_err(dd, "Receive header queue count too small\n");
return -EINVAL;
}
if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) {
- hfi1_early_err(dev,
- "Receive header queue count cannot be greater than %u\n",
- HFI1_MAX_HDRQ_EGRBUF_CNT);
+ dd_dev_err(dd,
+ "Receive header queue count cannot be greater than %u\n",
+ HFI1_MAX_HDRQ_EGRBUF_CNT);
return -EINVAL;
}
if (thecnt % HDRQ_INCREMENT) {
- hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n",
- thecnt, HDRQ_INCREMENT);
+ dd_dev_err(dd, "Receive header queue count %d must be divisible by %lu\n",
+ thecnt, HDRQ_INCREMENT);
return -EINVAL;
}
@@ -1639,22 +1656,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Validate dev ids */
if (!(ent->device == PCI_DEVICE_ID_INTEL0 ||
ent->device == PCI_DEVICE_ID_INTEL1)) {
- hfi1_early_err(&pdev->dev,
- "Failing on unknown Intel deviceid 0x%x\n",
- ent->device);
+ dev_err(&pdev->dev, "Failing on unknown Intel deviceid 0x%x\n",
+ ent->device);
ret = -ENODEV;
goto bail;
}
+ /* Allocate the dd so we can get to work */
+ dd = hfi1_alloc_devdata(pdev, NUM_IB_PORTS *
+ sizeof(struct hfi1_pportdata));
+ if (IS_ERR(dd)) {
+ ret = PTR_ERR(dd);
+ goto bail;
+ }
+
/* Validate some global module parameters */
- ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt);
+ ret = init_validate_rcvhdrcnt(dd, rcvhdrcnt);
if (ret)
goto bail;
/* use the encoding function as a sanitization check */
if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) {
- hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n",
- hfi1_hdrq_entsize);
+ dd_dev_err(dd, "Invalid HdrQ Entry size %u\n",
+ hfi1_hdrq_entsize);
ret = -EINVAL;
goto bail;
}
@@ -1676,10 +1700,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
clamp_val(eager_buffer_size,
MIN_EAGER_BUFFER * 8,
MAX_EAGER_BUFFER_TOTAL);
- hfi1_early_info(&pdev->dev, "Eager buffer size %u\n",
- eager_buffer_size);
+ dd_dev_info(dd, "Eager buffer size %u\n",
+ eager_buffer_size);
} else {
- hfi1_early_err(&pdev->dev, "Invalid Eager buffer size of 0\n");
+ dd_dev_err(dd, "Invalid Eager buffer size of 0\n");
ret = -EINVAL;
goto bail;
}
@@ -1687,7 +1711,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* restrict value of hfi1_rcvarr_split */
hfi1_rcvarr_split = clamp_val(hfi1_rcvarr_split, 0, 100);
- ret = hfi1_pcie_init(pdev, ent);
+ ret = hfi1_pcie_init(dd);
if (ret)
goto bail;
@@ -1695,12 +1719,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* Do device-specific initialization, function table setup, dd
* allocation, etc.
*/
- dd = hfi1_init_dd(pdev, ent);
-
- if (IS_ERR(dd)) {
- ret = PTR_ERR(dd);
+ ret = hfi1_init_dd(dd);
+ if (ret)
goto clean_bail; /* error already printed */
- }
ret = create_workqueues(dd);
if (ret)
@@ -1731,7 +1752,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
if (initfail || ret) {
- hfi1_clean_up_interrupts(dd);
+ msix_clean_up_interrupts(dd);
stop_timers(dd);
flush_workqueue(ib_wq);
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
new file mode 100644
index 000000000000..582f1ba136ff
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/iowait.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#include "iowait.h"
+#include "trace_iowait.h"
+
+void iowait_set_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_set(wait, flag);
+ set_bit(flag, &wait->flags);
+}
+
+bool iowait_flag_set(struct iowait *wait, u32 flag)
+{
+ return test_bit(flag, &wait->flags);
+}
+
+inline void iowait_clear_flag(struct iowait *wait, u32 flag)
+{
+ trace_hfi1_iowait_clear(wait, flag);
+ clear_bit(flag, &wait->flags);
+}
+
+/**
+ * iowait_init() - initialize wait structure
+ * @wait: wait struct to initialize
+ * @tx_limit: limit for overflow queuing
+ * @func: restart function for workqueue
+ * @sleep: sleep function for no space
+ * @resume: wakeup function for no space
+ *
+ * This function initializes the iowait
+ * structure embedded in the QP or PQ.
+ *
+ */
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait))
+{
+ int i;
+
+ wait->count = 0;
+ INIT_LIST_HEAD(&wait->list);
+ init_waitqueue_head(&wait->wait_dma);
+ init_waitqueue_head(&wait->wait_pio);
+ atomic_set(&wait->sdma_busy, 0);
+ atomic_set(&wait->pio_busy, 0);
+ wait->tx_limit = tx_limit;
+ wait->sleep = sleep;
+ wait->wakeup = wakeup;
+ wait->sdma_drained = sdma_drained;
+ wait->flags = 0;
+ for (i = 0; i < IOWAIT_SES; i++) {
+ wait->wait[i].iow = wait;
+ INIT_LIST_HEAD(&wait->wait[i].tx_head);
+ if (i == IOWAIT_IB_SE)
+ INIT_WORK(&wait->wait[i].iowork, func);
+ else
+ INIT_WORK(&wait->wait[i].iowork, tidfunc);
+ }
+}
+
+/**
+ * iowait_cancel_work - cancel all work in iowait
+ * @w: the iowait struct
+ */
+void iowait_cancel_work(struct iowait *w)
+{
+ cancel_work_sync(&iowait_get_ib_work(w)->iowork);
+ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+}
+
+/**
+ * iowait_set_work_flag - set work flag based on leg
+ * @w - the iowait work struct
+ */
+int iowait_set_work_flag(struct iowait_work *w)
+{
+ if (w == &w->iow->wait[IOWAIT_IB_SE]) {
+ iowait_set_flag(w->iow, IOWAIT_PENDING_IB);
+ return IOWAIT_IB_SE;
+ }
+ iowait_set_flag(w->iow, IOWAIT_PENDING_TID);
+ return IOWAIT_TID_SE;
+}
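
/*
 * A minimal caller-side sketch of the new two-leg iowait_init(); all
 * example_* names are hypothetical stubs. The in-tree caller is
 * qp_priv_alloc() in qp.c below, which passes NULL for the TID leg
 * exactly as shown here.
 */
static void example_ib_work(struct work_struct *work) { }
static int example_sleep(struct sdma_engine *sde, struct iowait_work *wait,
			 struct sdma_txreq *tx, uint seq, bool pkts_sent)
{
	return 0;
}
static void example_wakeup(struct iowait *wait, int reason) { }
static void example_drained(struct iowait *wait) { }

static void example_iowait_setup(struct iowait *w)
{
	/* tx_limit of 1 mirrors qp_priv_alloc(); TID work left NULL */
	iowait_init(w, 1, example_ib_work, NULL,
		    example_sleep, example_wakeup, example_drained);
}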
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 3d9c32c7c340..23a58ac0d47c 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_IOWAIT_H
#define _HFI1_IOWAIT_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -49,6 +49,7 @@
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/wait.h>
#include <linux/sched.h>
#include "sdma_txreq.h"
@@ -59,16 +60,47 @@
*/
typedef void (*restart_t)(struct work_struct *work);
+#define IOWAIT_PENDING_IB 0x0
+#define IOWAIT_PENDING_TID 0x1
+
+/*
+ * A QP can have multiple Send Engines (SEs).
+ *
+ * The current use case is for supporting a TID RDMA
+ * packet build/xmit mechanism independent from verbs.
+ */
+#define IOWAIT_SES 2
+#define IOWAIT_IB_SE 0
+#define IOWAIT_TID_SE 1
+
struct sdma_txreq;
struct sdma_engine;
/**
- * struct iowait - linkage for delayed progress/waiting
+ * struct iowait_work - linkage for a send engine work item
+ * @iowork: the work struct
+ * @tx_head: list of prebuilt packets
+ * @iow: the parent iowait structure
+ *
+ * This structure holds the work item (process) specific
+ * details associated with each of the two SEs of the
+ * QP.
+ *
+ * The workstruct and the queued TXs are unique to each
+ * SE.
+ */
+struct iowait;
+struct iowait_work {
+ struct work_struct iowork;
+ struct list_head tx_head;
+ struct iowait *iow;
+};
+
+/**
+ * struct iowait - linkage for delayed progress/waiting
* @list: used to add/insert into QP/PQ wait lists
- * @lock: uses to record the list head lock
* @tx_head: overflow list of sdma_txreq's
* @sleep: no space callback
* @wakeup: space callback wakeup
* @sdma_drained: sdma count drained
+ * @lock: lock protected head of wait queue
* @iowork: workqueue overhead
* @wait_dma: wait for sdma_busy == 0
* @wait_pio: wait for pio_busy == 0
@@ -76,6 +108,8 @@ struct sdma_engine;
* @count: total number of descriptors in tx_head'ed list
* @tx_limit: limit for overflow queuing
* @tx_count: number of tx entry's in tx_head'ed list
+ * @flags: wait flags (one per QP)
+ * @wait: SE array
*
* This is to be embedded in user's state structure
* (QP or PQ).
@@ -98,13 +132,11 @@ struct sdma_engine;
* Waiters explicity know that, but the destroy
* code that unwaits QPs does not.
*/
-
struct iowait {
struct list_head list;
- struct list_head tx_head;
int (*sleep)(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
uint seq,
bool pkts_sent
@@ -112,7 +144,6 @@ struct iowait {
void (*wakeup)(struct iowait *wait, int reason);
void (*sdma_drained)(struct iowait *wait);
seqlock_t *lock;
- struct work_struct iowork;
wait_queue_head_t wait_dma;
wait_queue_head_t wait_pio;
atomic_t sdma_busy;
@@ -121,63 +152,37 @@ struct iowait {
u32 tx_limit;
u32 tx_count;
u8 starved_cnt;
+ unsigned long flags;
+ struct iowait_work wait[IOWAIT_SES];
};
#define SDMA_AVAIL_REASON 0
-/**
- * iowait_init() - initialize wait structure
- * @wait: wait struct to initialize
- * @tx_limit: limit for overflow queuing
- * @func: restart function for workqueue
- * @sleep: sleep function for no space
- * @resume: wakeup function for no space
- *
- * This function initializes the iowait
- * structure embedded in the QP or PQ.
- *
- */
+void iowait_set_flag(struct iowait *wait, u32 flag);
+bool iowait_flag_set(struct iowait *wait, u32 flag);
+void iowait_clear_flag(struct iowait *wait, u32 flag);
-static inline void iowait_init(
- struct iowait *wait,
- u32 tx_limit,
- void (*func)(struct work_struct *work),
- int (*sleep)(
- struct sdma_engine *sde,
- struct iowait *wait,
- struct sdma_txreq *tx,
- uint seq,
- bool pkts_sent),
- void (*wakeup)(struct iowait *wait, int reason),
- void (*sdma_drained)(struct iowait *wait))
-{
- wait->count = 0;
- wait->lock = NULL;
- INIT_LIST_HEAD(&wait->list);
- INIT_LIST_HEAD(&wait->tx_head);
- INIT_WORK(&wait->iowork, func);
- init_waitqueue_head(&wait->wait_dma);
- init_waitqueue_head(&wait->wait_pio);
- atomic_set(&wait->sdma_busy, 0);
- atomic_set(&wait->pio_busy, 0);
- wait->tx_limit = tx_limit;
- wait->sleep = sleep;
- wait->wakeup = wakeup;
- wait->sdma_drained = sdma_drained;
-}
+void iowait_init(struct iowait *wait, u32 tx_limit,
+ void (*func)(struct work_struct *work),
+ void (*tidfunc)(struct work_struct *work),
+ int (*sleep)(struct sdma_engine *sde,
+ struct iowait_work *wait,
+ struct sdma_txreq *tx,
+ uint seq,
+ bool pkts_sent),
+ void (*wakeup)(struct iowait *wait, int reason),
+ void (*sdma_drained)(struct iowait *wait));
/**
- * iowait_schedule() - initialize wait structure
+ * iowait_schedule() - schedule the default send engine work
* @wait: wait struct to schedule
* @wq: workqueue for schedule
* @cpu: cpu
*/
-static inline void iowait_schedule(
- struct iowait *wait,
- struct workqueue_struct *wq,
- int cpu)
+static inline bool iowait_schedule(struct iowait *wait,
+ struct workqueue_struct *wq, int cpu)
{
- queue_work_on(cpu, wq, &wait->iowork);
+ return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
}
/**
@@ -228,6 +233,8 @@ static inline void iowait_sdma_add(struct iowait *wait, int count)
*/
static inline int iowait_sdma_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->sdma_busy);
}
@@ -267,11 +274,13 @@ static inline void iowait_pio_inc(struct iowait *wait)
}
/**
- * iowait_sdma_dec - note pio complete
+ * iowait_pio_dec - note pio complete
* @wait: iowait structure
*/
static inline int iowait_pio_dec(struct iowait *wait)
{
+ if (!wait)
+ return 0;
return atomic_dec_and_test(&wait->pio_busy);
}
@@ -293,9 +302,9 @@ static inline void iowait_drain_wakeup(struct iowait *wait)
/**
* iowait_get_txhead() - get packet off of iowait list
*
- * @wait wait struture
+ * @wait: iowait_work structure
*/
-static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
+static inline struct sdma_txreq *iowait_get_txhead(struct iowait_work *wait)
{
struct sdma_txreq *tx = NULL;
@@ -309,6 +318,28 @@ static inline struct sdma_txreq *iowait_get_txhead(struct iowait *wait)
return tx;
}
+static inline u16 iowait_get_desc(struct iowait_work *w)
+{
+ u16 num_desc = 0;
+ struct sdma_txreq *tx = NULL;
+
+ if (!list_empty(&w->tx_head)) {
+ tx = list_first_entry(&w->tx_head, struct sdma_txreq,
+ list);
+ num_desc = tx->num_desc;
+ }
+ return num_desc;
+}
+
+static inline u32 iowait_get_all_desc(struct iowait *w)
+{
+ u32 num_desc = 0;
+
+ num_desc = iowait_get_desc(&w->wait[IOWAIT_IB_SE]);
+ num_desc += iowait_get_desc(&w->wait[IOWAIT_TID_SE]);
+ return num_desc;
+}
+
/**
* iowait_queue - Put the iowait on a wait queue
* @pkts_sent: have some packets been sent before queuing?
@@ -372,12 +403,57 @@ static inline void iowait_starve_find_max(struct iowait *w, u8 *max,
}
/**
- * iowait_packet_queued() - determine if a packet is already built
- * @wait: the wait structure
+ * iowait_packet_queued() - determine if a packet is queued
+ * @wait: the iowait_work structure
*/
-static inline bool iowait_packet_queued(struct iowait *wait)
+static inline bool iowait_packet_queued(struct iowait_work *wait)
{
return !list_empty(&wait->tx_head);
}
+/**
+ * iowait_inc_wait_count - increment wait counts
+ * @w: the iowait work struct
+ * @n: the count
+ */
+static inline void iowait_inc_wait_count(struct iowait_work *w, u16 n)
+{
+ if (!w)
+ return;
+ w->iow->tx_count++;
+ w->iow->count += n;
+}
+
+/**
+ * iowait_get_tid_work - return iowait_work for tid SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_tid_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_TID_SE];
+}
+
+/**
+ * iowait_get_ib_work - return iowait_work for ib SE
+ * @w: the iowait struct
+ */
+static inline struct iowait_work *iowait_get_ib_work(struct iowait *w)
+{
+ return &w->wait[IOWAIT_IB_SE];
+}
+
+/**
+ * iowait_ioww_to_iow - return iowait given iowait_work
+ * @w: the iowait_work struct
+ */
+static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
+{
+ if (likely(w))
+ return w->iow;
+ return NULL;
+}
+
+void iowait_cancel_work(struct iowait *w);
+int iowait_set_work_flag(struct iowait_work *w);
+
#endif
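
/*
 * A usage sketch (hypothetical helper): the per-SE accessors above
 * compose when both send engines must be inspected, mirroring how
 * iowait_get_all_desc() walks both legs.
 */
static inline bool example_any_packet_queued(struct iowait *w)
{
	return iowait_packet_queued(iowait_get_ib_work(w)) ||
	       iowait_packet_queued(iowait_get_tid_work(w));
}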
diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
index 0307405491e0..88a0cf930136 100644
--- a/drivers/infiniband/hw/hfi1/mad.c
+++ b/drivers/infiniband/hw/hfi1/mad.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015-2017 Intel Corporation.
+ * Copyright(c) 2015-2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -4836,7 +4836,7 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
int ret;
int pkey_idx;
int local_mad = 0;
- u32 resp_len = 0;
+ u32 resp_len = in_wc->byte_len - sizeof(*in_grh);
struct hfi1_ibport *ibp = to_iport(ibdev, port);
pkey_idx = hfi1_lookup_pkey_idx(ibp, LIM_MGMT_P_KEY);
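
/*
 * Note on the resp_len change above: the completion's byte_len covers
 * the GRH plus the MAD payload, so byte_len minus sizeof(*in_grh)
 * seeds resp_len with the true incoming MAD size instead of 0.
 */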
diff --git a/drivers/infiniband/hw/hfi1/msix.c b/drivers/infiniband/hw/hfi1/msix.c
new file mode 100644
index 000000000000..d920b165d696
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/msix.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "hfi.h"
+#include "affinity.h"
+#include "sdma.h"
+
+/**
+ * msix_initialize() - Calculate, request and configure MSIx IRQs
+ * @dd: valid hfi1 devdata
+ *
+ */
+int msix_initialize(struct hfi1_devdata *dd)
+{
+ u32 total;
+ int ret;
+ struct hfi1_msix_entry *entries;
+
+ /*
+ * MSIx interrupt count:
+ * one for the general, "slow path" interrupt
+ * one per used SDMA engine
+ * one per kernel receive context
+ * one for each VNIC context
+ * ...any new IRQs should be added here.
+ */
+ total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
+
+ if (total >= CCE_NUM_MSIX_VECTORS)
+ return -EINVAL;
+
+ ret = pci_alloc_irq_vectors(dd->pcidev, total, total, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", ret);
+ return ret;
+ }
+
+ entries = kcalloc(total, sizeof(*dd->msix_info.msix_entries),
+ GFP_KERNEL);
+ if (!entries) {
+ pci_free_irq_vectors(dd->pcidev);
+ return -ENOMEM;
+ }
+
+ dd->msix_info.msix_entries = entries;
+ spin_lock_init(&dd->msix_info.msix_lock);
+ bitmap_zero(dd->msix_info.in_use_msix, total);
+ dd->msix_info.max_requested = total;
+ dd_dev_info(dd, "%u MSI-X interrupts allocated\n", total);
+
+ return 0;
+}
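
/*
 * A worked example of the vector budget, under an assumed
 * configuration: 1 general + 16 SDMA engines + 8 kernel receive
 * contexts + 0 VNIC contexts = 25 vectors, which must stay strictly
 * below CCE_NUM_MSIX_VECTORS or msix_initialize() returns -EINVAL
 * before touching the PCI core.
 */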
+
+/**
+ * msix_request_irq() - Allocate a free MSIx IRQ
+ * @dd: valid devdata
+ * @arg: context information for the IRQ
+ * @handler: IRQ handler
+ * @thread: IRQ thread handler (could be NULL)
+ * @idx: zero-based index if multiple devices are needed
+ * @type: affinity IRQ type
+ *
+ * Allocate an MSIx vector if available, then create the appropriate
+ * metadata needed to keep track of the PCI IRQ request.
+ *
+ * Return:
+ * < 0 Error
+ * >= 0 MSIx vector
+ *
+ */
+static int msix_request_irq(struct hfi1_devdata *dd, void *arg,
+ irq_handler_t handler, irq_handler_t thread,
+ u32 idx, enum irq_type type)
+{
+ unsigned long nr;
+ int irq;
+ int ret;
+ const char *err_info;
+ char name[MAX_NAME_SIZE];
+ struct hfi1_msix_entry *me;
+
+ /* Allocate an MSIx vector */
+ spin_lock(&dd->msix_info.msix_lock);
+ nr = find_first_zero_bit(dd->msix_info.in_use_msix,
+ dd->msix_info.max_requested);
+ if (nr < dd->msix_info.max_requested)
+ __set_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+
+ if (nr == dd->msix_info.max_requested)
+ return -ENOSPC;
+
+	/* Type-specific verification; determine the IRQ name */
+ switch (type) {
+ case IRQ_GENERAL:
+ /* general interrupt must be MSIx vector 0 */
+ if (nr) {
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ dd_dev_err(dd, "Invalid index %lu for GENERAL IRQ\n",
+ nr);
+ return -EINVAL;
+ }
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d", dd->unit);
+ err_info = "general";
+ break;
+ case IRQ_SDMA:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d sdma%d",
+ dd->unit, idx);
+ err_info = "sdma";
+ break;
+ case IRQ_RCVCTXT:
+ snprintf(name, sizeof(name), DRIVER_NAME "_%d kctxt%d",
+ dd->unit, idx);
+ err_info = "receive context";
+ break;
+ case IRQ_OTHER:
+ default:
+ return -EINVAL;
+ }
+ name[sizeof(name) - 1] = 0;
+
+ irq = pci_irq_vector(dd->pcidev, nr);
+ ret = pci_request_irq(dd->pcidev, nr, handler, thread, arg, name);
+ if (ret) {
+ dd_dev_err(dd,
+ "%s: request for IRQ %d failed, MSIx %d, err %d\n",
+ err_info, irq, idx, ret);
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(nr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+ return ret;
+ }
+
+	/*
+	 * Assign arg only after the pci_request_irq() call succeeds, so
+	 * the free path (which keys off me->arg) cleans up exactly the
+	 * IRQs that were actually requested.
+	 */
+ me = &dd->msix_info.msix_entries[nr];
+ me->irq = irq;
+ me->arg = arg;
+ me->type = type;
+
+ /* This is a request, so a failure is not fatal */
+ ret = hfi1_get_irq_affinity(dd, me);
+ if (ret)
+ dd_dev_err(dd, "unable to pin IRQ %d\n", ret);
+
+ return nr;
+}
+
+/**
+ * msix_request_rcd_irq() - Helper function for RCVAVAIL IRQs
+ * @rcd: valid rcd context
+ *
+ */
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd)
+{
+ int nr;
+
+ nr = msix_request_irq(rcd->dd, rcd, receive_context_interrupt,
+ receive_context_thread, rcd->ctxt, IRQ_RCVCTXT);
+ if (nr < 0)
+ return nr;
+
+ /*
+ * Set the interrupt register and mask for this
+ * context's interrupt.
+ */
+ rcd->ireg = (IS_RCVAVAIL_START + rcd->ctxt) / 64;
+ rcd->imask = ((u64)1) << ((IS_RCVAVAIL_START + rcd->ctxt) % 64);
+ rcd->msix_intr = nr;
+ remap_intr(rcd->dd, IS_RCVAVAIL_START + rcd->ctxt, nr);
+
+ return 0;
+}
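
/*
 * A worked example of the register math above: if IS_RCVAVAIL_START
 * were 192 (a value assumed purely for illustration), receive context
 * 5 would map to source bit 197, giving rcd->ireg = 197 / 64 = 3 and
 * rcd->imask = 1ULL << (197 % 64) = 1ULL << 5.
 */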
+
+/**
+ * msix_request_sdma_irq() - Helper for getting SDMA IRQ resources
+ * @sde: valid sdma engine
+ *
+ */
+int msix_request_sdma_irq(struct sdma_engine *sde)
+{
+ int nr;
+
+ nr = msix_request_irq(sde->dd, sde, sdma_interrupt, NULL,
+ sde->this_idx, IRQ_SDMA);
+ if (nr < 0)
+ return nr;
+ sde->msix_intr = nr;
+ remap_sdma_interrupts(sde->dd, sde->this_idx, nr);
+
+ return 0;
+}
+
+/**
+ * enable_sdma_srcs() - Helper to enable SDMA IRQ sources
+ * @dd: valid devdata structure
+ * @i: index of SDMA engine
+ */
+static void enable_sdma_srcs(struct hfi1_devdata *dd, int i)
+{
+ set_intr_bits(dd, IS_SDMA_START + i, IS_SDMA_START + i, true);
+ set_intr_bits(dd, IS_SDMA_PROGRESS_START + i,
+ IS_SDMA_PROGRESS_START + i, true);
+ set_intr_bits(dd, IS_SDMA_IDLE_START + i, IS_SDMA_IDLE_START + i, true);
+ set_intr_bits(dd, IS_SDMAENG_ERR_START + i, IS_SDMAENG_ERR_START + i,
+ true);
+}
+
+/**
+ * msix_request_irqs() - Allocate all MSIx IRQs
+ * @dd: valid devdata structure
+ *
+ * Helper function to request the used MSIx IRQs.
+ *
+ */
+int msix_request_irqs(struct hfi1_devdata *dd)
+{
+ int i;
+ int ret;
+
+ ret = msix_request_irq(dd, dd, general_interrupt, NULL, 0, IRQ_GENERAL);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < dd->num_sdma; i++) {
+ struct sdma_engine *sde = &dd->per_sdma[i];
+
+ ret = msix_request_sdma_irq(sde);
+ if (ret)
+ return ret;
+ enable_sdma_srcs(sde->dd, i);
+ }
+
+ for (i = 0; i < dd->n_krcv_queues; i++) {
+ struct hfi1_ctxtdata *rcd = hfi1_rcd_get_by_index_safe(dd, i);
+
+ if (rcd)
+ ret = msix_request_rcd_irq(rcd);
+ hfi1_rcd_put(rcd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * msix_free_irq() - Free the specified MSIx resources and IRQ
+ * @dd: valid devdata
+ * @msix_intr: MSIx vector to free.
+ *
+ */
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr)
+{
+ struct hfi1_msix_entry *me;
+
+ if (msix_intr >= dd->msix_info.max_requested)
+ return;
+
+ me = &dd->msix_info.msix_entries[msix_intr];
+
+ if (!me->arg) /* => no irq, no affinity */
+ return;
+
+ hfi1_put_irq_affinity(dd, me);
+ pci_free_irq(dd->pcidev, msix_intr, me->arg);
+
+ me->arg = NULL;
+
+ spin_lock(&dd->msix_info.msix_lock);
+ __clear_bit(msix_intr, dd->msix_info.in_use_msix);
+ spin_unlock(&dd->msix_info.msix_lock);
+}
+
+/**
+ * msix_clean_up_interrupts() - Free all MSIx IRQ resources
+ * @dd: valid device data structure
+ *
+ * Free the MSIx and associated PCI resources, if they have been allocated.
+ */
+void msix_clean_up_interrupts(struct hfi1_devdata *dd)
+{
+ int i;
+ struct hfi1_msix_entry *me = dd->msix_info.msix_entries;
+
+ /* remove irqs - must happen before disabling/turning off */
+ for (i = 0; i < dd->msix_info.max_requested; i++, me++)
+ msix_free_irq(dd, i);
+
+ /* clean structures */
+ kfree(dd->msix_info.msix_entries);
+ dd->msix_info.msix_entries = NULL;
+ dd->msix_info.max_requested = 0;
+
+ pci_free_irq_vectors(dd->pcidev);
+}
+
+/**
+ * msix_vnic_synchronize_irq() - VNIC IRQ synchronize
+ * @dd: valid devdata
+ */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd)
+{
+ int i;
+
+ for (i = 0; i < dd->vnic.num_ctxt; i++) {
+ struct hfi1_ctxtdata *rcd = dd->vnic.ctxt[i];
+ struct hfi1_msix_entry *me;
+
+ me = &dd->msix_info.msix_entries[rcd->msix_intr];
+
+ synchronize_irq(me->irq);
+ }
+}
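
/*
 * A minimal sketch of the intended call order for the new MSIx API,
 * assuming a probe path shaped like the init.c hunks above;
 * example_msix_bringup() is hypothetical.
 */
static int example_msix_bringup(struct hfi1_devdata *dd)
{
	int ret;

	ret = msix_initialize(dd);		/* size and allocate vectors */
	if (ret)
		return ret;

	ret = msix_request_irqs(dd);		/* general + SDMA + kctxt */
	if (ret)
		msix_clean_up_interrupts(dd);	/* unwind on failure */
	return ret;
}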
diff --git a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c b/drivers/infiniband/hw/hfi1/msix.h
index d08805032f01..a514881632a4 100644
--- a/arch/x86/crypto/sha512-mb/sha512_mb_mgr_init_avx2.c
+++ b/drivers/infiniband/hw/hfi1/msix.h
@@ -1,13 +1,12 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/*
- * Initialization code for multi buffer SHA256 algorithm for AVX2
+ * Copyright(c) 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2016 Intel Corporation.
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -17,26 +16,21 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
- * Contact Information:
- * Megha Dey <megha.dey@linux.intel.com>
- *
* BSD LICENSE
*
- * Copyright(c) 2016 Intel Corporation.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@@ -49,21 +43,22 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
*/
+#ifndef _HFI1_MSIX_H
+#define _HFI1_MSIX_H
-#include "sha512_mb_mgr.h"
+#include "hfi.h"
-void sha512_mb_mgr_init_avx2(struct sha512_mb_mgr *state)
-{
- unsigned int j;
+/* MSIx interface */
+int msix_initialize(struct hfi1_devdata *dd);
+int msix_request_irqs(struct hfi1_devdata *dd);
+void msix_clean_up_interrupts(struct hfi1_devdata *dd);
+int msix_request_rcd_irq(struct hfi1_ctxtdata *rcd);
+int msix_request_sdma_irq(struct sdma_engine *sde);
+void msix_free_irq(struct hfi1_devdata *dd, u8 msix_intr);
- /* initially all lanes are unused */
- state->lens[0] = 0xFFFFFFFF00000000;
- state->lens[1] = 0xFFFFFFFF00000001;
- state->lens[2] = 0xFFFFFFFF00000002;
- state->lens[3] = 0xFFFFFFFF00000003;
+/* VNIC interface */
+void msix_vnic_synchronize_irq(struct hfi1_devdata *dd);
- state->unused_lanes = 0xFF03020100;
- for (j = 0; j < 4; j++)
- state->ldata[j].job_in_lane = NULL;
-}
+#endif
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 6c967dde58e7..c96d193bb236 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -61,19 +61,12 @@
*/
/*
- * Code to adjust PCIe capabilities.
- */
-static void tune_pcie_caps(struct hfi1_devdata *);
-
-/*
* Do all the common PCIe setup and initialization.
- * devdata is not yet allocated, and is not allocated until after this
- * routine returns success. Therefore dd_dev_err() can't be used for error
- * printing.
*/
-int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
+int hfi1_pcie_init(struct hfi1_devdata *dd)
{
int ret;
+ struct pci_dev *pdev = dd->pcidev;
ret = pci_enable_device(pdev);
if (ret) {
@@ -89,15 +82,13 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
* about that, it appears. If the original BAR was retained
* in the kernel data structures, this may be OK.
*/
- hfi1_early_err(&pdev->dev, "pci enable failed: error %d\n",
- -ret);
- goto done;
+ dd_dev_err(dd, "pci enable failed: error %d\n", -ret);
+ return ret;
}
ret = pci_request_regions(pdev, DRIVER_NAME);
if (ret) {
- hfi1_early_err(&pdev->dev,
- "pci_request_regions fails: err %d\n", -ret);
+ dd_dev_err(dd, "pci_request_regions fails: err %d\n", -ret);
goto bail;
}
@@ -110,8 +101,7 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA mask: %d\n", ret);
goto bail;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
@@ -119,18 +109,16 @@ int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
}
if (ret) {
- hfi1_early_err(&pdev->dev,
- "Unable to set DMA consistent mask: %d\n", ret);
+ dd_dev_err(dd, "Unable to set DMA consistent mask: %d\n", ret);
goto bail;
}
pci_set_master(pdev);
(void)pci_enable_pcie_error_reporting(pdev);
- goto done;
+ return 0;
bail:
hfi1_pcie_cleanup(pdev);
-done:
return ret;
}
@@ -206,7 +194,7 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
dd_dev_err(dd, "WC mapping of send buffers failed\n");
goto nomem;
}
- dd_dev_info(dd, "WC piobase: %p\n for %x", dd->piobase, TXE_PIO_SIZE);
+ dd_dev_info(dd, "WC piobase: %p for %x\n", dd->piobase, TXE_PIO_SIZE);
dd->physaddr = addr; /* used for io_remap, etc. */
@@ -344,26 +332,6 @@ int pcie_speeds(struct hfi1_devdata *dd)
return 0;
}
-/*
- * Returns:
- * - actual number of interrupts allocated or
- * - error
- */
-int request_msix(struct hfi1_devdata *dd, u32 msireq)
-{
- int nvec;
-
- nvec = pci_alloc_irq_vectors(dd->pcidev, msireq, msireq, PCI_IRQ_MSIX);
- if (nvec < 0) {
- dd_dev_err(dd, "pci_alloc_irq_vectors() failed: %d\n", nvec);
- return nvec;
- }
-
- tune_pcie_caps(dd);
-
- return nvec;
-}
-
/* restore command and BARs after a reset has wiped them out */
int restore_pci_variables(struct hfi1_devdata *dd)
{
@@ -479,14 +447,19 @@ error:
* Check and optionally adjust them to maximize our throughput.
*/
static int hfi1_pcie_caps;
-module_param_named(pcie_caps, hfi1_pcie_caps, int, S_IRUGO);
+module_param_named(pcie_caps, hfi1_pcie_caps, int, 0444);
MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
uint aspm_mode = ASPM_MODE_DISABLED;
-module_param_named(aspm, aspm_mode, uint, S_IRUGO);
+module_param_named(aspm, aspm_mode, uint, 0444);
MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
-static void tune_pcie_caps(struct hfi1_devdata *dd)
+/**
+ * tune_pcie_caps() - Adjust PCIe capabilities to maximize throughput.
+ * @dd: Valid device data structure
+ *
+ */
+void tune_pcie_caps(struct hfi1_devdata *dd)
{
struct pci_dev *parent;
u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -650,7 +623,6 @@ pci_resume(struct pci_dev *pdev)
struct hfi1_devdata *dd = pci_get_drvdata(pdev);
dd_dev_info(dd, "HFI1 resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
@@ -1029,6 +1001,7 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
const u8 (*ctle_tunings)[4];
uint static_ctle_mode;
int return_error = 0;
+ u32 target_width;
/* PCIe Gen3 is for the ASIC only */
if (dd->icode != ICODE_RTL_SILICON)
@@ -1068,6 +1041,9 @@ int do_pcie_gen3_transition(struct hfi1_devdata *dd)
return 0;
}
+ /* Previous Gen1/Gen2 bus width */
+ target_width = dd->lbus_width;
+
/*
* Do the Gen3 transition. Steps are those of the PCIe Gen3
* recipe.
@@ -1436,11 +1412,12 @@ retry:
dd_dev_info(dd, "%s: new speed and width: %s\n", __func__,
dd->lbus_info);
- if (dd->lbus_speed != target_speed) { /* not target */
+ if (dd->lbus_speed != target_speed ||
+ dd->lbus_width < target_width) { /* not target */
/* maybe retry */
do_retry = retry_count < pcie_retry;
- dd_dev_err(dd, "PCIe link speed did not switch to Gen%d%s\n",
- pcie_target, do_retry ? ", retrying" : "");
+ dd_dev_err(dd, "PCIe link speed or width did not match target%s\n",
+ do_retry ? ", retrying" : "");
retry_count++;
if (do_retry) {
msleep(100); /* allow time to settle */
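
/*
 * Note on the retry condition above: it is now two-dimensional. With
 * target_speed at Gen3 and the pre-transition width of, say, x16 saved
 * in target_width, a link that retrains to Gen3 x8 also takes the
 * retry path, where previously only a speed miss did.
 */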
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 752057647f09..9ab50d2308dc 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -71,14 +71,6 @@ void __cm_reset(struct hfi1_devdata *dd, u64 sendctrl)
}
}
-/* defined in header release 48 and higher */
-#ifndef SEND_CTRL_UNSUPPORTED_VL_SHIFT
-#define SEND_CTRL_UNSUPPORTED_VL_SHIFT 3
-#define SEND_CTRL_UNSUPPORTED_VL_MASK 0xffull
-#define SEND_CTRL_UNSUPPORTED_VL_SMASK (SEND_CTRL_UNSUPPORTED_VL_MASK \
- << SEND_CTRL_UNSUPPORTED_VL_SHIFT)
-#endif
-
/* global control of PIO send */
void pio_send_control(struct hfi1_devdata *dd, int op)
{
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 9b1e84a6b1cc..6f3bc4dab858 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(qp_table_size, "QP table size");
static void flush_tx_list(struct rvt_qp *qp);
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
unsigned int seq,
bool pkts_sent);
@@ -134,15 +134,13 @@ const struct rvt_operation_params hfi1_post_parms[RVT_OPERATION_MAX] = {
};
-static void flush_tx_list(struct rvt_qp *qp)
+static void flush_list_head(struct list_head *l)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- while (!list_empty(&priv->s_iowait.tx_head)) {
+ while (!list_empty(l)) {
struct sdma_txreq *tx;
tx = list_first_entry(
- &priv->s_iowait.tx_head,
+ l,
struct sdma_txreq,
list);
list_del_init(&tx->list);
@@ -151,6 +149,14 @@ static void flush_tx_list(struct rvt_qp *qp)
}
}
+static void flush_tx_list(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ flush_list_head(&iowait_get_ib_work(&priv->s_iowait)->tx_head);
+ flush_list_head(&iowait_get_tid_work(&priv->s_iowait)->tx_head);
+}
+
static void flush_iowait(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
@@ -282,33 +288,46 @@ void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
}
/**
- * hfi1_check_send_wqe - validate wqe
+ * hfi1_setup_wqe - set up the wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled.
*
- * validate wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
+ * Perform setup of the wqe. This is called
+ * prior to inserting the wqe into the ring but after
+ * the wqe has been setup by RDMAVT. This function
+ * allows the driver the opportunity to perform
+ * validation and additional setup of the wqe.
*
* Returns 0 on success, -EINVAL on failure
*
*/
-int hfi1_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
struct rvt_ah *ah;
+ struct hfi1_pportdata *ppd;
+ struct hfi1_devdata *dd;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
- ah = ibah_to_rvtah(wqe->ud_wr.ah);
- if (wqe->length > (1 << ah->log_pmtu))
+ /*
+ * SM packets should exclusively use VL15 and their SL is
+ * ignored (IBTA v1.3, Section 3.5.8.2). Therefore, when ah
+ * is created, SL is 0 in most cases and as a result some
+ * fields (vl and pmtu) in ah may not be set correctly,
+ * depending on the SL2SC and SC2VL tables at the time.
+ */
+ ppd = ppd_from_ibp(ibp);
+ dd = dd_from_ppd(ppd);
+ if (wqe->length > dd->vld[15].mtu)
return -EINVAL;
break;
case IB_QPT_GSI:
@@ -321,7 +340,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
default:
break;
}
- return wqe->length <= piothreshold;
+ return 0;
}
/**
@@ -333,7 +352,7 @@ int hfi1_check_send_wqe(struct rvt_qp *qp,
* It is only used in the post send, which doesn't hold
* the s_lock.
*/
-void _hfi1_schedule_send(struct rvt_qp *qp)
+bool _hfi1_schedule_send(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
struct hfi1_ibport *ibp =
@@ -341,10 +360,10 @@ void _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
- iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
- priv->s_sde ?
- priv->s_sde->cpu :
- cpumask_first(cpumask_of_node(dd->node)));
+ return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
+ priv->s_sde ?
+ priv->s_sde->cpu :
+ cpumask_first(cpumask_of_node(dd->node)));
}
static void qp_pio_drain(struct rvt_qp *qp)
@@ -372,12 +391,32 @@ static void qp_pio_drain(struct rvt_qp *qp)
*
* This schedules qp progress and caller should hold
* the s_lock.
+ * Return: true if the first leg is scheduled;
+ * false if the first leg is not scheduled.
*/
-void hfi1_schedule_send(struct rvt_qp *qp)
+bool hfi1_schedule_send(struct rvt_qp *qp)
{
lockdep_assert_held(&qp->s_lock);
- if (hfi1_send_ok(qp))
+ if (hfi1_send_ok(qp)) {
_hfi1_schedule_send(qp);
+ return true;
+ }
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&((struct hfi1_qp_priv *)qp->priv)->s_iowait,
+ IOWAIT_PENDING_IB);
+ return false;
+}
+
+static void hfi1_qp_schedule(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+ bool ret;
+
+ if (iowait_flag_set(&priv->s_iowait, IOWAIT_PENDING_IB)) {
+ ret = hfi1_schedule_send(qp);
+ if (ret)
+ iowait_clear_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
+ }
}
void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
@@ -388,16 +427,22 @@ void hfi1_qp_wakeup(struct rvt_qp *qp, u32 flag)
if (qp->s_flags & flag) {
qp->s_flags &= ~flag;
trace_hfi1_qpwakeup(qp, flag);
- hfi1_schedule_send(qp);
+ hfi1_qp_schedule(qp);
}
spin_unlock_irqrestore(&qp->s_lock, flags);
/* Notify hfi1_destroy_qp() if it is waiting. */
rvt_put_qp(qp);
}
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait)
+{
+ if (iowait_set_work_flag(wait) == IOWAIT_IB_SE)
+ qp->s_flags &= ~RVT_S_BUSY;
+}
+
static int iowait_sleep(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *stx,
uint seq,
bool pkts_sent)
@@ -438,7 +483,7 @@ static int iowait_sleep(
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, wait);
spin_unlock_irqrestore(&qp->s_lock, flags);
ret = -EBUSY;
} else {
@@ -637,6 +682,7 @@ void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
&priv->s_iowait,
1,
_hfi1_do_send,
+ NULL,
iowait_sleep,
iowait_wakeup,
iowait_sdma_drained);
@@ -686,7 +732,7 @@ void stop_send_queue(struct rvt_qp *qp)
{
struct hfi1_qp_priv *priv = qp->priv;
- cancel_work_sync(&priv->s_iowait.iowork);
+ iowait_cancel_work(&priv->s_iowait);
}
void quiesce_qp(struct rvt_qp *qp)
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 078cff7560b6..7adb6dff6813 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -58,18 +58,6 @@ extern unsigned int hfi1_qp_table_size;
extern const struct rvt_operation_params hfi1_post_parms[];
/*
- * Send if not busy or waiting for I/O and either
- * a RC response is pending or we can process send work requests.
- */
-static inline int hfi1_send_ok(struct rvt_qp *qp)
-{
- return !(qp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT_IO)) &&
- (verbs_txreq_queued(qp) ||
- (qp->s_flags & RVT_S_RESP_PENDING) ||
- !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
-}
-
-/*
* Driver specific s_flags starting at bit 31 down to HFI1_S_MIN_BIT_MASK
*
* HFI1_S_AHG_VALID - ahg header valid on chip
@@ -90,6 +78,20 @@ static inline int hfi1_send_ok(struct rvt_qp *qp)
#define HFI1_S_ANY_WAIT (HFI1_S_ANY_WAIT_IO | RVT_S_ANY_WAIT_SEND)
/*
+ * Send if not busy or waiting for I/O and either
+ * a RC response is pending or we can process send work requests.
+ */
+static inline int hfi1_send_ok(struct rvt_qp *qp)
+{
+ struct hfi1_qp_priv *priv = qp->priv;
+
+ return !(qp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT_IO)) &&
+ (verbs_txreq_queued(iowait_get_ib_work(&priv->s_iowait)) ||
+ (qp->s_flags & RVT_S_RESP_PENDING) ||
+ !(qp->s_flags & RVT_S_ANY_WAIT_SEND));
+}
+
+/*
* free_ahg - clear ahg from QP
*/
static inline void clear_ahg(struct rvt_qp *qp)
@@ -129,8 +131,8 @@ struct send_context *qp_to_send_context(struct rvt_qp *qp, u8 sc5);
void qp_iter_print(struct seq_file *s, struct rvt_qp_iter *iter);
-void _hfi1_schedule_send(struct rvt_qp *qp);
-void hfi1_schedule_send(struct rvt_qp *qp);
+bool _hfi1_schedule_send(struct rvt_qp *qp);
+bool hfi1_schedule_send(struct rvt_qp *qp);
void hfi1_migrate_qp(struct rvt_qp *qp);
@@ -150,4 +152,5 @@ void quiesce_qp(struct rvt_qp *qp);
u32 mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu);
int mtu_to_path_mtu(u32 mtu);
void hfi1_error_port_qps(struct hfi1_ibport *ibp, u8 sl);
+void hfi1_qp_unbusy(struct rvt_qp *qp, struct iowait_work *wait);
#endif /* _QP_H */
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9bd63abb2dfe..188aa4f686a0 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -309,7 +309,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done_free_tx;
@@ -378,9 +378,9 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe,
- err ? IB_WC_LOC_PROT_ERR
- : IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe,
+ err ? IB_WC_LOC_PROT_ERR
+ : IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
goto done_free_tx;
@@ -1043,7 +1043,7 @@ void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
hfi1_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else { /* need to handle delayed completion */
@@ -1468,7 +1468,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1644,7 +1644,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1684,7 +1685,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void)do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1704,7 +1706,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- hfi1_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -2144,7 +2146,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -2200,7 +2202,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 5f56f3c1b4c4..7fb317c711df 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -156,333 +156,6 @@ int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
}
/**
- * ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from hfi1_do_send() to
- * forward a WQE addressed to the same HFI.
- * Note that although we are single threaded due to the send engine, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ruc_loopback(struct rvt_qp *sqp)
-{
- struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- bool release;
- int ret;
- bool copy_last = false;
- int local_ops = 0;
-
- rcu_read_lock();
-
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
- sqp->remote_qpn);
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = true;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_REG_MR:
- goto send_comp;
-
- case IB_WR_LOCAL_INV:
- if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
- if (rvt_invalidate_rkey(sqp,
- wqe->wr.ex.invalidate_rkey))
- send_status = IB_WC_LOC_PROT_ERR;
- local_ops = 1;
- }
- goto send_comp;
-
- case IB_WR_SEND_WITH_INV:
- if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
- wc.wc_flags = IB_WC_WITH_INVALIDATE;
- wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
- }
- goto send;
-
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
-send:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* skip copy_last set and qp_access_flags recheck */
- goto do_write;
- case IB_WR_RDMA_WRITE:
- copy_last = rvt_is_user_qp(qp);
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
-do_write:
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = false;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *)sqp->s_sge.sge.vaddr =
- (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64)atomic64_add_return(sdata, maddr) - sdata :
- (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- hfi1_send_complete(sqp, wqe, send_status);
- if (local_ops) {
- atomic_dec(&sqp->local_ops_pending);
- local_ops = 0;
- }
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- hfi1_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
* hfi1_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -825,8 +498,8 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp)
void _hfi1_do_send(struct work_struct *work)
{
- struct iowait *wait = container_of(work, struct iowait, iowork);
- struct rvt_qp *qp = iowait_to_qp(wait);
+ struct iowait_work *w = container_of(work, struct iowait_work, iowork);
+ struct rvt_qp *qp = iowait_to_qp(w->iow);
hfi1_do_send(qp, true);
}
@@ -850,6 +523,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
ps.ppd = ppd_from_ibp(ps.ibp);
ps.in_thread = in_thread;
+ ps.wait = iowait_get_ib_work(&priv->s_iowait);
trace_hfi1_rc_do_send(qp, in_thread);
@@ -858,7 +532,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_rc_req;
@@ -868,7 +542,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ps.ppd->lmc) - 1)) ==
ps.ppd->lid)) {
- ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
make_req = hfi1_make_uc_req;
@@ -883,6 +557,8 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
/* Return if we are already busy processing a work request. */
if (!hfi1_send_ok(qp)) {
+ if (qp->s_flags & HFI1_S_ANY_WAIT_IO)
+ iowait_set_flag(&priv->s_iowait, IOWAIT_PENDING_IB);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
return;
}
@@ -896,7 +572,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
ps.pkts_sent = false;
	/* ensure a pre-built packet is handled */
- ps.s_txreq = get_waiting_verbs_txreq(qp);
+ ps.s_txreq = get_waiting_verbs_txreq(ps.wait);
do {
/* Check for a constructed packet to be sent. */
if (ps.s_txreq) {
@@ -907,6 +583,7 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
*/
if (hfi1_verbs_send(qp, &ps))
return;
+
/* allow other tasks to run */
if (schedule_send_yield(qp, &ps))
return;
@@ -917,44 +594,3 @@ void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- if (++last >= qp->s_size)
- last = 0;
- trace_hfi1_qp_send_completion(qp, wqe, last);
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_hfi1_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
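
The hunks above drop the driver-local loopback and send-completion paths; the callers elsewhere in this patch are switched to the rdmavt equivalents (rvt_ruc_loopback() and rvt_send_complete()). For reference, a minimal sketch of the wrap-around ring-index advance the completion path relies on, mirroring the removed hfi1_send_complete() above — the real logic now lives in rdmavt, which this excerpt does not show:

	/*
	 * Sketch only: advance the send-queue index when retiring a
	 * send WQE, wrapping at the ring size (qp->s_size).
	 */
	static inline u32 swqe_ring_next(u32 last, u32 s_size)
	{
		if (++last >= s_size)
			last = 0;
		return last;
	}
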
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c
index 88e326d6cc49..891d2386d1ca 100644
--- a/drivers/infiniband/hw/hfi1/sdma.c
+++ b/drivers/infiniband/hw/hfi1/sdma.c
@@ -378,7 +378,7 @@ static inline void complete_tx(struct sdma_engine *sde,
__sdma_txclean(sde->dd, tx);
if (complete)
(*complete)(tx, res);
- if (wait && iowait_sdma_dec(wait))
+ if (iowait_sdma_dec(wait))
iowait_drain_wakeup(wait);
}
@@ -1758,7 +1758,6 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
struct iowait *wait, *nw;
struct iowait *waits[SDMA_WAIT_BATCH_SIZE];
uint i, n = 0, seq, max_idx = 0;
- struct sdma_txreq *stx;
struct hfi1_ibdev *dev = &sde->dd->verbs_dev;
u8 max_starved_cnt = 0;
@@ -1779,19 +1778,13 @@ static void sdma_desc_avail(struct sdma_engine *sde, uint avail)
nw,
&sde->dmawait,
list) {
- u16 num_desc = 0;
+ u32 num_desc;
if (!wait->wakeup)
continue;
if (n == ARRAY_SIZE(waits))
break;
- if (!list_empty(&wait->tx_head)) {
- stx = list_first_entry(
- &wait->tx_head,
- struct sdma_txreq,
- list);
- num_desc = stx->num_desc;
- }
+ num_desc = iowait_get_all_desc(wait);
if (num_desc > avail)
break;
avail -= num_desc;
@@ -2346,7 +2339,7 @@ static inline u16 submit_tx(struct sdma_engine *sde, struct sdma_txreq *tx)
*/
static int sdma_check_progress(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2356,12 +2349,12 @@ static int sdma_check_progress(
if (tx->num_desc <= sde->desc_avail)
return -EAGAIN;
/* pulse the head_lock */
- if (wait && wait->sleep) {
+ if (wait && iowait_ioww_to_iow(wait)->sleep) {
unsigned seq;
seq = raw_seqcount_begin(
(const seqcount_t *)&sde->head_lock.seqcount);
- ret = wait->sleep(sde, wait, tx, seq, pkts_sent);
+ ret = wait->iow->sleep(sde, wait, tx, seq, pkts_sent);
if (ret == -EAGAIN)
sde->desc_avail = sdma_descq_freecnt(sde);
} else {
@@ -2373,7 +2366,7 @@ static int sdma_check_progress(
/**
* sdma_send_txreq() - submit a tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx: sdma_txreq to submit
* @pkts_sent: has any packet been sent yet?
*
@@ -2386,7 +2379,7 @@ static int sdma_check_progress(
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent)
{
@@ -2397,7 +2390,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
/* user should have supplied entire packet */
if (unlikely(tx->tlen))
return -EINVAL;
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
if (unlikely(!__sdma_running(sde)))
@@ -2406,14 +2399,14 @@ retry:
goto nodesc;
tail = submit_tx(sde, tx);
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
sdma_update_tail(sde, tail);
unlock:
spin_unlock_irqrestore(&sde->tail_lock, flags);
return ret;
unlock_noconn:
if (wait)
- iowait_sdma_inc(wait);
+ iowait_sdma_inc(iowait_ioww_to_iow(wait));
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
tx->sn = sde->tail_sn++;
@@ -2422,10 +2415,7 @@ unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_add_tail(&tx->list, &sde->flushlist);
spin_unlock(&sde->flushlist_lock);
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
schedule_work(&sde->flush_worker);
ret = -ECOMM;
goto unlock;
@@ -2442,9 +2432,9 @@ nodesc:
/**
* sdma_send_txlist() - submit a list of tx req to ring
* @sde: sdma engine to use
- * @wait: wait structure to use when full (may be NULL)
+ * @wait: SE wait structure to use when full (may be NULL)
* @tx_list: list of sdma_txreqs to submit
- * @count: pointer to a u32 which, after return will contain the total number of
+ * @count: pointer to a u16 which, after return, will contain the total number of
* sdma_txreqs removed from the tx_list. This will include sdma_txreqs
* whose SDMA descriptors are submitted to the ring and the sdma_txreqs
* which are added to SDMA engine flush list if the SDMA engine state is
@@ -2467,8 +2457,8 @@ nodesc:
* -EINVAL - sdma_txreq incomplete, -EBUSY - no space in ring (wait == NULL)
* -EIOCBQUEUED - tx queued to iowait, -ECOMM bad sdma state
*/
-int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
- struct list_head *tx_list, u32 *count_out)
+int sdma_send_txlist(struct sdma_engine *sde, struct iowait_work *wait,
+ struct list_head *tx_list, u16 *count_out)
{
struct sdma_txreq *tx, *tx_next;
int ret = 0;
@@ -2479,7 +2469,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
spin_lock_irqsave(&sde->tail_lock, flags);
retry:
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
if (unlikely(!__sdma_running(sde)))
goto unlock_noconn;
if (unlikely(tx->num_desc > sde->desc_avail))
@@ -2500,8 +2490,9 @@ retry:
update_tail:
total_count = submit_count + flush_count;
if (wait) {
- iowait_sdma_add(wait, total_count);
- iowait_starve_clear(submit_count > 0, wait);
+ iowait_sdma_add(iowait_ioww_to_iow(wait), total_count);
+ iowait_starve_clear(submit_count > 0,
+ iowait_ioww_to_iow(wait));
}
if (tail != INVALID_TAIL)
sdma_update_tail(sde, tail);
@@ -2511,7 +2502,7 @@ update_tail:
unlock_noconn:
spin_lock(&sde->flushlist_lock);
list_for_each_entry_safe(tx, tx_next, tx_list, list) {
- tx->wait = wait;
+ tx->wait = iowait_ioww_to_iow(wait);
list_del_init(&tx->list);
tx->next_descq_idx = 0;
#ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
@@ -2520,10 +2511,7 @@ unlock_noconn:
#endif
list_add_tail(&tx->list, &sde->flushlist);
flush_count++;
- if (wait) {
- wait->tx_count++;
- wait->count += tx->num_desc;
- }
+ iowait_inc_wait_count(wait, tx->num_desc);
}
spin_unlock(&sde->flushlist_lock);
schedule_work(&sde->flush_worker);
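
sdma_send_txreq() and sdma_send_txlist() now take a struct iowait_work instead of the parent struct iowait, and map back to the parent with iowait_ioww_to_iow(). A sketch of the relationship these hunks assume — the field names are inferred from the container_of() and wait->iow usage in this patch; the actual definitions live in iowait.h, which is not part of this excerpt:

	/* sketch: an iowait owns per-type work entries; each iowait_work
	 * carries a back-pointer so helpers can reach the parent iowait */
	struct iowait_work {
		struct work_struct iowork;	/* see _hfi1_do_send() */
		struct list_head tx_head;	/* pending tx list */
		struct iowait *iow;		/* back-pointer to parent */
	};

	static inline struct iowait *iowait_ioww_to_iow(struct iowait_work *w)
	{
		/* NULL-safe: callers may legitimately pass wait == NULL */
		return w ? w->iow : NULL;
	}
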
diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h
index 46c775f255d1..6dc63d7c5685 100644
--- a/drivers/infiniband/hw/hfi1/sdma.h
+++ b/drivers/infiniband/hw/hfi1/sdma.h
@@ -1,7 +1,7 @@
#ifndef _HFI1_SDMA_H
#define _HFI1_SDMA_H
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,16 +62,6 @@
/* Hardware limit for SDMA packet size */
#define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1)
-#define SDMA_TXREQ_S_OK 0
-#define SDMA_TXREQ_S_SENDERROR 1
-#define SDMA_TXREQ_S_ABORTED 2
-#define SDMA_TXREQ_S_SHUTDOWN 3
-
-/* flags bits */
-#define SDMA_TXREQ_F_URGENT 0x0001
-#define SDMA_TXREQ_F_AHG_COPY 0x0002
-#define SDMA_TXREQ_F_USE_AHG 0x0004
-
#define SDMA_MAP_NONE 0
#define SDMA_MAP_SINGLE 1
#define SDMA_MAP_PAGE 2
@@ -415,6 +405,7 @@ struct sdma_engine {
struct list_head flushlist;
struct cpumask cpu_mask;
struct kobject kobj;
+ u32 msix_intr;
};
int sdma_init(struct hfi1_devdata *dd, u8 port);
@@ -849,16 +840,16 @@ static inline int sdma_txadd_kvaddr(
dd, SDMA_MAP_SINGLE, tx, addr, len);
}
-struct iowait;
+struct iowait_work;
int sdma_send_txreq(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *tx,
bool pkts_sent);
int sdma_send_txlist(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct list_head *tx_list,
- u32 *count);
+ u16 *count_out);
int sdma_ahg_alloc(struct sdma_engine *sde);
void sdma_ahg_free(struct sdma_engine *sde, int ahg_index);
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 25e867393463..2be513d4c9da 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -494,17 +494,18 @@ static struct kobj_type hfi1_vl2mtu_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
container_of(device, struct hfi1_ibdev, rdi.ibdev.dev);
@@ -517,8 +518,9 @@ static ssize_t show_hfi(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(board_id);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -528,8 +530,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -546,8 +549,9 @@ static ssize_t show_nctxts(struct device *device,
min(dd->num_user_contexts,
(u32)dd->sc_sizes[SC_USER].count));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
+static ssize_t nfreectxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -557,8 +561,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -567,8 +572,9 @@ static ssize_t show_serial(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%s", dd->serial);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -586,6 +592,7 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Convert the reported temperature from an integer (reported in
@@ -598,7 +605,7 @@ bail:
/*
* Dump tempsense values, in decimal, to ease shell-scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct hfi1_ibdev *dev =
@@ -622,6 +629,7 @@ static ssize_t show_tempsense(struct device *device,
}
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -629,24 +637,20 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hfi, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *hfi1_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_board_id,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_chip_reset,
+static struct attribute *hfi1_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group ib_hfi1_attr_group = {
+ .attrs = hfi1_attributes,
};
int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -832,12 +836,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
struct device *class_dev = &dev->dev;
int i, j, ret;
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i) {
- ret = device_create_file(&dev->dev, hfi1_attributes[i]);
- if (ret)
- goto bail;
- }
-
for (i = 0; i < dd->num_sdma; i++) {
ret = kobject_init_and_add(&dd->per_sdma[i].kobj,
&sde_ktype, &class_dev->kobj,
@@ -855,9 +853,6 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
return 0;
bail:
- for (i = 0; i < ARRAY_SIZE(hfi1_attributes); ++i)
- device_remove_file(&dev->dev, hfi1_attributes[i]);
-
for (i = 0; i < dd->num_sdma; i++)
kobject_del(&dd->per_sdma[i].kobj);
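
The sysfs conversion above replaces per-file DEVICE_ATTR()/device_create_file() pairs with the DEVICE_ATTR_RO()/DEVICE_ATTR_WO() helpers plus a single attribute group, which the verbs.c hunk later in this patch hands to the IB core via rdma_set_device_sysfs_group(). The shape of the pattern, using a hypothetical attribute name:

	/* DEVICE_ATTR_RO(foo) expects a foo_show() and emits dev_attr_foo;
	 * grouping lets the core create/remove every file in one shot
	 * instead of the per-file loop deleted above */
	static ssize_t foo_show(struct device *device,
				struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%u\n", 42);
	}
	static DEVICE_ATTR_RO(foo);

	static struct attribute *foo_attrs[] = {
		&dev_attr_foo.attr,
		NULL,
	};
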
diff --git a/drivers/infiniband/hw/hfi1/trace.h b/drivers/infiniband/hw/hfi1/trace.h
index 8540463ef3f7..84458f1325e1 100644
--- a/drivers/infiniband/hw/hfi1/trace.h
+++ b/drivers/infiniband/hw/hfi1/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,3 +62,4 @@ __print_symbolic(etype, \
#include "trace_rx.h"
#include "trace_tx.h"
#include "trace_mmu.h"
+#include "trace_iowait.h"
diff --git a/drivers/infiniband/hw/hfi1/trace_iowait.h b/drivers/infiniband/hw/hfi1/trace_iowait.h
new file mode 100644
index 000000000000..27f4334ece2b
--- /dev/null
+++ b/drivers/infiniband/hw/hfi1/trace_iowait.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2018 Intel Corporation.
+ *
+ */
+#if !defined(__HFI1_TRACE_IOWAIT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __HFI1_TRACE_IOWAIT_H
+
+#include <linux/tracepoint.h>
+#include "iowait.h"
+#include "verbs.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hfi1_iowait
+
+DECLARE_EVENT_CLASS(hfi1_iowait_template,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag),
+ TP_STRUCT__entry(/* entry */
+ __field(unsigned long, addr)
+ __field(unsigned long, flags)
+ __field(u32, flag)
+ __field(u32, qpn)
+ ),
+ TP_fast_assign(/* assign */
+ __entry->addr = (unsigned long)wait;
+ __entry->flags = wait->flags;
+ __entry->flag = (1 << flag);
+ __entry->qpn = iowait_to_qp(wait)->ibqp.qp_num;
+ ),
+ TP_printk(/* print */
+ "iowait 0x%lx qp %u flags 0x%lx flag 0x%x",
+ __entry->addr,
+ __entry->qpn,
+ __entry->flags,
+ __entry->flag
+ )
+ );
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_set,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+DEFINE_EVENT(hfi1_iowait_template, hfi1_iowait_clear,
+ TP_PROTO(struct iowait *wait, u32 flag),
+ TP_ARGS(wait, flag));
+
+#endif /* __HFI1_TRACE_IOWAIT_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_iowait
+#include <trace/define_trace.h>
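
The DECLARE_EVENT_CLASS()/DEFINE_EVENT() pair above generates trace_hfi1_iowait_set() and trace_hfi1_iowait_clear() wrappers. A hedged sketch of how a flag helper such as the iowait_set_flag() called from hfi1_do_send() plausibly pairs the tracepoint with the bit operation — the real helper lives in iowait.c, outside this excerpt:

	/* assumption: iowait_set_flag() combines the new tracepoint
	 * with an atomic bit set on wait->flags */
	void iowait_set_flag(struct iowait *wait, u32 flag)
	{
		trace_hfi1_iowait_set(wait, flag);
		set_bit(flag, &wait->flags);
	}
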
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index e254dcec6f64..6aca0c5a7f97 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -88,7 +88,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
}
clear_ahg(qp);
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -140,7 +140,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
qp, wqe->wr.ex.invalidate_rkey);
local_ops = 1;
}
- hfi1_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
+ rvt_send_complete(qp, wqe, err ? IB_WC_LOC_PROT_ERR
: IB_WC_SUCCESS);
if (local_ops)
atomic_dec(&qp->local_ops_pending);
@@ -426,7 +426,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -449,7 +449,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- hfi1_copy_sge(&qp->r_sge, data, tlen, false, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -523,7 +523,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -550,7 +550,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -564,7 +564,7 @@ rdma_last:
tlen -= (hdrsize + extra_bytes);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- hfi1_copy_sge(&qp->r_sge, data, tlen, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc450a1..4baa8f4d49de 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -210,8 +210,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
}
hfi1_make_grh(ibp, &grh, &grd, 0, 0);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -228,7 +228,7 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
WARN_ON_ONCE(len == 0);
- hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, true, false);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -518,7 +518,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- hfi1_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done_free_tx;
}
@@ -560,7 +560,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
ps->flags = tflags;
- hfi1_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done_free_tx;
}
}
@@ -1019,8 +1019,8 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
goto drop;
}
if (packet->grh) {
- hfi1_copy_sge(&qp->r_sge, packet->grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, packet->grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else if (packet->etype == RHF_RCV_TYPE_BYPASS) {
struct ib_grh grh;
@@ -1030,14 +1030,14 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
* out when creating 16B, add back the GRH here.
*/
hfi1_make_ext_grh(packet, &grh, slid, dlid);
- hfi1_copy_sge(&qp->r_sge, &grh,
- sizeof(struct ib_grh), true, false);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else {
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
}
- hfi1_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
- true, false);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 5c88706121c1..3f0aadccd9f6 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015 - 2017 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -76,8 +76,7 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. Default: 12
static unsigned initial_pkt_count = 8;
-static int user_sdma_send_pkts(struct user_sdma_request *req,
- unsigned maxpkts);
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts);
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin);
@@ -101,7 +100,7 @@ static inline u32 get_lrh_len(struct hfi1_pkt_header, u32 len);
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent);
@@ -124,13 +123,13 @@ static struct mmu_rb_ops sdma_rb_ops = {
static int defer_packet_queue(
struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_user_sdma_pkt_q *pq =
- container_of(wait, struct hfi1_user_sdma_pkt_q, busy);
+ container_of(wait->iow, struct hfi1_user_sdma_pkt_q, busy);
struct hfi1_ibdev *dev = &pq->dd->verbs_dev;
struct user_sdma_txreq *tx =
container_of(txreq, struct user_sdma_txreq, txreq);
@@ -187,13 +186,12 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
pq->n_max_reqs = hfi1_sdma_comp_ring_size;
- pq->state = SDMA_PKT_Q_INACTIVE;
atomic_set(&pq->n_reqs, 0);
init_waitqueue_head(&pq->wait);
atomic_set(&pq->n_locked, 0);
pq->mm = fd->mm;
- iowait_init(&pq->busy, 0, NULL, defer_packet_queue,
+ iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
activate_packet_queue, NULL);
pq->reqidx = 0;
@@ -276,7 +274,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
/* Wait until all requests have been freed. */
wait_event_interruptible(
pq->wait,
- (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE));
+ !atomic_read(&pq->n_reqs));
kfree(pq->reqs);
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
@@ -312,6 +310,13 @@ static u8 dlid_to_selector(u16 dlid)
return mapping[hash];
}
+/**
+ * hfi1_user_sdma_process_request() - Process and start a user sdma request
+ * @fd: valid file descriptor
+ * @iovec: array of io vectors to process
+ * @dim: overall iovec array size
+ * @count: number of io vector array entries processed
+ */
int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
struct iovec *iovec, unsigned long dim,
unsigned long *count)
@@ -328,7 +333,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
u8 opcode, sc, vl;
u16 pkey;
u32 slid;
- int req_queued = 0;
u16 dlid;
u32 selector;
@@ -392,7 +396,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->data_len = 0;
req->pq = pq;
req->cq = cq;
- req->status = -1;
req->ahg_idx = -1;
req->iov_idx = 0;
req->sent = 0;
@@ -400,12 +403,14 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->seqcomp = 0;
req->seqsubmitted = 0;
req->tids = NULL;
- req->done = 0;
req->has_error = 0;
INIT_LIST_HEAD(&req->txps);
memcpy(&req->info, &info, sizeof(info));
+ /* The request is initialized, count it */
+ atomic_inc(&pq->n_reqs);
+
if (req_opcode(info.ctrl) == EXPECTED) {
/* expected must have a TID info and at least one data vector */
if (req->data_iovs < 2) {
@@ -500,7 +505,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
ret = pin_vector_pages(req, &req->iovs[i]);
if (ret) {
req->data_iovs = i;
- req->status = ret;
goto free_req;
}
req->data_len += req->iovs[i].iov.iov_len;
@@ -561,23 +565,11 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
req->ahg_idx = sdma_ahg_alloc(req->sde);
set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
- atomic_inc(&pq->n_reqs);
- req_queued = 1;
+ pq->state = SDMA_PKT_Q_ACTIVE;
/* Send the first N packets in the request to buy us some time */
ret = user_sdma_send_pkts(req, pcount);
- if (unlikely(ret < 0 && ret != -EBUSY)) {
- req->status = ret;
+ if (unlikely(ret < 0 && ret != -EBUSY))
goto free_req;
- }
-
- /*
- * It is possible that the SDMA engine would have processed all the
- * submitted packets by the time we get here. Therefore, only set
- * packet queue state to ACTIVE if there are still uncompleted
- * requests.
- */
- if (atomic_read(&pq->n_reqs))
- xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
/*
* This is a somewhat blocking send implementation.
@@ -588,14 +580,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
while (req->seqsubmitted != req->info.npkts) {
ret = user_sdma_send_pkts(req, pcount);
if (ret < 0) {
- if (ret != -EBUSY) {
- req->status = ret;
- WRITE_ONCE(req->has_error, 1);
- if (READ_ONCE(req->seqcomp) ==
- req->seqsubmitted - 1)
- goto free_req;
- return ret;
- }
+ if (ret != -EBUSY)
+ goto free_req;
wait_event_interruptible_timeout(
pq->busy.wait_dma,
(pq->state == SDMA_PKT_Q_ACTIVE),
@@ -606,10 +592,19 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
*count += idx;
return 0;
free_req:
- user_sdma_free_request(req, true);
- if (req_queued)
+ /*
+	 * If seqsubmitted == npkts, the completion routine controls
+	 * the final state. If seqsubmitted < npkts, wait for any
+ * outstanding packets to finish before cleaning up.
+ */
+ if (req->seqsubmitted < req->info.npkts) {
+ if (req->seqsubmitted)
+ wait_event(pq->busy.wait_dma,
+ (req->seqcomp == req->seqsubmitted - 1));
+ user_sdma_free_request(req, true);
pq_update(pq);
- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status);
+ set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
+ }
return ret;
}
@@ -760,9 +755,10 @@ static int user_sdma_txadd(struct user_sdma_request *req,
return ret;
}
-static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
+static int user_sdma_send_pkts(struct user_sdma_request *req, u16 maxpkts)
{
- int ret = 0, count;
+ int ret = 0;
+ u16 count;
unsigned npkts = 0;
struct user_sdma_txreq *tx = NULL;
struct hfi1_user_sdma_pkt_q *pq = NULL;
@@ -864,8 +860,10 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
changes = set_txreq_header_ahg(req, tx,
datalen);
- if (changes < 0)
+ if (changes < 0) {
+ ret = changes;
goto free_tx;
+ }
}
} else {
ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +
@@ -914,10 +912,11 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
npkts++;
}
dosend:
- ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count);
+ ret = sdma_send_txlist(req->sde,
+ iowait_get_ib_work(&pq->busy),
+ &req->txps, &count);
req->seqsubmitted += count;
if (req->seqsubmitted == req->info.npkts) {
- WRITE_ONCE(req->done, 1);
/*
* The txreq has already been submitted to the HW queue
* so we can free the AHG entry now. Corruption will not
@@ -1365,11 +1364,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
return idx;
}
-/*
- * SDMA tx request completion callback. Called when the SDMA progress
- * state machine gets notification that the SDMA descriptors for this
- * tx request have been processed by the DMA engine. Called in
- * interrupt context.
+/**
+ * user_sdma_txreq_cb() - SDMA tx request completion callback.
+ * @txreq: valid sdma tx request
+ * @status: success/failure of request
+ *
+ * Called when the SDMA progress state machine gets notification that
+ * the SDMA descriptors for this tx request have been processed by the
+ * DMA engine. Called in interrupt context.
+ * Only do work on completed sequences.
*/
static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
{
@@ -1378,7 +1381,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
struct user_sdma_request *req;
struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq;
- u16 idx;
+ enum hfi1_sdma_comp_state state = COMPLETE;
if (!tx->req)
return;
@@ -1391,39 +1394,25 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status)
SDMA_DBG(req, "SDMA completion with error %d",
status);
WRITE_ONCE(req->has_error, 1);
+ state = ERROR;
}
req->seqcomp = tx->seqnum;
kmem_cache_free(pq->txreq_cache, tx);
- tx = NULL;
-
- idx = req->info.comp_idx;
- if (req->status == -1 && status == SDMA_TXREQ_S_OK) {
- if (req->seqcomp == req->info.npkts - 1) {
- req->status = 0;
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, COMPLETE, 0);
- }
- } else {
- if (status != SDMA_TXREQ_S_OK)
- req->status = status;
- if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) &&
- (READ_ONCE(req->done) ||
- READ_ONCE(req->has_error))) {
- user_sdma_free_request(req, false);
- pq_update(pq);
- set_comp_state(pq, cq, idx, ERROR, req->status);
- }
- }
+
+	/* sequence isn't complete yet? We are done for now */
+ if (req->seqcomp != req->info.npkts - 1)
+ return;
+
+ user_sdma_free_request(req, false);
+ set_comp_state(pq, cq, req->info.comp_idx, state, status);
+ pq_update(pq);
}
static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
{
- if (atomic_dec_and_test(&pq->n_reqs)) {
- xchg(&pq->state, SDMA_PKT_Q_INACTIVE);
+ if (atomic_dec_and_test(&pq->n_reqs))
wake_up(&pq->wait);
- }
}
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
@@ -1448,6 +1437,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
if (!node)
continue;
+ req->iovs[i].node = NULL;
+
if (unpin)
hfi1_mmu_rb_remove(req->pq->handler,
&node->rb);
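
The user_sdma lifetime rework above makes pq->n_reqs the single drain condition: a reference is taken as soon as a request is initialized, dropped by pq_update() on final completion, and teardown simply waits for zero, retiring the SDMA_PKT_Q_INACTIVE state. Condensed from the hunks above:

	atomic_inc(&pq->n_reqs);		/* request initialized */
	...
	if (atomic_dec_and_test(&pq->n_reqs))	/* last reference gone */
		wake_up(&pq->wait);
	...
	wait_event_interruptible(pq->wait, !atomic_read(&pq->n_reqs));
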
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.h b/drivers/infiniband/hw/hfi1/user_sdma.h
index d2bc77f75253..14dfd757dafd 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.h
+++ b/drivers/infiniband/hw/hfi1/user_sdma.h
@@ -105,9 +105,10 @@ static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */
#define TXREQ_FLAGS_REQ_DISABLE_SH BIT(1) /* Disable header suppression */
-#define SDMA_PKT_Q_INACTIVE BIT(0)
-#define SDMA_PKT_Q_ACTIVE BIT(1)
-#define SDMA_PKT_Q_DEFERRED BIT(2)
+enum pkt_q_sdma_state {
+ SDMA_PKT_Q_ACTIVE,
+ SDMA_PKT_Q_DEFERRED,
+};
/*
* Maximum retry attempts to submit a TX request
@@ -133,7 +134,7 @@ struct hfi1_user_sdma_pkt_q {
struct user_sdma_request *reqs;
unsigned long *req_in_use;
struct iowait busy;
- unsigned state;
+ enum pkt_q_sdma_state state;
wait_queue_head_t wait;
unsigned long unpinned;
struct mmu_rb_handler *handler;
@@ -203,14 +204,12 @@ struct user_sdma_request {
s8 ahg_idx;
/* Writeable fields shared with interrupt */
- u64 seqcomp ____cacheline_aligned_in_smp;
- u64 seqsubmitted;
- /* status of the last txreq completed */
- int status;
+ u16 seqcomp ____cacheline_aligned_in_smp;
+ u16 seqsubmitted;
/* Send side fields */
struct list_head txps ____cacheline_aligned_in_smp;
- u64 seqnum;
+ u16 seqnum;
/*
* KDETH.OFFSET (TID) field
* The offset can cover multiple packets, depending on the
@@ -228,7 +227,6 @@ struct user_sdma_request {
u16 tididx;
/* progress index moving along the iovs array */
u8 iov_idx;
- u8 done;
u8 has_error;
struct user_sdma_iovec iovs[MAX_VECTORS_PER_REQ];
@@ -248,7 +246,7 @@ struct user_sdma_txreq {
struct user_sdma_request *req;
u16 flags;
unsigned int busycount;
- u64 seqnum;
+ u16 seqnum;
};
int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a7c586a5589d..48e11e510358 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -129,8 +129,6 @@ unsigned short piothreshold = 256;
module_param(piothreshold, ushort, S_IRUGO);
MODULE_PARM_DESC(piothreshold, "size used to determine sdma vs. pio");
-#define COPY_CACHELESS 1
-#define COPY_ADAPTIVE 2
static unsigned int sge_copy_mode;
module_param(sge_copy_mode, uint, S_IRUGO);
MODULE_PARM_DESC(sge_copy_mode,
@@ -151,159 +149,13 @@ static int pio_wait(struct rvt_qp *qp,
/* 16B trailing buffer */
static const u8 trail_buf[MAX_16B_PADDING];
-static uint wss_threshold;
+static uint wss_threshold = 80;
module_param(wss_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
static uint wss_clean_period = 256;
module_param(wss_clean_period, uint, S_IRUGO);
MODULE_PARM_DESC(wss_clean_period, "Count of verbs copies before an entry in the page copy table is cleaned");
-/* memory working set size */
-struct hfi1_wss {
- unsigned long *entries;
- atomic_t total_count;
- atomic_t clean_counter;
- atomic_t clean_entry;
-
- int threshold;
- int num_entries;
- long pages_mask;
-};
-
-static struct hfi1_wss wss;
-
-int hfi1_wss_init(void)
-{
- long llc_size;
- long llc_bits;
- long table_size;
- long table_bits;
-
- /* check for a valid percent range - default to 80 if none or invalid */
- if (wss_threshold < 1 || wss_threshold > 100)
- wss_threshold = 80;
- /* reject a wildly large period */
- if (wss_clean_period > 1000000)
- wss_clean_period = 256;
- /* reject a zero period */
- if (wss_clean_period == 0)
- wss_clean_period = 1;
-
- /*
- * Calculate the table size - the next power of 2 larger than the
- * LLC size. LLC size is in KiB.
- */
- llc_size = wss_llc_size() * 1024;
- table_size = roundup_pow_of_two(llc_size);
-
- /* one bit per page in rounded up table */
- llc_bits = llc_size / PAGE_SIZE;
- table_bits = table_size / PAGE_SIZE;
- wss.pages_mask = table_bits - 1;
- wss.num_entries = table_bits / BITS_PER_LONG;
-
- wss.threshold = (llc_bits * wss_threshold) / 100;
- if (wss.threshold == 0)
- wss.threshold = 1;
-
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- wss.entries = kcalloc(wss.num_entries, sizeof(*wss.entries),
- GFP_KERNEL);
- if (!wss.entries) {
- hfi1_wss_exit();
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void hfi1_wss_exit(void)
-{
- /* coded to handle partially initialized and repeat callers */
- kfree(wss.entries);
- wss.entries = NULL;
-}
-
-/*
- * Advance the clean counter. When the clean period has expired,
- * clean an entry.
- *
- * This is implemented in atomics to avoid locking. Because multiple
- * variables are involved, it can be racy which can lead to slightly
- * inaccurate information. Since this is only a heuristic, this is
- * OK. Any innaccuracies will clean themselves out as the counter
- * advances. That said, it is unlikely the entry clean operation will
- * race - the next possible racer will not start until the next clean
- * period.
- *
- * The clean counter is implemented as a decrement to zero. When zero
- * is reached an entry is cleaned.
- */
-static void wss_advance_clean_counter(void)
-{
- int entry;
- int weight;
- unsigned long bits;
-
- /* become the cleaner if we decrement the counter to zero */
- if (atomic_dec_and_test(&wss.clean_counter)) {
- /*
- * Set, not add, the clean period. This avoids an issue
- * where the counter could decrement below the clean period.
- * Doing a set can result in lost decrements, slowing the
- * clean advance. Since this a heuristic, this possible
- * slowdown is OK.
- *
- * An alternative is to loop, advancing the counter by a
- * clean period until the result is > 0. However, this could
- * lead to several threads keeping another in the clean loop.
- * This could be mitigated by limiting the number of times
- * we stay in the loop.
- */
- atomic_set(&wss.clean_counter, wss_clean_period);
-
- /*
- * Uniquely grab the entry to clean and move to next.
- * The current entry is always the lower bits of
- * wss.clean_entry. The table size, wss.num_entries,
- * is always a power-of-2.
- */
- entry = (atomic_inc_return(&wss.clean_entry) - 1)
- & (wss.num_entries - 1);
-
- /* clear the entry and count the bits */
- bits = xchg(&wss.entries[entry], 0);
- weight = hweight64((u64)bits);
- /* only adjust the contended total count if needed */
- if (weight)
- atomic_sub(weight, &wss.total_count);
- }
-}
-
-/*
- * Insert the given address into the working set array.
- */
-static void wss_insert(void *address)
-{
- u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss.pages_mask;
- u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
- u32 nr = page & (BITS_PER_LONG - 1);
-
- if (!test_and_set_bit(nr, &wss.entries[entry]))
- atomic_inc(&wss.total_count);
-
- wss_advance_clean_counter();
-}
-
-/*
- * Is the working set larger than the threshold?
- */
-static inline bool wss_exceeds_threshold(void)
-{
- return atomic_read(&wss.total_count) >= wss.threshold;
-}
-
/*
* Translate ib_wr_opcode into ib_wc_opcode.
*/
@@ -438,79 +290,6 @@ static const u32 pio_opmask[BIT(3)] = {
*/
__be64 ib_hfi1_sys_image_guid;
-/**
- * hfi1_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- * @release: boolean to release MR
- * @copy_last: do a separate copy of the last 8 bytes
- */
-void hfi1_copy_sge(
- struct rvt_sge_state *ss,
- void *data, u32 length,
- bool release,
- bool copy_last)
-{
- struct rvt_sge *sge = &ss->sge;
- int i;
- bool in_last = false;
- bool cacheless_copy = false;
-
- if (sge_copy_mode == COPY_CACHELESS) {
- cacheless_copy = length >= PAGE_SIZE;
- } else if (sge_copy_mode == COPY_ADAPTIVE) {
- if (length >= PAGE_SIZE) {
- /*
- * NOTE: this *assumes*:
- * o The first vaddr is the dest.
- * o If multiple pages, then vaddr is sequential.
- */
- wss_insert(sge->vaddr);
- if (length >= (2 * PAGE_SIZE))
- wss_insert(sge->vaddr + PAGE_SIZE);
-
- cacheless_copy = wss_exceeds_threshold();
- } else {
- wss_advance_clean_counter();
- }
- }
- if (copy_last) {
- if (length > 8) {
- length -= 8;
- } else {
- copy_last = false;
- in_last = true;
- }
- }
-
-again:
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- if (unlikely(in_last)) {
- /* enforce byte transfer ordering */
- for (i = 0; i < len; i++)
- ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
- } else if (cacheless_copy) {
- cacheless_memcpy(sge->vaddr, data, len);
- } else {
- memcpy(sge->vaddr, data, len);
- }
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-
- if (copy_last) {
- copy_last = false;
- in_last = true;
- length = 8;
- goto again;
- }
-}
-
/*
* Make sure the QP is ready and able to accept the given opcode.
*/
@@ -713,7 +492,7 @@ static void verbs_sdma_complete(
spin_lock(&qp->s_lock);
if (tx->wqe) {
- hfi1_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct hfi1_opa_header *hdr;
@@ -737,7 +516,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
if (list_empty(&dev->memwait))
mod_timer(&dev->mem_timer, jiffies + 1);
@@ -748,7 +527,7 @@ static int wait_kmem(struct hfi1_ibdev *dev,
rvt_get_qp(qp);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -950,8 +729,7 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (unlikely(ret))
goto bail_build;
}
- ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,
- ps->pkts_sent);
+ ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
if (unlikely(ret < 0)) {
if (ret == -ECOMM)
goto bail_ecomm;
@@ -1001,7 +779,7 @@ static int pio_wait(struct rvt_qp *qp,
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
write_seqlock(&dev->iowait_lock);
list_add_tail(&ps->s_txreq->txreq.list,
- &priv->s_iowait.tx_head);
+ &ps->wait->tx_head);
if (list_empty(&priv->s_iowait.list)) {
struct hfi1_ibdev *dev = &dd->verbs_dev;
int was_empty;
@@ -1020,7 +798,7 @@ static int pio_wait(struct rvt_qp *qp,
hfi1_sc_wantpiobuf_intr(sc, 1);
}
write_sequnlock(&dev->iowait_lock);
- qp->s_flags &= ~RVT_S_BUSY;
+ hfi1_qp_unbusy(qp, ps->wait);
ret = -EBUSY;
}
spin_unlock_irqrestore(&qp->s_lock, flags);
@@ -1160,7 +938,7 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
pio_bail:
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, wc_status);
+ rvt_send_complete(qp, qp->s_wqe, wc_status);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1367,7 +1145,7 @@ int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
hfi1_cdbg(PIO, "%s() Failed. Completing with err",
__func__);
spin_lock_irqsave(&qp->s_lock, flags);
- hfi1_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock_irqrestore(&qp->s_lock, flags);
}
return -EINVAL;
@@ -1943,7 +1721,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.driver_f.check_modify_qp = hfi1_check_modify_qp;
dd->verbs_dev.rdi.driver_f.modify_qp = hfi1_modify_qp;
dd->verbs_dev.rdi.driver_f.notify_restart_rc = hfi1_restart_rc;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = hfi1_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = hfi1_setup_wqe;
dd->verbs_dev.rdi.driver_f.comp_vect_cpu_lookup =
hfi1_comp_vect_mappings_lookup;
@@ -1956,10 +1734,16 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
dd->verbs_dev.rdi.dparms.lkey_table_size = hfi1_lkey_table_size;
dd->verbs_dev.rdi.dparms.nports = dd->num_pports;
dd->verbs_dev.rdi.dparms.npkeys = hfi1_get_npkeys(dd);
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = sge_copy_mode;
+ dd->verbs_dev.rdi.dparms.wss_threshold = wss_threshold;
+ dd->verbs_dev.rdi.dparms.wss_clean_period = wss_clean_period;
/* post send table */
dd->verbs_dev.rdi.post_parms = hfi1_post_parms;
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_hfi1_wc_opcode;
+
ppd = dd->pport;
for (i = 0; i < dd->num_pports; i++, ppd++)
rvt_init_port(&dd->verbs_dev.rdi,
@@ -1967,6 +1751,9 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)
i,
ppd->pkeys);
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev,
+ &ib_hfi1_attr_group);
+
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_HFI1);
if (ret)
goto err_verbs_txreq;
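
The working-set-size (WSS) machinery removed above is handed to rdmavt through dparms (sge_copy_mode, wss_threshold, wss_clean_period). As a worked example of the sizing math in the removed hfi1_wss_init(), assuming a hypothetical 32 MiB LLC and 4 KiB pages:

	long llc_size   = 32 * 1024 * 1024;	/* wss_llc_size() * 1024 */
	long llc_bits   = llc_size / 4096;	/* 8192 pages, 1 bit each */
	long table_bits = roundup_pow_of_two(llc_size) / 4096;	/* 8192 */
	int num_entries = table_bits / 64;	/* 128 unsigned longs */
	int threshold   = (llc_bits * 80) / 100;	/* 6553 pages at 80% */
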
diff --git a/drivers/infiniband/hw/hfi1/verbs.h b/drivers/infiniband/hw/hfi1/verbs.h
index a4d06502f06d..64c9054db5f3 100644
--- a/drivers/infiniband/hw/hfi1/verbs.h
+++ b/drivers/infiniband/hw/hfi1/verbs.h
@@ -166,11 +166,13 @@ struct hfi1_qp_priv {
* This structure is used to hold commonly lookedup and computed values during
* the send engine progress.
*/
+struct iowait_work;
struct hfi1_pkt_state {
struct hfi1_ibdev *dev;
struct hfi1_ibport *ibp;
struct hfi1_pportdata *ppd;
struct verbs_txreq *s_txreq;
+ struct iowait_work *wait;
unsigned long flags;
unsigned long timeout;
unsigned long timeout_int;
@@ -247,7 +249,7 @@ static inline struct hfi1_ibdev *to_idev(struct ib_device *ibdev)
return container_of(rdi, struct hfi1_ibdev, rdi);
}
-static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
+static inline struct rvt_qp *iowait_to_qp(struct iowait *s_iowait)
{
struct hfi1_qp_priv *priv;
@@ -313,9 +315,6 @@ void hfi1_put_txreq(struct verbs_txreq *tx);
int hfi1_verbs_send(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
-void hfi1_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- bool release, bool copy_last);
-
void hfi1_cnp_rcv(struct hfi1_packet *packet);
void hfi1_uc_rcv(struct hfi1_packet *packet);
@@ -343,7 +342,8 @@ int hfi1_check_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
void hfi1_modify_qp(struct rvt_qp *qp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata);
void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
-int hfi1_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
extern const u32 rc_only_opcode;
extern const u32 uc_only_opcode;
@@ -363,9 +363,6 @@ void hfi1_do_send_from_rvt(struct rvt_qp *qp);
void hfi1_do_send(struct rvt_qp *qp, bool in_thread);
-void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn);
int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps);
@@ -390,28 +387,6 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u64 pbc);
-int hfi1_wss_init(void);
-void hfi1_wss_exit(void);
-
-/* platform specific: return the lowest level cache (llc) size, in KiB */
-static inline int wss_llc_size(void)
-{
- /* assume that the boot CPU value is universal for all CPUs */
- return boot_cpu_data.x86_cache_size;
-}
-
-/* platform specific: cacheless copy */
-static inline void cacheless_memcpy(void *dst, void *src, size_t n)
-{
- /*
- * Use the only available X64 cacheless copy. Add a __user cast
- * to quiet sparse. The src agument is already in the kernel so
- * there are no security issues. The extra fault recovery machinery
- * is not invoked.
- */
- __copy_user_nocache(dst, (void __user *)src, n, 0);
-}
-
static inline bool opa_bth_is_migration(struct ib_other_headers *ohdr)
{
return ohdr->bth[1] & cpu_to_be32(OPA_BTH_MIG_REQ);
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 1c19bbc764b2..2a77af26a231 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -102,22 +102,19 @@ static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
return &tx->txreq;
}
-static inline struct verbs_txreq *get_waiting_verbs_txreq(struct rvt_qp *qp)
+static inline struct verbs_txreq *get_waiting_verbs_txreq(struct iowait_work *w)
{
struct sdma_txreq *stx;
- struct hfi1_qp_priv *priv = qp->priv;
- stx = iowait_get_txhead(&priv->s_iowait);
+ stx = iowait_get_txhead(w);
if (stx)
return container_of(stx, struct verbs_txreq, txreq);
return NULL;
}
-static inline bool verbs_txreq_queued(struct rvt_qp *qp)
+static inline bool verbs_txreq_queued(struct iowait_work *w)
{
- struct hfi1_qp_priv *priv = qp->priv;
-
- return iowait_packet_queued(&priv->s_iowait);
+ return iowait_packet_queued(w);
}
void hfi1_put_txreq(struct verbs_txreq *tx);
diff --git a/drivers/infiniband/hw/hfi1/vnic_main.c b/drivers/infiniband/hw/hfi1/vnic_main.c
index c643d80c5a53..c9876d9e3cb9 100644
--- a/drivers/infiniband/hw/hfi1/vnic_main.c
+++ b/drivers/infiniband/hw/hfi1/vnic_main.c
@@ -120,7 +120,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
uctxt->seq_cnt = 1;
uctxt->is_vnic = true;
- hfi1_set_vnic_msix_info(uctxt);
+ msix_request_rcd_irq(uctxt);
hfi1_stats.sps_ctxts++;
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
@@ -135,8 +135,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
dd_dev_dbg(dd, "closing vnic context %d\n", uctxt->ctxt);
flush_wc();
- hfi1_reset_vnic_msix_info(uctxt);
-
/*
* Disable receive context and interrupt available, reset all
* RcvCtxtCtrl bits to default values.
@@ -148,6 +146,10 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
+	/* msix_intr is only valid when > 0; free the IRQ only in that case */
+ if (uctxt->msix_intr)
+ msix_free_irq(dd, uctxt->msix_intr);
+
uctxt->event_flags = 0;
hfi1_clear_tids(uctxt);
@@ -626,7 +628,7 @@ static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);
/* ensure irqs see the change */
- hfi1_vnic_synchronize_irq(dd);
+ msix_vnic_synchronize_irq(dd);
/* remove unread skbs */
for (i = 0; i < vinfo->num_rx_q; i++) {
@@ -690,8 +692,6 @@ static int hfi1_vnic_init(struct hfi1_vnic_vport_info *vinfo)
rc = hfi1_vnic_txreq_init(dd);
if (rc)
goto txreq_fail;
-
- dd->vnic.msix_idx = dd->first_dyn_msix_idx;
}
for (i = dd->vnic.num_ctxt; i < vinfo->num_rx_q; i++) {
diff --git a/drivers/infiniband/hw/hfi1/vnic_sdma.c b/drivers/infiniband/hw/hfi1/vnic_sdma.c
index c3c96c5869ed..97bd940a056a 100644
--- a/drivers/infiniband/hw/hfi1/vnic_sdma.c
+++ b/drivers/infiniband/hw/hfi1/vnic_sdma.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -198,8 +198,8 @@ int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
goto free_desc;
tx->retry_count = 0;
- ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,
- vnic_sdma->pkts_sent);
+ ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
+ &tx->txreq, vnic_sdma->pkts_sent);
/* When -ECOMM, sdma callback will be called with ABORT status */
if (unlikely(ret && unlikely(ret != -ECOMM)))
goto free_desc;
@@ -230,13 +230,13 @@ tx_err:
* become available.
*/
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
- struct iowait *wait,
+ struct iowait_work *wait,
struct sdma_txreq *txreq,
uint seq,
bool pkts_sent)
{
struct hfi1_vnic_sdma *vnic_sdma =
- container_of(wait, struct hfi1_vnic_sdma, wait);
+ container_of(wait->iow, struct hfi1_vnic_sdma, wait);
struct hfi1_ibdev *dev = &vnic_sdma->dd->verbs_dev;
struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
@@ -247,7 +247,7 @@ static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
write_seqlock(&dev->iowait_lock);
if (list_empty(&vnic_sdma->wait.list))
- iowait_queue(pkts_sent, wait, &sde->dmawait);
+ iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
write_sequnlock(&dev->iowait_lock);
return -EBUSY;
}
@@ -285,7 +285,8 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
for (i = 0; i < vinfo->num_tx_q; i++) {
struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];
- iowait_init(&vnic_sdma->wait, 0, NULL, hfi1_vnic_sdma_sleep,
+ iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
+ hfi1_vnic_sdma_sleep,
hfi1_vnic_sdma_wakeup, NULL);
vnic_sdma->sde = &vinfo->dd->per_sdma[i];
vnic_sdma->dd = vinfo->dd;
@@ -295,10 +296,12 @@ void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
/* Add a free descriptor watermark for wakeups */
if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
+ struct iowait_work *work;
+
INIT_LIST_HEAD(&vnic_sdma->stx.list);
vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
- list_add_tail(&vnic_sdma->stx.list,
- &vnic_sdma->wait.tx_head);
+ work = iowait_get_ib_work(&vnic_sdma->wait);
+ list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
}
}
}
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index fddb5fdf92de..21c2100b2ea9 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -1,6 +1,7 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index 0d96c5bb38cd..9990dc9eb96a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -49,6 +49,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff;
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
+ bool vlan_en = false;
ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
if (!ah)
@@ -58,8 +59,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
gid_attr = ah_attr->grh.sgid_attr;
- if (is_vlan_dev(gid_attr->ndev))
+ if (is_vlan_dev(gid_attr->ndev)) {
vlan_tag = vlan_dev_vlan_id(gid_attr->ndev);
+ vlan_en = true;
+ }
if (vlan_tag < 0x1000)
vlan_tag |= (rdma_ah_get_sl(ah_attr) &
@@ -71,6 +74,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
HNS_ROCE_PORT_NUM_SHIFT));
ah->av.gid_index = grh->sgid_index;
ah->av.vlan = cpu_to_le16(vlan_tag);
+ ah->av.vlan_en = vlan_en;
dev_dbg(dev, "gid_index = 0x%x,vlan = 0x%x\n", ah->av.gid_index,
ah->av.vlan);
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9a24fd0ee3e7..d39bdfdb5de9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -88,8 +88,11 @@
#define BITMAP_RR 1
#define MR_TYPE_MR 0x00
+#define MR_TYPE_FRMR 0x01
#define MR_TYPE_DMA 0x03
+#define HNS_ROCE_FRMR_MAX_PA 512
+
#define PKEY_ID 0xffff
#define GUID_LEN 8
#define NODE_DESC_SIZE 64
@@ -193,6 +196,9 @@ enum {
HNS_ROCE_CAP_FLAG_RQ_INLINE = BIT(2),
HNS_ROCE_CAP_FLAG_RECORD_DB = BIT(3),
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB = BIT(4),
+ HNS_ROCE_CAP_FLAG_MW = BIT(7),
+ HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
+ HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
};
enum hns_roce_mtt_type {
@@ -219,19 +225,11 @@ struct hns_roce_uar {
unsigned long logic_idx;
};
-struct hns_roce_vma_data {
- struct list_head list;
- struct vm_area_struct *vma;
- struct mutex *vma_list_mutex;
-};
-
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
- struct list_head vma_list;
- struct mutex vma_list_mutex;
};
struct hns_roce_pd {
@@ -293,6 +291,16 @@ struct hns_roce_mtt {
enum hns_roce_mtt_type mtt_type;
};
+struct hns_roce_mw {
+ struct ib_mw ibmw;
+ u32 pdn;
+ u32 rkey;
+ int enabled; /* MW's active status */
+ u32 pbl_hop_num;
+ u32 pbl_ba_pg_sz;
+ u32 pbl_buf_pg_sz;
+};
+
/* Only support 4K page size for mr register */
#define MR_SIZE_4K 0
@@ -304,6 +312,7 @@ struct hns_roce_mr {
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
u32 access;/* Access permission of MR */
+ u32 npages;
int enabled; /* MR's active status */
int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */
@@ -457,6 +466,7 @@ struct hns_roce_av {
u8 dgid[HNS_ROCE_GID_SIZE];
u8 mac[6];
__le16 vlan;
+ bool vlan_en;
};
struct hns_roce_ah {
@@ -656,6 +666,7 @@ struct hns_roce_eq_table {
};
struct hns_roce_caps {
+ u64 fw_ver;
u8 num_ports;
int gid_table_len[HNS_ROCE_MAX_PORTS];
int pkey_table_len[HNS_ROCE_MAX_PORTS];
@@ -665,7 +676,9 @@ struct hns_roce_caps {
u32 max_sq_sg; /* 2 */
u32 max_sq_inline; /* 32 */
u32 max_rq_sg; /* 2 */
+ u32 max_extend_sg;
int num_qps; /* 256k */
+ int reserved_qps;
u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */
@@ -738,6 +751,7 @@ struct hns_roce_work {
struct hns_roce_dev *hr_dev;
struct work_struct work;
u32 qpn;
+ u32 cqn;
int event_type;
int sub_type;
};
@@ -764,6 +778,8 @@ struct hns_roce_hw {
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
+ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector);
@@ -863,6 +879,11 @@ static inline struct hns_roce_mr *to_hr_mr(struct ib_mr *ibmr)
return container_of(ibmr, struct hns_roce_mr, ibmr);
}
+static inline struct hns_roce_mw *to_hr_mw(struct ib_mw *ibmw)
+{
+ return container_of(ibmw, struct hns_roce_mw, ibmw);
+}
+
static inline struct hns_roce_qp *to_hr_qp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct hns_roce_qp, ibqp);
@@ -968,12 +989,20 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata);
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg);
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset);
int hns_roce_dereg_mr(struct ib_mr *ibmr);
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
unsigned long mpt_index);
unsigned long key_to_hw_index(u32 key);
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *pd, enum ib_mw_type,
+ struct ib_udata *udata);
+int hns_roce_dealloc_mw(struct ib_mw *ibmw);
+
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 081aa91fc162..ca05810c92dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -731,7 +731,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
cq_init_attr.comp_vector = 0;
cq = hns_roce_ib_create_cq(&hr_dev->ib_dev, &cq_init_attr, NULL, NULL);
if (IS_ERR(cq)) {
- dev_err(dev, "Create cq for reseved loop qp failed!");
+ dev_err(dev, "Create cq for reserved loop qp failed!");
return -ENOMEM;
}
free_mr->mr_free_cq = to_hr_cq(cq);
@@ -744,7 +744,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
pd = hns_roce_alloc_pd(&hr_dev->ib_dev, NULL, NULL);
if (IS_ERR(pd)) {
- dev_err(dev, "Create pd for reseved loop qp failed!");
+ dev_err(dev, "Create pd for reserved loop qp failed!");
ret = -ENOMEM;
goto alloc_pd_failed;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 0218c0f8c2a7..a4c62ae23a9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -54,6 +54,59 @@ static void set_data_seg_v2(struct hns_roce_v2_wqe_data_seg *dseg,
dseg->len = cpu_to_le32(sg->length);
}
+static void set_frmr_seg(struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
+ struct hns_roce_wqe_frmr_seg *fseg,
+ const struct ib_reg_wr *wr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(wr->mr);
+
+ /* use ib_access_flags */
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S,
+ wr->access & IB_ACCESS_MW_BIND ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S,
+ wr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RR_S,
+ wr->access & IB_ACCESS_REMOTE_READ ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_RW_S,
+ wr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0);
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_FRMR_WQE_BYTE_4_LW_S,
+ wr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0);
+
+ /* The msg_len and inv_key fields are reused to carry the PBL base address */
+ rc_sq_wqe->msg_len = cpu_to_le32(mr->pbl_ba & 0xffffffff);
+ rc_sq_wqe->inv_key = cpu_to_le32(mr->pbl_ba >> 32);
+
+ rc_sq_wqe->byte_16 = cpu_to_le32(wr->mr->length & 0xffffffff);
+ rc_sq_wqe->byte_20 = cpu_to_le32(wr->mr->length >> 32);
+ rc_sq_wqe->rkey = cpu_to_le32(wr->key);
+ rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
+
+ fseg->pbl_size = cpu_to_le32(mr->pbl_size);
+ roce_set_field(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M,
+ V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_bit(fseg->mode_buf_pg_sz,
+ V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S, 0);
+}
+
+static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
+ const struct ib_atomic_wr *wr)
+{
+ if (wr->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->swap);
+ aseg->cmp_data = cpu_to_le64(wr->compare_add);
+ } else {
+ aseg->fetchadd_swap_data = cpu_to_le64(wr->compare_add);
+ aseg->cmp_data = 0;
+ }
+}
+
static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
unsigned int *sge_ind)
{
@@ -121,6 +174,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
}
if (wr->opcode == IB_WR_RDMA_READ) {
+ *bad_wr = wr;
dev_err(hr_dev->dev, "Not support inline data!\n");
return -EINVAL;
}
@@ -179,6 +233,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
struct hns_roce_v2_ud_send_wqe *ud_sq_wqe;
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe;
struct hns_roce_qp *qp = to_hr_qp(ibqp);
+ struct hns_roce_wqe_frmr_seg *fseg;
struct device *dev = hr_dev->dev;
struct hns_roce_v2_db sq_db;
struct ib_qp_attr attr;
@@ -191,6 +246,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
int attr_mask;
u32 tmp_len;
int ret = 0;
+ u32 hr_op;
u8 *smac;
int nreq;
int i;
@@ -356,6 +412,9 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
V2_UD_SEND_WQE_BYTE_40_PORTN_S,
qp->port);
+ roce_set_bit(ud_sq_wqe->byte_40,
+ V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S,
+ ah->av.vlan_en ? 1 : 0);
roce_set_field(ud_sq_wqe->byte_48,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_M,
V2_UD_SEND_WQE_BYTE_48_SGID_INDX_S,
@@ -406,99 +465,100 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_OWNER_S, owner_bit);
+ wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
switch (wr->opcode) {
case IB_WR_RDMA_READ:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_READ);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_READ;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM;
rc_sq_wqe->rkey =
cpu_to_le32(rdma_wr(wr)->rkey);
rc_sq_wqe->va =
cpu_to_le64(rdma_wr(wr)->remote_addr);
break;
case IB_WR_SEND:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND;
break;
case IB_WR_SEND_WITH_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_INV;
break;
case IB_WR_SEND_WITH_IMM:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM);
+ hr_op = HNS_ROCE_V2_WQE_OP_SEND_WITH_IMM;
break;
case IB_WR_LOCAL_INV:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_LOCAL_INV);
+ hr_op = HNS_ROCE_V2_WQE_OP_LOCAL_INV;
+ roce_set_bit(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_SO_S, 1);
+ rc_sq_wqe->inv_key =
+ cpu_to_le32(wr->ex.invalidate_rkey);
+ break;
+ case IB_WR_REG_MR:
+ hr_op = HNS_ROCE_V2_WQE_OP_FAST_REG_PMR;
+ fseg = wqe;
+ set_frmr_seg(rc_sq_wqe, fseg, reg_wr(wr));
break;
case IB_WR_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD);
+ hr_op = HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD;
+ rc_sq_wqe->rkey =
+ cpu_to_le32(atomic_wr(wr)->rkey);
+ rc_sq_wqe->va =
+ cpu_to_le64(atomic_wr(wr)->remote_addr);
break;
case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP;
break;
case IB_WR_MASKED_ATOMIC_FETCH_AND_ADD:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD);
+ hr_op =
+ HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD;
break;
default:
- roce_set_field(rc_sq_wqe->byte_4,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
- V2_RC_SEND_WQE_BYTE_4_OPCODE_S,
- HNS_ROCE_V2_WQE_OP_MASK);
+ hr_op = HNS_ROCE_V2_WQE_OP_MASK;
break;
}
- wqe += sizeof(struct hns_roce_v2_rc_send_wqe);
+ roce_set_field(rc_sq_wqe->byte_4,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_M,
+ V2_RC_SEND_WQE_BYTE_4_OPCODE_S, hr_op);
+
+ if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
+ wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
+ struct hns_roce_v2_wqe_data_seg *dseg;
+
+ dseg = wqe;
+ set_data_seg_v2(dseg, wr->sg_list);
+ wqe += sizeof(struct hns_roce_v2_wqe_data_seg);
+ set_atomic_seg(wqe, atomic_wr(wr));
+ roce_set_field(rc_sq_wqe->byte_16,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
+ V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
+ wr->num_sge);
+ } else if (wr->opcode != IB_WR_REG_MR) {
+ ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
+ wqe, &sge_ind, bad_wr);
+ if (ret)
+ goto out;
+ }
- ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe, wqe,
- &sge_ind, bad_wr);
- if (ret)
- goto out;
ind++;
} else {
dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
@@ -935,7 +995,24 @@ static int hns_roce_cmq_query_hw_info(struct hns_roce_dev *hr_dev)
resp = (struct hns_roce_query_version *)desc.data;
hr_dev->hw_rev = le32_to_cpu(resp->rocee_hw_version);
- hr_dev->vendor_id = le32_to_cpu(resp->rocee_vendor_id);
+ hr_dev->vendor_id = hr_dev->pci_dev->vendor;
+
+ return 0;
+}
+
+static int hns_roce_query_fw_ver(struct hns_roce_dev *hr_dev)
+{
+ struct hns_roce_query_fw_info *resp;
+ struct hns_roce_cmq_desc desc;
+ int ret;
+
+ hns_roce_cmq_setup_basic_desc(&desc, HNS_QUERY_FW_VER, true);
+ ret = hns_roce_cmq_send(hr_dev, &desc, 1);
+ if (ret)
+ return ret;
+
+ resp = (struct hns_roce_query_fw_info *)desc.data;
+ hr_dev->caps.fw_ver = (u64)(le32_to_cpu(resp->fw_ver));
return 0;
}
@@ -1158,6 +1235,13 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
ret = hns_roce_cmq_query_hw_info(hr_dev);
if (ret) {
+ dev_err(hr_dev->dev, "Query hardware version fail, ret = %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = hns_roce_query_fw_ver(hr_dev);
+ if (ret) {
dev_err(hr_dev->dev, "Query firmware version fail, ret = %d.\n",
ret);
return ret;
@@ -1185,14 +1269,16 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
return ret;
}
- hr_dev->vendor_part_id = 0;
- hr_dev->sys_image_guid = 0;
+
+ hr_dev->vendor_part_id = hr_dev->pci_dev->device;
+ hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
caps->num_qps = HNS_ROCE_V2_MAX_QP_NUM;
caps->max_wqes = HNS_ROCE_V2_MAX_WQE_NUM;
caps->num_cqs = HNS_ROCE_V2_MAX_CQ_NUM;
caps->max_cqes = HNS_ROCE_V2_MAX_CQE_NUM;
caps->max_sq_sg = HNS_ROCE_V2_MAX_SQ_SGE_NUM;
+ caps->max_extend_sg = HNS_ROCE_V2_MAX_EXTEND_SGE_NUM;
caps->max_rq_sg = HNS_ROCE_V2_MAX_RQ_SGE_NUM;
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
caps->num_uars = HNS_ROCE_V2_UAR_NUM;
@@ -1222,6 +1308,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->reserved_mrws = 1;
caps->reserved_uars = 0;
caps->reserved_cqs = 0;
+ caps->reserved_qps = HNS_ROCE_V2_RSV_QPS;
caps->qpc_ba_pg_sz = 0;
caps->qpc_buf_pg_sz = 0;
@@ -1255,6 +1342,11 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
HNS_ROCE_CAP_FLAG_RQ_INLINE |
HNS_ROCE_CAP_FLAG_RECORD_DB |
HNS_ROCE_CAP_FLAG_SQ_RECORD_DB;
+
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_MW |
+ HNS_ROCE_CAP_FLAG_FRMR;
+
caps->pkey_table_len[0] = 1;
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
caps->ceqe_depth = HNS_ROCE_V2_COMP_EQE_NUM;
@@ -1262,6 +1354,9 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
caps->local_ca_ack_delay = 0;
caps->max_mtu = IB_MTU_4096;
+ if (hr_dev->pci_dev->revision == 0x21)
+ caps->flags |= HNS_ROCE_CAP_FLAG_ATOMIC;
+
ret = hns_roce_v2_set_bt(hr_dev);
if (ret)
dev_err(hr_dev->dev, "Configure bt attribute fail, ret = %d.\n",
@@ -1690,10 +1785,11 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_BIND_EN_S,
(mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S, 0);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_ATOMIC_EN_S,
+ mr->access & IB_ACCESS_REMOTE_ATOMIC ? 1 : 0);
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RR_EN_S,
(mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RW_EN_S,
@@ -1817,6 +1913,88 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
return 0;
}
+static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S, 1);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mr->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mr->pd);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_RA_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_FRE_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+
+ mpt_entry->pbl_size = cpu_to_le32(mr->pbl_size);
+
+ mpt_entry->pbl_ba_l = cpu_to_le32(lower_32_bits(mr->pbl_ba >> 3));
+ roce_set_field(mpt_entry->byte_48_mode_ba, V2_MPT_BYTE_48_PBL_BA_H_M,
+ V2_MPT_BYTE_48_PBL_BA_H_S,
+ upper_32_bits(mr->pbl_ba >> 3));
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mr->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ return 0;
+}
+
+static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
+{
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_MPT_ST_M,
+ V2_MPT_BYTE_4_MPT_ST_S, V2_MPT_ST_FREE);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st, V2_MPT_BYTE_4_PD_M,
+ V2_MPT_BYTE_4_PD_S, mw->pdn);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_M,
+ V2_MPT_BYTE_4_PBL_HOP_NUM_S,
+ mw->pbl_hop_num == HNS_ROCE_HOP_NUM_0 ?
+ 0 : mw->pbl_hop_num);
+ roce_set_field(mpt_entry->byte_4_pd_hop_st,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_M,
+ V2_MPT_BYTE_4_PBL_BA_PG_SZ_S,
+ mw->pbl_ba_pg_sz + PG_SHIFT_OFFSET);
+
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
+ roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
+
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BPD_S, 1);
+ roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_BQP_S,
+ mw->ibmw.type == IB_MW_TYPE_1 ? 0 : 1);
+
+ roce_set_field(mpt_entry->byte_64_buf_pa1,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_M,
+ V2_MPT_BYTE_64_PBL_BUF_PG_SZ_S,
+ mw->pbl_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ mpt_entry->lkey = cpu_to_le32(mw->rkey);
+
+ return 0;
+}
+
static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)
{
return hns_roce_buf_offset(&hr_cq->hr_buf.hr_buf,
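
The MPT written by hns_roce_v2_mw_write_mtpt() above carries the window's rkey
in the lkey field and sets BQP only for type-2 (QP-bound) windows. A minimal
consumer-side sketch of the verbs this backs (error handling elided):

    struct ib_mw *mw;

    mw = ib_alloc_mw(pd, IB_MW_TYPE_1);  /* reaches hns_roce_alloc_mw() */
    if (IS_ERR(mw))
            return PTR_ERR(mw);

    /* mw->rkey is what a remote peer uses once the window is bound. */

    ib_dealloc_mw(mw);                   /* reaches hns_roce_dealloc_mw() */
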
@@ -2274,6 +2452,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->src_qp = (u8)roce_get_field(cqe->byte_32,
V2_CQE_BYTE_32_RMT_QPN_M,
V2_CQE_BYTE_32_RMT_QPN_S);
+ wc->slid = 0;
wc->wc_flags |= (roce_get_bit(cqe->byte_32,
V2_CQE_BYTE_32_GRH_S) ?
IB_WC_GRH : 0);
@@ -2287,7 +2466,14 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
wc->smac[5] = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_SMAC_5_M,
V2_CQE_BYTE_28_SMAC_5_S);
- wc->vlan_id = 0xffff;
+ if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
+ wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
+ V2_CQE_BYTE_28_VID_M,
+ V2_CQE_BYTE_28_VID_S);
+ } else {
+ wc->vlan_id = 0xffff;
+ }
+
wc->wc_flags |= (IB_WC_WITH_VLAN | IB_WC_WITH_SMAC);
wc->network_hdr_type = roce_get_field(cqe->byte_28,
V2_CQE_BYTE_28_PORT_TYPE_M,
@@ -2589,21 +2775,16 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_TX_ERR_S, 0);
roce_set_bit(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_RQ_RX_ERR_S, 0);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_MAPID_M,
- V2_QPC_BYTE_60_MAPID_S, 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_TEMPID_M,
+ V2_QPC_BYTE_60_TEMPID_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_INNER_MAP_IND_S, 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_RQ_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_EXT_MAP_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_RLS_IND_S,
- 0);
- roce_set_bit(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_SQ_EXT_IND_S,
- 0);
+ roce_set_field(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SCC_TOKEN_M, V2_QPC_BYTE_60_SCC_TOKEN_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_SQ_DB_DOING_S, 0);
+ roce_set_bit(qpc_mask->byte_60_qpst_tempid,
+ V2_QPC_BYTE_60_RQ_DB_DOING_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CNP_TX_FLAG_S, 0);
roce_set_bit(qpc_mask->byte_28_at_fl, V2_QPC_BYTE_28_CE_FLAG_S, 0);
@@ -2685,7 +2866,8 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_132_trrl, V2_QPC_BYTE_132_TRRL_TAIL_MAX_M,
V2_QPC_BYTE_132_TRRL_TAIL_MAX_S, 0);
- roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RSVD_RAQ_MAP_S, 0);
+ roce_set_bit(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S,
+ 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M,
V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S, 0);
roce_set_field(qpc_mask->byte_140_raq, V2_QPC_BYTE_140_RAQ_TRRL_TAIL_M,
@@ -2694,8 +2876,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_144_raq,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M,
V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S, 0);
- roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S,
- 0);
roce_set_field(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RAQ_CREDIT_M,
V2_QPC_BYTE_144_RAQ_CREDIT_S, 0);
roce_set_bit(qpc_mask->byte_144_raq, V2_QPC_BYTE_144_RESP_RTY_FLG_S, 0);
@@ -2721,14 +2901,12 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_M,
V2_QPC_BYTE_160_SQ_CONSUMER_IDX_S, 0);
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
-
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S, 0);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
V2_QPC_BYTE_168_MSG_RTY_LP_FLG_S, 0);
roce_set_bit(qpc_mask->byte_168_irrl_idx,
@@ -2746,6 +2924,9 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_MSG_RNR_FLG_S,
0);
+ roce_set_bit(context->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 1);
+ roce_set_bit(qpc_mask->byte_172_sq_psn, V2_QPC_BYTE_172_FRE_S, 0);
+
roce_set_field(qpc_mask->byte_176_msg_pktn,
V2_QPC_BYTE_176_MSG_USE_PKTN_M,
V2_QPC_BYTE_176_MSG_USE_PKTN_S, 0);
@@ -2790,6 +2971,13 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
V2_QPC_BYTE_232_IRRL_SGE_IDX_M,
V2_QPC_BYTE_232_IRRL_SGE_IDX_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_SO_LP_VLD_S,
+ 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge,
+ V2_QPC_BYTE_232_FENCE_LP_VLD_S, 0);
+ roce_set_bit(qpc_mask->byte_232_irrl_sge, V2_QPC_BYTE_232_IRRL_LP_VLD_S,
+ 0);
+
qpc_mask->irrl_cur_sge_offset = 0;
roce_set_field(qpc_mask->byte_240_irrl_tail,
@@ -2955,13 +3143,6 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_56_dqpn_err,
V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
}
- roce_set_field(context->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(qpc_mask->byte_168_irrl_idx,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
- V2_QPC_BYTE_168_SQ_SHIFT_BAK_S, 0);
}
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
@@ -3271,13 +3452,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
* we should set all bits of the relevant fields in context mask to
* 0 at the same time, else set them to 0x1.
*/
- roce_set_field(context->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, attr->retry_cnt);
- roce_set_field(qpc_mask->byte_60_qpst_mapid,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M,
- V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S, 0);
-
context->sq_cur_blk_addr = (u32)(mtts[0] >> PAGE_ADDR_SHIFT);
roce_set_field(context->byte_168_irrl_idx,
V2_QPC_BYTE_168_SQ_CUR_BLK_ADDR_M,
@@ -3538,6 +3712,17 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
memcpy(src_mac, gid_attr->ndev->dev_addr, ETH_ALEN);
}
+ if (is_vlan_dev(gid_attr->ndev)) {
+ roce_set_bit(context->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_76_srqn_op_en,
+ V2_QPC_BYTE_76_RQ_VLAN_EN_S, 0);
+ roce_set_bit(context->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 1);
+ roce_set_bit(qpc_mask->byte_168_irrl_idx,
+ V2_QPC_BYTE_168_SQ_VLAN_EN_S, 0);
+ }
+
roce_set_field(context->byte_24_mtu_tc,
V2_QPC_BYTE_24_VLAN_ID_M,
V2_QPC_BYTE_24_VLAN_ID_S, vlan);
@@ -3584,8 +3769,15 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
V2_QPC_BYTE_24_HOP_LIMIT_M,
V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
- V2_QPC_BYTE_24_TC_S, grh->traffic_class);
+ if (hr_dev->pci_dev->revision == 0x21 &&
+ gid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class >> 2);
+ else
+ roce_set_field(context->byte_24_mtu_tc,
+ V2_QPC_BYTE_24_TC_M, V2_QPC_BYTE_24_TC_S,
+ grh->traffic_class);
roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
V2_QPC_BYTE_24_TC_S, 0);
roce_set_field(context->byte_28_at_fl, V2_QPC_BYTE_28_FL_M,
@@ -3606,9 +3798,9 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask);
/* Every status migrate must change state */
- roce_set_field(context->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(context->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, new_state);
- roce_set_field(qpc_mask->byte_60_qpst_mapid, V2_QPC_BYTE_60_QP_ST_M,
+ roce_set_field(qpc_mask->byte_60_qpst_tempid, V2_QPC_BYTE_60_QP_ST_M,
V2_QPC_BYTE_60_QP_ST_S, 0);
/* SW pass context to HW */
@@ -3728,7 +3920,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
goto out;
}
- state = roce_get_field(context->byte_60_qpst_mapid,
+ state = roce_get_field(context->byte_60_qpst_tempid,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) {
@@ -3995,13 +4187,103 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
{
struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work);
+ struct device *dev = irq_work->hr_dev->dev;
u32 qpn = irq_work->qpn;
+ u32 cqn = irq_work->cqn;
switch (irq_work->event_type) {
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG:
+ dev_info(dev, "Path migrated succeeded.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
+ dev_warn(dev, "Path migration failed.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_COMM_EST:
+ dev_info(dev, "Communication established.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
+ dev_warn(dev, "Send queue drained.\n");
+ break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
+ dev_err(dev, "Local work queue catastrophic error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LWQCE_QPC_ERROR:
+ dev_err(dev, "QP %d, QPC error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_MTU_ERROR:
+ dev_err(dev, "QP %d, MTU error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE BA addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
+ dev_err(dev, "QP %d, WQE addr error.\n", qpn);
+ break;
+ case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
+ dev_err(dev, "QP %d, WQE shift error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
+ dev_err(dev, "Invalid request local work queue error.\n");
+ hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
+ dev_err(dev, "Local access violation work queue error.\n");
hns_roce_set_qps_to_err(irq_work->hr_dev, qpn);
+ switch (irq_work->sub_type) {
+ case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
+ dev_err(dev, "QP %d, R_key violation.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_LENGTH_ERROR:
+ dev_err(dev, "QP %d, length error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_VA_ERROR:
+ dev_err(dev, "QP %d, VA error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_PD_ERROR:
+ dev_err(dev, "QP %d, PD error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
+ dev_err(dev, "QP %d, rw acc error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
+ dev_err(dev, "QP %d, key state error.\n", qpn);
+ break;
+ case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
+ dev_err(dev, "QP %d, MR operation error.\n", qpn);
+ break;
+ default:
+ dev_err(dev, "Unhandled sub_event type %d.\n",
+ irq_work->sub_type);
+ break;
+ }
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
+ dev_warn(dev, "SRQ limit reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
+ dev_warn(dev, "SRQ last wqe reach.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
+ dev_err(dev, "SRQ catas error.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
+ dev_err(dev, "CQ 0x%x access err.\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
+ dev_warn(dev, "CQ 0x%x overflow\n", cqn);
+ break;
+ case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
+ dev_warn(dev, "DB overflow.\n");
+ break;
+ case HNS_ROCE_EVENT_TYPE_FLR:
+ dev_warn(dev, "Function level reset.\n");
break;
default:
break;
@@ -4011,7 +4293,8 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
}
static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq, u32 qpn)
+ struct hns_roce_eq *eq,
+ u32 qpn, u32 cqn)
{
struct hns_roce_work *irq_work;
@@ -4022,6 +4305,7 @@ static void hns_roce_v2_init_irq_work(struct hns_roce_dev *hr_dev,
INIT_WORK(&(irq_work->work), hns_roce_irq_work_handle);
irq_work->hr_dev = hr_dev;
irq_work->qpn = qpn;
+ irq_work->cqn = cqn;
irq_work->event_type = eq->event_type;
irq_work->sub_type = eq->sub_type;
queue_work(hr_dev->irq_workq, &(irq_work->work));
@@ -4058,124 +4342,6 @@ static void set_eq_cons_index_v2(struct hns_roce_eq *eq)
hns_roce_write64_k(doorbell, eq->doorbell);
}
-static void hns_roce_v2_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local work queue catastrophic error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
- int sub_type;
-
- dev_warn(dev, "Local access violation work queue error.\n");
- sub_type = roce_get_field(aeqe->asyn, HNS_ROCE_V2_AEQE_SUB_TYPE_M,
- HNS_ROCE_V2_AEQE_SUB_TYPE_S);
- switch (sub_type) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- dev_err(dev, "Unhandled sub_event type %d.\n", sub_type);
- break;
- }
-}
-
-static void hns_roce_v2_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 qpn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "Communication established.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "Send queue drained.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v2_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid request local work queue error.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v2_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type, u32 cqn)
-{
- struct device *dev = hr_dev->dev;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
static struct hns_roce_aeqe *get_aeqe_v2(struct hns_roce_eq *eq, u32 entry)
{
u32 buf_chk_sz;
@@ -4251,31 +4417,23 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
switch (event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "Path migrated succeeded.\n");
- break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "Path migration failed.\n");
- break;
case HNS_ROCE_EVENT_TYPE_COMM_EST:
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v2_qp_err_handle(hr_dev, aeqe, event_type,
- qpn);
+ hns_roce_qp_event(hr_dev, qpn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- dev_warn(dev, "SRQ not support.\n");
break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- hns_roce_v2_cq_err_handle(hr_dev, aeqe, event_type,
- cqn);
+ hns_roce_cq_event(hr_dev, cqn, event_type);
break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- dev_warn(dev, "DB overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_MB:
hns_roce_cmd_event(hr_dev,
@@ -4284,10 +4442,8 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
le64_to_cpu(aeqe->event.cmd.out_param));
break;
case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
- dev_warn(dev, "CEQ overflow.\n");
break;
case HNS_ROCE_EVENT_TYPE_FLR:
- dev_warn(dev, "Function level reset.\n");
break;
default:
dev_err(dev, "Unhandled event %d on EQ %d at idx %u.\n",
@@ -4304,7 +4460,7 @@ static int hns_roce_v2_aeq_int(struct hns_roce_dev *hr_dev,
dev_warn(dev, "cons_index overflow, set back to 0.\n");
eq->cons_index = 0;
}
- hns_roce_v2_init_irq_work(hr_dev, eq, qpn);
+ hns_roce_v2_init_irq_work(hr_dev, eq, qpn, cqn);
}
set_eq_cons_index_v2(eq);
@@ -5125,6 +5281,7 @@ static int hns_roce_v2_init_eq_table(struct hns_roce_dev *hr_dev)
create_singlethread_workqueue("hns_roce_irq_workqueue");
if (!hr_dev->irq_workq) {
dev_err(dev, "Create irq workqueue failed!\n");
+ ret = -ENOMEM;
goto err_request_irq_fail;
}
@@ -5195,6 +5352,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.set_mac = hns_roce_v2_set_mac,
.write_mtpt = hns_roce_v2_write_mtpt,
.rereg_write_mtpt = hns_roce_v2_rereg_write_mtpt,
+ .frmr_write_mtpt = hns_roce_v2_frmr_write_mtpt,
+ .mw_write_mtpt = hns_roce_v2_mw_write_mtpt,
.write_cqc = hns_roce_v2_write_cqc,
.set_hem = hns_roce_v2_set_hem,
.clear_hem = hns_roce_v2_clear_hem,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 14aa308befef..8bc820635bbd 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -50,6 +50,7 @@
#define HNS_ROCE_V2_MAX_CQE_NUM 0x10000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 0x100
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 0xff
+#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
#define HNS_ROCE_V2_MAX_SQ_INLINE 0x20
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
@@ -78,6 +79,7 @@
#define HNS_ROCE_INVALID_LKEY 0x100
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
+#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_CONTEXT_HOP_NUM 1
#define HNS_ROCE_MTT_HOP_NUM 1
@@ -201,6 +203,7 @@ enum {
/* CMQ command */
enum hns_roce_opcode_type {
+ HNS_QUERY_FW_VER = 0x0001,
HNS_ROCE_OPC_QUERY_HW_VER = 0x8000,
HNS_ROCE_OPC_CFG_GLOBAL_PARAM = 0x8001,
HNS_ROCE_OPC_ALLOC_PF_RES = 0x8004,
@@ -324,6 +327,7 @@ struct hns_roce_v2_cq_context {
enum{
V2_MPT_ST_VALID = 0x1,
+ V2_MPT_ST_FREE = 0x2,
};
enum hns_roce_v2_qp_state {
@@ -350,7 +354,7 @@ struct hns_roce_v2_qp_context {
__le32 dmac;
__le32 byte_52_udpspn_dmac;
__le32 byte_56_dqpn_err;
- __le32 byte_60_qpst_mapid;
+ __le32 byte_60_qpst_tempid;
__le32 qkey_xrcd;
__le32 byte_68_rq_db;
__le32 rq_db_record_addr;
@@ -492,26 +496,15 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_56_LP_PKTN_INI_S 28
#define V2_QPC_BYTE_56_LP_PKTN_INI_M GENMASK(31, 28)
-#define V2_QPC_BYTE_60_MAPID_S 0
-#define V2_QPC_BYTE_60_MAPID_M GENMASK(12, 0)
+#define V2_QPC_BYTE_60_TEMPID_S 0
+#define V2_QPC_BYTE_60_TEMPID_M GENMASK(7, 0)
-#define V2_QPC_BYTE_60_INNER_MAP_IND_S 13
+#define V2_QPC_BYTE_60_SCC_TOKEN_S 8
+#define V2_QPC_BYTE_60_SCC_TOKEN_M GENMASK(26, 8)
-#define V2_QPC_BYTE_60_SQ_MAP_IND_S 14
+#define V2_QPC_BYTE_60_SQ_DB_DOING_S 27
-#define V2_QPC_BYTE_60_RQ_MAP_IND_S 15
-
-#define V2_QPC_BYTE_60_TEMPID_S 16
-#define V2_QPC_BYTE_60_TEMPID_M GENMASK(22, 16)
-
-#define V2_QPC_BYTE_60_EXT_MAP_IND_S 23
-
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_S 24
-#define V2_QPC_BYTE_60_RTY_NUM_INI_BAK_M GENMASK(26, 24)
-
-#define V2_QPC_BYTE_60_SQ_RLS_IND_S 27
-
-#define V2_QPC_BYTE_60_SQ_EXT_IND_S 28
+#define V2_QPC_BYTE_60_RQ_DB_DOING_S 28
#define V2_QPC_BYTE_60_QP_ST_S 29
#define V2_QPC_BYTE_60_QP_ST_M GENMASK(31, 29)
@@ -534,6 +527,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_76_RQIE_S 28
+#define V2_QPC_BYTE_76_RQ_VLAN_EN_S 30
#define V2_QPC_BYTE_80_RX_CQN_S 0
#define V2_QPC_BYTE_80_RX_CQN_M GENMASK(23, 0)
@@ -588,7 +582,7 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_140_RR_MAX_S 12
#define V2_QPC_BYTE_140_RR_MAX_M GENMASK(14, 12)
-#define V2_QPC_BYTE_140_RSVD_RAQ_MAP_S 15
+#define V2_QPC_BYTE_140_RQ_RTY_WAIT_DO_S 15
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_S 16
#define V2_QPC_BYTE_140_RAQ_TRRL_HEAD_M GENMASK(23, 16)
@@ -599,8 +593,6 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_S 0
#define V2_QPC_BYTE_144_RAQ_RTY_INI_PSN_M GENMASK(23, 0)
-#define V2_QPC_BYTE_144_RAQ_RTY_INI_IND_S 24
-
#define V2_QPC_BYTE_144_RAQ_CREDIT_S 25
#define V2_QPC_BYTE_144_RAQ_CREDIT_M GENMASK(29, 25)
@@ -637,9 +629,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_168_LP_SGEN_INI_S 22
#define V2_QPC_BYTE_168_LP_SGEN_INI_M GENMASK(23, 22)
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_S 24
-#define V2_QPC_BYTE_168_SQ_SHIFT_BAK_M GENMASK(27, 24)
-
+#define V2_QPC_BYTE_168_SQ_VLAN_EN_S 24
+#define V2_QPC_BYTE_168_POLL_DB_WAIT_DO_S 25
+#define V2_QPC_BYTE_168_SCC_TOKEN_FORBID_SQ_DEQ_S 26
+#define V2_QPC_BYTE_168_WAIT_ACK_TIMEOUT_S 27
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_S 28
#define V2_QPC_BYTE_168_IRRL_IDX_LSB_M GENMASK(31, 28)
@@ -725,6 +718,10 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_S 20
#define V2_QPC_BYTE_232_IRRL_SGE_IDX_M GENMASK(28, 20)
+#define V2_QPC_BYTE_232_SO_LP_VLD_S 29
+#define V2_QPC_BYTE_232_FENCE_LP_VLD_S 30
+#define V2_QPC_BYTE_232_IRRL_LP_VLD_S 31
+
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_S 0
#define V2_QPC_BYTE_240_IRRL_TAIL_REAL_M GENMASK(7, 0)
@@ -743,6 +740,9 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_244_RNR_CNT_S 27
#define V2_QPC_BYTE_244_RNR_CNT_M GENMASK(29, 27)
+#define V2_QPC_BYTE_244_LCL_OP_FLG_S 30
+#define V2_QPC_BYTE_244_IRRL_RD_FLG_S 31
+
#define V2_QPC_BYTE_248_IRRL_PSN_S 0
#define V2_QPC_BYTE_248_IRRL_PSN_M GENMASK(23, 0)
@@ -818,6 +818,11 @@ struct hns_roce_v2_cqe {
#define V2_CQE_BYTE_28_PORT_TYPE_S 16
#define V2_CQE_BYTE_28_PORT_TYPE_M GENMASK(17, 16)
+#define V2_CQE_BYTE_28_VID_S 18
+#define V2_CQE_BYTE_28_VID_M GENMASK(29, 18)
+
+#define V2_CQE_BYTE_28_VID_VLD_S 30
+
#define V2_CQE_BYTE_32_RMT_QPN_S 0
#define V2_CQE_BYTE_32_RMT_QPN_M GENMASK(23, 0)
@@ -878,8 +883,19 @@ struct hns_roce_v2_mpt_entry {
#define V2_MPT_BYTE_8_LW_EN_S 7
+#define V2_MPT_BYTE_8_MW_CNT_S 8
+#define V2_MPT_BYTE_8_MW_CNT_M GENMASK(31, 8)
+
+#define V2_MPT_BYTE_12_FRE_S 0
+
#define V2_MPT_BYTE_12_PA_S 1
+#define V2_MPT_BYTE_12_MR_MW_S 4
+
+#define V2_MPT_BYTE_12_BPD_S 5
+
+#define V2_MPT_BYTE_12_BQP_S 6
+
#define V2_MPT_BYTE_12_INNER_PA_VLD_S 7
#define V2_MPT_BYTE_12_MW_BIND_QPN_S 8
@@ -988,6 +1004,8 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_40_PORTN_S 24
#define V2_UD_SEND_WQE_BYTE_40_PORTN_M GENMASK(26, 24)
+#define V2_UD_SEND_WQE_BYTE_40_UD_VLAN_EN_S 30
+
#define V2_UD_SEND_WQE_BYTE_40_LBI_S 31
#define V2_UD_SEND_WQE_DMAC_0_S 0
@@ -1042,6 +1060,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_4_INLINE_S 12
+#define V2_RC_FRMR_WQE_BYTE_4_BIND_EN_S 19
+
+#define V2_RC_FRMR_WQE_BYTE_4_ATOMIC_S 20
+
+#define V2_RC_FRMR_WQE_BYTE_4_RR_S 21
+
+#define V2_RC_FRMR_WQE_BYTE_4_RW_S 22
+
+#define V2_RC_FRMR_WQE_BYTE_4_LW_S 23
+
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_S 0
#define V2_RC_SEND_WQE_BYTE_16_XRC_SRQN_M GENMASK(23, 0)
@@ -1051,6 +1079,16 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S 0
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
+struct hns_roce_wqe_frmr_seg {
+ __le32 pbl_size;
+ __le32 mode_buf_pg_sz;
+};
+
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_S 4
+#define V2_RC_FRMR_WQE_BYTE_40_PBL_BUF_PG_SZ_M GENMASK(7, 4)
+
+#define V2_RC_FRMR_WQE_BYTE_40_BLK_MODE_S 8
+
struct hns_roce_v2_wqe_data_seg {
__le32 len;
__le32 lkey;
@@ -1068,6 +1106,11 @@ struct hns_roce_query_version {
__le32 rsv[5];
};
+struct hns_roce_query_fw_info {
+ __le32 fw_ver;
+ __le32 rsv[5];
+};
+
struct hns_roce_cfg_llm_a {
__le32 base_addr_l;
__le32 base_addr_h;
@@ -1564,4 +1607,9 @@ struct hns_roce_eq_context {
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_S 0
#define HNS_ROCE_V2_AEQE_EVENT_QUEUE_NUM_M GENMASK(23, 0)
+struct hns_roce_wqe_atomic_seg {
+ __le64 fetchadd_swap_data;
+ __le64 cmp_data;
+};
+
#endif
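
A sketch of the RC atomic WQE layout these definitions imply (inferred from
the structures; illustrative only):

    /*
     * base WQE, then exactly one data segment, then the operand segment:
     *
     *   struct hns_roce_v2_rc_send_wqe  base;   opcode, rkey, va
     *   struct hns_roce_v2_wqe_data_seg dseg;   local buffer for the result
     *   struct hns_roce_wqe_atomic_seg  aseg;   fetchadd/swap and cmp operands
     */
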
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c5cae9a38c04..1b3ee514f2ef 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -196,6 +196,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
+ props->fw_ver = hr_dev->caps.fw_ver;
props->sys_image_guid = cpu_to_be64(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
@@ -215,7 +216,8 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
props->max_pd = hr_dev->caps.num_pds;
props->max_qp_rd_atom = hr_dev->caps.max_qp_dest_rdma;
props->max_qp_init_rd_atom = hr_dev->caps.max_qp_init_rdma;
- props->atomic_cap = IB_ATOMIC_NONE;
+ props->atomic_cap = hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_ATOMIC ?
+ IB_ATOMIC_HCA : IB_ATOMIC_NONE;
props->max_pkeys = 1;
props->local_ca_ack_delay = hr_dev->caps.local_ca_ack_delay;
@@ -344,8 +346,6 @@ static struct ib_ucontext *hns_roce_alloc_ucontext(struct ib_device *ib_dev,
if (ret)
goto error_fail_uar_alloc;
- INIT_LIST_HEAD(&context->vma_list);
- mutex_init(&context->vma_list_mutex);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
mutex_init(&context->page_mutex);
@@ -376,76 +376,34 @@ static int hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void hns_roce_vma_open(struct vm_area_struct *vma)
-{
- vma->vm_ops = NULL;
-}
-
-static void hns_roce_vma_close(struct vm_area_struct *vma)
-{
- struct hns_roce_vma_data *vma_data;
-
- vma_data = (struct hns_roce_vma_data *)vma->vm_private_data;
- vma_data->vma = NULL;
- mutex_lock(vma_data->vma_list_mutex);
- list_del(&vma_data->list);
- mutex_unlock(vma_data->vma_list_mutex);
- kfree(vma_data);
-}
-
-static const struct vm_operations_struct hns_roce_vm_ops = {
- .open = hns_roce_vma_open,
- .close = hns_roce_vma_close,
-};
-
-static int hns_roce_set_vma_data(struct vm_area_struct *vma,
- struct hns_roce_ucontext *context)
-{
- struct list_head *vma_head = &context->vma_list;
- struct hns_roce_vma_data *vma_data;
-
- vma_data = kzalloc(sizeof(*vma_data), GFP_KERNEL);
- if (!vma_data)
- return -ENOMEM;
-
- vma_data->vma = vma;
- vma_data->vma_list_mutex = &context->vma_list_mutex;
- vma->vm_private_data = vma_data;
- vma->vm_ops = &hns_roce_vm_ops;
-
- mutex_lock(&context->vma_list_mutex);
- list_add(&vma_data->list, vma_head);
- mutex_unlock(&context->vma_list_mutex);
-
- return 0;
-}
-
static int hns_roce_mmap(struct ib_ucontext *context,
struct vm_area_struct *vma)
{
struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
- if (((vma->vm_end - vma->vm_start) % PAGE_SIZE) != 0)
- return -EINVAL;
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_hr_ucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
+
+ /* vm_pgoff: 1 -- TPTR */
+ case 1:
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return -EINVAL;
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ return rdma_user_mmap_io(context, vma,
+ hr_dev->tptr_dma_addr >> PAGE_SHIFT,
+ hr_dev->tptr_size,
+ vma->vm_page_prot);
- if (vma->vm_pgoff == 0) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
- } else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
- hr_dev->tptr_size) {
- /* vm_pgoff: 1 -- TPTR */
- if (io_remap_pfn_range(vma, vma->vm_start,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot))
- return -EAGAIN;
- } else
+ default:
return -EINVAL;
-
- return hns_roce_set_vma_data(vma, to_hr_ucontext(context));
+ }
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
@@ -471,21 +429,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
static void hns_roce_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
- struct hns_roce_vma_data *vma_data, *n;
- struct vm_area_struct *vma;
-
- mutex_lock(&context->vma_list_mutex);
- list_for_each_entry_safe(vma_data, n, &context->vma_list, list) {
- vma = vma_data->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
-
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_data->list);
- kfree(vma_data);
- }
- mutex_unlock(&context->vma_list_mutex);
}
static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
@@ -508,7 +451,6 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
spin_lock_init(&iboe->lock);
ib_dev = &hr_dev->ib_dev;
- strlcpy(ib_dev->name, "hns_%d", IB_DEVICE_NAME_MAX);
ib_dev->owner = THIS_MODULE;
ib_dev->node_type = RDMA_NODE_IB_CA;
@@ -584,12 +526,27 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
}
+ /* MW */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
+ ib_dev->alloc_mw = hns_roce_alloc_mw;
+ ib_dev->dealloc_mw = hns_roce_dealloc_mw;
+ ib_dev->uverbs_cmd_mask |=
+ (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
+ }
+
+ /* FRMR */
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) {
+ ib_dev->alloc_mr = hns_roce_alloc_mr;
+ ib_dev->map_mr_sg = hns_roce_map_mr_sg;
+ }
+
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS;
- ret = ib_register_device(ib_dev, NULL);
+ ret = ib_register_device(ib_dev, "hns_%d", NULL);
if (ret) {
dev_err(dev, "ib_register_device failed!\n");
return ret;
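
With rdma_user_mmap_io() the core tracks each io mapping per ucontext and zaps
it itself on device disassociation, which is why the driver's vma list, vm_ops
and the body of disassociate_ucontext are removed above. The userspace side is
unchanged; a hedged sketch (cmd_fd is the uverbs command fd; the offsets select
vm_pgoff 0 and 1):

    #include <sys/mman.h>

    /* page 0: UAR doorbell page; page 1: TPTR (illustrative). */
    void *uar  = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, cmd_fd, 0);
    void *tptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, cmd_fd, 1 * page_size);
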
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index eb26a5f6fc58..521ad2aa3a4e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -329,7 +329,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
u64 bt_idx;
u64 size;
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR ? 1 : hr_dev->caps.pbl_hop_num);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);
@@ -351,7 +351,7 @@ static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
- mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mr->pbl_hop_num = mhop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
@@ -511,7 +511,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->key = hw_index_to_key(index); /* MR key */
if (size == ~0ull) {
- mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */
@@ -522,7 +521,6 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0;
} else {
- mr->type = MR_TYPE_MR;
if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
@@ -548,9 +546,9 @@ static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
u32 mhop_num;
u64 bt_idx;
- npages = ib_umem_page_count(mr->umem);
+ npages = mr->pbl_size;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
- mhop_num = hr_dev->caps.pbl_hop_num;
+ mhop_num = (mr->type == MR_TYPE_FRMR) ? 1 : hr_dev->caps.pbl_hop_num;
if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;
@@ -636,7 +634,8 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
}
if (mr->size != ~0ULL) {
- npages = ib_umem_page_count(mr->umem);
+ if (mr->type == MR_TYPE_MR)
+ npages = ib_umem_page_count(mr->umem);
if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8),
@@ -674,7 +673,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
goto err_table;
}
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ if (mr->type != MR_TYPE_FRMR)
+ ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ else
+ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
@@ -855,6 +857,8 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
if (mr == NULL)
return ERR_PTR(-ENOMEM);
+ mr->type = MR_TYPE_DMA;
+
/* Allocate memory region key */
ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
~0ULL, acc, 0, mr);
@@ -1031,6 +1035,8 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
}
}
+ mr->type = MR_TYPE_MR;
+
ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
access_flags, n, mr);
if (ret)
@@ -1201,3 +1207,193 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr)
return ret;
}
+
+struct ib_mr *hns_roce_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
+ u32 max_num_sg)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
+ struct device *dev = hr_dev->dev;
+ struct hns_roce_mr *mr;
+ u64 length;
+ u32 page_size;
+ int ret;
+
+ page_size = 1 << (hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT);
+ length = max_num_sg * page_size;
+
+ if (mr_type != IB_MR_TYPE_MEM_REG)
+ return ERR_PTR(-EINVAL);
+
+ if (max_num_sg > HNS_ROCE_FRMR_MAX_PA) {
+ dev_err(dev, "max_num_sg larger than %d\n",
+ HNS_ROCE_FRMR_MAX_PA);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ mr->type = MR_TYPE_FRMR;
+
+ /* Allocate memory region key */
+ ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, 0, length,
+ 0, max_num_sg, mr);
+ if (ret)
+ goto err_free;
+
+ ret = hns_roce_mr_enable(hr_dev, mr);
+ if (ret)
+ goto err_mr;
+
+ mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
+ mr->umem = NULL;
+
+ return &mr->ibmr;
+
+err_mr:
+ hns_roce_mr_free(to_hr_dev(pd->device), mr);
+
+err_free:
+ kfree(mr);
+ return ERR_PTR(ret);
+}
+
+static int hns_roce_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->pbl_buf[mr->npages++] = cpu_to_le64(addr);
+
+ return 0;
+}
+
+int hns_roce_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
+ unsigned int *sg_offset)
+{
+ struct hns_roce_mr *mr = to_hr_mr(ibmr);
+
+ mr->npages = 0;
+
+ return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, hns_roce_set_page);
+}
+
+static void hns_roce_mw_free(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct device *dev = hr_dev->dev;
+ int ret;
+
+ if (mw->enabled) {
+ ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mw->rkey)
+ & (hr_dev->caps.num_mtpts - 1));
+ if (ret)
+ dev_warn(dev, "MW HW2SW_MPT failed (%d)\n", ret);
+
+ hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
+ key_to_hw_index(mw->rkey));
+ }
+
+ hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
+ key_to_hw_index(mw->rkey), BITMAP_NO_RR);
+}
+
+static int hns_roce_mw_enable(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mw *mw)
+{
+ struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
+ struct hns_roce_cmd_mailbox *mailbox;
+ struct device *dev = hr_dev->dev;
+ unsigned long mtpt_idx = key_to_hw_index(mw->rkey);
+ int ret;
+
+ /* prepare HEM entry memory */
+ ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+ if (ret)
+ return ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox)) {
+ ret = PTR_ERR(mailbox);
+ goto err_table;
+ }
+
+ ret = hr_dev->hw->mw_write_mtpt(mailbox->buf, mw);
+ if (ret) {
+ dev_err(dev, "MW write mtpt fail!\n");
+ goto err_page;
+ }
+
+ ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
+ mtpt_idx & (hr_dev->caps.num_mtpts - 1));
+ if (ret) {
+ dev_err(dev, "MW sw2hw_mpt failed (%d)\n", ret);
+ goto err_page;
+ }
+
+ mw->enabled = 1;
+
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+ return 0;
+
+err_page:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+err_table:
+ hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
+
+ return ret;
+}
+
+struct ib_mw *hns_roce_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ib_pd->device);
+ struct hns_roce_mw *mw;
+ unsigned long index = 0;
+ int ret;
+
+	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw)
+ return ERR_PTR(-ENOMEM);
+
+ /* Allocate a key for mw from bitmap */
+ ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
+ if (ret)
+ goto err_bitmap;
+
+ mw->rkey = hw_index_to_key(index);
+
+ mw->ibmw.rkey = mw->rkey;
+ mw->ibmw.type = type;
+ mw->pdn = to_hr_pd(ib_pd)->pdn;
+ mw->pbl_hop_num = hr_dev->caps.pbl_hop_num;
+ mw->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
+ mw->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
+
+ ret = hns_roce_mw_enable(hr_dev, mw);
+ if (ret)
+ goto err_mw;
+
+ return &mw->ibmw;
+
+err_mw:
+ hns_roce_mw_free(hr_dev, mw);
+
+err_bitmap:
+ kfree(mw);
+
+ return ERR_PTR(ret);
+}
+
+int hns_roce_dealloc_mw(struct ib_mw *ibmw)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibmw->device);
+ struct hns_roce_mw *mw = to_hr_mw(ibmw);
+
+ hns_roce_mw_free(hr_dev, mw);
+ kfree(mw);
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index efb7e961ca65..5ebf481a39d9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/pci.h>
#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
@@ -343,6 +344,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
{
u32 roundup_sq_stride = roundup_pow_of_two(hr_dev->caps.max_sq_desc_sz);
u8 max_sq_stride = ilog2(roundup_sq_stride);
+ u32 ex_sge_num;
u32 page_size;
u32 max_cnt;
@@ -372,7 +374,18 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
if (hr_qp->sq.max_gs > 2)
hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
(hr_qp->sq.max_gs - 2));
+
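+	/* this hardware revision caps the extended SGE space per QP */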
+ if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+			dev_err(hr_dev->dev,
+				"extended SGE count is too large, sge_cnt = %d\n",
+				hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
hr_qp->sge.sge_shift = 4;
+ ex_sge_num = hr_qp->sge.sge_cnt;
	/* Get buf size, SQ and RQ are aligned to page_size */
if (hr_dev->caps.max_sq_sg <= 2) {
@@ -386,6 +399,8 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), PAGE_SIZE);
} else {
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
+ hr_qp->sge.sge_cnt =
+ max(page_size / (1 << hr_qp->sge.sge_shift), ex_sge_num);
hr_qp->buff_size = HNS_ROCE_ALOGN_UP((hr_qp->rq.wqe_cnt <<
hr_qp->rq.wqe_shift), page_size) +
HNS_ROCE_ALOGN_UP((hr_qp->sge.sge_cnt <<
@@ -394,7 +409,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sq.wqe_shift), page_size);
hr_qp->sq.offset = 0;
- if (hr_qp->sge.sge_cnt) {
+ if (ex_sge_num) {
hr_qp->sge.offset = HNS_ROCE_ALOGN_UP(
(hr_qp->sq.wqe_cnt <<
hr_qp->sq.wqe_shift),
@@ -465,6 +480,14 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
hr_qp->sge.sge_shift = 4;
}
+ if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+ if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
+ dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
+ hr_qp->sge.sge_cnt);
+ return -EINVAL;
+ }
+ }
+
/* Get buf size, SQ and RQ are aligned to PAGE_SIZE */
page_size = 1 << (hr_dev->caps.mtt_buf_pg_sz + PAGE_SHIFT);
hr_qp->sq.offset = 0;
@@ -472,6 +495,8 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
page_size);
if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+		hr_qp->sge.sge_cnt = max(page_size / (1 << hr_qp->sge.sge_shift),
+					 (u32)hr_qp->sge.sge_cnt);
hr_qp->sge.offset = size;
size += HNS_ROCE_ALOGN_UP(hr_qp->sge.sge_cnt <<
hr_qp->sge.sge_shift, page_size);
@@ -952,8 +977,8 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
}
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
dev_err(dev, "ib_modify_qp_is_ok failed\n");
goto out;
}
@@ -1106,14 +1131,20 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
int reserved_from_top = 0;
+ int reserved_from_bot;
int ret;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&hr_dev->qp_table_tree, GFP_ATOMIC);
- /* A port include two SQP, six port total 12 */
+	/* In hw v1, each port includes two SQPs, so six ports reserve 12 in total */
+ if (hr_dev->caps.max_sq_sg <= 2)
+ reserved_from_bot = SQP_NUM;
+ else
+ reserved_from_bot = hr_dev->caps.reserved_qps;
+
ret = hns_roce_bitmap_init(&qp_table->bitmap, hr_dev->caps.num_qps,
- hr_dev->caps.num_qps - 1, SQP_NUM,
+ hr_dev->caps.num_qps - 1, reserved_from_bot,
reserved_from_top);
if (ret) {
dev_err(hr_dev->dev, "qp bitmap init failed!error=%d\n",
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 423818a7d333..771eb6bd0785 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1689,7 +1689,7 @@ static enum i40iw_status_code i40iw_add_mqh_6(struct i40iw_device *iwdev,
unsigned long flags;
rtnl_lock();
- for_each_netdev_rcu(&init_net, ip_dev) {
+ for_each_netdev(&init_net, ip_dev) {
if ((((rdma_vlan_dev_vlan_id(ip_dev) < I40IW_NO_VLAN) &&
(rdma_vlan_dev_real_dev(ip_dev) == iwdev->netdev)) ||
(ip_dev == iwdev->netdev)) && (ip_dev->flags & IFF_UP)) {
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index e2e6c74a7452..102875872bea 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -2135,10 +2135,10 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr)
}
/**
- * i40iw_show_rev
+ * hw_rev_show
*/
-static ssize_t i40iw_show_rev(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct i40iw_ib_device *iwibdev = container_of(dev,
struct i40iw_ib_device,
@@ -2147,34 +2147,37 @@ static ssize_t i40iw_show_rev(struct device *dev,
return sprintf(buf, "%x\n", hw_rev);
}
+static DEVICE_ATTR_RO(hw_rev);
/**
- * i40iw_show_hca
+ * hca_type_show
*/
-static ssize_t i40iw_show_hca(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "I40IW\n");
}
+static DEVICE_ATTR_RO(hca_type);
/**
- * i40iw_show_board
+ * board_id_show
*/
-static ssize_t i40iw_show_board(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%.*s\n", 32, "I40IW Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, i40iw_show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, i40iw_show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, i40iw_show_board, NULL);
+static struct attribute *i40iw_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *i40iw_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group i40iw_attr_group = {
+ .attrs = i40iw_dev_attributes,
};
/**
@@ -2752,7 +2755,6 @@ static struct i40iw_ib_device *i40iw_init_rdma_device(struct i40iw_device *iwdev
i40iw_pr_err("iwdev == NULL\n");
return NULL;
}
- strlcpy(iwibdev->ibdev.name, "i40iw%d", IB_DEVICE_NAME_MAX);
iwibdev->ibdev.owner = THIS_MODULE;
iwdev->iwibdev = iwibdev;
iwibdev->iwdev = iwdev;
@@ -2851,20 +2853,6 @@ void i40iw_port_ibevent(struct i40iw_device *iwdev)
}
/**
- * i40iw_unregister_rdma_device - unregister of iwarp from IB
- * @iwibdev: rdma device ptr
- */
-static void i40iw_unregister_rdma_device(struct i40iw_ib_device *iwibdev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i)
- device_remove_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- ib_unregister_device(&iwibdev->ibdev);
-}
-
-/**
* i40iw_destroy_rdma_device - destroy rdma device and free resources
* @iwibdev: IB device ptr
*/
@@ -2873,7 +2861,7 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
if (!iwibdev)
return;
- i40iw_unregister_rdma_device(iwibdev);
+ ib_unregister_device(&iwibdev->ibdev);
kfree(iwibdev->ibdev.iwcm);
iwibdev->ibdev.iwcm = NULL;
wait_event_timeout(iwibdev->iwdev->close_wq,
@@ -2888,32 +2876,19 @@ void i40iw_destroy_rdma_device(struct i40iw_ib_device *iwibdev)
*/
int i40iw_register_rdma_device(struct i40iw_device *iwdev)
{
- int i, ret;
+ int ret;
struct i40iw_ib_device *iwibdev;
iwdev->iwibdev = i40iw_init_rdma_device(iwdev);
if (!iwdev->iwibdev)
return -ENOMEM;
iwibdev = iwdev->iwibdev;
-
+ rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
iwibdev->ibdev.driver_id = RDMA_DRIVER_I40IW;
- ret = ib_register_device(&iwibdev->ibdev, NULL);
+ ret = ib_register_device(&iwibdev->ibdev, "i40iw%d", NULL);
if (ret)
goto error;
- for (i = 0; i < ARRAY_SIZE(i40iw_dev_attributes); ++i) {
- ret =
- device_create_file(&iwibdev->ibdev.dev,
- i40iw_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&iwibdev->ibdev.dev, i40iw_dev_attributes[i]);
- }
- ib_unregister_device(&iwibdev->ibdev);
- goto error;
- }
- }
return 0;
error:
kfree(iwdev->iwibdev->ibdev.iwcm);
diff --git a/drivers/infiniband/hw/mlx4/Kconfig b/drivers/infiniband/hw/mlx4/Kconfig
index db4aa13ebae0..d1de3285fd88 100644
--- a/drivers/infiniband/hw/mlx4/Kconfig
+++ b/drivers/infiniband/hw/mlx4/Kconfig
@@ -1,6 +1,7 @@
config MLX4_INFINIBAND
tristate "Mellanox ConnectX HCA support"
depends on NETDEVICES && ETHERNET && PCI && INET
+ depends on INFINIBAND_USER_ACCESS || !INFINIBAND_USER_ACCESS
depends on MAY_USE_DEVLINK
select NET_VENDOR_MELLANOX
select MLX4_CORE
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index e5466d786bb1..8942f5f7f04d 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -807,15 +807,17 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
int err;
struct ib_port_attr pattr;
- if (in_wc && in_wc->qp->qp_num) {
- pr_debug("received MAD: slid:%d sqpn:%d "
- "dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
- in_wc->slid, in_wc->src_qp,
- in_wc->dlid_path_bits,
- in_wc->qp->qp_num,
- in_wc->wc_flags,
- in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
- be16_to_cpu(in_mad->mad_hdr.attr_id));
+ if (in_wc && in_wc->qp) {
+ pr_debug("received MAD: port:%d slid:%d sqpn:%d "
+ "dlid_bits:%d dqpn:%d wc_flags:0x%x tid:%016llx cls:%x mtd:%x atr:%x\n",
+ port_num,
+ in_wc->slid, in_wc->src_qp,
+ in_wc->dlid_path_bits,
+ in_wc->qp->qp_num,
+ in_wc->wc_flags,
+ be64_to_cpu(in_mad->mad_hdr.tid),
+ in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
+ be16_to_cpu(in_mad->mad_hdr.attr_id));
if (in_wc->wc_flags & IB_WC_GRH) {
pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
be64_to_cpu(in_grh->sgid.global.subnet_prefix),
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0bbeaaae47e0..0def2323459c 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1140,144 +1140,50 @@ static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
return 0;
}
-static void mlx4_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA.
- * This is done through either mremap flow or split_vma (usually due
- * to mlock, madvise, munmap, etc.). We do not support a clone of the
- * vma, as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original vma size is exactly a single page that there will be no
- * "splitting" operations on.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx4_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx4_ib_vma_private_data *mlx4_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx4_ib_dealloc_ucontext) However need a sync
- * with accessing the vma as part of mlx4_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting. The exiting case is handled explicitly as part
- * of mlx4_ib_disassociate_ucontext.
- */
- mlx4_ib_vma_priv_data = (struct mlx4_ib_vma_private_data *)
- area->vm_private_data;
-
- /* set the vma context pointer to null in the mlx4_ib driver's private
- * data to protect against a race condition in mlx4_ib_dissassociate_ucontext().
- */
- mlx4_ib_vma_priv_data->vma = NULL;
-}
-
-static const struct vm_operations_struct mlx4_ib_vm_ops = {
- .open = mlx4_ib_vma_open,
- .close = mlx4_ib_vma_close
-};
-
static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- int i;
- struct vm_area_struct *vma;
- struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);
-
- /* need to protect from a race on closing the vma as part of
- * mlx4_ib_vma_close().
- */
- for (i = 0; i < HW_BAR_COUNT; i++) {
- vma = context->hw_bar_info[i].vma;
- if (!vma)
- continue;
-
- zap_vma_ptes(context->hw_bar_info[i].vma,
- context->hw_bar_info[i].vma->vm_start, PAGE_SIZE);
-
- context->hw_bar_info[i].vma->vm_flags &=
- ~(VM_SHARED | VM_MAYSHARE);
- /* context going to be destroyed, should not access ops any more */
- context->hw_bar_info[i].vma->vm_ops = NULL;
- }
-}
-
-static void mlx4_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx4_ib_vma_private_data *vma_private_data)
-{
- vma_private_data->vma = vma;
- vma->vm_private_data = vma_private_data;
- vma->vm_ops = &mlx4_ib_vm_ops;
}
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct mlx4_ib_dev *dev = to_mdev(context->device);
- struct mlx4_ib_ucontext *mucontext = to_mucontext(context);
- if (vma->vm_end - vma->vm_start != PAGE_SIZE)
- return -EINVAL;
-
- if (vma->vm_pgoff == 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_DB].vma)
- return -EINVAL;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_DB]);
+ switch (vma->vm_pgoff) {
+ case 0:
+ return rdma_user_mmap_io(context, vma,
+ to_mucontext(context)->uar.pfn,
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
- } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_BF].vma)
+ case 1:
+ if (dev->dev->caps.bf_reg_size == 0)
return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ to_mucontext(context)->uar.pfn +
+ dev->dev->caps.num_uars,
+ PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot));
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- if (io_remap_pfn_range(vma, vma->vm_start,
- to_mucontext(context)->uar.pfn +
- dev->dev->caps.num_uars,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma, &mucontext->hw_bar_info[HW_BAR_BF]);
-
- } else if (vma->vm_pgoff == 3) {
+ case 3: {
struct mlx4_clock_params params;
int ret;
- /* We prevent double mmaping on same context */
- if (mucontext->hw_bar_info[HW_BAR_CLOCK].vma)
- return -EINVAL;
-
ret = mlx4_get_internal_clock_params(dev->dev, &params);
-
if (ret)
return ret;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- if (io_remap_pfn_range(vma, vma->vm_start,
- (pci_resource_start(dev->dev->persist->pdev,
- params.bar) +
- params.offset)
- >> PAGE_SHIFT,
- PAGE_SIZE, vma->vm_page_prot))
- return -EAGAIN;
-
- mlx4_ib_set_vma_data(vma,
- &mucontext->hw_bar_info[HW_BAR_CLOCK]);
- } else {
- return -EINVAL;
+ return rdma_user_mmap_io(
+ context, vma,
+ (pci_resource_start(dev->dev->persist->pdev,
+ params.bar) +
+ params.offset) >>
+ PAGE_SHIFT,
+ PAGE_SIZE, pgprot_noncached(vma->vm_page_prot));
}
- return 0;
+ default:
+ return -EINVAL;
+ }
}
static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
@@ -2133,39 +2039,43 @@ out:
return err;
}
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx4_ib_dev *dev =
container_of(device, struct mlx4_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
dev->dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mlx4_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mlx4_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mlx4_attr_group = {
+ .attrs = mlx4_class_attributes,
};
struct diag_counter {
@@ -2636,7 +2546,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->dev = dev;
ibdev->bond_next_port = 0;
- strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
ibdev->ib_dev.owner = THIS_MODULE;
ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
@@ -2898,8 +2807,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (mlx4_ib_alloc_diag_counters(ibdev))
goto err_steer_free_bitmap;
+ rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
ibdev->ib_dev.driver_id = RDMA_DRIVER_MLX4;
- if (ib_register_device(&ibdev->ib_dev, NULL))
+ if (ib_register_device(&ibdev->ib_dev, "mlx4_%d", NULL))
goto err_diag_counters;
if (mlx4_ib_mad_init(ibdev))
@@ -2922,12 +2832,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
goto err_notif;
}
- for (j = 0; j < ARRAY_SIZE(mlx4_class_attributes); ++j) {
- if (device_create_file(&ibdev->ib_dev.dev,
- mlx4_class_attributes[j]))
- goto err_notif;
- }
-
ibdev->ib_active = true;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 81ffc007e0a1..d844831179cf 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -673,7 +673,7 @@ static void mlx4_ib_mcg_work_handler(struct work_struct *work)
if (!list_empty(&group->pending_list))
req = list_first_entry(&group->pending_list,
struct mcast_req, group_list);
- if ((method == IB_MGMT_METHOD_GET_RESP)) {
+ if (method == IB_MGMT_METHOD_GET_RESP) {
if (req) {
send_reply_to_slave(req->func, group, &req->sa_mad, status);
--group->func[req->func].num_pend_reqs;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e10dccc7958f..8850dfc3826d 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -80,16 +80,11 @@ enum hw_bar_type {
HW_BAR_COUNT
};
-struct mlx4_ib_vma_private_data {
- struct vm_area_struct *vma;
-};
-
struct mlx4_ib_ucontext {
struct ib_ucontext ibucontext;
struct mlx4_uar uar;
struct list_head db_page_list;
struct mutex db_page_mutex;
- struct mlx4_ib_vma_private_data hw_bar_info[HW_BAR_COUNT];
struct list_head wqn_ranges_list;
struct mutex wqn_ranges_mutex; /* protect wqn_ranges_list */
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 6dd3cd2c2f80..0711ca1dfb8f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2629,7 +2629,6 @@ enum {
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
@@ -2639,13 +2638,8 @@ static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state != new_state || cur_state != IB_QPS_RESET) {
- int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = rdma_port_get_link_layer(&dev->ib_dev, port);
- }
-
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, ll)) {
+ attr_mask)) {
pr_debug("qpn 0x%x: invalid attribute mask specified "
"for transition %d to %d. qp_type %d,"
" attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c
index e219093d2764..752bdd536130 100644
--- a/drivers/infiniband/hw/mlx4/sysfs.c
+++ b/drivers/infiniband/hw/mlx4/sysfs.c
@@ -818,9 +818,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)
if (!mlx4_is_master(dev->dev))
return 0;
- dev->iov_parent =
- kobject_create_and_add("iov",
- kobject_get(dev->ib_dev.ports_parent->parent));
+ dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
if (!dev->iov_parent) {
ret = -ENOMEM;
goto err;
@@ -850,7 +848,6 @@ err_add_entries:
err_ports:
kobject_put(dev->iov_parent);
err:
- kobject_put(dev->ib_dev.ports_parent->parent);
pr_err("mlx4_ib_device_register_sysfs error (%d)\n", ret);
return ret;
}
@@ -886,5 +883,4 @@ void mlx4_ib_device_unregister_sysfs(struct mlx4_ib_dev *device)
kobject_put(device->ports_parent);
kobject_put(device->iov_parent);
kobject_put(device->iov_parent);
- kobject_put(device->ib_dev.ports_parent->parent);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index c84fef9a8a08..ca060a2e2b36 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -197,3 +197,132 @@ int mlx5_cmd_query_ext_ppcnt_counters(struct mlx5_core_dev *dev, void *out)
return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPCNT,
0, 0);
}
+
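+/* The destroy/dealloc helpers below stamp the caller's uid into the command
+ * so firmware can check that the object belongs to that user context.
+ */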
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)] = {};
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+ MLX5_SET(destroy_tir_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)] = {0};
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+ MLX5_SET(destroy_tis_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)] = {};
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
+ MLX5_SET(destroy_rqt_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {0};
+ int err;
+
+ MLX5_SET(alloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
+
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *tdn = MLX5_GET(alloc_transport_domain_out, out,
+ transport_domain);
+
+ return err;
+}
+
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {0};
+ u32 out[MLX5_ST_SZ_DW(dealloc_transport_domain_out)] = {0};
+
+ MLX5_SET(dealloc_transport_domain_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
+
+ MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
+ MLX5_SET(dealloc_pd_in, in, pd, pdn);
+ MLX5_SET(dealloc_pd_in, in, uid, uid);
+ mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(attach_to_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(attach_to_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(attach_to_mcg_in, in, opcode, MLX5_CMD_OP_ATTACH_TO_MCG);
+ MLX5_SET(attach_to_mcg_in, in, qpn, qpn);
+ MLX5_SET(attach_to_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(detach_from_mcg_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(detach_from_mcg_in)] = {};
+ void *gid;
+
+ MLX5_SET(detach_from_mcg_in, in, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
+ MLX5_SET(detach_from_mcg_in, in, qpn, qpn);
+ MLX5_SET(detach_from_mcg_in, in, uid, uid);
+ gid = MLX5_ADDR_OF(detach_from_mcg_in, in, multicast_gid);
+ memcpy(gid, mgid, sizeof(*mgid));
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
+ MLX5_SET(alloc_xrcd_in, in, uid, uid);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
+ return err;
+}
+
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
+
+ MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
+ MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
+ MLX5_SET(dealloc_xrcd_in, in, uid, uid);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 88cbb1c41703..c03c56455534 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -47,4 +47,18 @@ int mlx5_cmd_modify_cong_params(struct mlx5_core_dev *mdev,
int mlx5_cmd_alloc_memic(struct mlx5_memic *memic, phys_addr_t *addr,
u64 length, u32 alignment);
int mlx5_cmd_dealloc_memic(struct mlx5_memic *memic, u64 addr, u64 length);
+void mlx5_cmd_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn, u16 uid);
+void mlx5_cmd_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u16 uid);
+void mlx5_cmd_destroy_tis(struct mlx5_core_dev *dev, u32 tisn, u16 uid);
+void mlx5_cmd_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn, u16 uid);
+int mlx5_cmd_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn,
+ u16 uid);
+void mlx5_cmd_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn,
+ u16 uid);
+int mlx5_cmd_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid,
+ u32 qpn, u16 uid);
+int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
+int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index cca1820802b8..7d769b5538b4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -874,6 +874,7 @@ static int create_cq_user(struct mlx5_ib_dev *dev, struct ib_udata *udata,
cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD;
}
+ MLX5_SET(create_cq_in, *cqb, uid, to_mucontext(context)->devx_uid);
return 0;
err_cqb:
@@ -1454,7 +1455,7 @@ ex:
return err;
}
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
{
struct mlx5_ib_cq *cq;
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index 66dc337e49a7..61aab7c0c513 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -19,7 +19,7 @@
#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
struct mlx5_core_dev *mdev;
- u32 obj_id;
+ u64 obj_id;
u32 dinlen; /* destroy inbox length */
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
};
@@ -45,13 +45,14 @@ static struct mlx5_ib_ucontext *devx_ufile2uctx(struct ib_uverbs_file *file)
return to_mucontext(ib_uverbs_get_ucontext(file));
}
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context)
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
u64 general_obj_types;
void *hdr;
int err;
+ u16 uid;
hdr = MLX5_ADDR_OF(create_uctx_in, in, hdr);
@@ -60,9 +61,6 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
!(general_obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM))
return -EINVAL;
- if (!capable(CAP_NET_RAW))
- return -EPERM;
-
MLX5_SET(general_obj_in_cmd_hdr, hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, hdr, obj_type, MLX5_OBJ_TYPE_UCTX);
@@ -70,19 +68,18 @@ int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *contex
if (err)
return err;
- context->devx_uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
- return 0;
+ uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+ return uid;
}
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context)
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {0};
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_UCTX);
- MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, context->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, uid);
mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
@@ -109,150 +106,218 @@ bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
}
}
+/*
+ * As the obj_id in the firmware is not globally unique, the object type
+ * must be considered when checking for a valid object id.
+ * To that end, the opcode of the creating command is encoded as part of
+ * the obj_id.
+ */
+static u64 get_enc_obj_id(u16 opcode, u32 obj_id)
+{
+ return ((u64)opcode << 32) | obj_id;
+}
+
static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
- u32 obj_id;
+ u64 obj_id;
switch (opcode) {
case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_MKEY:
- obj_id = MLX5_GET(query_mkey_in, in, mkey_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
+ MLX5_GET(query_mkey_in, in,
+ mkey_index));
break;
case MLX5_CMD_OP_QUERY_CQ:
- obj_id = MLX5_GET(query_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(query_cq_in, in, cqn));
break;
case MLX5_CMD_OP_MODIFY_CQ:
- obj_id = MLX5_GET(modify_cq_in, in, cqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
+ MLX5_GET(modify_cq_in, in, cqn));
break;
case MLX5_CMD_OP_QUERY_SQ:
- obj_id = MLX5_GET(query_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(query_sq_in, in, sqn));
break;
case MLX5_CMD_OP_MODIFY_SQ:
- obj_id = MLX5_GET(modify_sq_in, in, sqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
+ MLX5_GET(modify_sq_in, in, sqn));
break;
case MLX5_CMD_OP_QUERY_RQ:
- obj_id = MLX5_GET(query_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(query_rq_in, in, rqn));
break;
case MLX5_CMD_OP_MODIFY_RQ:
- obj_id = MLX5_GET(modify_rq_in, in, rqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(modify_rq_in, in, rqn));
break;
case MLX5_CMD_OP_QUERY_RMP:
- obj_id = MLX5_GET(query_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(query_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_MODIFY_RMP:
- obj_id = MLX5_GET(modify_rmp_in, in, rmpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
+ MLX5_GET(modify_rmp_in, in, rmpn));
break;
case MLX5_CMD_OP_QUERY_RQT:
- obj_id = MLX5_GET(query_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(query_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_MODIFY_RQT:
- obj_id = MLX5_GET(modify_rqt_in, in, rqtn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
+ MLX5_GET(modify_rqt_in, in, rqtn));
break;
case MLX5_CMD_OP_QUERY_TIR:
- obj_id = MLX5_GET(query_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(query_tir_in, in, tirn));
break;
case MLX5_CMD_OP_MODIFY_TIR:
- obj_id = MLX5_GET(modify_tir_in, in, tirn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
+ MLX5_GET(modify_tir_in, in, tirn));
break;
case MLX5_CMD_OP_QUERY_TIS:
- obj_id = MLX5_GET(query_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(query_tis_in, in, tisn));
break;
case MLX5_CMD_OP_MODIFY_TIS:
- obj_id = MLX5_GET(modify_tis_in, in, tisn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
+ MLX5_GET(modify_tis_in, in, tisn));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
- obj_id = MLX5_GET(query_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(query_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
- obj_id = MLX5_GET(modify_flow_table_in, in, table_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
+ MLX5_GET(modify_flow_table_in, in,
+ table_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- obj_id = MLX5_GET(query_flow_group_in, in, group_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
+ MLX5_GET(query_flow_group_in, in,
+ group_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(query_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(query_fte_in, in,
+ flow_index));
break;
case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
- obj_id = MLX5_GET(set_fte_in, in, flow_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
+ MLX5_GET(set_fte_in, in, flow_index));
break;
case MLX5_CMD_OP_QUERY_Q_COUNTER:
- obj_id = MLX5_GET(query_q_counter_in, in, counter_set_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
+ MLX5_GET(query_q_counter_in, in,
+ counter_set_id));
break;
case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
- obj_id = MLX5_GET(query_flow_counter_in, in, flow_counter_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
+ MLX5_GET(query_flow_counter_in, in,
+ flow_counter_id));
break;
case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
- obj_id = MLX5_GET(general_obj_in_cmd_hdr, in, obj_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
+ MLX5_GET(general_obj_in_cmd_hdr, in,
+ obj_id));
break;
case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(query_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(query_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
- obj_id = MLX5_GET(modify_scheduling_element_in, in,
- scheduling_element_id);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
+ MLX5_GET(modify_scheduling_element_in,
+ in, scheduling_element_id));
break;
case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
+ MLX5_GET(add_vxlan_udp_dport_in, in,
+ vxlan_udp_port));
break;
case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(query_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(query_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
- obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
+ MLX5_GET(set_l2_table_entry_in, in,
+ table_index));
break;
case MLX5_CMD_OP_QUERY_QP:
- obj_id = MLX5_GET(query_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(query_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RST2INIT_QP:
- obj_id = MLX5_GET(rst2init_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rst2init_qp_in, in, qpn));
break;
case MLX5_CMD_OP_INIT2RTR_QP:
- obj_id = MLX5_GET(init2rtr_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(init2rtr_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTR2RTS_QP:
- obj_id = MLX5_GET(rtr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rtr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_RTS2RTS_QP:
- obj_id = MLX5_GET(rts2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(rts2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_SQERR2RTS_QP:
- obj_id = MLX5_GET(sqerr2rts_qp_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(sqerr2rts_qp_in, in, qpn));
break;
case MLX5_CMD_OP_2ERR_QP:
- obj_id = MLX5_GET(qp_2err_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2err_in, in, qpn));
break;
case MLX5_CMD_OP_2RST_QP:
- obj_id = MLX5_GET(qp_2rst_in, in, qpn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
+ MLX5_GET(qp_2rst_in, in, qpn));
break;
case MLX5_CMD_OP_QUERY_DCT:
- obj_id = MLX5_GET(query_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(query_dct_in, in, dctn));
break;
case MLX5_CMD_OP_QUERY_XRQ:
- obj_id = MLX5_GET(query_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(query_xrq_in, in, xrqn));
break;
case MLX5_CMD_OP_QUERY_XRC_SRQ:
- obj_id = MLX5_GET(query_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(query_xrc_srq_in, in,
+ xrc_srqn));
break;
case MLX5_CMD_OP_ARM_XRC_SRQ:
- obj_id = MLX5_GET(arm_xrc_srq_in, in, xrc_srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
+ MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
break;
case MLX5_CMD_OP_QUERY_SRQ:
- obj_id = MLX5_GET(query_srq_in, in, srqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
+ MLX5_GET(query_srq_in, in, srqn));
break;
case MLX5_CMD_OP_ARM_RQ:
- obj_id = MLX5_GET(arm_rq_in, in, srq_number);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
+ MLX5_GET(arm_rq_in, in, srq_number));
break;
case MLX5_CMD_OP_DRAIN_DCT:
case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
- obj_id = MLX5_GET(drain_dct_in, in, dctn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
+ MLX5_GET(drain_dct_in, in, dctn));
break;
case MLX5_CMD_OP_ARM_XRQ:
- obj_id = MLX5_GET(arm_xrq_in, in, xrqn);
+ obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
+ MLX5_GET(arm_xrq_in, in, xrqn));
break;
default:
return false;
@@ -264,11 +329,102 @@ static int devx_is_valid_obj_id(struct devx_obj *obj, const void *in)
return false;
}
-static bool devx_is_obj_create_cmd(const void *in)
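+/* Mark every umem-backed buffer referenced by a DEVX create/modify command
+ * as valid, so firmware resolves the buffers through the umem objects.
+ */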
+static void devx_set_umem_valid(const void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
+ case MLX5_CMD_OP_CREATE_MKEY:
+ MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
+ break;
+ case MLX5_CMD_OP_CREATE_CQ:
+ {
+ void *cqc;
+
+ MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
+ break;
+ }
+ case MLX5_CMD_OP_CREATE_QP:
+ {
+ void *qpc;
+
+ qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+ MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
+ MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_RQ:
+ {
+ void *rqc, *wq;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_SQ:
+ {
+ void *sqc, *wq;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_MODIFY_CQ:
+ MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
+ break;
+
+ case MLX5_CMD_OP_CREATE_RMP:
+ {
+ void *rmpc, *wq;
+
+ rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRQ:
+ {
+ void *xrqc, *wq;
+
+ xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
+ wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
+ MLX5_SET(wq, wq, dbr_umem_valid, 1);
+ MLX5_SET(wq, wq, wq_umem_valid, 1);
+ break;
+ }
+
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ {
+ void *xrc_srqc;
+
+ MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
+ xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
+ xrc_srq_context_entry);
+ MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
+ break;
+ }
+
+ default:
+ return;
+ }
+}
+
+static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
+{
+ *opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (*opcode) {
case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
case MLX5_CMD_OP_CREATE_MKEY:
case MLX5_CMD_OP_CREATE_CQ:
@@ -385,12 +541,49 @@ static bool devx_is_obj_query_cmd(const void *in)
}
}
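+/* Commands that may be issued even without a per-context DEVX uid. */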
+static bool devx_is_whitelist_cmd(void *in)
+{
+ u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
+
+ switch (opcode) {
+ case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
+ return true;
+ default:
+ return false;
+ }
+}
+
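+/* Resolve the uid to stamp into a DEVX command: whitelisted commands may
+ * fall back to the device-wide whitelist uid; everything else needs a DEVX
+ * uid on the context plus CAP_NET_RAW.
+ */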
+static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
+{
+ if (devx_is_whitelist_cmd(cmd_in)) {
+ struct mlx5_ib_dev *dev;
+
+ if (c->devx_uid)
+ return c->devx_uid;
+
+ dev = to_mdev(c->ibucontext.device);
+ if (dev->devx_whitelist_uid)
+ return dev->devx_whitelist_uid;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
+ return -EPERM;
+
+ return c->devx_uid;
+}
static bool devx_is_general_cmd(void *in)
{
u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
switch (opcode) {
case MLX5_CMD_OP_QUERY_HCA_CAP:
+ case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_VPORT_STATE:
case MLX5_CMD_OP_QUERY_ADAPTER:
case MLX5_CMD_OP_QUERY_ISSI:
@@ -498,14 +691,16 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
void *cmd_out;
int err;
+ int uid;
c = devx_ufile2uctx(file);
if (IS_ERR(c))
return PTR_ERR(c);
dev = to_mdev(c->ibucontext.device);
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
	/* Only a whitelist of general HCA commands is allowed for this method. */
if (!devx_is_general_cmd(cmd_in))
@@ -515,7 +710,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
cmd_out, cmd_out_len);
@@ -726,11 +921,15 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
struct devx_obj *obj;
int err;
+ int uid;
+ u32 obj_id;
+ u16 opcode;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
- if (!devx_is_obj_create_cmd(cmd_in))
+ if (!devx_is_obj_create_cmd(cmd_in, &opcode))
return -EINVAL;
cmd_out = uverbs_zalloc(attrs, cmd_out_len);
@@ -741,7 +940,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
if (!obj)
return -ENOMEM;
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(dev->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN),
cmd_out, cmd_out_len);
@@ -750,13 +951,15 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
uobj->object = obj;
obj->mdev = dev->mdev;
- devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen, &obj->obj_id);
+ devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
+ &obj_id);
WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
if (err)
goto obj_destroy;
+ obj->obj_id = get_enc_obj_id(opcode, obj_id);
return 0;
obj_destroy:
@@ -778,9 +981,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_modify_cmd(cmd_in))
return -EINVAL;
@@ -792,7 +997,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
+ devx_set_umem_valid(cmd_in);
+
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
cmd_out, cmd_out_len);
@@ -815,9 +1022,11 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
struct devx_obj *obj = uobj->object;
void *cmd_out;
int err;
+ int uid;
- if (!c->devx_uid)
- return -EPERM;
+ uid = devx_get_uid(c, cmd_in);
+ if (uid < 0)
+ return uid;
if (!devx_is_obj_query_cmd(cmd_in))
return -EINVAL;
@@ -829,7 +1038,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
if (IS_ERR(cmd_out))
return PTR_ERR(cmd_out);
- MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, c->devx_uid);
+ MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
err = mlx5_cmd_exec(obj->mdev, cmd_in,
uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
cmd_out, cmd_out_len);
@@ -928,6 +1137,9 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
int err;
if (!c->devx_uid)
+ return -EINVAL;
+
+ if (!capable(CAP_NET_RAW))
return -EPERM;
obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c
index 1a29f47f836e..f86cdcafdafc 100644
--- a/drivers/infiniband/hw/mlx5/flow.c
+++ b/drivers/infiniband/hw/mlx5/flow.c
@@ -7,7 +7,9 @@
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
+#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
+#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
@@ -16,6 +18,24 @@
#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
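+/* Translate the uapi flow table type into the matching mlx5 flow steering
+ * namespace: NIC RX maps to the bypass namespace, NIC TX to egress.
+ */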
+static int
+mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type,
+ enum mlx5_flow_namespace_type *namespace)
+{
+ switch (table_type) {
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX:
+ *namespace = MLX5_FLOW_NAMESPACE_BYPASS;
+ break;
+ case MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX:
+ *namespace = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
[MLX5_IB_FLOW_TYPE_NORMAL] = {
.type = UVERBS_ATTR_TYPE_PTR_IN,
@@ -38,11 +58,15 @@ static const struct uverbs_attr_spec mlx5_ib_flow_type[] = {
},
};
+#define MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS 2
static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uverbs_file *file, struct uverbs_attr_bundle *attrs)
{
+ struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_ib_flow_handler *flow_handler;
struct mlx5_ib_flow_matcher *fs_matcher;
+ struct ib_uobject **arr_flow_actions;
+ struct ib_uflow_resources *uflow_res;
void *devx_obj;
int dest_id, dest_type;
void *cmd_in;
@@ -52,6 +76,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
struct ib_uobject *uobj =
uverbs_attr_get_uobject(attrs, MLX5_IB_ATTR_CREATE_FLOW_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
+ int len, ret, i;
if (!capable(CAP_NET_RAW))
return -EPERM;
@@ -61,7 +86,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
dest_qp = uverbs_attr_is_valid(attrs,
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP);
- if ((dest_devx && dest_qp) || (!dest_devx && !dest_qp))
+ fs_matcher = uverbs_attr_get_obj(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS &&
+ ((dest_devx && dest_qp) || (!dest_devx && !dest_qp)))
+ return -EINVAL;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_EGRESS &&
+ (dest_devx || dest_qp))
return -EINVAL;
if (dest_devx) {
@@ -75,7 +107,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
*/
if (!mlx5_ib_devx_is_flow_dest(devx_obj, &dest_id, &dest_type))
return -EINVAL;
- } else {
+ } else if (dest_qp) {
struct mlx5_ib_qp *mqp;
qp = uverbs_attr_get_obj(attrs,
@@ -92,6 +124,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
else
dest_id = mqp->raw_packet_qp.rq.tirn;
dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ } else {
+ dest_type = MLX5_FLOW_DESTINATION_TYPE_PORT;
}
if (dev->rep)
@@ -101,16 +135,48 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_CREATE_FLOW)(
attrs, MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
inlen = uverbs_attr_get_len(attrs,
MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE);
- fs_matcher = uverbs_attr_get_obj(attrs,
- MLX5_IB_ATTR_CREATE_FLOW_MATCHER);
- flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, cmd_in, inlen,
+
+ uflow_res = flow_resources_alloc(MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS);
+ if (!uflow_res)
+ return -ENOMEM;
+
+ len = uverbs_attr_get_uobjs_arr(attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS, &arr_flow_actions);
+ for (i = 0; i < len; i++) {
+ struct mlx5_ib_flow_action *maction =
+ to_mflow_act(arr_flow_actions[i]->object);
+
+ ret = parse_flow_flow_action(maction, false, &flow_act);
+ if (ret)
+ goto err_out;
+ flow_resources_add(uflow_res, IB_FLOW_SPEC_ACTION_HANDLE,
+ arr_flow_actions[i]->object);
+ }
+
+ ret = uverbs_copy_from(&flow_act.flow_tag, attrs,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG);
+ if (!ret) {
+ if (flow_act.flow_tag >= BIT(24)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+ flow_act.flags |= FLOW_ACT_HAS_TAG;
+ }
+
+ flow_handler = mlx5_ib_raw_fs_rule_add(dev, fs_matcher, &flow_act,
+ cmd_in, inlen,
dest_id, dest_type);
- if (IS_ERR(flow_handler))
- return PTR_ERR(flow_handler);
+ if (IS_ERR(flow_handler)) {
+ ret = PTR_ERR(flow_handler);
+ goto err_out;
+ }
- ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev);
+ ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
return 0;
+err_out:
+ ib_uverbs_flow_resources_free(uflow_res);
+ return ret;
}
static int flow_matcher_cleanup(struct ib_uobject *uobject,
@@ -134,12 +200,14 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE);
struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
struct mlx5_ib_flow_matcher *obj;
+ u32 flags;
int err;
obj = kzalloc(sizeof(struct mlx5_ib_flow_matcher), GFP_KERNEL);
if (!obj)
return -ENOMEM;
+ obj->ns_type = MLX5_FLOW_NAMESPACE_BYPASS;
obj->mask_len = uverbs_attr_get_len(
attrs, MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK);
err = uverbs_copy_from(&obj->matcher_mask,
@@ -165,6 +233,19 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)(
if (err)
goto end;
+ err = uverbs_get_flags32(&flags, attrs,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ IB_FLOW_ATTR_FLAGS_EGRESS);
+ if (err)
+ goto end;
+
+ if (flags) {
+ err = mlx5_ib_ft_type_to_namespace(
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX, &obj->ns_type);
+ if (err)
+ goto end;
+ }
+
uobj->object = obj;
obj->mdev = dev->mdev;
atomic_set(&obj->usecnt, 0);
@@ -175,6 +256,248 @@ end:
return err;
}
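+/* Release the device object behind a raw flow action; a DECAP action holds
+ * no device state, so there is nothing to free for it.
+ */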
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+ switch (maction->flow_action_raw.sub_type) {
+ case MLX5_IB_FLOW_ACTION_MODIFY_HEADER:
+ mlx5_modify_header_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_PACKET_REFORMAT:
+ mlx5_packet_reformat_dealloc(maction->flow_action_raw.dev->mdev,
+ maction->flow_action_raw.action_id);
+ break;
+ case MLX5_IB_FLOW_ACTION_DECAP:
+ break;
+ default:
+ break;
+ }
+}
+
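+/* Allocate a device modify-header context and wrap it in an ib_flow_action. */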
+static struct ib_flow_action *
+mlx5_ib_create_modify_header(struct mlx5_ib_dev *dev,
+ enum mlx5_ib_uapi_flow_table_type ft_type,
+ u8 num_actions, void *in)
+{
+ enum mlx5_flow_namespace_type namespace;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mlx5_modify_header_alloc(dev->mdev, namespace, num_actions, in,
+ &maction->flow_action_raw.action_id);
+
+ if (ret) {
+ kfree(maction);
+ return ERR_PTR(ret);
+ }
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER;
+ maction->flow_action_raw.dev = dev;
+
+ return &maction->ib_action;
+}
+
+static bool mlx5_ib_modify_header_supported(struct mlx5_ib_dev *dev)
+{
+ return MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ max_modify_header_actions) ||
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, max_modify_header_actions);
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(
+ attrs, MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct ib_flow_action *action;
+ size_t num_actions;
+ void *in;
+ int len;
+ int ret;
+
+ if (!mlx5_ib_modify_header_supported(mdev))
+ return -EOPNOTSUPP;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM);
+
+ if (len % MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto))
+ return -EINVAL;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE);
+ if (ret)
+ return ret;
+
+	num_actions = len / MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
+ action = mlx5_ib_create_modify_header(mdev, ft_type, num_actions, in);
+ if (IS_ERR(action))
+ return PTR_ERR(action);
+
+ uverbs_flow_action_fill_action(action, uobj, uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+
+ return 0;
+}
+
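+/* Check against the flow table capabilities that the device supports the
+ * requested packet reformat type on the given flow table type.
+ */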
+static bool mlx5_ib_flow_action_packet_reformat_valid(struct mlx5_ib_dev *ibdev,
+ u8 packet_reformat_type,
+ u8 ft_type)
+{
+ switch (packet_reformat_type) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE(ibdev->mdev,
+ encap_general_header);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX)
+ return MLX5_CAP_FLOWTABLE_NIC_TX(ibdev->mdev,
+ reformat_l2_to_l3_tunnel);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev,
+ reformat_l3_tunnel_to_l2);
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2:
+ if (ft_type == MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX)
+ return MLX5_CAP_FLOWTABLE_NIC_RX(ibdev->mdev, decap);
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
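For reference, the capability gating the function above implements, per (reformat type, flow-table type) pair:

	/*
	 * L2_TO_L2_TUNNEL + NIC_TX -> FLOWTABLE encap_general_header
	 * L2_TO_L3_TUNNEL + NIC_TX -> NIC_TX reformat_l2_to_l3_tunnel
	 * L3_TUNNEL_TO_L2 + NIC_RX -> NIC_RX reformat_l3_tunnel_to_l2
	 * L2_TUNNEL_TO_L2 + NIC_RX -> NIC_RX decap
	 * Every other combination is rejected as unsupported.
	 */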
+
+static int mlx5_ib_dv_to_prm_packet_reformat_type(u8 dv_prt, u8 *prm_prt)
+{
+ switch (dv_prt) {
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+ *prm_prt = MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+ break;
+ case MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+ *prm_prt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5_ib_flow_action_create_packet_reformat_ctx(
+ struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_action *maction,
+ u8 ft_type, u8 dv_prt,
+ void *in, size_t len)
+{
+ enum mlx5_flow_namespace_type namespace;
+ u8 prm_prt;
+ int ret;
+
+ ret = mlx5_ib_ft_type_to_namespace(ft_type, &namespace);
+ if (ret)
+ return ret;
+
+ ret = mlx5_ib_dv_to_prm_packet_reformat_type(dv_prt, &prm_prt);
+ if (ret)
+ return ret;
+
+ ret = mlx5_packet_reformat_alloc(dev->mdev, prm_prt, len,
+ in, namespace,
+ &maction->flow_action_raw.action_id);
+ if (ret)
+ return ret;
+
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT;
+ maction->flow_action_raw.dev = dev;
+
+ return 0;
+}
+
+static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT)(
+ struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs)
+{
+ struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE);
+ struct mlx5_ib_dev *mdev = to_mdev(uobj->context->device);
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type dv_prt;
+ enum mlx5_ib_uapi_flow_table_type ft_type;
+ struct mlx5_ib_flow_action *maction;
+ int ret;
+
+ ret = uverbs_get_const(&ft_type, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE);
+ if (ret)
+ return ret;
+
+ ret = uverbs_get_const(&dv_prt, attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE);
+ if (ret)
+ return ret;
+
+ if (!mlx5_ib_flow_action_packet_reformat_valid(mdev, dv_prt, ft_type))
+ return -EOPNOTSUPP;
+
+ maction = kzalloc(sizeof(*maction), GFP_KERNEL);
+ if (!maction)
+ return -ENOMEM;
+
+ if (dv_prt ==
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2) {
+ maction->flow_action_raw.sub_type =
+ MLX5_IB_FLOW_ACTION_DECAP;
+ maction->flow_action_raw.dev = mdev;
+ } else {
+ void *in;
+ int len;
+
+ in = uverbs_attr_get_alloced_ptr(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+ if (IS_ERR(in)) {
+ ret = PTR_ERR(in);
+ goto free_maction;
+ }
+
+ len = uverbs_attr_get_len(attrs,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF);
+
+ ret = mlx5_ib_flow_action_create_packet_reformat_ctx(mdev,
+ maction, ft_type, dv_prt, in, len);
+ if (ret)
+ goto free_maction;
+ }
+
+ uverbs_flow_action_fill_action(&maction->ib_action, uobj,
+ uobj->context->device,
+ IB_FLOW_ACTION_UNSPECIFIED);
+ return 0;
+
+free_maction:
+ kfree(maction);
+ return ret;
+}
+
DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_CREATE_FLOW,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_HANDLE,
@@ -195,7 +518,15 @@ DECLARE_UVERBS_NAMED_METHOD(
UVERBS_ACCESS_READ),
UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_OBJECT_DEVX_OBJ,
- UVERBS_ACCESS_READ));
+ UVERBS_ACCESS_READ),
+ UVERBS_ATTR_IDRS_ARR(MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_READ, 1,
+ MLX5_IB_CREATE_FLOW_MAX_FLOW_ACTIONS,
+ UA_OPTIONAL),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_FLOW_TAG,
+ UVERBS_ATTR_TYPE(u32),
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_DESTROY_FLOW,
@@ -210,6 +541,44 @@ ADD_UVERBS_METHODS(mlx5_ib_fs,
&UVERBS_METHOD(MLX5_IB_METHOD_DESTROY_FLOW));
DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ UVERBS_ATTR_MIN_SIZE(MLX5_UN_SZ_BYTES(
+ set_action_in_add_action_in_auto)),
+ UA_MANDATORY,
+ UA_ALLOC_AND_COPY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+DECLARE_UVERBS_NAMED_METHOD(
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+ UVERBS_ATTR_IDR(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE,
+ UVERBS_OBJECT_FLOW_ACTION,
+ UVERBS_ACCESS_NEW,
+ UA_MANDATORY),
+ UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+ UVERBS_ATTR_MIN_SIZE(1),
+ UA_ALLOC_AND_COPY,
+ UA_OPTIONAL),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ enum mlx5_ib_uapi_flow_action_packet_reformat_type,
+ UA_MANDATORY),
+ UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ enum mlx5_ib_uapi_flow_table_type,
+ UA_MANDATORY));
+
+ADD_UVERBS_METHODS(
+ mlx5_ib_flow_actions,
+ UVERBS_OBJECT_FLOW_ACTION,
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER),
+ &UVERBS_METHOD(MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT));
+
+DECLARE_UVERBS_NAMED_METHOD(
MLX5_IB_METHOD_FLOW_MATCHER_CREATE,
UVERBS_ATTR_IDR(MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE,
MLX5_IB_OBJECT_FLOW_MATCHER,
@@ -224,7 +593,10 @@ DECLARE_UVERBS_NAMED_METHOD(
UA_MANDATORY),
UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
UVERBS_ATTR_TYPE(u8),
- UA_MANDATORY));
+ UA_MANDATORY),
+ UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
+ enum ib_flow_flags,
+ UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD_DESTROY(
MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
@@ -247,6 +619,7 @@ int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
root[i++] = &flow_objects;
root[i++] = &mlx5_ib_fs;
+ root[i++] = &mlx5_ib_flow_actions;
return i;
}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 35a0e04c38f2..584ff2ea7810 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -39,9 +39,6 @@ static const struct mlx5_ib_profile rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static int
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index af32899bb72a..e9c428071df3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1571,14 +1571,57 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
}
-static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ int err = 0;
+
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td++;
+ if (qp)
+ dev->lb.qps++;
+
+ if (dev->lb.user_td == 2 ||
+ dev->lb.qps == 1) {
+ if (!dev->lb.enabled) {
+ err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
+ dev->lb.enabled = true;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+
+ return err;
+}
+
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
+{
+ mutex_lock(&dev->lb.mutex);
+ if (td)
+ dev->lb.user_td--;
+ if (qp)
+ dev->lb.qps--;
+
+ if (dev->lb.user_td == 1 &&
+ dev->lb.qps == 0) {
+ if (dev->lb.enabled) {
+ mlx5_nic_vport_update_local_lb(dev->mdev, false);
+ dev->lb.enabled = false;
+ }
+ }
+
+ mutex_unlock(&dev->lb.mutex);
+}
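The two helpers above replace the old single-purpose transport-domain counting with shared, edge-triggered refcounting: the second user TD or the first loopback-requiring QP switches device loopback on, and it is switched off once at most one TD and no such QPs remain. A hypothetical standalone model of just that logic (the bool flips stand in for the mlx5_nic_vport_update_local_lb() firmware calls):

	#include <stdbool.h>

	struct lb_state { int user_td; int qps; bool enabled; };

	static void lb_get(struct lb_state *lb, bool td, bool qp)
	{
		lb->user_td += td;
		lb->qps += qp;
		if ((lb->user_td == 2 || lb->qps == 1) && !lb->enabled)
			lb->enabled = true;	/* FW call in the real driver */
	}

	static void lb_put(struct lb_state *lb, bool td, bool qp)
	{
		lb->user_td -= td;
		lb->qps -= qp;
		if (lb->user_td == 1 && lb->qps == 0 && lb->enabled)
			lb->enabled = false;
	}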
+
+static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn,
+ u16 uid)
{
int err;
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return 0;
- err = mlx5_core_alloc_transport_domain(dev->mdev, tdn);
+ err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid);
if (err)
return err;
@@ -1587,35 +1630,23 @@ static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn)
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return err;
- mutex_lock(&dev->lb_mutex);
- dev->user_td++;
-
- if (dev->user_td == 2)
- err = mlx5_nic_vport_update_local_lb(dev->mdev, true);
-
- mutex_unlock(&dev->lb_mutex);
- return err;
+ return mlx5_ib_enable_lb(dev, true, false);
}
-static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn)
+static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn,
+ u16 uid)
{
if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain))
return;
- mlx5_core_dealloc_transport_domain(dev->mdev, tdn);
+ mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid);
if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) ||
(!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) &&
!MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
return;
- mutex_lock(&dev->lb_mutex);
- dev->user_td--;
-
- if (dev->user_td < 2)
- mlx5_nic_vport_update_local_lb(dev->mdev, false);
-
- mutex_unlock(&dev->lb_mutex);
+ mlx5_ib_disable_lb(dev, true, false);
}
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
@@ -1727,30 +1758,24 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif
- err = mlx5_ib_alloc_transport_domain(dev, &context->tdn);
- if (err)
- goto out_uars;
-
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- /* Block DEVX on Infiniband as of SELinux */
- if (mlx5_ib_port_link_layer(ibdev, 1) != IB_LINK_LAYER_ETHERNET) {
- err = -EPERM;
- goto out_td;
- }
-
- err = mlx5_ib_devx_create(dev, context);
- if (err)
- goto out_td;
+ err = mlx5_ib_devx_create(dev);
+ if (err < 0)
+ goto out_uars;
+ context->devx_uid = err;
}
+ err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
+ context->devx_uid);
+ if (err)
+ goto out_devx;
+
if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) {
err = mlx5_cmd_dump_fill_mkey(dev->mdev, &dump_fill_mkey);
if (err)
goto out_mdev;
}
- INIT_LIST_HEAD(&context->vma_private_list);
- mutex_init(&context->vma_private_list_mutex);
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1826,13 +1851,21 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
context->lib_caps = req.lib_caps;
print_lib_caps(dev, context->lib_caps);
+ if (mlx5_lag_is_active(dev->mdev)) {
+ u8 port = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ atomic_set(&context->tx_port_affinity,
+ atomic_add_return(
+ 1, &dev->roce[port].tx_port_affinity));
+ }
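The affinity value stored here is just a running counter; a brief sketch of how it is presumably consumed (the reduction below is illustrative and done by the QP-creation path, not in this hunk):

	/* Each new ucontext gets the next counter value; folding it as
	 * value % num_lag_ports yields a port index, spreading RoCE LAG
	 * TX traffic across the bonded ports. */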
+
return &context->ibucontext;
out_mdev:
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+out_devx:
if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
- mlx5_ib_devx_destroy(dev, context);
-out_td:
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
out_uars:
deallocate_uars(dev, context);
@@ -1855,11 +1888,18 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
struct mlx5_bfreg_info *bfregi;
- if (context->devx_uid)
- mlx5_ib_devx_destroy(dev, context);
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+ /* All umems must be destroyed before destroying the ucontext. */
+ mutex_lock(&ibcontext->per_mm_list_lock);
+ WARN_ON(!list_empty(&ibcontext->per_mm_list));
+ mutex_unlock(&ibcontext->per_mm_list_lock);
+#endif
bfregi = &context->bfregi;
- mlx5_ib_dealloc_transport_domain(dev, context->tdn);
+ mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
@@ -1900,94 +1940,9 @@ static int get_extended_index(unsigned long offset)
return get_arg(offset) | ((offset >> 16) & 0xff) << 8;
}
-static void mlx5_ib_vma_open(struct vm_area_struct *area)
-{
- /* vma_open is called when a new VMA is created on top of our VMA. This
- * is done through either mremap flow or split_vma (usually due to
- * mlock, madvise, munmap, etc.) We do not support a clone of the VMA,
- * as this VMA is strongly hardware related. Therefore we set the
- * vm_ops of the newly created/cloned VMA to NULL, to prevent it from
- * calling us again and trying to do incorrect actions. We assume that
- * the original VMA size is exactly a single page, and therefore all
- * "splitting" operation will not happen to it.
- */
- area->vm_ops = NULL;
-}
-
-static void mlx5_ib_vma_close(struct vm_area_struct *area)
-{
- struct mlx5_ib_vma_private_data *mlx5_ib_vma_priv_data;
-
- /* It's guaranteed that all VMAs opened on a FD are closed before the
- * file itself is closed, therefore no sync is needed with the regular
- * closing flow. (e.g. mlx5 ib_dealloc_ucontext)
- * However need a sync with accessing the vma as part of
- * mlx5_ib_disassociate_ucontext.
- * The close operation is usually called under mm->mmap_sem except when
- * process is exiting.
- * The exiting case is handled explicitly as part of
- * mlx5_ib_disassociate_ucontext.
- */
- mlx5_ib_vma_priv_data = (struct mlx5_ib_vma_private_data *)area->vm_private_data;
-
- /* setting the vma context pointer to null in the mlx5_ib driver's
- * private data, to protect a race condition in
- * mlx5_ib_disassociate_ucontext().
- */
- mlx5_ib_vma_priv_data->vma = NULL;
- mutex_lock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- list_del(&mlx5_ib_vma_priv_data->list);
- mutex_unlock(mlx5_ib_vma_priv_data->vma_private_list_mutex);
- kfree(mlx5_ib_vma_priv_data);
-}
-
-static const struct vm_operations_struct mlx5_ib_vm_ops = {
- .open = mlx5_ib_vma_open,
- .close = mlx5_ib_vma_close
-};
-
-static int mlx5_ib_set_vma_data(struct vm_area_struct *vma,
- struct mlx5_ib_ucontext *ctx)
-{
- struct mlx5_ib_vma_private_data *vma_prv;
- struct list_head *vma_head = &ctx->vma_private_list;
-
- vma_prv = kzalloc(sizeof(*vma_prv), GFP_KERNEL);
- if (!vma_prv)
- return -ENOMEM;
-
- vma_prv->vma = vma;
- vma_prv->vma_private_list_mutex = &ctx->vma_private_list_mutex;
- vma->vm_private_data = vma_prv;
- vma->vm_ops = &mlx5_ib_vm_ops;
-
- mutex_lock(&ctx->vma_private_list_mutex);
- list_add(&vma_prv->list, vma_head);
- mutex_unlock(&ctx->vma_private_list_mutex);
-
- return 0;
-}
static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
- struct vm_area_struct *vma;
- struct mlx5_ib_vma_private_data *vma_private, *n;
- struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
-
- mutex_lock(&context->vma_private_list_mutex);
- list_for_each_entry_safe(vma_private, n, &context->vma_private_list,
- list) {
- vma = vma_private->vma;
- zap_vma_ptes(vma, vma->vm_start, PAGE_SIZE);
- /* context going to be destroyed, should
- * not access ops any more.
- */
- vma->vm_flags &= ~(VM_SHARED | VM_MAYSHARE);
- vma->vm_ops = NULL;
- list_del(&vma_private->list);
- kfree(vma_private);
- }
- mutex_unlock(&context->vma_private_list_mutex);
}
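mlx5_ib_disassociate_ucontext can now be empty because VMA bookkeeping moved into the uverbs core: the rdma_user_mmap_io()/rdma_user_mmap_page() calls introduced in the hunks below both remap the range and register the VMA with the ucontext, so the core can zap all user mappings itself on disassociation. The conversion pattern, sketched:

	/* Before: remap plus private VMA-list bookkeeping in the driver. */
	err = io_remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
	if (!err)
		err = mlx5_ib_set_vma_data(vma, context);

	/* After: one core call does both. */
	err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, size, prot);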
static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd)
@@ -2010,9 +1965,6 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
struct vm_area_struct *vma,
struct mlx5_ib_ucontext *context)
{
- phys_addr_t pfn;
- int err;
-
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
@@ -2025,13 +1977,8 @@ static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev,
if (!dev->mdev->clock_info_page)
return -EOPNOTSUPP;
- pfn = page_to_pfn(dev->mdev->clock_info_page);
- err = remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
- vma->vm_page_prot);
- if (err)
- return err;
-
- return mlx5_ib_set_vma_data(vma, context);
+ return rdma_user_mmap_page(&context->ibucontext, vma,
+ dev->mdev->clock_info_page, PAGE_SIZE);
}
static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
@@ -2121,21 +2068,15 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
pfn = uar_index2pfn(dev, uar_index);
mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn);
- vma->vm_page_prot = prot;
- err = io_remap_pfn_range(vma, vma->vm_start, pfn,
- PAGE_SIZE, vma->vm_page_prot);
+ err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE,
+ prot);
if (err) {
mlx5_ib_err(dev,
- "io_remap_pfn_range failed with error=%d, mmap_cmd=%s\n",
+ "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n",
err, mmap_cmd2str(cmd));
- err = -EAGAIN;
goto err;
}
- err = mlx5_ib_set_vma_data(vma, context);
- if (err)
- goto err;
-
if (dyn_uar)
bfregi->sys_pages[idx] = uar_index;
return 0;
@@ -2160,7 +2101,6 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
size_t map_size = vma->vm_end - vma->vm_start;
u32 npages = map_size >> PAGE_SHIFT;
phys_addr_t pfn;
- pgprot_t prot;
if (find_next_zero_bit(mctx->dm_pages, page_idx + npages, page_idx) !=
page_idx + npages)
@@ -2170,14 +2110,8 @@ static int dm_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
MLX5_CAP64_DEV_MEM(dev->mdev, memic_bar_start_addr)) >>
PAGE_SHIFT) +
page_idx;
- prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_page_prot = prot;
-
- if (io_remap_pfn_range(vma, vma->vm_start, pfn, map_size,
- vma->vm_page_prot))
- return -EAGAIN;
-
- return mlx5_ib_set_vma_data(vma, mctx);
+ return rdma_user_mmap_io(context, vma, pfn, map_size,
+ pgprot_writecombine(vma->vm_page_prot));
}
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
@@ -2318,21 +2252,30 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
struct mlx5_ib_alloc_pd_resp resp;
struct mlx5_ib_pd *pd;
int err;
+ u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
+ u16 uid = 0;
pd = kmalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
+ MLX5_SET(alloc_pd_in, in, uid, uid);
+ err = mlx5_cmd_exec(to_mdev(ibdev)->mdev, in, sizeof(in),
+ out, sizeof(out));
if (err) {
kfree(pd);
return ERR_PTR(err);
}
+ pd->pdn = MLX5_GET(alloc_pd_out, out, pd);
+ pd->uid = uid;
if (context) {
resp.pdn = pd->pdn;
if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
+ mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid);
kfree(pd);
return ERR_PTR(-EFAULT);
}
@@ -2346,7 +2289,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
+ mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid);
kfree(mpd);
return 0;
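The alloc/dealloc pair above establishes the uid-plumbing pattern this series applies to TIS, SQ, QP, RQT, XRCD and multicast commands as well; the recurring shape, sketched:

	/* Create: stamp the issuing user's devx uid into the command and
	 * remember it on the object:
	 *   MLX5_SET(alloc_pd_in, in, uid, uid);  pd->uid = uid;
	 * Destroy: replay the stored uid, presumably so firmware can
	 * attribute the teardown to the same user:
	 *   mlx5_cmd_dealloc_pd(mdev, pdn, pd->uid); */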
@@ -2452,20 +2395,50 @@ static int check_mpls_supp_fields(u32 field_support, const __be32 *set_mask)
offsetof(typeof(filter), field) -\
sizeof(filter.field))
-static int parse_flow_flow_action(const union ib_flow_spec *ib_spec,
- const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_act *action)
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action)
{
- struct mlx5_ib_flow_action *maction = to_mflow_act(ib_spec->action.act);
switch (maction->ib_action.type) {
case IB_FLOW_ACTION_ESP:
+ if (action->action & (MLX5_FLOW_CONTEXT_ACTION_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT))
+ return -EINVAL;
/* Currently only AES_GCM keymat is supported by the driver */
action->esp_id = (uintptr_t)maction->esp_aes_gcm.ctx;
- action->action |= flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS ?
+ action->action |= is_egress ?
MLX5_FLOW_CONTEXT_ACTION_ENCRYPT :
MLX5_FLOW_CONTEXT_ACTION_DECRYPT;
return 0;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ action->modify_id = maction->flow_action_raw.action_id;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_DECAP) {
+ if (action->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
+ return -EINVAL;
+ action->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ return 0;
+ }
+ if (maction->flow_action_raw.sub_type ==
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT) {
+ if (action->action &
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
+ return -EINVAL;
+ action->action |=
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ action->reformat_id =
+ maction->flow_action_raw.action_id;
+ return 0;
+ }
+ /* fall through */
default:
return -EOPNOTSUPP;
}
@@ -2802,7 +2775,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
action->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
break;
case IB_FLOW_SPEC_ACTION_HANDLE:
- ret = parse_flow_flow_action(ib_spec, flow_attr, action);
+ ret = parse_flow_flow_action(to_mflow_act(ib_spec->action.act),
+ flow_attr->flags & IB_FLOW_ATTR_FLAGS_EGRESS, action);
if (ret)
return ret;
break;
@@ -2883,7 +2857,7 @@ is_valid_esp_aes_gcm(struct mlx5_core_dev *mdev,
* rules would be supported, always return VALID_SPEC_NA.
*/
if (!is_crypto)
- return egress ? VALID_SPEC_INVALID : VALID_SPEC_NA;
+ return VALID_SPEC_NA;
return is_crypto && is_ipsec &&
(!egress || (!is_drop && !(flow_act->flags & FLOW_ACT_HAS_TAG))) ?
@@ -3026,14 +3000,15 @@ enum flow_table_type {
static struct mlx5_ib_flow_prio *_get_prio(struct mlx5_flow_namespace *ns,
struct mlx5_ib_flow_prio *prio,
int priority,
- int num_entries, int num_groups)
+ int num_entries, int num_groups,
+ u32 flags)
{
struct mlx5_flow_table *ft;
ft = mlx5_create_auto_grouped_flow_table(ns, priority,
num_entries,
num_groups,
- 0, 0);
+ 0, flags);
if (IS_ERR(ft))
return ERR_CAST(ft);
@@ -3053,26 +3028,43 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
int max_table_size;
int num_entries;
int num_groups;
+ u32 flags = 0;
int priority;
max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
log_max_ft_size));
if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
- if (ft_type == MLX5_IB_FT_TX)
- priority = 0;
- else if (flow_is_multicast_only(flow_attr) &&
- !dont_trap)
+ enum mlx5_flow_namespace_type fn_type;
+
+ if (flow_is_multicast_only(flow_attr) &&
+ !dont_trap)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
priority = ib_prio_to_core_prio(flow_attr->priority,
dont_trap);
- ns = mlx5_get_flow_namespace(dev->mdev,
- ft_type == MLX5_IB_FT_TX ?
- MLX5_FLOW_NAMESPACE_EGRESS :
- MLX5_FLOW_NAMESPACE_BYPASS);
+ if (ft_type == MLX5_IB_FT_RX) {
+ fn_type = MLX5_FLOW_NAMESPACE_BYPASS;
+ prio = &dev->flow_db->prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else {
+ max_table_size =
+ BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ fn_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ prio = &dev->flow_db->egress_prios[priority];
+ if (!dev->rep &&
+ MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
num_entries = MLX5_FS_MAX_ENTRIES;
num_groups = MLX5_FS_MAX_TYPES;
- prio = &dev->flow_db->prios[priority];
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
ns = mlx5_get_flow_namespace(dev->mdev,
@@ -3104,7 +3096,8 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft)
- return _get_prio(ns, prio, priority, num_entries, num_groups);
+ return _get_prio(ns, prio, priority, num_entries, num_groups,
+ flags);
return prio;
}
@@ -3271,6 +3264,9 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!is_valid_attr(dev->mdev, flow_attr))
return ERR_PTR(-EINVAL);
+ if (dev->rep && is_egress)
+ return ERR_PTR(-EINVAL);
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
handler = kzalloc(sizeof(*handler), GFP_KERNEL);
if (!handler || !spec) {
@@ -3661,34 +3657,54 @@ free_ucmd:
return ERR_PTR(err);
}
-static struct mlx5_ib_flow_prio *_get_flow_table(struct mlx5_ib_dev *dev,
- int priority, bool mcast)
+static struct mlx5_ib_flow_prio *
+_get_flow_table(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_matcher *fs_matcher,
+ bool mcast)
{
- int max_table_size;
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
+ int max_table_size;
+ u32 flags = 0;
+ int priority;
+
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS) {
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, decap))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_DECAP;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ reformat_l3_tunnel_to_l2))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ } else { /* Can only be MLX5_FLOW_NAMESPACE_EGRESS */
+ max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev,
+ log_max_ft_size));
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev->mdev, reformat))
+ flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT;
+ }
- max_table_size = BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
- log_max_ft_size));
if (max_table_size < MLX5_FS_MAX_ENTRIES)
return ERR_PTR(-ENOMEM);
if (mcast)
priority = MLX5_IB_FLOW_MCAST_PRIO;
else
- priority = ib_prio_to_core_prio(priority, false);
+ priority = ib_prio_to_core_prio(fs_matcher->priority, false);
- ns = mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS);
+ ns = mlx5_get_flow_namespace(dev->mdev, fs_matcher->ns_type);
if (!ns)
return ERR_PTR(-ENOTSUPP);
- prio = &dev->flow_db->prios[priority];
+ if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_BYPASS)
+ prio = &dev->flow_db->prios[priority];
+ else
+ prio = &dev->flow_db->egress_prios[priority];
if (prio->flow_table)
return prio;
return _get_prio(ns, prio, priority, MLX5_FS_MAX_ENTRIES,
- MLX5_FS_MAX_TYPES);
+ MLX5_FS_MAX_TYPES, flags);
}
static struct mlx5_ib_flow_handler *
@@ -3696,10 +3712,10 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct mlx5_flow_destination *dst,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen)
{
struct mlx5_ib_flow_handler *handler;
- struct mlx5_flow_act flow_act = {.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG};
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft = ft_prio->flow_table;
int err = 0;
@@ -3718,9 +3734,8 @@ _create_raw_flow_rule(struct mlx5_ib_dev *dev,
fs_matcher->mask_len);
spec->match_criteria_enable = fs_matcher->match_criteria_enable;
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
handler->rule = mlx5_add_flow_rules(ft, spec,
- &flow_act, dst, 1);
+ flow_act, dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -3782,12 +3797,12 @@ static bool raw_fs_is_multicast(struct mlx5_ib_flow_matcher *fs_matcher,
struct mlx5_ib_flow_handler *
mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_matcher *fs_matcher,
+ struct mlx5_flow_act *flow_act,
void *cmd_in, int inlen, int dest_id,
int dest_type)
{
struct mlx5_flow_destination *dst;
struct mlx5_ib_flow_prio *ft_prio;
- int priority = fs_matcher->priority;
struct mlx5_ib_flow_handler *handler;
bool mcast;
int err;
@@ -3805,7 +3820,7 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
mcast = raw_fs_is_multicast(fs_matcher, cmd_in);
mutex_lock(&dev->flow_db->lock);
- ft_prio = _get_flow_table(dev, priority, mcast);
+ ft_prio = _get_flow_table(dev, fs_matcher, mcast);
if (IS_ERR(ft_prio)) {
err = PTR_ERR(ft_prio);
goto unlock;
@@ -3814,13 +3829,18 @@ mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
dst->type = dest_type;
dst->tir_num = dest_id;
- } else {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
dst->ft_num = dest_id;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else {
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
}
- handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, cmd_in,
- inlen);
+ handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
+ cmd_in, inlen);
if (IS_ERR(handler)) {
err = PTR_ERR(handler);
@@ -3998,6 +4018,9 @@ static int mlx5_ib_destroy_flow_action(struct ib_flow_action *action)
*/
mlx5_accel_esp_destroy_xfrm(maction->esp_aes_gcm.ctx);
break;
+ case IB_FLOW_ACTION_UNSPECIFIED:
+ mlx5_ib_destroy_flow_action_raw(maction);
+ break;
default:
WARN_ON(true);
break;
@@ -4012,13 +4035,17 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *mqp = to_mqp(ibqp);
int err;
+ u16 uid;
+
+ uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n");
return -EOPNOTSUPP;
}
- err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
+ err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4030,8 +4057,11 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
int err;
+ u16 uid;
- err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
+ uid = ibqp->pd ? to_mpd(ibqp->pd)->uid : 0;
+ err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid);
if (err)
mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
ibqp->qp_num, gid->raw);
@@ -4052,16 +4082,17 @@ static int init_node_data(struct mlx5_ib_dev *dev)
return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
-static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t fw_pages_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}
+static DEVICE_ATTR_RO(fw_pages);
-static ssize_t show_reg_pages(struct device *device,
+static ssize_t reg_pages_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
@@ -4069,44 +4100,47 @@ static ssize_t show_reg_pages(struct device *device,
return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}
+static DEVICE_ATTR_RO(reg_pages);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->mdev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mlx5_ib_dev *dev =
container_of(device, struct mlx5_ib_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
dev->mdev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
-static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
+static struct attribute *mlx5_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_fw_pages.attr,
+ &dev_attr_reg_pages.attr,
+ NULL,
+};
-static struct device_attribute *mlx5_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_fw_pages,
- &dev_attr_reg_pages,
+static const struct attribute_group mlx5_attr_group = {
+ .attrs = mlx5_class_attributes,
};
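The *_show renames above follow the naming that DEVICE_ATTR_RO() assumes; for reference, the macro expands roughly to (illustrative, based on the generic device-attribute helpers):

	/* DEVICE_ATTR_RO(hw_rev) ~ */
	static struct device_attribute dev_attr_hw_rev = {
		.attr = { .name = "hw_rev", .mode = 0444 },
		.show = hw_rev_show,	/* hence the _show suffix */
	};

With the attributes collected into an attribute_group, registration becomes a single rdma_set_device_sysfs_group() call in the ib_reg stage further down, replacing the per-file device_create_file() loop that this series deletes.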
static void pkey_change_handler(struct work_struct *work)
@@ -5631,7 +5665,6 @@ void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- const char *name;
int err;
int i;
@@ -5664,12 +5697,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
if (mlx5_use_mad_ifc(dev))
get_ext_port_caps(dev);
- if (!mlx5_lag_is_active(mdev))
- name = "mlx5_%d";
- else
- name = "mlx5_bond_%d";
-
- strlcpy(dev->ib_dev.name, name, IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
@@ -5876,7 +5903,7 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) ||
MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc)))
- mutex_init(&dev->lb_mutex);
+ mutex_init(&dev->lb.mutex);
return 0;
}
@@ -6083,7 +6110,14 @@ static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
- return ib_register_device(&dev->ib_dev, NULL);
+ const char *name;
+
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mlx5_attr_group);
+ if (!mlx5_lag_is_active(dev->mdev))
+ name = "mlx5_%d";
+ else
+ name = "mlx5_bond_%d";
+ return ib_register_device(&dev->ib_dev, name, NULL);
}
void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
@@ -6113,21 +6147,6 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
cancel_delay_drop(dev);
}
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
-{
- int err;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- err = device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
{
mlx5_ib_register_vport_reps(dev);
@@ -6151,6 +6170,8 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
profile->stage[stage].cleanup(dev);
}
+ if (dev->devx_whitelist_uid)
+ mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
ib_dealloc_device((struct ib_device *)dev);
}
@@ -6159,8 +6180,7 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
{
int err;
int i;
-
- printk_once(KERN_INFO "%s", mlx5_version);
+ int uid;
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
@@ -6170,6 +6190,10 @@ void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
}
}
+ uid = mlx5_ib_devx_create(dev);
+ if (uid > 0)
+ dev->devx_whitelist_uid = uid;
+
dev->profile = profile;
dev->ib_active = true;
@@ -6230,9 +6254,6 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
mlx5_ib_stage_delay_drop_init,
mlx5_ib_stage_delay_drop_cleanup),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
};
static const struct mlx5_ib_profile nic_rep_profile = {
@@ -6275,9 +6296,6 @@ static const struct mlx5_ib_profile nic_rep_profile = {
STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
mlx5_ib_stage_post_ib_reg_umr_init,
NULL),
- STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
- mlx5_ib_stage_class_attr_init,
- NULL),
STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
mlx5_ib_stage_rep_reg_init,
mlx5_ib_stage_rep_reg_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index f3dbd75a0a96..549234988bb4 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -57,7 +57,7 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
int entry;
unsigned long page_shift = umem->page_shift;
- if (umem->odp_data) {
+ if (umem->is_odp) {
*ncont = ib_umem_page_count(umem);
*count = *ncont << (page_shift - PAGE_SHIFT);
*shift = page_shift;
@@ -152,14 +152,13 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
struct scatterlist *sg;
int entry;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- const bool odp = umem->odp_data != NULL;
-
- if (odp) {
+ if (umem->is_odp) {
WARN_ON(shift != 0);
WARN_ON(access_flags != (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE));
for (i = 0; i < num_pages; ++i) {
- dma_addr_t pa = umem->odp_data->dma_list[offset + i];
+ dma_addr_t pa =
+ to_ib_umem_odp(umem)->dma_list[offset + i];
pas[i] = cpu_to_be64(umem_dma_to_mtt(pa));
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 289c18db2611..b651a7a6fde9 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -39,8 +39,10 @@
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
+#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
@@ -48,17 +50,17 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
-#define mlx5_ib_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_dbg(_dev, format, arg...) \
+ dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_err(_dev, format, arg...) \
+ dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
-#define mlx5_ib_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->ib_dev.name, __func__, \
- __LINE__, current->pid, ##arg)
+#define mlx5_ib_warn(_dev, format, arg...) \
+ dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__, \
+ __LINE__, current->pid, ##arg)
#define field_avail(type, fld, sz) (offsetof(type, fld) + \
sizeof(((type *)0)->fld) <= (sz))
@@ -114,13 +116,6 @@ enum {
MLX5_MEMIC_BASE_SIZE = 1 << MLX5_MEMIC_BASE_ALIGN,
};
-struct mlx5_ib_vma_private_data {
- struct list_head list;
- struct vm_area_struct *vma;
- /* protect vma_private_list add/del */
- struct mutex *vma_private_list_mutex;
-};
-
struct mlx5_ib_ucontext {
struct ib_ucontext ibucontext;
struct list_head db_page_list;
@@ -132,13 +127,12 @@ struct mlx5_ib_ucontext {
u8 cqe_version;
/* Transport Domain number */
u32 tdn;
- struct list_head vma_private_list;
- /* protect vma_private_list add/del */
- struct mutex vma_private_list_mutex;
u64 lib_caps;
DECLARE_BITMAP(dm_pages, MLX5_MAX_MEMIC_PAGES);
u16 devx_uid;
+ /* For RoCE LAG TX affinity */
+ atomic_t tx_port_affinity;
};
static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
@@ -149,6 +143,13 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
+ u16 uid;
+};
+
+enum {
+ MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
+ MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
+ MLX5_IB_FLOW_ACTION_DECAP,
};
#define MLX5_IB_FLOW_MCAST_PRIO (MLX5_BY_PASS_NUM_PRIOS - 1)
@@ -180,6 +181,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_match_params matcher_mask;
int mask_len;
enum mlx5_ib_flow_type flow_type;
+ enum mlx5_flow_namespace_type ns_type;
u16 priority;
struct mlx5_core_dev *mdev;
atomic_t usecnt;
@@ -188,6 +190,7 @@ struct mlx5_ib_flow_matcher {
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS];
struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS];
struct mlx5_flow_table *lag_demux_ft;
@@ -322,6 +325,7 @@ enum {
struct mlx5_ib_rwq_ind_table {
struct ib_rwq_ind_table ib_rwq_ind_tbl;
u32 rqtn;
+ u16 uid;
};
struct mlx5_ib_ubuffer {
@@ -428,7 +432,7 @@ struct mlx5_ib_qp {
struct list_head cq_send_list;
struct mlx5_rate_limit rl;
u32 underlay_qpn;
- bool tunnel_offload_en;
+ u32 flags_en;
/* storage for qp sub type when core qp type is IB_QPT_DRIVER */
enum ib_qp_type qp_sub_type;
};
@@ -536,6 +540,7 @@ struct mlx5_ib_srq {
struct mlx5_ib_xrcd {
struct ib_xrcd ibxrcd;
u32 xrcdn;
+ u16 uid;
};
enum mlx5_ib_mtt_access_flags {
@@ -700,7 +705,7 @@ struct mlx5_roce {
rwlock_t netdev_lock;
struct net_device *netdev;
struct notifier_block nb;
- atomic_t next_port;
+ atomic_t tx_port_affinity;
enum ib_port_state last_port_state;
struct mlx5_ib_dev *dev;
u8 native_port_num;
@@ -815,6 +820,11 @@ struct mlx5_ib_flow_action {
u64 ib_flags;
struct mlx5_accel_esp_xfrm *ctx;
} esp_aes_gcm;
+ struct {
+ struct mlx5_ib_dev *dev;
+ u32 sub_type;
+ u32 action_id;
+ } flow_action_raw;
};
};
@@ -859,9 +869,20 @@ to_mcounters(struct ib_counters *ibcntrs)
return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}
+int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
+ bool is_egress,
+ struct mlx5_flow_act *action);
+struct mlx5_ib_lb_state {
+ /* protect the user_td */
+ struct mutex mutex;
+ u32 user_td;
+ int qps;
+ bool enabled;
+};
+
struct mlx5_ib_dev {
struct ib_device ib_dev;
- const struct uverbs_object_tree_def *driver_trees[6];
+ const struct uverbs_object_tree_def *driver_trees[7];
struct mlx5_core_dev *mdev;
struct mlx5_roce roce[MLX5_MAX_PORTS];
int num_ports;
@@ -900,13 +921,12 @@ struct mlx5_ib_dev {
const struct mlx5_ib_profile *profile;
struct mlx5_eswitch_rep *rep;
- /* protect the user_td */
- struct mutex lb_mutex;
- u32 user_td;
+ struct mlx5_ib_lb_state lb;
u8 umr_fence;
struct list_head ib_dev_list;
u64 sys_image_guid;
struct mlx5_memic memic;
+ u16 devx_whitelist_uid;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
@@ -1017,6 +1037,8 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
+void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata);
@@ -1106,7 +1128,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
int page_shift, __be64 *pas, int access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
@@ -1141,7 +1163,7 @@ void mlx5_ib_pfault(struct mlx5_core_dev *mdev, void *context,
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
@@ -1180,7 +1202,6 @@ void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
const struct mlx5_ib_profile *profile,
int stage);
@@ -1229,22 +1250,20 @@ void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
u8 port_num);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-int mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
-void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context);
+int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
+void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
- void *cmd_in, int inlen, int dest_id, int dest_type);
+ struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
+ int dest_id, int dest_type);
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
+void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
#else
static inline int
-mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) { return -EOPNOTSUPP; };
-static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev,
- struct mlx5_ib_ucontext *context) {}
+mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; };
+static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
static inline const struct uverbs_object_tree_def *
mlx5_ib_get_devx_tree(void) { return NULL; }
static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
@@ -1257,6 +1276,11 @@ mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
{
return 0;
}
+static inline void
+mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
+{
+}
#endif
static inline void init_query_mad(struct ib_smp *mad)
{
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index e22314837645..9b195d65a13e 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -98,7 +98,7 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
- if (mr->umem->odp_data) {
+ if (mr->umem->is_odp) {
/*
* This barrier prevents the compiler from moving the
* setting of umem->odp_data->private to point to our
@@ -107,7 +107,7 @@ static void update_odp_mr(struct mlx5_ib_mr *mr)
* handle invalidations.
*/
smp_wmb();
- mr->umem->odp_data->private = mr;
+ to_ib_umem_odp(mr->umem)->private = mr;
/*
* Make sure we will see the new
* umem->odp_data->private value in the invalidation
@@ -691,7 +691,6 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
init_completion(&ent->compl);
INIT_WORK(&ent->work, cache_work_func);
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
- queue_work(cache->wq, &ent->work);
if (i > MR_CACHE_LAST_STD_ENTRY) {
mlx5_odp_init_mr_cache_entry(ent);
@@ -711,6 +710,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
ent->limit = 0;
+ queue_work(cache->wq, &ent->work);
}
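Moving queue_work() to the end of the loop matters because the queued cache worker consumes ent->limit; a note on the ordering:

	/* Ordering sketch: all of ent's fields, including ->limit, must
	 * be written before queue_work() publishes the entry to the
	 * workqueue, or the worker may size the MR cache from an
	 * uninitialized limit. */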
err = mlx5_mr_cache_debugfs_init(dev);
@@ -1627,14 +1627,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
struct ib_umem *umem = mr->umem;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- if (umem && umem->odp_data) {
+ if (umem && umem->is_odp) {
+ struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
+
/* Prevent new page faults from succeeding */
mr->live = 0;
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&dev->mr_srcu);
/* Destroy all page mappings */
- if (umem->odp_data->page_list)
- mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
+ if (umem_odp->page_list)
+ mlx5_ib_invalidate_range(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
else
mlx5_ib_free_implicit_mr(mr);
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d216e0d2921d..b04eb6775326 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -61,13 +61,21 @@ static int check_parent(struct ib_umem_odp *odp,
return mr && mr->parent == parent && !odp->dying;
}
+struct ib_ucontext_per_mm *mr_to_per_mm(struct mlx5_ib_mr *mr)
+{
+ if (WARN_ON(!mr || !mr->umem || !mr->umem->is_odp))
+ return NULL;
+
+ return to_ib_umem_odp(mr->umem)->per_mm;
+}
+
static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
{
struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
- struct ib_ucontext *ctx = odp->umem->context;
+ struct ib_ucontext_per_mm *per_mm = odp->per_mm;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
+ down_read(&per_mm->umem_rwsem);
while (1) {
rb = rb_next(&odp->interval_tree.rb);
if (!rb)
@@ -79,19 +87,19 @@ static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
-static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
- u64 start, u64 length,
+static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
struct mlx5_ib_mr *parent)
{
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(parent);
struct ib_umem_odp *odp;
struct rb_node *rb;
- down_read(&ctx->umem_rwsem);
- odp = rbt_ib_umem_lookup(&ctx->umem_tree, start, length);
+ down_read(&per_mm->umem_rwsem);
+ odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
if (!odp)
goto end;
@@ -102,13 +110,13 @@ static struct ib_umem_odp *odp_lookup(struct ib_ucontext *ctx,
if (!rb)
goto not_found;
odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
- if (ib_umem_start(odp->umem) > start + length)
+ if (ib_umem_start(&odp->umem) > start + length)
goto not_found;
}
not_found:
odp = NULL;
end:
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
return odp;
}
@@ -116,7 +124,6 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
size_t nentries, struct mlx5_ib_mr *mr, int flags)
{
struct ib_pd *pd = mr->ibmr.pd;
- struct ib_ucontext *ctx = pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct ib_umem_odp *odp;
unsigned long va;
@@ -131,13 +138,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
return;
}
- odp = odp_lookup(ctx, offset * MLX5_IMR_MTT_SIZE,
- nentries * MLX5_IMR_MTT_SIZE, mr);
+ odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
+ nentries * MLX5_IMR_MTT_SIZE, mr);
for (i = 0; i < nentries; i++, pklm++) {
pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
va = (offset + i) * MLX5_IMR_MTT_SIZE;
- if (odp && odp->umem->address == va) {
+ if (odp && odp->umem.address == va) {
struct mlx5_ib_mr *mtt = odp->private;
pklm->key = cpu_to_be32(mtt->ibmr.lkey);
@@ -153,13 +160,13 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
static void mr_leaf_free_action(struct work_struct *work)
{
struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
- int idx = ib_umem_start(odp->umem) >> MLX5_IMR_MTT_SHIFT;
+ int idx = ib_umem_start(&odp->umem) >> MLX5_IMR_MTT_SHIFT;
struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
mr->parent = NULL;
synchronize_srcu(&mr->dev->mr_srcu);
- ib_umem_release(odp->umem);
+ ib_umem_release(&odp->umem);
if (imr->live)
mlx5_ib_update_xlt(imr, idx, 1, 0,
MLX5_IB_UPD_XLT_INDIRECT |
@@ -170,22 +177,24 @@ static void mr_leaf_free_action(struct work_struct *work)
wake_up(&imr->q_leaf_free);
}
-void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
+void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
unsigned long end)
{
struct mlx5_ib_mr *mr;
const u64 umr_block_mask = (MLX5_UMR_MTT_ALIGNMENT /
sizeof(struct mlx5_mtt)) - 1;
u64 idx = 0, blk_start_idx = 0;
+ struct ib_umem *umem;
int in_block = 0;
u64 addr;
- if (!umem || !umem->odp_data) {
+ if (!umem_odp) {
pr_err("invalidation called on NULL umem or non-ODP umem\n");
return;
}
+ umem = &umem_odp->umem;
- mr = umem->odp_data->private;
+ mr = umem_odp->private;
if (!mr || !mr->ibmr.pd)
return;
@@ -208,7 +217,7 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* estimate the cost of another UMR vs. the cost of bigger
* UMR.
*/
- if (umem->odp_data->dma_list[idx] &
+ if (umem_odp->dma_list[idx] &
(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT)) {
if (!in_block) {
blk_start_idx = idx;
@@ -237,13 +246,13 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
* needed.
*/
- ib_umem_odp_unmap_dma_pages(umem, start, end);
+ ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
if (unlikely(!umem->npages && mr->parent &&
- !umem->odp_data->dying)) {
- WRITE_ONCE(umem->odp_data->dying, 1);
+ !umem_odp->dying)) {
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&mr->parent->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
}
}
@@ -366,16 +375,15 @@ fail:
static struct ib_umem_odp *implicit_mr_get_data(struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt)
{
- struct ib_ucontext *ctx = mr->ibmr.pd->uobject->context;
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.pd->device);
struct ib_umem_odp *odp, *result = NULL;
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 addr = io_virt & MLX5_IMR_MTT_MASK;
int nentries = 0, start_idx = 0, ret;
struct mlx5_ib_mr *mtt;
- struct ib_umem *umem;
- mutex_lock(&mr->umem->odp_data->umem_mutex);
- odp = odp_lookup(ctx, addr, 1, mr);
+ mutex_lock(&odp_mr->umem_mutex);
+ odp = odp_lookup(addr, 1, mr);
mlx5_ib_dbg(dev, "io_virt:%llx bcnt:%zx addr:%llx odp:%p\n",
io_virt, bcnt, addr, odp);
@@ -385,22 +393,23 @@ next_mr:
if (nentries)
nentries++;
} else {
- umem = ib_alloc_odp_umem(ctx, addr, MLX5_IMR_MTT_SIZE);
- if (IS_ERR(umem)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- return ERR_CAST(umem);
+ odp = ib_alloc_odp_umem(odp_mr->per_mm, addr,
+ MLX5_IMR_MTT_SIZE);
+ if (IS_ERR(odp)) {
+ mutex_unlock(&odp_mr->umem_mutex);
+ return ERR_CAST(odp);
}
- mtt = implicit_mr_alloc(mr->ibmr.pd, umem, 0, mr->access_flags);
+ mtt = implicit_mr_alloc(mr->ibmr.pd, &odp->umem, 0,
+ mr->access_flags);
if (IS_ERR(mtt)) {
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
- ib_umem_release(umem);
+ mutex_unlock(&odp_mr->umem_mutex);
+ ib_umem_release(&odp->umem);
return ERR_CAST(mtt);
}
- odp = umem->odp_data;
odp->private = mtt;
- mtt->umem = umem;
+ mtt->umem = &odp->umem;
mtt->mmkey.iova = addr;
mtt->parent = mr;
INIT_WORK(&odp->work, mr_leaf_free_action);
@@ -417,7 +426,7 @@ next_mr:
addr += MLX5_IMR_MTT_SIZE;
if (unlikely(addr < io_virt + bcnt)) {
odp = odp_next(odp);
- if (odp && odp->umem->address != addr)
+ if (odp && odp->umem.address != addr)
odp = NULL;
goto next_mr;
}
@@ -432,7 +441,7 @@ next_mr:
}
}
- mutex_unlock(&mr->umem->odp_data->umem_mutex);
+ mutex_unlock(&odp_mr->umem_mutex);
return result;
}
@@ -460,36 +469,36 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
return imr;
}
-static int mr_leaf_free(struct ib_umem *umem, u64 start,
- u64 end, void *cookie)
+static int mr_leaf_free(struct ib_umem_odp *umem_odp, u64 start, u64 end,
+ void *cookie)
{
- struct mlx5_ib_mr *mr = umem->odp_data->private, *imr = cookie;
+ struct mlx5_ib_mr *mr = umem_odp->private, *imr = cookie;
+ struct ib_umem *umem = &umem_odp->umem;
if (mr->parent != imr)
return 0;
- ib_umem_odp_unmap_dma_pages(umem,
- ib_umem_start(umem),
+ ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem),
ib_umem_end(umem));
- if (umem->odp_data->dying)
+ if (umem_odp->dying)
return 0;
- WRITE_ONCE(umem->odp_data->dying, 1);
+ WRITE_ONCE(umem_odp->dying, 1);
atomic_inc(&imr->num_leaf_free);
- schedule_work(&umem->odp_data->work);
+ schedule_work(&umem_odp->work);
return 0;
}
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
{
- struct ib_ucontext *ctx = imr->ibmr.pd->uobject->context;
+ struct ib_ucontext_per_mm *per_mm = mr_to_per_mm(imr);
- down_read(&ctx->umem_rwsem);
- rbt_ib_umem_for_each_in_range(&ctx->umem_tree, 0, ULLONG_MAX,
+ down_read(&per_mm->umem_rwsem);
+ rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, 0, ULLONG_MAX,
mr_leaf_free, true, imr);
- up_read(&ctx->umem_rwsem);
+ up_read(&per_mm->umem_rwsem);
wait_event(imr->q_leaf_free, !atomic_read(&imr->num_leaf_free));
}
@@ -497,6 +506,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
u64 io_virt, size_t bcnt, u32 *bytes_mapped)
{
+ struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
u64 access_mask = ODP_READ_ALLOWED_BIT;
int npages = 0, page_shift, np;
u64 start_idx, page_mask;
@@ -505,7 +515,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
size_t size;
int ret;
- if (!mr->umem->odp_data->page_list) {
+ if (!odp_mr->page_list) {
odp = implicit_mr_get_data(mr, io_virt, bcnt);
if (IS_ERR(odp))
@@ -513,11 +523,11 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
mr = odp->private;
} else {
- odp = mr->umem->odp_data;
+ odp = odp_mr;
}
next_mr:
- size = min_t(size_t, bcnt, ib_umem_end(odp->umem) - io_virt);
+ size = min_t(size_t, bcnt, ib_umem_end(&odp->umem) - io_virt);
page_shift = mr->umem->page_shift;
page_mask = ~(BIT(page_shift) - 1);
@@ -533,7 +543,7 @@ next_mr:
*/
smp_rmb();
- ret = ib_umem_odp_map_dma_pages(mr->umem, io_virt, size,
+ ret = ib_umem_odp_map_dma_pages(to_ib_umem_odp(mr->umem), io_virt, size,
access_mask, current_seq);
if (ret < 0)
@@ -542,7 +552,8 @@ next_mr:
np = ret;
mutex_lock(&odp->umem_mutex);
- if (!ib_umem_mmu_notifier_retry(mr->umem, current_seq)) {
+ if (!ib_umem_mmu_notifier_retry(to_ib_umem_odp(mr->umem),
+ current_seq)) {
/*
* No need to check whether the MTTs really belong to
* this MR, since ib_umem_odp_map_dma_pages already
@@ -575,7 +586,7 @@ next_mr:
io_virt += size;
next = odp_next(odp);
- if (unlikely(!next || next->umem->address != io_virt)) {
+ if (unlikely(!next || next->umem.address != io_virt)) {
mlx5_ib_dbg(dev, "next implicit leaf removed at 0x%llx. got %p\n",
io_virt, next);
return -EAGAIN;
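
The odp.c hunks above replace every umem->odp_data pointer chase with
to_ib_umem_odp() and direct &odp->umem references, which only works
because struct ib_umem is now embedded inside struct ib_umem_odp. A
minimal sketch of the layout and accessor this relies on, assuming the
upstream helper keeps the usual container_of() shape (field list
abridged, not the exact header contents):

	struct ib_umem_odp {
		struct ib_umem umem;	/* embedded; replaces umem->odp_data */
		struct mutex umem_mutex;
		int dying;
		struct work_struct work;
		/* ... page_list, dma_list, interval-tree node, per_mm ... */
	};

	static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
	{
		/* valid only for ODP umems, where the ib_umem is embedded */
		return container_of(umem, struct ib_umem_odp, umem);
	}
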
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index daf1eb84cd31..6841c0f9237f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -37,6 +37,7 @@
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
+#include "cmd.h"
/* not supported currently */
static int wq_signature;
@@ -850,6 +851,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
goto err_umem;
}
+ MLX5_SET(create_qp_in, *in, uid, to_mpd(pd)->uid);
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
@@ -1051,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
static int is_connected(enum ib_qp_type qp_type)
{
- if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+ if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+ qp_type == MLX5_IB_QPT_DCI)
return 1;
return 0;
@@ -1059,11 +1062,13 @@ static int is_connected(enum ib_qp_type qp_type)
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
- struct mlx5_ib_sq *sq, u32 tdn)
+ struct mlx5_ib_sq *sq, u32 tdn,
+ struct ib_pd *pd)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+ MLX5_SET(create_tis_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(tisc, tisc, transport_domain, tdn);
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
@@ -1072,9 +1077,9 @@ static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
- struct mlx5_ib_sq *sq)
+ struct mlx5_ib_sq *sq, struct ib_pd *pd)
{
- mlx5_core_destroy_tis(dev->mdev, sq->tisn);
+ mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid);
}
static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
@@ -1114,6 +1119,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
goto err_umem;
}
+ MLX5_SET(create_sq_in, in, uid, to_mpd(pd)->uid);
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
@@ -1188,7 +1194,7 @@ static size_t get_rq_pas_size(void *qpc)
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
- size_t qpinlen)
+ size_t qpinlen, struct ib_pd *pd)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
@@ -1209,6 +1215,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
MLX5_SET(rqc, rqc, vsd, 1);
@@ -1256,10 +1263,23 @@ static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
MLX5_CAP_ETH(dev, tunnel_stateless_geneve_rx));
}
+static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_rq *rq,
+ u32 qp_flags_en,
+ struct ib_pd *pd)
+{
+ if (qp_flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, rq->tirn, to_mpd(pd)->uid);
+}
+
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn,
- bool tunnel_offload_en)
+ u32 *qp_flags_en,
+ struct ib_pd *pd)
{
+ u8 lb_flag = 0;
u32 *in;
void *tirc;
int inlen;
@@ -1270,33 +1290,45 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
- if (tunnel_offload_en)
+ if (*qp_flags_en & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+
+ if (*qp_flags_en & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+
+ if (dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ *qp_flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ destroy_raw_packet_qp_tir(dev, rq, 0, pd);
+ }
kvfree(in);
return err;
}
-static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq)
-{
- mlx5_core_destroy_tir(dev->mdev, rq->tirn);
-}
-
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in, size_t inlen,
- struct ib_pd *pd)
+ struct ib_pd *pd,
+ struct ib_udata *udata,
+ struct mlx5_ib_create_qp_resp *resp)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
@@ -1306,9 +1338,10 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
u32 tdn = mucontext->tdn;
+ u16 uid = to_mpd(pd)->uid;
if (qp->sq.wqe_cnt) {
- err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
+ err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd);
if (err)
return err;
@@ -1316,6 +1349,13 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (err)
goto err_destroy_tis;
+ if (uid) {
+ resp->tisn = sq->tisn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TISN;
+ resp->sqn = sq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_SQN;
+ }
+
sq->base.container_mibqp = qp;
sq->base.mqp.event = mlx5_ib_qp_event;
}
@@ -1327,22 +1367,32 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
- err = create_raw_packet_qp_rq(dev, rq, in, inlen);
+ err = create_raw_packet_qp_rq(dev, rq, in, inlen, pd);
if (err)
goto err_destroy_sq;
-
- err = create_raw_packet_qp_tir(dev, rq, tdn,
- qp->tunnel_offload_en);
+ err = create_raw_packet_qp_tir(dev, rq, tdn, &qp->flags_en, pd);
if (err)
goto err_destroy_rq;
+
+ if (uid) {
+ resp->rqn = rq->base.mqp.qpn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_RQN;
+ resp->tirn = rq->tirn;
+ resp->comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ }
}
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
+ err = ib_copy_to_udata(udata, resp, min(udata->outlen, sizeof(*resp)));
+ if (err)
+ goto err_destroy_tir;
return 0;
+err_destroy_tir:
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, pd);
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
@@ -1350,7 +1400,7 @@ err_destroy_sq:
return err;
destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, pd);
return err;
}
@@ -1363,13 +1413,13 @@ static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
if (qp->rq.wqe_cnt) {
- destroy_raw_packet_qp_tir(dev, rq);
+ destroy_raw_packet_qp_tir(dev, rq, qp->flags_en, qp->ibqp.pd);
destroy_raw_packet_qp_rq(dev, rq);
}
if (qp->sq.wqe_cnt) {
destroy_raw_packet_qp_sq(dev, sq);
- destroy_raw_packet_qp_tis(dev, sq);
+ destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd);
}
}
@@ -1387,7 +1437,11 @@ static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
- mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
+ if (qp->flags_en & (MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC))
+ mlx5_ib_disable_lb(dev, false, true);
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(qp->ibqp.pd)->uid);
}
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
@@ -1410,6 +1464,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 tdn = mucontext->tdn;
struct mlx5_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
+ u8 lb_flag = 0;
if (init_attr->qp_type != IB_QPT_RAW_PACKET)
return -EOPNOTSUPP;
@@ -1444,7 +1499,9 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
- if (ucmd.flags & ~MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
+ if (ucmd.flags & ~(MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC |
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC)) {
mlx5_ib_dbg(dev, "invalid flags\n");
return -EOPNOTSUPP;
}
@@ -1461,6 +1518,16 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
return -EOPNOTSUPP;
}
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC || dev->rep) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ lb_flag |= MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST;
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
+ }
+
err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
@@ -1472,6 +1539,7 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_tir_in, in, uid, to_mpd(pd)->uid);
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type,
MLX5_TIRC_DISP_TYPE_INDIRECT);
@@ -1484,6 +1552,8 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+ MLX5_SET(tirc, tirc, self_lb_block, lb_flag);
+
if (ucmd.rx_hash_fields_mask & MLX5_RX_HASH_INNER)
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner);
else
@@ -1580,26 +1650,141 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
- if (dev->rep)
- MLX5_SET(tirc, tirc, self_lb_block,
- MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST);
-
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
+ if (!err && MLX5_GET(tirc, tirc, self_lb_block)) {
+ err = mlx5_ib_enable_lb(dev, false, true);
+
+ if (err)
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn,
+ to_mpd(pd)->uid);
+ }
+
if (err)
goto err;
+ if (mucontext->devx_uid) {
+ resp.comp_mask |= MLX5_IB_CREATE_QP_RESP_MASK_TIRN;
+ resp.tirn = qp->rss_qp.tirn;
+ }
+
+ err = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
+ if (err)
+ goto err_copy;
+
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
qp->flags |= MLX5_IB_QP_RSS;
return 0;
+err_copy:
+ mlx5_cmd_destroy_tir(dev->mdev, qp->rss_qp.tirn, mucontext->devx_uid);
err:
kvfree(in);
return err;
}
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+ void *qpc)
+{
+ int rcqe_sz;
+
+ if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+ return;
+
+ rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+ if (rcqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+ MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct ib_qp_init_attr *init_attr,
+ struct mlx5_ib_create_qp *ucmd,
+ void *qpc)
+{
+ enum ib_qp_type qpt = init_attr->qp_type;
+ int scqe_sz;
+ bool allow_scat_cqe = 0;
+
+ if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+ return;
+
+ if (ucmd)
+ allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+ if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+ return;
+
+ scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+ if (scqe_sz == 128) {
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+ return;
+ }
+
+ if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+ MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+ MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
+static int atomic_size_to_mode(int size_mask)
+{
+ /* driver does not support atomic_size > 256B
+ * and does not know how to translate bigger sizes
+ */
+ int supported_size_mask = size_mask & 0x1ff;
+ int log_max_size;
+
+ if (!supported_size_mask)
+ return -EOPNOTSUPP;
+
+ log_max_size = __fls(supported_size_mask);
+
+ if (log_max_size > 3)
+ return log_max_size;
+
+ return MLX5_ATOMIC_MODE_8B;
+}
+
+static int get_atomic_mode(struct mlx5_ib_dev *dev,
+ enum ib_qp_type qp_type)
+{
+ u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
+ u8 atomic = MLX5_CAP_GEN(dev->mdev, atomic);
+ int atomic_mode = -EOPNOTSUPP;
+ int atomic_size_mask;
+
+ if (!atomic)
+ return -EOPNOTSUPP;
+
+ if (qp_type == MLX5_IB_QPT_DCT)
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_dc);
+ else
+ atomic_size_mask = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
+
+ if ((atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP) ||
+ (atomic_operations & MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD))
+ atomic_mode = atomic_size_to_mode(atomic_size_mask);
+
+ if (atomic_mode <= 0 &&
+ (atomic_operations & MLX5_ATOMIC_OPS_CMP_SWAP &&
+ atomic_operations & MLX5_ATOMIC_OPS_FETCH_ADD))
+ atomic_mode = MLX5_ATOMIC_MODE_IB_COMP;
+
+ return atomic_mode;
+}
+
+static inline bool check_flags_mask(uint64_t input, uint64_t supported)
+{
+ return (input & ~supported) == 0;
+}
+
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1697,20 +1882,47 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
return -EFAULT;
}
+ if (!check_flags_mask(ucmd.flags,
+ MLX5_QP_FLAG_SIGNATURE |
+ MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_TUNNEL_OFFLOADS |
+ MLX5_QP_FLAG_BFREG_INDEX |
+ MLX5_QP_FLAG_TYPE_DCT |
+ MLX5_QP_FLAG_TYPE_DCI |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
+ return -EINVAL;
+
err = get_qp_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
- qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+ if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+ qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
!tunnel_offload_supported(mdev)) {
mlx5_ib_dbg(dev, "Tunnel offload isn't supported\n");
return -EOPNOTSUPP;
}
- qp->tunnel_offload_en = true;
+ qp->flags_en |= MLX5_QP_FLAG_TUNNEL_OFFLOADS;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UC isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC;
+ }
+
+ if (ucmd.flags & MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC) {
+ if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
+ mlx5_ib_dbg(dev, "Self-LB UM isn't supported\n");
+ return -EOPNOTSUPP;
+ }
+ qp->flags_en |= MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC;
}
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
@@ -1811,23 +2023,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
- int rcqe_sz;
- int scqe_sz;
-
- rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
- scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
- if (rcqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
- if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
- if (scqe_sz == 128)
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
- else
- MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
- }
+ configure_responder_scat_cqe(init_attr, qpc);
+ configure_requester_scat_cqe(dev, init_attr,
+ (pd && pd->uobject) ? &ucmd : NULL,
+ qpc);
}
if (qp->rq.wqe_cnt) {
@@ -1911,7 +2110,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
- err = create_raw_packet_qp(dev, qp, in, inlen, pd);
+ err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
+ &resp);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
@@ -2192,6 +2392,7 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
goto err_free;
}
+ MLX5_SET(create_dct_in, qp->dct.in, uid, to_mpd(pd)->uid);
dctc = MLX5_ADDR_OF(create_dct_in, qp->dct.in, dct_context_entry);
qp->qp_sub_type = MLX5_IB_QPT_DCT;
MLX5_SET(dctc, dctc, pd, to_mpd(pd)->pdn);
@@ -2200,6 +2401,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
MLX5_SET(dctc, dctc, user_index, uidx);
+ if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+ configure_responder_scat_cqe(attr, dctc);
+
qp->state = IB_QPS_RESET;
return &qp->ibqp;
@@ -2405,13 +2609,15 @@ int mlx5_ib_destroy_qp(struct ib_qp *qp)
return 0;
}
-static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
- int attr_mask)
+static int to_mlx5_access_flags(struct mlx5_ib_qp *qp,
+ const struct ib_qp_attr *attr,
+ int attr_mask, __be32 *hw_access_flags)
{
- u32 hw_access_flags = 0;
u8 dest_rd_atomic;
u32 access_flags;
+ struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.device);
+
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
else
@@ -2426,13 +2632,25 @@ static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_att
access_flags &= IB_ACCESS_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
- hw_access_flags |= MLX5_QP_BIT_RRE;
- if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
- hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
+ *hw_access_flags |= MLX5_QP_BIT_RRE;
+ if ((access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
+ qp->ibqp.qp_type == IB_QPT_RC) {
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, qp->ibqp.qp_type);
+ if (atomic_mode < 0)
+ return -EOPNOTSUPP;
+
+ *hw_access_flags |= MLX5_QP_BIT_RAE;
+ *hw_access_flags |= atomic_mode << MLX5_ATOMIC_MODE_OFFSET;
+ }
+
if (access_flags & IB_ACCESS_REMOTE_WRITE)
- hw_access_flags |= MLX5_QP_BIT_RWE;
+ *hw_access_flags |= MLX5_QP_BIT_RWE;
+
+ *hw_access_flags = cpu_to_be32(*hw_access_flags);
- return cpu_to_be32(hw_access_flags);
+ return 0;
}
enum {
@@ -2458,7 +2676,8 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
}
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 sl)
+ struct mlx5_ib_sq *sq, u8 sl,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2471,6 +2690,7 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
@@ -2483,7 +2703,8 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
}
static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq, u8 tx_affinity)
+ struct mlx5_ib_sq *sq, u8 tx_affinity,
+ struct ib_pd *pd)
{
void *in;
void *tisc;
@@ -2496,6 +2717,7 @@ static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
+ MLX5_SET(modify_tis_in, in, uid, to_mpd(pd)->uid);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
@@ -2580,7 +2802,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
- sl & 0xf);
+ sl & 0xf, qp->ibqp.pd);
return 0;
}
@@ -2728,9 +2950,9 @@ static int ib_mask_to_mlx5_opt(int ib_mask)
return result;
}
-static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
- struct mlx5_ib_rq *rq, int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_rq(
+ struct mlx5_ib_dev *dev, struct mlx5_ib_rq *rq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
void *in;
void *rqc;
@@ -2743,6 +2965,7 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
return -ENOMEM;
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
@@ -2753,8 +2976,9 @@ static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
} else
- pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "RAW PACKET QP counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
@@ -2768,10 +2992,9 @@ out:
return err;
}
-static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
- struct mlx5_ib_sq *sq,
- int new_state,
- const struct mlx5_modify_raw_qp_param *raw_qp_param)
+static int modify_raw_packet_qp_sq(
+ struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state,
+ const struct mlx5_modify_raw_qp_param *raw_qp_param, struct ib_pd *pd)
{
struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
struct mlx5_rate_limit old_rl = ibqp->rl;
@@ -2788,6 +3011,7 @@ static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
if (!in)
return -ENOMEM;
+ MLX5_SET(modify_sq_in, in, uid, to_mpd(pd)->uid);
MLX5_SET(modify_sq_in, in, sq_state, sq->state);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
@@ -2890,7 +3114,8 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
if (modify_rq) {
- err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
+ err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param,
+ qp->ibqp.pd);
if (err)
return err;
}
@@ -2898,17 +3123,50 @@ static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
- tx_affinity);
+ tx_affinity,
+ qp->ibqp.pd);
if (err)
return err;
}
- return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
+ return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state,
+ raw_qp_param, qp->ibqp.pd);
}
return 0;
}
+static unsigned int get_tx_affinity(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_pd *pd,
+ struct mlx5_ib_qp_base *qp_base,
+ u8 port_num)
+{
+ struct mlx5_ib_ucontext *ucontext = NULL;
+ unsigned int tx_port_affinity;
+
+ if (pd && pd->ibpd.uobject && pd->ibpd.uobject->context)
+ ucontext = to_mucontext(pd->ibpd.uobject->context);
+
+ if (ucontext) {
+ tx_port_affinity = (unsigned int)atomic_add_return(
+ 1, &ucontext->tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x ucontext %p\n",
+ tx_port_affinity, qp_base->mqp.qpn, ucontext);
+ } else {
+ tx_port_affinity =
+ (unsigned int)atomic_add_return(
+ 1, &dev->roce[port_num].tx_port_affinity) %
+ MLX5_MAX_PORTS +
+ 1;
+ mlx5_ib_dbg(dev, "Set tx affinity 0x%x to qpn 0x%x\n",
+ tx_port_affinity, qp_base->mqp.qpn);
+ }
+
+ return tx_port_affinity;
+}
+
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state,
@@ -2974,6 +3232,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
if (!context)
return -ENOMEM;
+ pd = get_pd(qp);
context->flags = cpu_to_be32(mlx5_st << 16);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
@@ -3002,9 +3261,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
u8 p = mlx5_core_native_port_num(dev->mdev);
- tx_affinity = (unsigned int)atomic_add_return(1,
- &dev->roce[p].next_port) %
- MLX5_MAX_PORTS + 1;
+ tx_affinity = get_tx_affinity(dev, pd, base, p);
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
@@ -3062,7 +3319,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
goto out;
}
- pd = get_pd(qp);
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
@@ -3092,8 +3348,15 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
}
- if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
- context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
+ if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+ __be32 access_flags = 0;
+
+ err = to_mlx5_access_flags(qp, attr, attr_mask, &access_flags);
+ if (err)
+ goto out;
+
+ context->params2 |= access_flags;
+ }
if (attr_mask & IB_QP_MIN_RNR_TIMER)
context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
@@ -3243,7 +3506,9 @@ static bool modify_dci_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state new
int req = IB_QP_STATE;
int opt = 0;
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ if (new_state == IB_QPS_RESET) {
+ return is_valid_mask(attr_mask, req, opt);
+ } else if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
req |= IB_QP_PKEY_INDEX | IB_QP_PORT;
return is_valid_mask(attr_mask, req, opt);
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
@@ -3307,10 +3572,14 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
MLX5_SET(dctc, dctc, rwe, 1);
if (attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) {
- if (!mlx5_ib_dc_atomic_is_supported(dev))
+ int atomic_mode;
+
+ atomic_mode = get_atomic_mode(dev, MLX5_IB_QPT_DCT);
+ if (atomic_mode < 0)
return -EOPNOTSUPP;
+
+ MLX5_SET(dctc, dctc, atomic_mode, atomic_mode);
MLX5_SET(dctc, dctc, rae, 1);
- MLX5_SET(dctc, dctc, atomic_mode, MLX5_ATOMIC_MODE_DCT_CX);
}
MLX5_SET(dctc, dctc, pkey_index, attr->pkey_index);
MLX5_SET(dctc, dctc, port, attr->port_num);
@@ -3367,7 +3636,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
size_t required_cmd_sz;
int err = -EINVAL;
int port;
- enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
if (ibqp->rwq_ind_tbl)
return -ENOSYS;
@@ -3413,7 +3681,6 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (!(cur_state == new_state && cur_state == IB_QPS_RESET)) {
port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
- ll = dev->ib_dev.get_link_layer(&dev->ib_dev, port);
}
if (qp->flags & MLX5_IB_QP_UNDERLAY) {
@@ -3424,7 +3691,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
} else if (qp_type != MLX5_IB_QPT_REG_UMR &&
qp_type != MLX5_IB_QPT_DCI &&
- !ib_modify_qp_is_ok(cur_state, new_state, qp_type, attr_mask, ll)) {
+ !ib_modify_qp_is_ok(cur_state, new_state, qp_type,
+ attr_mask)) {
mlx5_ib_dbg(dev, "invalid QP state transition from %d to %d, qp_type %d, attr_mask 0x%x\n",
cur_state, new_state, ibqp->qp_type, attr_mask);
goto out;
@@ -4371,6 +4639,12 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
u8 next_fence = 0;
u8 fence;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_send(ibqp, wr, bad_wr);
@@ -4380,13 +4654,6 @@ static int _mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
spin_lock_irqsave(&qp->sq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
for (nreq = 0; wr; nreq++, wr = wr->next) {
if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
mlx5_ib_warn(dev, "\n");
@@ -4700,18 +4967,17 @@ static int _mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int ind;
int i;
+ if (unlikely(mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR &&
+ !drain)) {
+ *bad_wr = wr;
+ return -EIO;
+ }
+
if (unlikely(ibqp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_post_recv(ibqp, wr, bad_wr);
spin_lock_irqsave(&qp->rq.lock, flags);
- if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR && !drain) {
- err = -EIO;
- *bad_wr = wr;
- nreq = 0;
- goto out;
- }
-
ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
for (nreq = 0; wr; nreq++, wr = wr->next) {
@@ -5175,6 +5441,7 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_xrcd *xrcd;
int err;
+ u16 uid;
if (!MLX5_CAP_GEN(dev->mdev, xrc))
return ERR_PTR(-ENOSYS);
@@ -5183,12 +5450,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
if (!xrcd)
return ERR_PTR(-ENOMEM);
- err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
+ uid = context ? to_mucontext(context)->devx_uid : 0;
+ err = mlx5_cmd_xrcd_alloc(dev->mdev, &xrcd->xrcdn, uid);
if (err) {
kfree(xrcd);
return ERR_PTR(-ENOMEM);
}
+ xrcd->uid = uid;
return &xrcd->ibxrcd;
}
@@ -5196,9 +5465,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
+ u16 uid = to_mxrcd(xrcd)->uid;
int err;
- err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
+ err = mlx5_cmd_xrcd_dealloc(dev->mdev, xrcdn, uid);
if (err)
mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
@@ -5268,6 +5538,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
if (!in)
return -ENOMEM;
+ MLX5_SET(create_rq_in, in, uid, to_mpd(pd)->uid);
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
MLX5_SET(rqc, rqc, mem_rq_type,
MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
@@ -5443,8 +5714,7 @@ static int prepare_user_rq(struct ib_pd *pd,
err = create_user_rq(dev, pd, rwq, &ucmd);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
- if (err)
- return err;
+ return err;
}
rwq->user_index = ucmd.user_index;
@@ -5573,6 +5843,9 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
for (i = 0; i < sz; i++)
MLX5_SET(rqtc, rqtc, rq_num[i], init_attr->ind_tbl[i]->wq_num);
+ rwq_ind_tbl->uid = to_mpd(init_attr->ind_tbl[0]->pd)->uid;
+ MLX5_SET(create_rqt_in, in, uid, rwq_ind_tbl->uid);
+
err = mlx5_core_create_rqt(dev->mdev, in, inlen, &rwq_ind_tbl->rqtn);
kvfree(in);
@@ -5591,7 +5864,7 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device,
return &rwq_ind_tbl->ib_rwq_ind_tbl;
err_copy:
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
err:
kfree(rwq_ind_tbl);
return ERR_PTR(err);
@@ -5602,7 +5875,7 @@ int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
struct mlx5_ib_rwq_ind_table *rwq_ind_tbl = to_mrwq_ind_table(ib_rwq_ind_tbl);
struct mlx5_ib_dev *dev = to_mdev(ib_rwq_ind_tbl->device);
- mlx5_core_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn);
+ mlx5_cmd_destroy_rqt(dev->mdev, rwq_ind_tbl->rqtn, rwq_ind_tbl->uid);
kfree(rwq_ind_tbl);
return 0;
@@ -5653,6 +5926,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
if (wq_state == IB_WQS_ERR)
wq_state = MLX5_RQC_STATE_ERR;
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
+ MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
MLX5_SET(rqc, rqc, state, wq_state);
if (wq_attr_mask & IB_WQ_FLAGS) {
@@ -5684,8 +5958,9 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(rqc, rqc, counter_set_id,
dev->port->cnts.set_id);
} else
- pr_info_once("%s: Receive WQ counters are not supported on current FW\n",
- dev->ib_dev.name);
+ dev_info_once(
+ &dev->ib_dev.dev,
+ "Receive WQ counters are not supported on current FW\n");
}
err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
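
The new atomic_size_to_mode()/get_atomic_mode() helpers above turn a
firmware size bitmask (bit n set means 2^n-byte atomics are supported)
into a QP atomic mode. A small userspace restatement of the arithmetic,
with stand-in constants, to make the mapping concrete:

	#include <stdio.h>

	#define MODE_8B 3	/* stand-in for MLX5_ATOMIC_MODE_8B */

	/* __fls(x) is the index of the highest set bit in x */
	static int log_max(unsigned int x) { return 31 - __builtin_clz(x); }

	static int size_mask_to_mode(int size_mask)
	{
		int supported = size_mask & 0x1ff;	/* >256B not translatable */

		if (!supported)
			return -1;			/* -EOPNOTSUPP in the driver */
		if (log_max(supported) > 3)
			return log_max(supported);	/* mode encodes log2(size) */
		return MODE_8B;
	}

	int main(void)
	{
		printf("%d\n", size_mask_to_mode(0x100)); /* 256B only -> 8 */
		printf("%d\n", size_mask_to_mode(0x00c)); /* 4B+8B -> 3 (8B mode) */
		printf("%d\n", size_mask_to_mode(0x000)); /* none -> -1 */
		return 0;
	}
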
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index d359fecf7a5b..d012e7dbcc38 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -144,6 +144,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
in->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
in->page_offset = offset;
+ in->uid = to_mpd(pd)->uid;
if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1 &&
in->type != IB_SRQT_BASIC)
in->user_index = uidx;
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 093f7755c843..2e5dc0a67cfc 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -58,8 +58,9 @@ static int mthca_update_rate(struct mthca_dev *dev, u8 port_num)
ret = ib_query_port(&dev->ib_dev, port_num, tprops);
if (ret) {
- printk(KERN_WARNING "ib_query_port failed (%d) for %s port %d\n",
- ret, dev->ib_dev.name, port_num);
+ dev_warn(&dev->ib_dev.dev,
+ "ib_query_port failed (%d) forport %d\n", ret,
+ port_num);
goto out;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f3e80dec1334..92c49bff22bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -986,7 +986,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_free_dev;
}
- if (mthca_cmd_init(mdev)) {
+ err = mthca_cmd_init(mdev);
+ if (err) {
mthca_err(mdev, "Failed to init command interface, aborting.\n");
goto err_free_dev;
}
@@ -1014,8 +1015,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
err = mthca_setup_hca(mdev);
if (err == -EBUSY && (mdev->mthca_flags & MTHCA_FLAG_MSI_X)) {
- if (mdev->mthca_flags & MTHCA_FLAG_MSI_X)
- pci_free_irq_vectors(pdev);
+ pci_free_irq_vectors(pdev);
mdev->mthca_flags &= ~MTHCA_FLAG_MSI_X;
err = mthca_setup_hca(mdev);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0d3473b4596e..691c6f048938 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1076,16 +1076,17 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
return err;
}
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%x\n", dev->rev_id);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
@@ -1103,23 +1104,26 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
return sprintf(buf, "unknown\n");
}
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct mthca_dev *dev =
container_of(device, struct mthca_dev, ib_dev.dev);
return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *mthca_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
+};
-static struct device_attribute *mthca_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group mthca_attr_group = {
+ .attrs = mthca_dev_attributes,
};
static int mthca_init_node_data(struct mthca_dev *dev)
@@ -1192,13 +1196,11 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
int mthca_register_device(struct mthca_dev *dev)
{
int ret;
- int i;
ret = mthca_init_node_data(dev);
if (ret)
return ret;
- strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.uverbs_abi_ver = MTHCA_UVERBS_ABI_VERSION;
@@ -1296,20 +1298,12 @@ int mthca_register_device(struct mthca_dev *dev)
mutex_init(&dev->cap_mask_mutex);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
if (ret)
return ret;
- for (i = 0; i < ARRAY_SIZE(mthca_dev_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- mthca_dev_attributes[i]);
- if (ret) {
- ib_unregister_device(&dev->ib_dev);
- return ret;
- }
- }
-
mthca_start_catas_poll(dev);
return 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 3d37f2373d63..9d178ee3c96a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -872,8 +872,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_UNSPECIFIED)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+ attr_mask)) {
mthca_dbg(dev, "Bad QP transition (transport %d) "
"%d->%d with attr 0x%08x\n",
qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 42b68aa999fc..e00add6d78ec 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -456,9 +456,6 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
void __iomem *mmio_regs = NULL;
u8 hw_rev;
- assert(pcidev != NULL);
- assert(ent != NULL);
-
printk(KERN_INFO PFX "NetEffect RNIC driver v%s loading. (%s)\n",
DRV_VERSION, pci_name(pcidev));
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index bedaa02749fb..a895fe980d10 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -149,18 +149,9 @@ do { \
printk(KERN_ERR PFX "%s[%u]: " fmt, __func__, __LINE__, ##args); \
} while (0)
-#define assert(expr) \
-do { \
- if (!(expr)) { \
- printk(KERN_ERR PFX "Assertion failed! %s, %s, %s, line %d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- } \
-} while (0)
-
#define NES_EVENT_TIMEOUT 1200000
#else
#define nes_debug(level, fmt, args...) no_printk(fmt, ##args)
-#define assert(expr) do {} while (0)
#define NES_EVENT_TIMEOUT 100000
#endif
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index bd0675d8f298..5517e392bc01 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1443,7 +1443,7 @@ static int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_inde
mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
+ } while (temp_phy_data2 == temp_phy_data);
/* wait for tracking */
counter = 0;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 61014e251555..16f33454c198 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -146,8 +146,6 @@ static int nes_netdev_open(struct net_device *netdev)
struct list_head *list_pos, *list_temp;
unsigned long flags;
- assert(nesdev != NULL);
-
if (nesvnic->netdev_open == 1)
return 0;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 6940c7215961..92d1cadd4cfd 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -687,7 +687,7 @@ static struct ib_pd *nes_alloc_pd(struct ib_device *ibdev,
}
nes_debug(NES_DBG_PD, "Allocating PD (%p) for ib device %s\n",
- nespd, nesvnic->nesibdev->ibdev.name);
+ nespd, dev_name(&nesvnic->nesibdev->ibdev.dev));
nespd->pd_id = (pd_num << (PAGE_SHIFT-12)) + nesadapter->base_pd;
@@ -2556,8 +2556,8 @@ static int nes_dereg_mr(struct ib_mr *ib_mr)
/**
* show_rev
*/
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct nes_ib_device *nesibdev =
container_of(dev, struct nes_ib_device, ibdev.dev);
@@ -2566,40 +2566,40 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%x\n", nesvnic->nesdev->nesadapter->hw_rev);
}
-
+static DEVICE_ATTR_RO(hw_rev);
/**
* show_hca
*/
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "NES020\n");
}
-
+static DEVICE_ATTR_RO(hca_type);
/**
* show_board
*/
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
nes_debug(NES_DBG_INIT, "\n");
return sprintf(buf, "%.*s\n", 32, "NES020 Board ID");
}
+static DEVICE_ATTR_RO(board_id);
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *nes_dev_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static struct attribute *nes_dev_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL
};
+static const struct attribute_group nes_attr_group = {
+ .attrs = nes_dev_attributes,
+};
/**
* nes_query_qp
@@ -3640,7 +3640,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
if (nesibdev == NULL) {
return NULL;
}
- strlcpy(nesibdev->ibdev.name, "nes%d", IB_DEVICE_NAME_MAX);
nesibdev->ibdev.owner = THIS_MODULE;
nesibdev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -3795,10 +3794,11 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
struct nes_vnic *nesvnic = nesibdev->nesvnic;
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesdev->nesadapter;
- int i, ret;
+ int ret;
+ rdma_set_device_sysfs_group(&nesvnic->nesibdev->ibdev, &nes_attr_group);
nesvnic->nesibdev->ibdev.driver_id = RDMA_DRIVER_NES;
- ret = ib_register_device(&nesvnic->nesibdev->ibdev, NULL);
+ ret = ib_register_device(&nesvnic->nesibdev->ibdev, "nes%d", NULL);
if (ret) {
return ret;
}
@@ -3809,19 +3809,6 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
nesibdev->max_qp = (nesadapter->max_qp-NES_FIRST_QPN) / nesadapter->port_count;
nesibdev->max_pd = nesadapter->max_pd / nesadapter->port_count;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- ret = device_create_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- if (ret) {
- while (i > 0) {
- i--;
- device_remove_file(&nesibdev->ibdev.dev,
- nes_dev_attributes[i]);
- }
- ib_unregister_device(&nesibdev->ibdev);
- return ret;
- }
- }
-
nesvnic->of_device_registered = 1;
return 0;
@@ -3834,15 +3821,9 @@ int nes_register_ofa_device(struct nes_ib_device *nesibdev)
static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev)
{
struct nes_vnic *nesvnic = nesibdev->nesvnic;
- int i;
- for (i = 0; i < ARRAY_SIZE(nes_dev_attributes); ++i) {
- device_remove_file(&nesibdev->ibdev.dev, nes_dev_attributes[i]);
- }
-
- if (nesvnic->of_device_registered) {
+ if (nesvnic->of_device_registered)
ib_unregister_device(&nesibdev->ibdev);
- }
nesvnic->of_device_registered = 0;
}
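
The mthca and nes conversions above (and the ocrdma/qedr ones below)
all follow one recipe: rename each show function to <attr>_show so
DEVICE_ATTR_RO() can generate dev_attr_<attr>, collect the attrs in a
NULL-terminated array, and hand the group to the core via
rdma_set_device_sysfs_group() before ib_register_device(), so the files
are created and removed with the device itself. A condensed sketch with
a placeholder driver name:

	static ssize_t hw_rev_show(struct device *device,
				   struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%x\n", 0 /* driver-specific revision */);
	}
	static DEVICE_ATTR_RO(hw_rev);	/* generates dev_attr_hw_rev */

	static struct attribute *foo_attrs[] = {
		&dev_attr_hw_rev.attr,
		NULL			/* the array must be NULL-terminated */
	};

	static const struct attribute_group foo_attr_group = {
		.attrs = foo_attrs,
	};

	/* before ib_register_device(), replacing device_create_file() loops:
	 *	rdma_set_device_sysfs_group(&dev->ib_dev, &foo_attr_group);
	 */
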
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index e578281471af..241a57a07485 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -792,7 +792,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.
srq_context);
} else if (dev_event) {
- pr_err("%s: Fatal event received\n", dev->ibdev.name);
+ dev_err(&dev->ibdev.dev, "Fatal event received\n");
ib_dispatch_event(&ib_evt);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7832ee3e0c84..873cc7f6fe61 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -114,9 +114,37 @@ static void get_dev_fw_str(struct ib_device *device, char *str)
snprintf(str, IB_FW_VERSION_NAME_MAX, "%s", &dev->attr.fw_ver[0]);
}
+/* OCRDMA sysfs interface */
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ struct ocrdma_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *ocrdma_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group ocrdma_attr_group = {
+ .attrs = ocrdma_attributes,
+};
+
static int ocrdma_register_device(struct ocrdma_dev *dev)
{
- strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX);
ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
@@ -213,8 +241,9 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.destroy_srq = ocrdma_destroy_srq;
dev->ibdev.post_srq_recv = ocrdma_post_srq_recv;
}
+ rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "ocrdma%d", NULL);
}
static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
@@ -260,42 +289,9 @@ static void ocrdma_free_resources(struct ocrdma_dev *dev)
kfree(dev->cq_tbl);
}
-/* OCRDMA sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->nic_info.pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct ocrdma_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "%s\n", &dev->model_number[0]);
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *ocrdma_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- device_remove_file(&dev->ibdev.dev, ocrdma_attributes[i]);
-}
-
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
- int status = 0, i;
+ int status = 0;
u8 lstate = 0;
struct ocrdma_dev *dev;
@@ -331,9 +327,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (!status)
ocrdma_update_link_state(dev, lstate);
- for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
- goto sysfs_err;
/* Init stats */
ocrdma_add_port_stats(dev);
/* Interrupt Moderation */
@@ -348,8 +341,6 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
dev_name(&dev->nic_info.pdev->dev), dev->id);
return dev;
-sysfs_err:
- ocrdma_remove_sysfiles(dev);
alloc_err:
ocrdma_free_resources(dev);
ocrdma_cleanup_hw(dev);
@@ -376,7 +367,6 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
* of the registered clients.
*/
cancel_delayed_work_sync(&dev->eqd_work);
- ocrdma_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
ocrdma_rem_port_stats(dev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 24d20a4aa262..290d776edf48 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -764,7 +764,8 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
return;
/* Create post stats base dir */
- dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
+ dev->dir =
+ debugfs_create_dir(dev_name(&dev->ibdev.dev), ocrdma_dbgfs_dir);
if (!dev->dir)
goto err;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index c158ca9fde6d..06d2a7f3304c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1480,8 +1480,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
new_qps = old_qps;
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
__func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index a0af6d424aed..8d6ff9df49fe 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -133,6 +133,33 @@ static int qedr_iw_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+/* QEDR sysfs interface */
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct qedr_dev *dev = dev_get_drvdata(device);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
+}
+static DEVICE_ATTR_RO(hw_rev);
+
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
+}
+static DEVICE_ATTR_RO(hca_type);
+
+static struct attribute *qedr_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ NULL
+};
+
+static const struct attribute_group qedr_attr_group = {
+ .attrs = qedr_attributes,
+};
+
static int qedr_iw_register_device(struct qedr_dev *dev)
{
dev->ibdev.node_type = RDMA_NODE_RNIC;
@@ -170,8 +197,6 @@ static int qedr_register_device(struct qedr_dev *dev)
{
int rc;
- strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);
-
dev->ibdev.node_guid = dev->attr.node_guid;
memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
@@ -262,9 +287,9 @@ static int qedr_register_device(struct qedr_dev *dev)
dev->ibdev.get_link_layer = qedr_link_layer;
dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;
-
+ rdma_set_device_sysfs_group(&dev->ibdev, &qedr_attr_group);
dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
- return ib_register_device(&dev->ibdev, NULL);
+ return ib_register_device(&dev->ibdev, "qedr%d", NULL);
}
/* This function allocates fast-path status block memory */
@@ -404,37 +429,6 @@ err1:
return rc;
}
-/* QEDR sysfs interface */
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
-{
- struct qedr_dev *dev = dev_get_drvdata(device);
-
- return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
-}
-
-static ssize_t show_hca_type(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);
-
-static struct device_attribute *qedr_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type
-};
-
-static void qedr_remove_sysfiles(struct qedr_dev *dev)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
-}
-
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
int rc = pci_enable_atomic_ops_to_root(pdev,
@@ -855,7 +849,7 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
{
struct qed_dev_rdma_info dev_info;
struct qedr_dev *dev;
- int rc = 0, i;
+ int rc = 0;
dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
if (!dev) {
@@ -914,18 +908,12 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
goto reg_err;
}
- for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
- if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
- goto sysfs_err;
-
if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
-sysfs_err:
- ib_unregister_device(&dev->ibdev);
reg_err:
qedr_sync_free_irqs(dev);
irq_err:
@@ -944,7 +932,6 @@ static void qedr_remove(struct qedr_dev *dev)
/* First unregister with stack to stop all the active traffic
* of the registered clients.
*/
- qedr_remove_sysfiles(dev);
ib_unregister_device(&dev->ibdev);
qedr_stop_hw(dev);
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index a2d708dceb8d..53bbe6b4e6e6 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -43,7 +43,7 @@
#include "qedr_hsi_rdma.h"
#define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
-#define DP_NAME(dev) ((dev)->ibdev.name)
+#define DP_NAME(_dev) dev_name(&(_dev)->ibdev.dev)
#define IS_IWARP(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_IWARP)
#define IS_ROCE(_dev) ((_dev)->rdma_type == QED_RDMA_TYPE_ROCE)
diff --git a/drivers/infiniband/hw/qedr/qedr_roce_cm.c b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
index 85578887421b..e1ac2fd60bb1 100644
--- a/drivers/infiniband/hw/qedr/qedr_roce_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_roce_cm.c
@@ -519,9 +519,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ packet->tx_dest = QED_LL2_TX_DEST_LB;
else
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
+ packet->tx_dest = QED_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 8cc3df24e04e..82ee4b4a7084 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1447,7 +1447,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
u64 pbl_base_addr, phy_prod_pair_addr;
struct ib_ucontext *ib_ctx = NULL;
struct qedr_srq_hwq_info *hw_srq;
- struct qedr_ucontext *ctx = NULL;
u32 page_cnt, page_size;
struct qedr_srq *srq;
int rc = 0;
@@ -1473,7 +1472,6 @@ struct ib_srq *qedr_create_srq(struct ib_pd *ibpd,
if (udata && ibpd->uobject && ibpd->uobject->context) {
ib_ctx = ibpd->uobject->context;
- ctx = get_qedr_ucontext(ib_ctx);
if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
DP_ERR(dev,
@@ -2240,8 +2238,7 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (rdma_protocol_roce(&dev->ibdev, 1)) {
if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state,
- ibqp->qp_type, attr_mask,
- IB_LINK_LAYER_ETHERNET)) {
+ ibqp->qp_type, attr_mask)) {
DP_ERR(dev,
"modify qp: invalid attribute mask=0x%x specified for\n"
"qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 3461df002f81..83d2349188db 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1390,13 +1390,13 @@ static inline u32 qib_get_hdrqtail(const struct qib_ctxtdata *rcd)
*/
extern const char ib_qib_version[];
+extern const struct attribute_group qib_attr_group;
int qib_device_create(struct qib_devdata *);
void qib_device_remove(struct qib_devdata *);
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
struct kobject *kobj);
-int qib_verbs_register_sysfs(struct qib_devdata *);
void qib_verbs_unregister_sysfs(struct qib_devdata *);
/* Hook for sysfs read of QSFP */
extern int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len);
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 5ac7b31c346b..30595b358d8f 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -597,7 +597,6 @@ qib_pci_resume(struct pci_dev *pdev)
struct qib_devdata *dd = pci_get_drvdata(pdev);
qib_devinfo(pdev, "QIB resume function called\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
/*
* Running jobs will fail, since it's asynchronous
* unlike sysfs-requested reset. Better than
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 344e401915f7..a81905df2d0f 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -378,25 +378,22 @@ void qib_flush_qp_waiters(struct rvt_qp *qp)
* qib_check_send_wqe - validate wr/wqe
* @qp - The qp
* @wqe - The built wqe
+ * @call_send - Determine if the send should be posted or scheduled
*
- * validate wr/wqe. This is called
- * prior to inserting the wqe into
- * the ring but after the wqe has been
- * setup.
- *
- * Returns 1 to force direct progress, 0 otherwise, -EINVAL on failure
+ * Returns 0 on success, -EINVAL on failure
*/
int qib_check_send_wqe(struct rvt_qp *qp,
- struct rvt_swqe *wqe)
+ struct rvt_swqe *wqe, bool *call_send)
{
struct rvt_ah *ah;
- int ret = 0;
switch (qp->ibqp.qp_type) {
case IB_QPT_RC:
case IB_QPT_UC:
if (wqe->length > 0x80000000U)
return -EINVAL;
+ if (wqe->length > qp->pmtu)
+ *call_send = false;
break;
case IB_QPT_SMI:
case IB_QPT_GSI:
@@ -405,12 +402,12 @@ int qib_check_send_wqe(struct rvt_qp *qp,
if (wqe->length > (1 << ah->log_pmtu))
return -EINVAL;
/* progress hint */
- ret = 1;
+ *call_send = true;
break;
default:
break;
}
- return ret;
+ return 0;
}
#ifdef CONFIG_DEBUG_FS
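qib_check_send_wqe() now returns only pass/fail and reports the progress hint through the new out-parameter instead of overloading the return value. A hedged sketch of how a post-send path could consume it (the caller below is illustrative; the real dispatch lives in rdmavt):

    bool call_send = true;

    if (qib_check_send_wqe(qp, wqe, &call_send))
            return -EINVAL;             /* wqe rejected outright */
    if (call_send)
            qib_do_send(qp);            /* e.g. SMI/GSI: direct progress */
    else
            qib_schedule_send(qp);      /* e.g. RC/UC above pmtu: defer */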
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index f35fdeb14347..6fa002940451 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -254,7 +254,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
+ rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
/* will get called again */
goto done;
@@ -838,7 +838,7 @@ void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
qib_migrate_qp(qp);
qp->s_retry = qp->s_retry_cnt;
} else if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
return;
} else /* XXX need to handle delayed completion */
@@ -1221,7 +1221,7 @@ static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
ibp->rvp.n_other_naks++;
class_b:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
break;
@@ -1425,7 +1425,8 @@ read_middle:
qp->s_rdma_read_len -= pmtu;
update_last_psn(qp, psn);
spin_unlock_irqrestore(&qp->s_lock, flags);
- qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, pmtu, false, false);
goto bail;
case OP(RDMA_READ_RESPONSE_ONLY):
@@ -1471,7 +1472,8 @@ read_last:
if (unlikely(tlen != qp->s_rdma_read_len))
goto ack_len_err;
aeth = be32_to_cpu(ohdr->u.aeth);
- qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->s_rdma_read_sge,
+ data, tlen, false, false);
WARN_ON(qp->s_rdma_read_sge.num_sge);
(void) do_rc_ack(qp, aeth, psn,
OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
@@ -1490,7 +1492,7 @@ ack_len_err:
status = IB_WC_LOC_LEN_ERR;
ack_err:
if (qp->s_last == qp->s_acked) {
- qib_send_complete(qp, wqe, status);
+ rvt_send_complete(qp, wqe, status);
rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
}
ack_done:
@@ -1844,7 +1846,7 @@ send_middle:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -1890,7 +1892,7 @@ send_last:
wc.byte_len = tlen + qp->r_rcv_len;
if (unlikely(wc.byte_len > qp->r_len))
goto nack_inv;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
qp->r_msn++;
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
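Every qib_copy_sge() conversion in this file (and in qib_uc.c/qib_ud.c below) follows the same mapping: the shared helper additionally takes the qp, turns the integer release flag into a bool, and appends a trailing bool that is false at every qib call site. A hedged summary; the trailing flag feeds the adaptive cacheless-copy machinery added to rdmavt later in this patch:

    /* old, driver-local */
    qib_copy_sge(&qp->r_sge, data, len, 1);

    /* new, shared; qp gives rdmavt access to the per-device copy mode,
     * trailing false keeps the plain memcpy path */
    rvt_copy_sge(qp, &qp->r_sge, data, len, true, false);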
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f8a7de795beb..1fa21938f310 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -171,307 +171,6 @@ err:
}
/**
- * qib_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from qib_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send(). We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void qib_ruc_loopback(struct rvt_qp *sqp)
-{
- struct qib_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
- struct qib_pportdata *ppd = ppd_from_ibp(ibp);
- struct qib_devdata *dd = ppd->dd;
- struct rvt_dev_info *rdi = &dd->verbs_dev.rdi;
- struct rvt_qp *qp;
- struct rvt_swqe *wqe;
- struct rvt_sge *sge;
- unsigned long flags;
- struct ib_wc wc;
- u64 sdata;
- atomic64_t *maddr;
- enum ib_wc_status send_status;
- int release;
- int ret;
-
- rcu_read_lock();
- /*
- * Note that we check the responder QP state after
- * checking the requester's state.
- */
- qp = rvt_lookup_qpn(rdi, &ibp->rvp, sqp->remote_qpn);
- if (!qp)
- goto done;
-
- spin_lock_irqsave(&sqp->s_lock, flags);
-
- /* Return if we are already busy processing a work request. */
- if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
- !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- goto unlock;
-
- sqp->s_flags |= RVT_S_BUSY;
-
-again:
- if (sqp->s_last == READ_ONCE(sqp->s_head))
- goto clr_busy;
- wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
-
- /* Return if it is not OK to start a new work request. */
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
- if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
- goto clr_busy;
- /* We are in the error state, flush the work request. */
- send_status = IB_WC_WR_FLUSH_ERR;
- goto flush_send;
- }
-
- /*
- * We can rely on the entry not changing without the s_lock
- * being held until we update s_last.
- * We increment s_cur to indicate s_last is in progress.
- */
- if (sqp->s_last == sqp->s_cur) {
- if (++sqp->s_cur >= sqp->s_size)
- sqp->s_cur = 0;
- }
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-
- if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
- qp->ibqp.qp_type != sqp->ibqp.qp_type) {
- ibp->rvp.n_pkt_drops++;
- /*
- * For RC, the requester would timeout and retry so
- * shortcut the timeouts and just signal too many retries.
- */
- if (sqp->ibqp.qp_type == IB_QPT_RC)
- send_status = IB_WC_RETRY_EXC_ERR;
- else
- send_status = IB_WC_SUCCESS;
- goto serr;
- }
-
- memset(&wc, 0, sizeof(wc));
- send_status = IB_WC_SUCCESS;
-
- release = 1;
- sqp->s_sge.sge = wqe->sg_list[0];
- sqp->s_sge.sg_list = wqe->sg_list + 1;
- sqp->s_sge.num_sge = wqe->wr.num_sge;
- sqp->s_len = wqe->length;
- switch (wqe->wr.opcode) {
- case IB_WR_SEND_WITH_IMM:
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- /* FALLTHROUGH */
- case IB_WR_SEND:
- ret = rvt_get_rwqe(qp, false);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- break;
-
- case IB_WR_RDMA_WRITE_WITH_IMM:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- wc.wc_flags = IB_WC_WITH_IMM;
- wc.ex.imm_data = wqe->wr.ex.imm_data;
- ret = rvt_get_rwqe(qp, true);
- if (ret < 0)
- goto op_err;
- if (!ret)
- goto rnr_nak;
- /* FALLTHROUGH */
- case IB_WR_RDMA_WRITE:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
- goto inv_err;
- if (wqe->length == 0)
- break;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_WRITE)))
- goto acc_err;
- qp->r_sge.sg_list = NULL;
- qp->r_sge.num_sge = 1;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_RDMA_READ:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
- wqe->rdma_wr.remote_addr,
- wqe->rdma_wr.rkey,
- IB_ACCESS_REMOTE_READ)))
- goto acc_err;
- release = 0;
- sqp->s_sge.sg_list = NULL;
- sqp->s_sge.num_sge = 1;
- qp->r_sge.sge = wqe->sg_list[0];
- qp->r_sge.sg_list = wqe->sg_list + 1;
- qp->r_sge.num_sge = wqe->wr.num_sge;
- qp->r_sge.total_len = wqe->length;
- break;
-
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
- goto inv_err;
- if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
- wqe->atomic_wr.remote_addr,
- wqe->atomic_wr.rkey,
- IB_ACCESS_REMOTE_ATOMIC)))
- goto acc_err;
- /* Perform atomic OP and save result. */
- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
- sdata = wqe->atomic_wr.compare_add;
- *(u64 *) sqp->s_sge.sge.vaddr =
- (wqe->atomic_wr.wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
- (u64) atomic64_add_return(sdata, maddr) - sdata :
- (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
- sdata, wqe->atomic_wr.swap);
- rvt_put_mr(qp->r_sge.sge.mr);
- qp->r_sge.num_sge = 0;
- goto send_comp;
-
- default:
- send_status = IB_WC_LOC_QP_OP_ERR;
- goto serr;
- }
-
- sge = &sqp->s_sge.sge;
- while (sqp->s_len) {
- u32 len = sqp->s_len;
-
- if (len > sge->length)
- len = sge->length;
- if (len > sge->sge_length)
- len = sge->sge_length;
- BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, release);
- sge->vaddr += len;
- sge->length -= len;
- sge->sge_length -= len;
- if (sge->sge_length == 0) {
- if (!release)
- rvt_put_mr(sge->mr);
- if (--sqp->s_sge.num_sge)
- *sge = *sqp->s_sge.sg_list++;
- } else if (sge->length == 0 && sge->mr->lkey) {
- if (++sge->n >= RVT_SEGSZ) {
- if (++sge->m >= sge->mr->mapsz)
- break;
- sge->n = 0;
- }
- sge->vaddr =
- sge->mr->map[sge->m]->segs[sge->n].vaddr;
- sge->length =
- sge->mr->map[sge->m]->segs[sge->n].length;
- }
- sqp->s_len -= len;
- }
- if (release)
- rvt_put_ss(&qp->r_sge);
-
- if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
- goto send_comp;
-
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
- wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- else
- wc.opcode = IB_WC_RECV;
- wc.wr_id = qp->r_wr_id;
- wc.status = IB_WC_SUCCESS;
- wc.byte_len = wqe->length;
- wc.qp = &qp->ibqp;
- wc.src_qp = qp->remote_qpn;
- wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
- wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
- wc.port_num = 1;
- /* Signal completion event if the solicited bit is set. */
- rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
- wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
- spin_lock_irqsave(&sqp->s_lock, flags);
- ibp->rvp.n_loop_pkts++;
-flush_send:
- sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
- qib_send_complete(sqp, wqe, send_status);
- goto again;
-
-rnr_nak:
- /* Handle RNR NAK */
- if (qp->ibqp.qp_type == IB_QPT_UC)
- goto send_comp;
- ibp->rvp.n_rnr_naks++;
- /*
- * Note: we don't need the s_lock held since the BUSY flag
- * makes this single threaded.
- */
- if (sqp->s_rnr_retry == 0) {
- send_status = IB_WC_RNR_RETRY_EXC_ERR;
- goto serr;
- }
- if (sqp->s_rnr_retry_cnt < 7)
- sqp->s_rnr_retry--;
- spin_lock_irqsave(&sqp->s_lock, flags);
- if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
- goto clr_busy;
- rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
- IB_AETH_CREDIT_SHIFT);
- goto clr_busy;
-
-op_err:
- send_status = IB_WC_REM_OP_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
- wc.status = IB_WC_LOC_QP_OP_ERR;
- goto err;
-
-acc_err:
- send_status = IB_WC_REM_ACCESS_ERR;
- wc.status = IB_WC_LOC_PROT_ERR;
-err:
- /* responder goes to error state */
- rvt_rc_error(qp, wc.status);
-
-serr:
- spin_lock_irqsave(&sqp->s_lock, flags);
- qib_send_complete(sqp, wqe, send_status);
- if (sqp->ibqp.qp_type == IB_QPT_RC) {
- int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
- sqp->s_flags &= ~RVT_S_BUSY;
- spin_unlock_irqrestore(&sqp->s_lock, flags);
- if (lastwqe) {
- struct ib_event ev;
-
- ev.device = sqp->ibqp.device;
- ev.element.qp = &sqp->ibqp;
- ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
- sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
- }
- goto done;
- }
-clr_busy:
- sqp->s_flags &= ~RVT_S_BUSY;
-unlock:
- spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
- rcu_read_unlock();
-}
-
-/**
* qib_make_grh - construct a GRH header
* @ibp: a pointer to the IB port
* @hdr: a pointer to the GRH header being constructed
@@ -573,7 +272,7 @@ void qib_do_send(struct rvt_qp *qp)
qp->ibqp.qp_type == IB_QPT_UC) &&
(rdma_ah_get_dlid(&qp->remote_ah_attr) &
~((1 << ppd->lmc) - 1)) == ppd->lid) {
- qib_ruc_loopback(qp);
+ rvt_ruc_loopback(qp);
return;
}
@@ -613,42 +312,3 @@ void qib_do_send(struct rvt_qp *qp)
spin_unlock_irqrestore(&qp->s_lock, flags);
}
-
-/*
- * This should be called with s_lock held.
- */
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status)
-{
- u32 old_last, last;
-
- if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
- return;
-
- last = qp->s_last;
- old_last = last;
- if (++last >= qp->s_size)
- last = 0;
- qp->s_last = last;
- /* See post_send() */
- barrier();
- rvt_put_swqe(wqe);
- if (qp->ibqp.qp_type == IB_QPT_UD ||
- qp->ibqp.qp_type == IB_QPT_SMI ||
- qp->ibqp.qp_type == IB_QPT_GSI)
- atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
-
- rvt_qp_swqe_complete(qp,
- wqe,
- ib_qib_wc_opcode[wqe->wr.opcode],
- status);
-
- if (qp->s_acked == old_last)
- qp->s_acked = last;
- if (qp->s_cur == old_last)
- qp->s_cur = last;
- if (qp->s_tail == old_last)
- qp->s_tail = last;
- if (qp->state == IB_QPS_SQD && last == qp->s_cur)
- qp->s_draining = 0;
-}
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index d0723d4aef5c..757d4c9d713d 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -651,7 +651,7 @@ unmap:
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)
rvt_error_qp(qp, IB_WC_GENERAL_ERR);
} else if (qp->s_wqe)
- qib_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_GENERAL_ERR);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->r_lock);
/* return zero to process the next send work request */
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index ca2638d8f35e..1cf4ca3f23e3 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -551,17 +551,18 @@ static struct kobj_type qib_diagc_ktype = {
* Start of per-unit (or driver, in some cases, but replicated
* per unit) functions (these get a device *)
*/
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
+ char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
return sprintf(buf, "%x\n", dd_from_dev(dev)->minrev);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -574,15 +575,18 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
ret = scnprintf(buf, PAGE_SIZE, "%s\n", dd->boardname);
return ret;
}
+static DEVICE_ATTR_RO(hca_type);
+static DEVICE_ATTR(board_id, 0444, hca_type_show, NULL);
-static ssize_t show_version(struct device *device,
+static ssize_t version_show(struct device *device,
struct device_attribute *attr, char *buf)
{
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", (char *)ib_qib_version);
}
+static DEVICE_ATTR_RO(version);
-static ssize_t show_boardversion(struct device *device,
+static ssize_t boardversion_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -592,9 +596,9 @@ static ssize_t show_boardversion(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->boardversion);
}
+static DEVICE_ATTR_RO(boardversion);
-
-static ssize_t show_localbus_info(struct device *device,
+static ssize_t localbus_info_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -604,9 +608,9 @@ static ssize_t show_localbus_info(struct device *device,
/* The string printed here is already newline-terminated. */
return scnprintf(buf, PAGE_SIZE, "%s", dd->lbus_info);
}
+static DEVICE_ATTR_RO(localbus_info);
-
-static ssize_t show_nctxts(struct device *device,
+static ssize_t nctxts_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -620,9 +624,10 @@ static ssize_t show_nctxts(struct device *device,
(dd->first_user_ctxt > dd->cfgctxts) ? 0 :
(dd->cfgctxts - dd->first_user_ctxt));
}
+static DEVICE_ATTR_RO(nctxts);
-static ssize_t show_nfreectxts(struct device *device,
- struct device_attribute *attr, char *buf)
+static ssize_t nfreectxts_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
container_of(device, struct qib_ibdev, rdi.ibdev.dev);
@@ -631,8 +636,9 @@ static ssize_t show_nfreectxts(struct device *device,
/* Return the number of free user ports (contexts) available. */
return scnprintf(buf, PAGE_SIZE, "%u\n", dd->freectxts);
}
+static DEVICE_ATTR_RO(nfreectxts);
-static ssize_t show_serial(struct device *device,
+static ssize_t serial_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -644,8 +650,9 @@ static ssize_t show_serial(struct device *device,
strcat(buf, "\n");
return strlen(buf);
}
+static DEVICE_ATTR_RO(serial);
-static ssize_t store_chip_reset(struct device *device,
+static ssize_t chip_reset_store(struct device *device,
struct device_attribute *attr, const char *buf,
size_t count)
{
@@ -663,11 +670,12 @@ static ssize_t store_chip_reset(struct device *device,
bail:
return ret < 0 ? ret : count;
}
+static DEVICE_ATTR_WO(chip_reset);
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
-static ssize_t show_tempsense(struct device *device,
+static ssize_t tempsense_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct qib_ibdev *dev =
@@ -695,6 +703,7 @@ static ssize_t show_tempsense(struct device *device,
*(signed char *)(regvals + 7));
return ret;
}
+static DEVICE_ATTR_RO(tempsense);
/*
* end of per-unit (or driver, in some cases, but replicated
@@ -702,30 +711,23 @@ static ssize_t show_tempsense(struct device *device,
*/
/* start of per-unit file structures and support code */
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
-static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
-static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
-
-static struct device_attribute *qib_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id,
- &dev_attr_version,
- &dev_attr_nctxts,
- &dev_attr_nfreectxts,
- &dev_attr_serial,
- &dev_attr_boardversion,
- &dev_attr_tempsense,
- &dev_attr_localbus_info,
- &dev_attr_chip_reset,
+static struct attribute *qib_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ &dev_attr_version.attr,
+ &dev_attr_nctxts.attr,
+ &dev_attr_nfreectxts.attr,
+ &dev_attr_serial.attr,
+ &dev_attr_boardversion.attr,
+ &dev_attr_tempsense.attr,
+ &dev_attr_localbus_info.attr,
+ &dev_attr_chip_reset.attr,
+ NULL,
+};
+
+const struct attribute_group qib_attr_group = {
+ .attrs = qib_attributes,
};
int qib_create_port_files(struct ib_device *ibdev, u8 port_num,
@@ -827,27 +829,6 @@ bail:
}
/*
- * Register and create our files in /sys/class/infiniband.
- */
-int qib_verbs_register_sysfs(struct qib_devdata *dd)
-{
- struct ib_device *dev = &dd->verbs_dev.rdi.ibdev;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) {
- ret = device_create_file(&dev->dev, qib_attributes[i]);
- if (ret)
- goto bail;
- }
-
- return 0;
-bail:
- for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i)
- device_remove_file(&dev->dev, qib_attributes[i]);
- return ret;
-}
-
-/*
* Unregister and remove our files in /sys/class/infiniband.
*/
void qib_verbs_unregister_sysfs(struct qib_devdata *dd)
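The show_*()-to-*_show() renames above are what make the DEVICE_ATTR_RO()/DEVICE_ATTR_WO() shorthands applicable: the macros derive both the sysfs file name and the callback from a single token. Approximately (simplified from the real definition in include/linux/device.h):

    /* DEVICE_ATTR_RO(hw_rev) expands to roughly: */
    struct device_attribute dev_attr_hw_rev = {
            .attr = { .name = "hw_rev", .mode = 0444 },
            .show = hw_rev_show,    /* the <name>_show suffix is required */
    };

board_id is the one exception above: it keeps the long-form DEVICE_ATTR() because it exposes hca_type_show() under a second file name, which the shorthand cannot express.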
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 3e54bc11e0ae..30c70ad0f4bf 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -68,7 +68,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -359,7 +359,7 @@ send_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto rewind;
- qib_copy_sge(&qp->r_sge, data, pmtu, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, false, false);
break;
case OP(SEND_LAST_WITH_IMMEDIATE):
@@ -385,7 +385,7 @@ send_last:
if (unlikely(wc.byte_len > qp->r_len))
goto rewind;
wc.opcode = IB_WC_RECV;
- qib_copy_sge(&qp->r_sge, data, tlen, 0);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, false, false);
rvt_put_ss(&qp->s_rdma_read_sge);
last_imm:
wc.wr_id = qp->r_wr_id;
@@ -449,7 +449,7 @@ rdma_first:
qp->r_rcv_len += pmtu;
if (unlikely(qp->r_rcv_len > qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, pmtu, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
break;
case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
@@ -479,7 +479,7 @@ rdma_last_imm:
}
wc.byte_len = qp->r_len;
wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
goto last_imm;
@@ -495,7 +495,7 @@ rdma_last:
tlen -= (hdrsize + pad + 4);
if (unlikely(tlen + qp->r_rcv_len != qp->r_len))
goto drop;
- qib_copy_sge(&qp->r_sge, data, tlen, 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, false);
rvt_put_ss(&qp->r_sge);
break;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a2390f..4d4c31ea4e2d 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -162,8 +162,8 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
const struct ib_global_route *grd = rdma_ah_read_grh(ah_attr);
qib_make_grh(ibp, &grh, grd, 0, 0);
- qib_copy_sge(&qp->r_sge, &grh,
- sizeof(grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &grh,
+ sizeof(grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
@@ -179,7 +179,7 @@ static void qib_ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
if (len > sge->sge_length)
len = sge->sge_length;
BUG_ON(len == 0);
- qib_copy_sge(&qp->r_sge, sge->vaddr, len, 1);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr, len, true, false);
sge->vaddr += len;
sge->length -= len;
sge->sge_length -= len;
@@ -260,7 +260,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
goto bail;
}
wqe = rvt_get_swqe_ptr(qp, qp->s_last);
- qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
+ rvt_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done;
}
@@ -304,7 +304,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags)
qib_ud_loopback(qp, wqe);
spin_lock_irqsave(&qp->s_lock, tflags);
*flags = tflags;
- qib_send_complete(qp, wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, wqe, IB_WC_SUCCESS);
goto done;
}
}
@@ -551,12 +551,13 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
goto drop;
}
if (has_grh) {
- qib_copy_sge(&qp->r_sge, &hdr->u.l.grh,
- sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, &hdr->u.l.grh,
+ sizeof(struct ib_grh), true, false);
wc.wc_flags |= IB_WC_GRH;
} else
rvt_skip_sge(&qp->r_sge, sizeof(struct ib_grh), true);
- qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1);
+ rvt_copy_sge(qp, &qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh),
+ true, false);
rvt_put_ss(&qp->r_sge);
if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
return;
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 41babbc0db58..4b0f5761a646 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -131,27 +131,6 @@ const enum ib_wc_opcode ib_qib_wc_opcode[] = {
*/
__be64 ib_qib_sys_image_guid;
-/**
- * qib_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length, int release)
-{
- struct rvt_sge *sge = &ss->sge;
-
- while (length) {
- u32 len = rvt_get_sge_length(sge, length);
-
- WARN_ON_ONCE(len == 0);
- memcpy(sge->vaddr, data, len);
- rvt_update_sge(ss, len, release);
- data += len;
- length -= len;
- }
-}
-
/*
* Count the number of DMA descriptors needed to send length bytes of data.
* Don't modify the qib_sge_state to get the count.
@@ -752,7 +731,7 @@ static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
spin_lock(&qp->s_lock);
if (tx->wqe)
- qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
else if (qp->ibqp.qp_type == IB_QPT_RC) {
struct ib_header *hdr;
@@ -1025,7 +1004,7 @@ done:
}
if (qp->s_wqe) {
spin_lock_irqsave(&qp->s_lock, flags);
- qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
+ rvt_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
spin_unlock_irqrestore(&qp->s_lock, flags);
} else if (qp->ibqp.qp_type == IB_QPT_RC) {
spin_lock_irqsave(&qp->s_lock, flags);
@@ -1512,6 +1491,9 @@ static void qib_fill_device_attr(struct qib_devdata *dd)
rdi->dparms.props.max_mcast_grp;
/* post send table */
dd->verbs_dev.rdi.post_parms = qib_post_parms;
+
+ /* opcode translation table */
+ dd->verbs_dev.rdi.wc_opcode = ib_qib_wc_opcode;
}
/**
@@ -1588,7 +1570,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.driver_f.port_callback = qib_create_port_files;
dd->verbs_dev.rdi.driver_f.get_pci_dev = qib_get_pci_dev;
dd->verbs_dev.rdi.driver_f.check_ah = qib_check_ah;
- dd->verbs_dev.rdi.driver_f.check_send_wqe = qib_check_send_wqe;
+ dd->verbs_dev.rdi.driver_f.setup_wqe = qib_check_send_wqe;
dd->verbs_dev.rdi.driver_f.notify_new_ah = qib_notify_new_ah;
dd->verbs_dev.rdi.driver_f.alloc_qpn = qib_alloc_qpn;
dd->verbs_dev.rdi.driver_f.qp_priv_alloc = qib_qp_priv_alloc;
@@ -1631,6 +1613,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
dd->verbs_dev.rdi.dparms.node = dd->assigned_node_id;
dd->verbs_dev.rdi.dparms.core_cap_flags = RDMA_CORE_PORT_IBA_IB;
dd->verbs_dev.rdi.dparms.max_mad_size = IB_MGMT_MAD_SIZE;
+ dd->verbs_dev.rdi.dparms.sge_copy_mode = RVT_SGE_COPY_MEMCPY;
qib_fill_device_attr(dd);
@@ -1642,19 +1625,14 @@ int qib_register_ib_device(struct qib_devdata *dd)
i,
dd->rcd[ctxt]->pkeys);
}
+ rdma_set_device_sysfs_group(&dd->verbs_dev.rdi.ibdev, &qib_attr_group);
ret = rvt_register_device(&dd->verbs_dev.rdi, RDMA_DRIVER_QIB);
if (ret)
goto err_tx;
- ret = qib_verbs_register_sysfs(dd);
- if (ret)
- goto err_class;
-
return ret;
-err_class:
- rvt_unregister_device(&dd->verbs_dev.rdi);
err_tx:
while (!list_empty(&dev->txreq_free)) {
struct list_head *l = dev->txreq_free.next;
@@ -1716,14 +1694,14 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
* It is only used in post send, which doesn't hold
* the s_lock.
*/
-void _qib_schedule_send(struct rvt_qp *qp)
+bool _qib_schedule_send(struct rvt_qp *qp)
{
struct qib_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct qib_pportdata *ppd = ppd_from_ibp(ibp);
struct qib_qp_priv *priv = qp->priv;
- queue_work(ppd->qib_wq, &priv->s_work);
+ return queue_work(ppd->qib_wq, &priv->s_work);
}
/**
@@ -1733,8 +1711,9 @@ void _qib_schedule_send(struct rvt_qp *qp)
* This schedules qp progress. The s_lock
* should be held.
*/
-void qib_schedule_send(struct rvt_qp *qp)
+bool qib_schedule_send(struct rvt_qp *qp)
{
if (qib_send_ok(qp))
- _qib_schedule_send(qp);
+ return _qib_schedule_send(qp);
+ return false;
}
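Both schedule functions now propagate queue_work()'s return value, which is false when the work item was already pending. A short illustrative use (the tracepoint is hypothetical):

    /* true: this call armed progress; false: an earlier request is
     * still queued, or qib_send_ok() vetoed scheduling */
    if (!qib_schedule_send(qp))
            trace_qib_send_not_scheduled(qp);   /* hypothetical hook */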
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 666613eef88f..a4426c24b0d1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012 - 2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2012 - 2018 Intel Corporation. All rights reserved.
* Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
* Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
*
@@ -223,8 +223,8 @@ static inline int qib_send_ok(struct rvt_qp *qp)
!(qp->s_flags & RVT_S_ANY_WAIT_SEND));
}
-void _qib_schedule_send(struct rvt_qp *qp);
-void qib_schedule_send(struct rvt_qp *qp);
+bool _qib_schedule_send(struct rvt_qp *qp);
+bool qib_schedule_send(struct rvt_qp *qp);
static inline int qib_pkey_ok(u16 pkey1, u16 pkey2)
{
@@ -292,9 +292,6 @@ void qib_put_txreq(struct qib_verbs_txreq *tx);
int qib_verbs_send(struct rvt_qp *qp, struct ib_header *hdr,
u32 hdrwords, struct rvt_sge_state *ss, u32 len);
-void qib_copy_sge(struct rvt_sge_state *ss, void *data, u32 length,
- int release);
-
void qib_uc_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
int has_grh, void *data, u32 tlen, struct rvt_qp *qp);
@@ -303,7 +300,8 @@ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct ib_header *hdr,
int qib_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
-int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe);
+int qib_check_send_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid);
@@ -333,9 +331,6 @@ void _qib_do_send(struct work_struct *work);
void qib_do_send(struct rvt_qp *qp);
-void qib_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
- enum ib_wc_status status);
-
void qib_send_rc_ack(struct rvt_qp *qp);
int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags);
diff --git a/drivers/infiniband/hw/usnic/usnic_debugfs.c b/drivers/infiniband/hw/usnic/usnic_debugfs.c
index 92dc66cc2d50..a3115709fb03 100644
--- a/drivers/infiniband/hw/usnic/usnic_debugfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_debugfs.c
@@ -165,6 +165,5 @@ void usnic_debugfs_flow_add(struct usnic_ib_qp_grp_flow *qp_flow)
void usnic_debugfs_flow_remove(struct usnic_ib_qp_grp_flow *qp_flow)
{
- if (!IS_ERR_OR_NULL(qp_flow->dbgfs_dentry))
- debugfs_remove(qp_flow->dbgfs_dentry);
+ debugfs_remove(qp_flow->dbgfs_dentry);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index f0538a460328..73bd00f8d2c8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -76,7 +76,7 @@ static LIST_HEAD(usnic_ib_ibdev_list);
static int usnic_ib_dump_vf_hdr(void *obj, char *buf, int buf_sz)
{
struct usnic_ib_vf *vf = obj;
- return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name);
+ return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
}
/* End callback dump funcs */
@@ -138,7 +138,7 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
netdev = us_ibdev->netdev;
switch (event) {
case NETDEV_REBOOT:
- usnic_info("PF Reset on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -151,7 +151,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
if (!us_ibdev->ufdev->link_up &&
netif_carrier_ok(netdev)) {
usnic_fwd_carrier_up(us_ibdev->ufdev);
- usnic_info("Link UP on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link UP on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
ib_event.event = IB_EVENT_PORT_ACTIVE;
ib_event.device = &us_ibdev->ib_dev;
ib_event.element.port_num = 1;
@@ -159,7 +160,8 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else if (us_ibdev->ufdev->link_up &&
!netif_carrier_ok(netdev)) {
usnic_fwd_carrier_down(us_ibdev->ufdev);
- usnic_info("Link DOWN on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Link DOWN on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
ib_event.event = IB_EVENT_PORT_ERR;
ib_event.device = &us_ibdev->ib_dev;
@@ -168,17 +170,17 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
} else {
usnic_dbg("Ignoring %s on %s\n",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
case NETDEV_CHANGEADDR:
if (!memcmp(us_ibdev->ufdev->mac, netdev->dev_addr,
sizeof(us_ibdev->ufdev->mac))) {
usnic_dbg("Ignoring addr change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
} else {
usnic_info(" %s old mac: %pM new mac: %pM\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mac,
netdev->dev_addr);
usnic_fwd_set_mac(us_ibdev->ufdev, netdev->dev_addr);
@@ -193,19 +195,19 @@ static void usnic_ib_handle_usdev_event(struct usnic_ib_dev *us_ibdev,
case NETDEV_CHANGEMTU:
if (us_ibdev->ufdev->mtu != netdev->mtu) {
usnic_info("MTU Change on %s old: %u new: %u\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
us_ibdev->ufdev->mtu, netdev->mtu);
usnic_fwd_set_mtu(us_ibdev->ufdev, netdev->mtu);
usnic_ib_qp_grp_modify_active_to_err(us_ibdev);
} else {
usnic_dbg("Ignoring MTU change on %s\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
break;
default:
usnic_dbg("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
}
@@ -267,7 +269,7 @@ static int usnic_ib_handle_inet_event(struct usnic_ib_dev *us_ibdev,
default:
usnic_info("Ignoring event %s on %s",
netdev_cmd_to_name(event),
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
}
mutex_unlock(&us_ibdev->usdev_lock);
@@ -364,7 +366,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
us_ibdev->ib_dev.dev.parent = &dev->dev;
us_ibdev->ib_dev.uverbs_abi_ver = USNIC_UVERBS_ABI_VERSION;
- strlcpy(us_ibdev->ib_dev.name, "usnic_%d", IB_DEVICE_NAME_MAX);
us_ibdev->ib_dev.uverbs_cmd_mask =
(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
@@ -416,7 +417,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
us_ibdev->ib_dev.driver_id = RDMA_DRIVER_USNIC;
- if (ib_register_device(&us_ibdev->ib_dev, NULL))
+ rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
+
+ if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d", NULL))
goto err_fwd_dealloc;
usnic_fwd_set_mtu(us_ibdev->ufdev, us_ibdev->netdev->mtu);
@@ -437,9 +440,9 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
kref_init(&us_ibdev->vf_cnt);
usnic_info("Added ibdev: %s netdev: %s with mac %pM Link: %u MTU: %u\n",
- us_ibdev->ib_dev.name, netdev_name(us_ibdev->netdev),
- us_ibdev->ufdev->mac, us_ibdev->ufdev->link_up,
- us_ibdev->ufdev->mtu);
+ dev_name(&us_ibdev->ib_dev.dev),
+ netdev_name(us_ibdev->netdev), us_ibdev->ufdev->mac,
+ us_ibdev->ufdev->link_up, us_ibdev->ufdev->mtu);
return us_ibdev;
err_fwd_dealloc:
@@ -452,7 +455,7 @@ err_dealloc:
static void usnic_ib_device_remove(struct usnic_ib_dev *us_ibdev)
{
- usnic_info("Unregistering %s\n", us_ibdev->ib_dev.name);
+ usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
usnic_ib_sysfs_unregister_usdev(us_ibdev);
usnic_fwd_dev_free(us_ibdev->ufdev);
ib_unregister_device(&us_ibdev->ib_dev);
@@ -591,7 +594,7 @@ static int usnic_ib_pci_probe(struct pci_dev *pdev,
mutex_unlock(&pf->usdev_lock);
usnic_info("Registering usnic VF %s into PF %s\n", pci_name(pdev),
- pf->ib_dev.name);
+ dev_name(&pf->ib_dev.dev));
usnic_ib_log_vf(vf);
return 0;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 4210ca14014d..a7e4b2ccfaf8 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -46,9 +46,8 @@
#include "usnic_ib_sysfs.h"
#include "usnic_log.h"
-static ssize_t usnic_ib_show_board(struct device *device,
- struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev =
container_of(device, struct usnic_ib_dev, ib_dev.dev);
@@ -60,13 +59,13 @@ static ssize_t usnic_ib_show_board(struct device *device,
return scnprintf(buf, PAGE_SIZE, "%hu\n", subsystem_device_id);
}
+static DEVICE_ATTR_RO(board_id);
/*
* Report the configuration for this PF
*/
static ssize_t
-usnic_ib_show_config(struct device *device, struct device_attribute *attr,
- char *buf)
+config_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
char *ptr;
@@ -94,7 +93,7 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
n = scnprintf(ptr, left,
"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
busname,
PCI_SLOT(us_ibdev->pdev->devfn),
PCI_FUNC(us_ibdev->pdev->devfn),
@@ -119,17 +118,17 @@ usnic_ib_show_config(struct device *device, struct device_attribute *attr,
UPDATE_PTR_LEFT(n, ptr, left);
} else {
n = scnprintf(ptr, left, "%s: no VFs\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
UPDATE_PTR_LEFT(n, ptr, left);
}
mutex_unlock(&us_ibdev->usdev_lock);
return ptr - buf;
}
+static DEVICE_ATTR_RO(config);
static ssize_t
-usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
- char *buf)
+iface_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -138,10 +137,10 @@ usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%s\n",
netdev_name(us_ibdev->netdev));
}
+static DEVICE_ATTR_RO(iface);
static ssize_t
-usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+max_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -150,10 +149,10 @@ usnic_ib_show_max_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%u\n",
kref_read(&us_ibdev->vf_cnt));
}
+static DEVICE_ATTR_RO(max_vf);
static ssize_t
-usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+qp_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
int qp_per_vf;
@@ -165,10 +164,10 @@ usnic_ib_show_qp_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE,
"%d\n", qp_per_vf);
}
+static DEVICE_ATTR_RO(qp_per_vf);
static ssize_t
-usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
- char *buf)
+cq_per_vf_show(struct device *device, struct device_attribute *attr, char *buf)
{
struct usnic_ib_dev *us_ibdev;
@@ -177,21 +176,20 @@ usnic_ib_show_cq_per_vf(struct device *device, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "%d\n",
us_ibdev->vf_res_cnt[USNIC_VNIC_RES_TYPE_CQ]);
}
+static DEVICE_ATTR_RO(cq_per_vf);
+
+static struct attribute *usnic_class_attributes[] = {
+ &dev_attr_board_id.attr,
+ &dev_attr_config.attr,
+ &dev_attr_iface.attr,
+ &dev_attr_max_vf.attr,
+ &dev_attr_qp_per_vf.attr,
+ &dev_attr_cq_per_vf.attr,
+ NULL
+};
-static DEVICE_ATTR(board_id, S_IRUGO, usnic_ib_show_board, NULL);
-static DEVICE_ATTR(config, S_IRUGO, usnic_ib_show_config, NULL);
-static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);
-static DEVICE_ATTR(max_vf, S_IRUGO, usnic_ib_show_max_vf, NULL);
-static DEVICE_ATTR(qp_per_vf, S_IRUGO, usnic_ib_show_qp_per_vf, NULL);
-static DEVICE_ATTR(cq_per_vf, S_IRUGO, usnic_ib_show_cq_per_vf, NULL);
-
-static struct device_attribute *usnic_class_attributes[] = {
- &dev_attr_board_id,
- &dev_attr_config,
- &dev_attr_iface,
- &dev_attr_max_vf,
- &dev_attr_qp_per_vf,
- &dev_attr_cq_per_vf,
+const struct attribute_group usnic_attr_group = {
+ .attrs = usnic_class_attributes,
};
struct qpn_attribute {
@@ -278,18 +276,6 @@ static struct kobj_type usnic_ib_qpn_type = {
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- int err;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- err = device_create_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- if (err) {
- usnic_err("Failed to create device file %d for %s eith err %d",
- i, us_ibdev->ib_dev.name, err);
- return -EINVAL;
- }
- }
-
/* create kernel object for looking at individual QPs */
kobject_get(&us_ibdev->ib_dev.dev.kobj);
us_ibdev->qpn_kobj = kobject_create_and_add("qpn",
@@ -304,12 +290,6 @@ int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev)
{
- int i;
- for (i = 0; i < ARRAY_SIZE(usnic_class_attributes); ++i) {
- device_remove_file(&us_ibdev->ib_dev.dev,
- usnic_class_attributes[i]);
- }
-
kobject_put(us_ibdev->qpn_kobj);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
index 3d98e16cfeaf..b1f064cec850 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.h
@@ -41,4 +41,6 @@ void usnic_ib_sysfs_unregister_usdev(struct usnic_ib_dev *us_ibdev);
void usnic_ib_sysfs_qpn_add(struct usnic_ib_qp_grp *qp_grp);
void usnic_ib_sysfs_qpn_remove(struct usnic_ib_qp_grp *qp_grp);
+extern const struct attribute_group usnic_attr_group;
+
#endif /* !USNIC_IB_SYSFS_H_ */
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 9973ac893635..0b91ff36768a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -159,7 +159,8 @@ static int usnic_ib_fill_create_qp_resp(struct usnic_ib_qp_grp *qp_grp,
err = ib_copy_to_udata(udata, &resp, sizeof(resp));
if (err) {
- usnic_err("Failed to copy udata for %s", us_ibdev->ib_dev.name);
+ usnic_err("Failed to copy udata for %s",
+ dev_name(&us_ibdev->ib_dev.dev));
return err;
}
@@ -197,7 +198,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
vnic = vf->vnic;
if (!usnic_vnic_check_room(vnic, res_spec)) {
usnic_dbg("Found used vnic %s from %s\n",
- us_ibdev->ib_dev.name,
+ dev_name(&us_ibdev->ib_dev.dev),
pci_name(usnic_vnic_get_pdev(
vnic)));
qp_grp = usnic_ib_qp_grp_create(us_ibdev->ufdev,
@@ -230,7 +231,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
spin_unlock(&vf->lock);
}
- usnic_info("No free qp grp found on %s\n", us_ibdev->ib_dev.name);
+ usnic_info("No free qp grp found on %s\n",
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-ENOMEM);
qp_grp_check:
@@ -471,7 +473,7 @@ struct ib_pd *usnic_ib_alloc_pd(struct ib_device *ibdev,
}
usnic_info("domain 0x%p allocated for context 0x%p and device %s\n",
- pd, context, ibdev->name);
+ pd, context, dev_name(&ibdev->dev));
return &pd->ibpd;
}
@@ -508,20 +510,20 @@ struct ib_qp *usnic_ib_create_qp(struct ib_pd *pd,
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
usnic_err("%s: cannot copy udata for create_qp\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
err = create_qp_validate_user_data(cmd);
if (err) {
usnic_err("%s: Failed to validate user data\n",
- us_ibdev->ib_dev.name);
+ dev_name(&us_ibdev->ib_dev.dev));
return ERR_PTR(-EINVAL);
}
if (init_attr->qp_type != IB_QPT_UD) {
usnic_err("%s asked to make a non-UD QP: %d\n",
- us_ibdev->ib_dev.name, init_attr->qp_type);
+ dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index e0a95538c364..82dd810bc000 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -121,7 +121,7 @@ void usnic_transport_unrsrv_port(enum usnic_transport_type type, u16 port_num)
if (type == USNIC_TRANSPORT_ROCE_CUSTOM) {
spin_lock(&roce_bitmap_lock);
if (!port_num) {
- usnic_err("Unreserved unvalid port num 0 for %s\n",
+ usnic_err("Unreserved invalid port num 0 for %s\n",
usnic_transport_to_str(type));
goto out_roce_custom;
}
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c
index 9dd39daa602b..49275a548751 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.c
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.c
@@ -54,18 +54,6 @@ static struct workqueue_struct *usnic_uiom_wq;
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
-static void usnic_uiom_reg_account(struct work_struct *work)
-{
- struct usnic_uiom_reg *umem = container_of(work,
- struct usnic_uiom_reg, work);
-
- down_write(&umem->mm->mmap_sem);
- umem->mm->locked_vm -= umem->diff;
- up_write(&umem->mm->mmap_sem);
- mmput(umem->mm);
- kfree(umem);
-}
-
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
struct device *dev,
unsigned long iova, int flags,
@@ -99,8 +87,9 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
}
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
- int dmasync, struct list_head *chunk_list)
+ int dmasync, struct usnic_uiom_reg *uiomr)
{
+ struct list_head *chunk_list = &uiomr->chunk_list;
struct page **page_list;
struct scatterlist *sg;
struct usnic_uiom_chunk *chunk;
@@ -114,6 +103,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
int flags;
dma_addr_t pa;
unsigned int gup_flags;
+ struct mm_struct *mm;
/*
* If the combination of the addr and size requested for this memory
@@ -136,7 +126,8 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
- down_write(&current->mm->mmap_sem);
+ uiomr->owning_mm = mm = current->mm;
+ down_write(&mm->mmap_sem);
locked = npages + current->mm->pinned_vm;
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -196,10 +187,12 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
out:
if (ret < 0)
usnic_uiom_put_pages(chunk_list, 0);
- else
- current->mm->pinned_vm = locked;
+ else {
+ mm->pinned_vm = locked;
+ mmgrab(uiomr->owning_mm);
+ }
- up_write(&current->mm->mmap_sem);
+ up_write(&mm->mmap_sem);
free_page((unsigned long) page_list);
return ret;
}
@@ -379,7 +372,7 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
uiomr->pd = pd;
err = usnic_uiom_get_pages(addr, size, writable, dmasync,
- &uiomr->chunk_list);
+ uiomr);
if (err) {
usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
vpn_start, vpn_last, err);
@@ -426,29 +419,39 @@ out_put_intervals:
out_put_pages:
usnic_uiom_put_pages(&uiomr->chunk_list, 0);
spin_unlock(&pd->lock);
+ mmdrop(uiomr->owning_mm);
out_free_uiomr:
kfree(uiomr);
return ERR_PTR(err);
}
-void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
- struct ib_ucontext *ucontext)
+static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
- struct task_struct *task;
- struct mm_struct *mm;
- unsigned long diff;
+ mmdrop(uiomr->owning_mm);
+ kfree(uiomr);
+}
- __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
+static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
+{
+ return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+}
- task = get_pid_task(ucontext->tgid, PIDTYPE_PID);
- if (!task)
- goto out;
- mm = get_task_mm(task);
- put_task_struct(task);
- if (!mm)
- goto out;
+static void usnic_uiom_release_defer(struct work_struct *work)
+{
+ struct usnic_uiom_reg *uiomr =
+ container_of(work, struct usnic_uiom_reg, work);
- diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
+ down_write(&uiomr->owning_mm->mmap_sem);
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
+
+ __usnic_uiom_release_tail(uiomr);
+}
+
+void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
+ struct ib_ucontext *context)
+{
+ __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
/*
* We may be called with the mm's mmap_sem already held. This
@@ -456,25 +459,21 @@ void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
- * we defer the vm_locked accounting to the system workqueue.
+ * we defer the vm_locked accounting to a workqueue.
*/
- if (ucontext->closing) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
- uiomr->mm = mm;
- uiomr->diff = diff;
-
+ if (context->closing) {
+ if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
+ INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
queue_work(usnic_uiom_wq, &uiomr->work);
return;
}
- } else
- down_write(&mm->mmap_sem);
+ } else {
+ down_write(&uiomr->owning_mm->mmap_sem);
+ }
+ uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
+ up_write(&uiomr->owning_mm->mmap_sem);
- mm->pinned_vm -= diff;
- up_write(&mm->mmap_sem);
- mmput(mm);
-out:
- kfree(uiomr);
+ __usnic_uiom_release_tail(uiomr);
}
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
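The rework replaces the fragile get_pid_task()/get_task_mm() dance with a direct reference taken at pin time. mmgrab() takes an mm_count reference, so the mm_struct stays valid for the accounting without keeping the whole address space alive the way mmget()/mmput() would. A condensed sketch of the lifecycle, using the names from this file:

    /* at pin time, in usnic_uiom_get_pages() */
    uiomr->owning_mm = current->mm;
    mmgrab(uiomr->owning_mm);

    /* at release; if mmap_sem cannot be taken because the process is
     * exiting, the same body runs later from usnic_uiom_release_defer()
     * on usnic_uiom_wq */
    down_write(&uiomr->owning_mm->mmap_sem);
    uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
    up_write(&uiomr->owning_mm->mmap_sem);
    mmdrop(uiomr->owning_mm);   /* pairs with mmgrab() */
    kfree(uiomr);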
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.h b/drivers/infiniband/hw/usnic/usnic_uiom.h
index 8c096acff123..b86a9731071b 100644
--- a/drivers/infiniband/hw/usnic/usnic_uiom.h
+++ b/drivers/infiniband/hw/usnic/usnic_uiom.h
@@ -71,8 +71,7 @@ struct usnic_uiom_reg {
int writable;
struct list_head chunk_list;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
+ struct mm_struct *owning_mm;
};
struct usnic_uiom_chunk {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index a5719899f49a..398443f43dc3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -65,32 +65,36 @@ static struct workqueue_struct *event_wq;
static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hca_type_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
+static DEVICE_ATTR_RO(hca_type);
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t hw_rev_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
+static DEVICE_ATTR_RO(hw_rev);
-static ssize_t show_board(struct device *device, struct device_attribute *attr,
- char *buf)
+static ssize_t board_id_show(struct device *device,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
+static DEVICE_ATTR_RO(board_id);
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
+static struct attribute *pvrdma_class_attributes[] = {
+ &dev_attr_hw_rev.attr,
+ &dev_attr_hca_type.attr,
+ &dev_attr_board_id.attr,
+ NULL,
+};
-static struct device_attribute *pvrdma_class_attributes[] = {
- &dev_attr_hw_rev,
- &dev_attr_hca_type,
- &dev_attr_board_id
+static const struct attribute_group pvrdma_attr_group = {
+ .attrs = pvrdma_class_attributes,
};
static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
@@ -160,9 +164,7 @@ static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
int ret = -1;
- int i = 0;
- strlcpy(dev->ib_dev.name, "vmw_pvrdma%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
dev->flags = 0;
@@ -266,24 +268,16 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
}
dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
spin_lock_init(&dev->srq_tbl_lock);
+ rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
- ret = ib_register_device(&dev->ib_dev, NULL);
+ ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
if (ret)
goto err_srq_free;
- for (i = 0; i < ARRAY_SIZE(pvrdma_class_attributes); ++i) {
- ret = device_create_file(&dev->ib_dev.dev,
- pvrdma_class_attributes[i]);
- if (ret)
- goto err_class;
- }
-
dev->ib_active = true;
return 0;
-err_class:
- ib_unregister_device(&dev->ib_dev);
err_srq_free:
kfree(dev->srq_tbl);
err_qp_free:
@@ -735,7 +729,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
default:
dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
- event, dev->ib_dev.name);
+ event, dev_name(&dev->ib_dev.dev));
break;
}
}
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index 60083c0363a5..cf22f57a9f0d 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -499,7 +499,7 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
next_state = (attr_mask & IB_QP_STATE) ? attr->qp_state : cur_state;
if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
- attr_mask, IB_LINK_LAYER_ETHERNET)) {
+ attr_mask)) {
ret = -EINVAL;
goto out;
}
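This is the same core-API simplification applied to qedr/verbs.c earlier in the patch; schematically:

    /* before: every caller supplied the link layer */
    if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
                            attr_mask, IB_LINK_LAYER_ETHERNET))
            return -EINVAL;

    /* after: the unused link-layer argument is dropped core-wide */
    if (!ib_modify_qp_is_ok(cur_state, next_state, ibqp->qp_type,
                            attr_mask))
            return -EINVAL;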
diff --git a/drivers/infiniband/sw/rdmavt/Kconfig b/drivers/infiniband/sw/rdmavt/Kconfig
index 98e798007f75..7df896a18d38 100644
--- a/drivers/infiniband/sw/rdmavt/Kconfig
+++ b/drivers/infiniband/sw/rdmavt/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library"
- depends on 64BIT && ARCH_DMA_ADDR_T_64BIT
+ depends on X86_64 && ARCH_DMA_ADDR_T_64BIT
depends on PCI
select DMA_VIRT_OPS
---help---
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 5ce403c6cddb..1735deb1a9d4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -118,6 +118,187 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = {
};
EXPORT_SYMBOL(ib_rvt_state_ops);
+/* platform specific: return the last level cache (llc) size, in KiB */
+static int rvt_wss_llc_size(void)
+{
+ /* assume that the boot CPU value is universal for all CPUs */
+ return boot_cpu_data.x86_cache_size;
+}
+
+/* platform specific: cacheless copy */
+static void cacheless_memcpy(void *dst, void *src, size_t n)
+{
+ /*
+ * Use the only available X64 cacheless copy. Add a __user cast
+ * to quiet sparse. The src agument is already in the kernel so
+ * there are no security issues. The extra fault recovery machinery
+ * is not invoked.
+ */
+ __copy_user_nocache(dst, (void __user *)src, n, 0);
+}
+
+void rvt_wss_exit(struct rvt_dev_info *rdi)
+{
+ struct rvt_wss *wss = rdi->wss;
+
+ if (!wss)
+ return;
+
+ /* coded to handle partially initialized and repeat callers */
+ kfree(wss->entries);
+ wss->entries = NULL;
+ kfree(rdi->wss);
+ rdi->wss = NULL;
+}
+
+/**
+ * rvt_wss_init - Init wss data structures
+ *
+ * Return: 0 on success
+ */
+int rvt_wss_init(struct rvt_dev_info *rdi)
+{
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+ unsigned int wss_threshold = rdi->dparms.wss_threshold;
+ unsigned int wss_clean_period = rdi->dparms.wss_clean_period;
+ long llc_size;
+ long llc_bits;
+ long table_size;
+ long table_bits;
+ struct rvt_wss *wss;
+ int node = rdi->dparms.node;
+
+ if (sge_copy_mode != RVT_SGE_COPY_ADAPTIVE) {
+ rdi->wss = NULL;
+ return 0;
+ }
+
+ rdi->wss = kzalloc_node(sizeof(*rdi->wss), GFP_KERNEL, node);
+ if (!rdi->wss)
+ return -ENOMEM;
+ wss = rdi->wss;
+
+ /* check for a valid percent range - default to 80 if none or invalid */
+ if (wss_threshold < 1 || wss_threshold > 100)
+ wss_threshold = 80;
+
+ /* reject a wildly large period */
+ if (wss_clean_period > 1000000)
+ wss_clean_period = 256;
+
+ /* reject a zero period */
+ if (wss_clean_period == 0)
+ wss_clean_period = 1;
+
+ /*
+ * Calculate the table size - the next power of 2 larger than the
+ * LLC size. LLC size is in KiB.
+ */
+ llc_size = rvt_wss_llc_size() * 1024;
+ table_size = roundup_pow_of_two(llc_size);
+
+ /* one bit per page in rounded up table */
+ llc_bits = llc_size / PAGE_SIZE;
+ table_bits = table_size / PAGE_SIZE;
+ wss->pages_mask = table_bits - 1;
+ wss->num_entries = table_bits / BITS_PER_LONG;
+
+ wss->threshold = (llc_bits * wss_threshold) / 100;
+ if (wss->threshold == 0)
+ wss->threshold = 1;
+
+ wss->clean_period = wss_clean_period;
+ atomic_set(&wss->clean_counter, wss_clean_period);
+
+ wss->entries = kcalloc_node(wss->num_entries, sizeof(*wss->entries),
+ GFP_KERNEL, node);
+ if (!wss->entries) {
+ rvt_wss_exit(rdi);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/*
+ * Advance the clean counter. When the clean period has expired,
+ * clean an entry.
+ *
+ * This is implemented in atomics to avoid locking. Because multiple
+ * variables are involved, it can be racy, which can lead to slightly
+ * inaccurate information. Since this is only a heuristic, this is
+ * OK. Any inaccuracies will clean themselves out as the counter
+ * advances. That said, it is unlikely the entry clean operation will
+ * race - the next possible racer will not start until the next clean
+ * period.
+ *
+ * The clean counter is implemented as a decrement to zero. When zero
+ * is reached an entry is cleaned.
+ */
+static void wss_advance_clean_counter(struct rvt_wss *wss)
+{
+ int entry;
+ int weight;
+ unsigned long bits;
+
+ /* become the cleaner if we decrement the counter to zero */
+ if (atomic_dec_and_test(&wss->clean_counter)) {
+ /*
+ * Set, not add, the clean period. This avoids an issue
+ * where the counter could decrement below the clean period.
+ * Doing a set can result in lost decrements, slowing the
+ * clean advance. Since this is a heuristic, this possible
+ * slowdown is OK.
+ *
+ * An alternative is to loop, advancing the counter by a
+ * clean period until the result is > 0. However, this could
+ * lead to several threads keeping another in the clean loop.
+ * This could be mitigated by limiting the number of times
+ * we stay in the loop.
+ */
+ atomic_set(&wss->clean_counter, wss->clean_period);
+
+ /*
+ * Uniquely grab the entry to clean and move to next.
+ * The current entry is always the lower bits of
+ * wss.clean_entry. The table size, wss.num_entries,
+ * is always a power-of-2.
+ */
+ entry = (atomic_inc_return(&wss->clean_entry) - 1)
+ & (wss->num_entries - 1);
+
+ /* clear the entry and count the bits */
+ bits = xchg(&wss->entries[entry], 0);
+ weight = hweight64((u64)bits);
+ /* only adjust the contended total count if needed */
+ if (weight)
+ atomic_sub(weight, &wss->total_count);
+ }
+}
+
+/*
+ * Insert the given address into the working set array.
+ */
+static void wss_insert(struct rvt_wss *wss, void *address)
+{
+ u32 page = ((unsigned long)address >> PAGE_SHIFT) & wss->pages_mask;
+ u32 entry = page / BITS_PER_LONG; /* assumes this ends up a shift */
+ u32 nr = page & (BITS_PER_LONG - 1);
+
+ if (!test_and_set_bit(nr, &wss->entries[entry]))
+ atomic_inc(&wss->total_count);
+
+ wss_advance_clean_counter(wss);
+}
+
+/*
+ * Is the working set larger than the threshold?
+ */
+static inline bool wss_exceeds_threshold(struct rvt_wss *wss)
+{
+ return atomic_read(&wss->total_count) >= wss->threshold;
+}
+
static void get_map_page(struct rvt_qpn_table *qpt,
struct rvt_qpn_map *map)
{
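
For illustration, the sizing arithmetic that rvt_wss_init() performs above
can be worked through in a standalone sketch. This is a minimal userspace
program, not driver code; the 24 MiB LLC size is an assumed value and
roundup_pow_of_two_l() is a hypothetical stand-in for the kernel helper.

	/* Sketch of the WSS table sizing in rvt_wss_init(); illustrative only. */
	#include <stdio.h>

	#define PAGE_SIZE 4096L

	static long roundup_pow_of_two_l(long x)
	{
		long r = 1;

		while (r < x)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		const long bits_per_long = 8 * (long)sizeof(long);
		long llc_size = 24L * 1024 * 1024;	/* assumed LLC, bytes */
		long table_size = roundup_pow_of_two_l(llc_size);
		long llc_bits = llc_size / PAGE_SIZE;	/* one bit per page */
		long table_bits = table_size / PAGE_SIZE;
		long threshold = (llc_bits * 80) / 100;	/* default 80 percent */

		printf("pages_mask=%#lx num_entries=%ld threshold=%ld\n",
		       (unsigned long)(table_bits - 1),
		       table_bits / bits_per_long, threshold);
		return 0;	/* prints 0x1fff, 128 and 4915 here */
	}
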
@@ -1164,11 +1345,8 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int lastwqe = 0;
int mig = 0;
int pmtu = 0; /* for gcc warning only */
- enum rdma_link_layer link;
int opa_ah;
- link = rdma_port_get_link_layer(ibqp->device, qp->port_num);
-
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
@@ -1179,7 +1357,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
opa_ah = rdma_cap_opa_ah(ibqp->device, qp->port_num);
if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
- attr_mask, link))
+ attr_mask))
goto inval;
if (rdi->driver_f.check_modify_qp &&
@@ -1718,7 +1896,7 @@ static inline int rvt_qp_is_avail(
*/
static int rvt_post_one_wr(struct rvt_qp *qp,
const struct ib_send_wr *wr,
- int *call_send)
+ bool *call_send)
{
struct rvt_swqe *wqe;
u32 next;
@@ -1823,15 +2001,11 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
wqe->wr.num_sge = j;
}
- /* general part of wqe valid - allow for driver checks */
- if (rdi->driver_f.check_send_wqe) {
- ret = rdi->driver_f.check_send_wqe(qp, wqe);
- if (ret < 0)
- goto bail_inval_free;
- if (ret)
- *call_send = ret;
- }
-
+ /*
+ * Calculate and set SWQE PSN values prior to handing it off
+ * to the driver's check routine. This gives the driver the
+ * opportunity to adjust PSN values based on internal checks.
+ */
log_pmtu = qp->log_pmtu;
if (qp->ibqp.qp_type != IB_QPT_UC &&
qp->ibqp.qp_type != IB_QPT_RC) {
@@ -1856,8 +2030,18 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
(wqe->length ?
((wqe->length - 1) >> log_pmtu) :
0);
- qp->s_next_psn = wqe->lpsn + 1;
}
+
+ /* general part of wqe valid - allow for driver checks */
+ if (rdi->driver_f.setup_wqe) {
+ ret = rdi->driver_f.setup_wqe(qp, wqe, call_send);
+ if (ret < 0)
+ goto bail_inval_free_ref;
+ }
+
+ if (!(rdi->post_parms[wr->opcode].flags & RVT_OPERATION_LOCAL))
+ qp->s_next_psn = wqe->lpsn + 1;
+
if (unlikely(reserved_op)) {
wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
rvt_qp_wqe_reserve(qp, wqe);
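
The PSN bookkeeping above packs a whole message into a contiguous PSN range:
lpsn is psn plus ((length - 1) >> log_pmtu). A hedged worked example with
assumed values (4 KiB path MTU, so log_pmtu = 12):

	/* Sketch of the lpsn computation in rvt_post_one_wr(); values assumed. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int psn = 100;		/* assumed starting PSN */
		unsigned int length = 10000;	/* payload bytes */
		unsigned int log_pmtu = 12;	/* 4096-byte path MTU */
		unsigned int lpsn = psn +
			(length ? ((length - 1) >> log_pmtu) : 0);

		/* 10000 bytes at 4 KiB pMTU -> 3 packets, PSNs 100..102 */
		printf("psn=%u lpsn=%u packets=%u\n",
		       psn, lpsn, lpsn - psn + 1);
		return 0;
	}
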
@@ -1871,6 +2055,10 @@ static int rvt_post_one_wr(struct rvt_qp *qp,
return 0;
+bail_inval_free_ref:
+ if (qp->ibqp.qp_type != IB_QPT_UC &&
+ qp->ibqp.qp_type != IB_QPT_RC)
+ atomic_dec(&ibah_to_rvtah(ud_wr(wr)->ah)->refcount);
bail_inval_free:
/* release mr holds */
while (j) {
@@ -1897,7 +2085,7 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
unsigned long flags = 0;
- int call_send;
+ bool call_send;
unsigned nreq = 0;
int err = 0;
@@ -1930,7 +2118,11 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
- if (call_send)
+ /*
+ * Only call do_send if there is exactly one packet, and the
+ * driver said it was ok.
+ */
+ if (nreq == 1 && call_send)
rdi->driver_f.do_send(qp);
else
rdi->driver_f.schedule_send_no_lock(qp);
@@ -2465,3 +2657,454 @@ void rvt_qp_iter(struct rvt_dev_info *rdi,
rcu_read_unlock();
}
EXPORT_SYMBOL(rvt_qp_iter);
+
+/*
+ * This should be called with s_lock held.
+ */
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status)
+{
+ u32 old_last, last;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ return;
+
+ last = qp->s_last;
+ old_last = last;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ if (++last >= qp->s_size)
+ last = 0;
+ trace_rvt_qp_send_completion(qp, wqe, last);
+ qp->s_last = last;
+ /* See post_send() */
+ barrier();
+ rvt_put_swqe(wqe);
+ if (qp->ibqp.qp_type == IB_QPT_UD ||
+ qp->ibqp.qp_type == IB_QPT_SMI ||
+ qp->ibqp.qp_type == IB_QPT_GSI)
+ atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);
+
+ rvt_qp_swqe_complete(qp,
+ wqe,
+ rdi->wc_opcode[wqe->wr.opcode],
+ status);
+
+ if (qp->s_acked == old_last)
+ qp->s_acked = last;
+ if (qp->s_cur == old_last)
+ qp->s_cur = last;
+ if (qp->s_tail == old_last)
+ qp->s_tail = last;
+ if (qp->state == IB_QPS_SQD && last == qp->s_cur)
+ qp->s_draining = 0;
+}
+EXPORT_SYMBOL(rvt_send_complete);
+
+/**
+ * rvt_copy_sge - copy data to SGE memory
+ * @qp: associated QP
+ * @ss: the SGE state
+ * @data: the data to copy
+ * @length: the length of the data
+ * @release: boolean to release MR
+ * @copy_last: do a separate copy of the last 8 bytes
+ */
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last)
+{
+ struct rvt_sge *sge = &ss->sge;
+ int i;
+ bool in_last = false;
+ bool cacheless_copy = false;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+ struct rvt_wss *wss = rdi->wss;
+ unsigned int sge_copy_mode = rdi->dparms.sge_copy_mode;
+
+ if (sge_copy_mode == RVT_SGE_COPY_CACHELESS) {
+ cacheless_copy = length >= PAGE_SIZE;
+ } else if (sge_copy_mode == RVT_SGE_COPY_ADAPTIVE) {
+ if (length >= PAGE_SIZE) {
+ /*
+ * NOTE: this *assumes*:
+ * o The first vaddr is the dest.
+ * o If multiple pages, then vaddr is sequential.
+ */
+ wss_insert(wss, sge->vaddr);
+ if (length >= (2 * PAGE_SIZE))
+ wss_insert(wss, (sge->vaddr + PAGE_SIZE));
+
+ cacheless_copy = wss_exceeds_threshold(wss);
+ } else {
+ wss_advance_clean_counter(wss);
+ }
+ }
+
+ if (copy_last) {
+ if (length > 8) {
+ length -= 8;
+ } else {
+ copy_last = false;
+ in_last = true;
+ }
+ }
+
+again:
+ while (length) {
+ u32 len = rvt_get_sge_length(sge, length);
+
+ WARN_ON_ONCE(len == 0);
+ if (unlikely(in_last)) {
+ /* enforce byte transfer ordering */
+ for (i = 0; i < len; i++)
+ ((u8 *)sge->vaddr)[i] = ((u8 *)data)[i];
+ } else if (cacheless_copy) {
+ cacheless_memcpy(sge->vaddr, data, len);
+ } else {
+ memcpy(sge->vaddr, data, len);
+ }
+ rvt_update_sge(ss, len, release);
+ data += len;
+ length -= len;
+ }
+
+ if (copy_last) {
+ copy_last = false;
+ in_last = true;
+ length = 8;
+ goto again;
+ }
+}
+EXPORT_SYMBOL(rvt_copy_sge);
+
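
The copy_last handling in rvt_copy_sge() above splits off the final 8 bytes
and moves them strictly byte by byte so the last word lands in order. A
minimal userspace analogue of that split (illustrative, not driver code):

	/* Sketch of the ordered-tail split used by rvt_copy_sge(). */
	#include <stdio.h>
	#include <string.h>

	static void copy_with_ordered_tail(void *dst, const void *src, size_t len)
	{
		size_t i, bulk = len > 8 ? len - 8 : 0;

		memcpy(dst, src, bulk);		/* bulk copy of all but the tail */
		for (i = bulk; i < len; i++)	/* tail bytes, in order */
			((unsigned char *)dst)[i] =
				((const unsigned char *)src)[i];
	}

	int main(void)
	{
		char src[16] = "fifteen chars!!";
		char dst[16] = { 0 };

		copy_with_ordered_tail(dst, src, sizeof(src));
		printf("%s\n", dst);
		return 0;
	}
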
+/**
+ * ruc_loopback - handle UC and RC loopback requests
+ * @sqp: the sending QP
+ *
+ * This is called from rvt_do_send() to forward a WQE addressed to the same HFI.
+ * Note that although we are single threaded due to the send engine, we still
+ * have to protect against post_send(). We don't have to worry about
+ * receive interrupts since this is a connected protocol and all packets
+ * will pass through here.
+ */
+void rvt_ruc_loopback(struct rvt_qp *sqp)
+{
+ struct rvt_ibport *rvp = NULL;
+ struct rvt_dev_info *rdi = ib_to_rvt(sqp->ibqp.device);
+ struct rvt_qp *qp;
+ struct rvt_swqe *wqe;
+ struct rvt_sge *sge;
+ unsigned long flags;
+ struct ib_wc wc;
+ u64 sdata;
+ atomic64_t *maddr;
+ enum ib_wc_status send_status;
+ bool release;
+ int ret;
+ bool copy_last = false;
+ int local_ops = 0;
+
+ rcu_read_lock();
+ rvp = rdi->ports[sqp->port_num - 1];
+
+ /*
+ * Note that we check the responder QP state after
+ * checking the requester's state.
+ */
+
+ qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), rvp,
+ sqp->remote_qpn);
+
+ spin_lock_irqsave(&sqp->s_lock, flags);
+
+ /* Return if we are already busy processing a work request. */
+ if ((sqp->s_flags & (RVT_S_BUSY | RVT_S_ANY_WAIT)) ||
+ !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
+ goto unlock;
+
+ sqp->s_flags |= RVT_S_BUSY;
+
+again:
+ if (sqp->s_last == READ_ONCE(sqp->s_head))
+ goto clr_busy;
+ wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);
+
+ /* Return if it is not OK to start a new work request. */
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
+ goto clr_busy;
+ /* We are in the error state, flush the work request. */
+ send_status = IB_WC_WR_FLUSH_ERR;
+ goto flush_send;
+ }
+
+ /*
+ * We can rely on the entry not changing without the s_lock
+ * being held until we update s_last.
+ * We increment s_cur to indicate s_last is in progress.
+ */
+ if (sqp->s_last == sqp->s_cur) {
+ if (++sqp->s_cur >= sqp->s_size)
+ sqp->s_cur = 0;
+ }
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+
+ if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
+ qp->ibqp.qp_type != sqp->ibqp.qp_type) {
+ rvp->n_pkt_drops++;
+ /*
+ * For RC, the requester would timeout and retry so
+ * shortcut the timeouts and just signal too many retries.
+ */
+ if (sqp->ibqp.qp_type == IB_QPT_RC)
+ send_status = IB_WC_RETRY_EXC_ERR;
+ else
+ send_status = IB_WC_SUCCESS;
+ goto serr;
+ }
+
+ memset(&wc, 0, sizeof(wc));
+ send_status = IB_WC_SUCCESS;
+
+ release = true;
+ sqp->s_sge.sge = wqe->sg_list[0];
+ sqp->s_sge.sg_list = wqe->sg_list + 1;
+ sqp->s_sge.num_sge = wqe->wr.num_sge;
+ sqp->s_len = wqe->length;
+ switch (wqe->wr.opcode) {
+ case IB_WR_REG_MR:
+ goto send_comp;
+
+ case IB_WR_LOCAL_INV:
+ if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
+ if (rvt_invalidate_rkey(sqp,
+ wqe->wr.ex.invalidate_rkey))
+ send_status = IB_WC_LOC_PROT_ERR;
+ local_ops = 1;
+ }
+ goto send_comp;
+
+ case IB_WR_SEND_WITH_INV:
+ if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
+ wc.wc_flags = IB_WC_WITH_INVALIDATE;
+ wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
+ }
+ goto send;
+
+ case IB_WR_SEND_WITH_IMM:
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ /* FALLTHROUGH */
+ case IB_WR_SEND:
+send:
+ ret = rvt_get_rwqe(qp, false);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ break;
+
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+ wc.wc_flags = IB_WC_WITH_IMM;
+ wc.ex.imm_data = wqe->wr.ex.imm_data;
+ ret = rvt_get_rwqe(qp, true);
+ if (ret < 0)
+ goto op_err;
+ if (!ret)
+ goto rnr_nak;
+ /* skip copy_last set and qp_access_flags recheck */
+ goto do_write;
+ case IB_WR_RDMA_WRITE:
+ copy_last = rvt_is_user_qp(qp);
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
+ goto inv_err;
+do_write:
+ if (wqe->length == 0)
+ break;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_WRITE)))
+ goto acc_err;
+ qp->r_sge.sg_list = NULL;
+ qp->r_sge.num_sge = 1;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_RDMA_READ:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
+ wqe->rdma_wr.remote_addr,
+ wqe->rdma_wr.rkey,
+ IB_ACCESS_REMOTE_READ)))
+ goto acc_err;
+ release = false;
+ sqp->s_sge.sg_list = NULL;
+ sqp->s_sge.num_sge = 1;
+ qp->r_sge.sge = wqe->sg_list[0];
+ qp->r_sge.sg_list = wqe->sg_list + 1;
+ qp->r_sge.num_sge = wqe->wr.num_sge;
+ qp->r_sge.total_len = wqe->length;
+ break;
+
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
+ goto inv_err;
+ if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
+ wqe->atomic_wr.remote_addr,
+ wqe->atomic_wr.rkey,
+ IB_ACCESS_REMOTE_ATOMIC)))
+ goto acc_err;
+ /* Perform atomic OP and save result. */
+ maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
+ sdata = wqe->atomic_wr.compare_add;
+ *(u64 *)sqp->s_sge.sge.vaddr =
+ (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
+ (u64)atomic64_add_return(sdata, maddr) - sdata :
+ (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
+ sdata, wqe->atomic_wr.swap);
+ rvt_put_mr(qp->r_sge.sge.mr);
+ qp->r_sge.num_sge = 0;
+ goto send_comp;
+
+ default:
+ send_status = IB_WC_LOC_QP_OP_ERR;
+ goto serr;
+ }
+
+ sge = &sqp->s_sge.sge;
+ while (sqp->s_len) {
+ u32 len = sqp->s_len;
+
+ if (len > sge->length)
+ len = sge->length;
+ if (len > sge->sge_length)
+ len = sge->sge_length;
+ WARN_ON_ONCE(len == 0);
+ rvt_copy_sge(qp, &qp->r_sge, sge->vaddr,
+ len, release, copy_last);
+ sge->vaddr += len;
+ sge->length -= len;
+ sge->sge_length -= len;
+ if (sge->sge_length == 0) {
+ if (!release)
+ rvt_put_mr(sge->mr);
+ if (--sqp->s_sge.num_sge)
+ *sge = *sqp->s_sge.sg_list++;
+ } else if (sge->length == 0 && sge->mr->lkey) {
+ if (++sge->n >= RVT_SEGSZ) {
+ if (++sge->m >= sge->mr->mapsz)
+ break;
+ sge->n = 0;
+ }
+ sge->vaddr =
+ sge->mr->map[sge->m]->segs[sge->n].vaddr;
+ sge->length =
+ sge->mr->map[sge->m]->segs[sge->n].length;
+ }
+ sqp->s_len -= len;
+ }
+ if (release)
+ rvt_put_ss(&qp->r_sge);
+
+ if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
+ goto send_comp;
+
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
+ wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
+ else
+ wc.opcode = IB_WC_RECV;
+ wc.wr_id = qp->r_wr_id;
+ wc.status = IB_WC_SUCCESS;
+ wc.byte_len = wqe->length;
+ wc.qp = &qp->ibqp;
+ wc.src_qp = qp->remote_qpn;
+ wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
+ wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
+ wc.port_num = 1;
+ /* Signal completion event if the solicited bit is set. */
+ rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
+ wqe->wr.send_flags & IB_SEND_SOLICITED);
+
+send_comp:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvp->n_loop_pkts++;
+flush_send:
+ sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
+ rvt_send_complete(sqp, wqe, send_status);
+ if (local_ops) {
+ atomic_dec(&sqp->local_ops_pending);
+ local_ops = 0;
+ }
+ goto again;
+
+rnr_nak:
+ /* Handle RNR NAK */
+ if (qp->ibqp.qp_type == IB_QPT_UC)
+ goto send_comp;
+ rvp->n_rnr_naks++;
+ /*
+ * Note: we don't need the s_lock held since the BUSY flag
+ * makes this single threaded.
+ */
+ if (sqp->s_rnr_retry == 0) {
+ send_status = IB_WC_RNR_RETRY_EXC_ERR;
+ goto serr;
+ }
+ if (sqp->s_rnr_retry_cnt < 7)
+ sqp->s_rnr_retry--;
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
+ goto clr_busy;
+ rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
+ IB_AETH_CREDIT_SHIFT);
+ goto clr_busy;
+
+op_err:
+ send_status = IB_WC_REM_OP_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+inv_err:
+ send_status = IB_WC_REM_INV_REQ_ERR;
+ wc.status = IB_WC_LOC_QP_OP_ERR;
+ goto err;
+
+acc_err:
+ send_status = IB_WC_REM_ACCESS_ERR;
+ wc.status = IB_WC_LOC_PROT_ERR;
+err:
+ /* responder goes to error state */
+ rvt_rc_error(qp, wc.status);
+
+serr:
+ spin_lock_irqsave(&sqp->s_lock, flags);
+ rvt_send_complete(sqp, wqe, send_status);
+ if (sqp->ibqp.qp_type == IB_QPT_RC) {
+ int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
+
+ sqp->s_flags &= ~RVT_S_BUSY;
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+ if (lastwqe) {
+ struct ib_event ev;
+
+ ev.device = sqp->ibqp.device;
+ ev.element.qp = &sqp->ibqp;
+ ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
+ }
+ goto done;
+ }
+clr_busy:
+ sqp->s_flags &= ~RVT_S_BUSY;
+unlock:
+ spin_unlock_irqrestore(&sqp->s_lock, flags);
+done:
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL(rvt_ruc_loopback);
diff --git a/drivers/infiniband/sw/rdmavt/qp.h b/drivers/infiniband/sw/rdmavt/qp.h
index 264811fdc530..6d883972e0b8 100644
--- a/drivers/infiniband/sw/rdmavt/qp.h
+++ b/drivers/infiniband/sw/rdmavt/qp.h
@@ -66,4 +66,6 @@ int rvt_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
const struct ib_send_wr **bad_wr);
int rvt_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr);
+int rvt_wss_init(struct rvt_dev_info *rdi);
+void rvt_wss_exit(struct rvt_dev_info *rdi);
#endif /* DEF_RVTQP_H */
diff --git a/drivers/infiniband/sw/rdmavt/trace_tx.h b/drivers/infiniband/sw/rdmavt/trace_tx.h
index 0ef25fc49f25..d5df352eadb1 100644
--- a/drivers/infiniband/sw/rdmavt/trace_tx.h
+++ b/drivers/infiniband/sw/rdmavt/trace_tx.h
@@ -153,6 +153,48 @@ TRACE_EVENT(
)
);
+TRACE_EVENT(
+ rvt_qp_send_completion,
+ TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
+ TP_ARGS(qp, wqe, idx),
+ TP_STRUCT__entry(
+ RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
+ __field(struct rvt_swqe *, wqe)
+ __field(u64, wr_id)
+ __field(u32, qpn)
+ __field(u32, qpt)
+ __field(u32, length)
+ __field(u32, idx)
+ __field(u32, ssn)
+ __field(enum ib_wr_opcode, opcode)
+ __field(int, send_flags)
+ ),
+ TP_fast_assign(
+ RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device))
+ __entry->wqe = wqe;
+ __entry->wr_id = wqe->wr.wr_id;
+ __entry->qpn = qp->ibqp.qp_num;
+ __entry->qpt = qp->ibqp.qp_type;
+ __entry->length = wqe->length;
+ __entry->idx = idx;
+ __entry->ssn = wqe->ssn;
+ __entry->opcode = wqe->wr.opcode;
+ __entry->send_flags = wqe->wr.send_flags;
+ ),
+ TP_printk(
+ "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
+ __get_str(dev),
+ __entry->qpn,
+ __entry->qpt,
+ __entry->wqe,
+ __entry->idx,
+ __entry->wr_id,
+ __entry->length,
+ __entry->ssn,
+ __entry->opcode,
+ __entry->send_flags
+ )
+);
#endif /* __RVT_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c
index 17e4abc067af..723d3daf2eba 100644
--- a/drivers/infiniband/sw/rdmavt/vt.c
+++ b/drivers/infiniband/sw/rdmavt/vt.c
@@ -774,6 +774,13 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
goto bail_no_mr;
}
+ /* Memory Working Set Size */
+ ret = rvt_wss_init(rdi);
+ if (ret) {
+ rvt_pr_err(rdi, "Error in WSS init.\n");
+ goto bail_mr;
+ }
+
/* Completion queues */
spin_lock_init(&rdi->n_cqs_lock);
@@ -828,10 +835,11 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rdi->ibdev.driver_id = driver_id;
/* We are now good to announce we exist */
- ret = ib_register_device(&rdi->ibdev, rdi->driver_f.port_callback);
+ ret = ib_register_device(&rdi->ibdev, dev_name(&rdi->ibdev.dev),
+ rdi->driver_f.port_callback);
if (ret) {
rvt_pr_err(rdi, "Failed to register driver with ib core.\n");
- goto bail_mr;
+ goto bail_wss;
}
rvt_create_mad_agents(rdi);
@@ -839,6 +847,8 @@ int rvt_register_device(struct rvt_dev_info *rdi, u32 driver_id)
rvt_pr_info(rdi, "Registration with rdmavt done.\n");
return ret;
+bail_wss:
+ rvt_wss_exit(rdi);
bail_mr:
rvt_mr_exit(rdi);
@@ -862,6 +872,7 @@ void rvt_unregister_device(struct rvt_dev_info *rdi)
rvt_free_mad_agents(rdi);
ib_unregister_device(&rdi->ibdev);
+ rvt_wss_exit(rdi);
rvt_mr_exit(rdi);
rvt_qp_exit(rdi);
}
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 10999fa69281..383e65c7bbc0 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -103,7 +103,7 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
rxe->attr.max_ee_init_rd_atom = RXE_MAX_EE_INIT_RD_ATOM;
- rxe->attr.atomic_cap = RXE_ATOMIC_CAP;
+ rxe->attr.atomic_cap = IB_ATOMIC_HCA;
rxe->attr.max_ee = RXE_MAX_EE;
rxe->attr.max_rdd = RXE_MAX_RDD;
rxe->attr.max_mw = RXE_MAX_MW;
@@ -128,9 +128,9 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
- port->attr.state = RXE_PORT_STATE;
- port->attr.max_mtu = RXE_PORT_MAX_MTU;
- port->attr.active_mtu = RXE_PORT_ACTIVE_MTU;
+ port->attr.state = IB_PORT_DOWN;
+ port->attr.max_mtu = IB_MTU_4096;
+ port->attr.active_mtu = IB_MTU_256;
port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
@@ -147,8 +147,7 @@ static int rxe_init_port_param(struct rxe_port *port)
port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
port->attr.phys_state = RXE_PORT_PHYS_STATE;
- port->mtu_cap =
- ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
+ port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
return 0;
@@ -300,7 +299,7 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
mtu = eth_mtu_int_to_enum(ndev_mtu);
 /* Make sure that the new MTU is in range */
- mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;
+ mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 83311dd07019..ea089cb091ad 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -191,6 +191,7 @@ static inline void reset_retry_counters(struct rxe_qp *qp)
{
qp->comp.retry_cnt = qp->attr.retry_cnt;
qp->comp.rnr_retry = qp->attr.rnr_retry;
+ qp->comp.started_retry = 0;
}
static inline enum comp_state check_psn(struct rxe_qp *qp,
@@ -253,6 +254,17 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
if (pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE &&
pkt->opcode != IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST) {
+ /* read retries of partial data may restart from
+ * read response first or response only.
+ */
+ if ((pkt->psn == wqe->first_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) ||
+ (wqe->first_psn == wqe->last_psn &&
+ pkt->opcode ==
+ IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY))
+ break;
+
return COMPST_ERROR;
}
break;
@@ -499,11 +511,11 @@ static inline enum comp_state complete_wqe(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- qp->comp.opcode = -1;
-
- if (pkt) {
- if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
- qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ if (pkt && wqe->state == wqe_state_pending) {
+ if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
+ qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
+ qp->comp.opcode = -1;
+ }
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
@@ -676,6 +688,20 @@ int rxe_completer(void *arg)
goto exit;
}
+ /* if we've started a retry, don't start another
+ * retry sequence, unless this is a timeout.
+ */
+ if (qp->comp.started_retry &&
+ !qp->comp.timeout_retry) {
+ if (pkt) {
+ rxe_drop_ref(pkt->qp);
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ goto done;
+ }
+
if (qp->comp.retry_cnt > 0) {
if (qp->comp.retry_cnt != 7)
qp->comp.retry_cnt--;
@@ -692,6 +718,7 @@ int rxe_completer(void *arg)
rxe_counter_inc(rxe,
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
+ qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 1);
}
@@ -701,7 +728,7 @@ int rxe_completer(void *arg)
skb = NULL;
}
- goto exit;
+ goto done;
} else {
rxe_counter_inc(rxe, RXE_CNT_RETRY_EXCEEDED);
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 2ee4b08b00ea..a57276f2cb84 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -30,7 +30,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
-
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -97,7 +97,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
if (err) {
- kvfree(cq->queue->buf);
+ vfree(cq->queue->buf);
kfree(cq->queue);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 87d14f7ef21b..afd53f57a62b 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -144,8 +144,7 @@ void rxe_loopback(struct sk_buff *skb);
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb);
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt);
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc);
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc);
enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num);
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num);
struct device *rxe_dma_device(struct rxe_dev *rxe);
@@ -196,7 +195,7 @@ static inline int qp_mtu(struct rxe_qp *qp)
if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
return qp->attr.path_mtu;
else
- return RXE_PORT_MAX_MTU;
+ return IB_MTU_4096;
}
static inline int rcv_wqe_size(int max_sge)
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index dff605fdf60f..9d3916b93f23 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -573,33 +573,20 @@ struct rxe_mem *lookup_mem(struct rxe_pd *pd, int access, u32 key,
struct rxe_dev *rxe = to_rdev(pd->ibpd.device);
int index = key >> 8;
- if (index >= RXE_MIN_MR_INDEX && index <= RXE_MAX_MR_INDEX) {
- mem = rxe_pool_get_index(&rxe->mr_pool, index);
- if (!mem)
- goto err1;
- } else {
- goto err1;
+ mem = rxe_pool_get_index(&rxe->mr_pool, index);
+ if (!mem)
+ return NULL;
+
+ if (unlikely((type == lookup_local && mem->lkey != key) ||
+ (type == lookup_remote && mem->rkey != key) ||
+ mem->pd != pd ||
+ (access && !(access & mem->access)) ||
+ mem->state != RXE_MEM_STATE_VALID)) {
+ rxe_drop_ref(mem);
+ mem = NULL;
}
- if ((type == lookup_local && mem->lkey != key) ||
- (type == lookup_remote && mem->rkey != key))
- goto err2;
-
- if (mem->pd != pd)
- goto err2;
-
- if (access && !(access & mem->access))
- goto err2;
-
- if (mem->state != RXE_MEM_STATE_VALID)
- goto err2;
-
return mem;
-
-err2:
- rxe_drop_ref(mem);
-err1:
- return NULL;
}
int rxe_mem_map_pages(struct rxe_dev *rxe, struct rxe_mem *mem,
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 8094cbaa54a9..40e82e0f6c2d 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -72,7 +72,7 @@ struct rxe_dev *get_rxe_by_name(const char *name)
spin_lock_bh(&dev_list_lock);
list_for_each_entry(rxe, &rxe_dev_list, list) {
- if (!strcmp(name, rxe->ib_dev.name)) {
+ if (!strcmp(name, dev_name(&rxe->ib_dev.dev))) {
found = rxe;
break;
}
@@ -182,19 +182,11 @@ static struct dst_entry *rxe_find_route6(struct net_device *ndev,
#endif
-static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
+static struct dst_entry *rxe_find_route(struct net_device *ndev,
struct rxe_qp *qp,
struct rxe_av *av)
{
- const struct ib_gid_attr *attr;
struct dst_entry *dst = NULL;
- struct net_device *ndev;
-
- attr = rdma_get_gid_attr(&rxe->ib_dev, qp->attr.port_num,
- av->grh.sgid_index);
- if (IS_ERR(attr))
- return NULL;
- ndev = attr->ndev;
if (qp_type(qp) == IB_QPT_RC)
dst = sk_dst_get(qp->sk->sk);
@@ -229,7 +221,6 @@ static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
sk_dst_set(qp->sk->sk, dst);
}
}
- rdma_put_gid_attr(attr);
return dst;
}
@@ -377,8 +368,8 @@ static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}
-static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare4(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
@@ -387,7 +378,7 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -396,8 +387,8 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
av->grh.traffic_class, av->grh.hop_limit, df, xnet);
@@ -406,15 +397,15 @@ static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, struct rxe_av *av)
+static int prepare6(struct rxe_pkt_info *pkt, struct sk_buff *skb,
+ struct rxe_av *av)
{
struct rxe_qp *qp = pkt->qp;
struct dst_entry *dst;
struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;
- dst = rxe_find_route(rxe, qp, av);
+ dst = rxe_find_route(skb->dev, qp, av);
if (!dst) {
pr_err("Host not reachable\n");
return -EHOSTUNREACH;
@@ -423,8 +414,8 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
if (!memcmp(saddr, daddr, sizeof(*daddr)))
pkt->mask |= RXE_LOOPBACK_MASK;
- prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
- htons(ROCE_V2_UDP_DPORT));
+ prepare_udp_hdr(skb, cpu_to_be16(qp->src_port),
+ cpu_to_be16(ROCE_V2_UDP_DPORT));
prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
av->grh.traffic_class,
@@ -434,16 +425,15 @@ static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
return 0;
}
-int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
- struct sk_buff *skb, u32 *crc)
+int rxe_prepare(struct rxe_pkt_info *pkt, struct sk_buff *skb, u32 *crc)
{
int err = 0;
struct rxe_av *av = rxe_get_av(pkt);
if (av->network_type == RDMA_NETWORK_IPV4)
- err = prepare4(rxe, pkt, skb, av);
+ err = prepare4(pkt, skb, av);
else if (av->network_type == RDMA_NETWORK_IPV6)
- err = prepare6(rxe, pkt, skb, av);
+ err = prepare6(pkt, skb, av);
*crc = rxe_icrc_hdr(pkt, skb);
@@ -501,11 +491,6 @@ void rxe_loopback(struct sk_buff *skb)
rxe_rcv(skb);
}
-static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
-{
- return rxe->port.port_guid == av->grh.dgid.global.interface_id;
-}
-
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
int paylen, struct rxe_pkt_info *pkt)
{
@@ -625,7 +610,7 @@ void rxe_port_up(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_UP;
rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
- pr_info("set %s active\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set active\n");
}
/* Caller must hold net_info_lock */
@@ -638,7 +623,7 @@ void rxe_port_down(struct rxe_dev *rxe)
port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;
rxe_port_event(rxe, IB_EVENT_PORT_ERR);
- pr_info("set %s down\n", rxe->ib_dev.name);
+ dev_info(&rxe->ib_dev.dev, "set down\n");
}
static int rxe_notify(struct notifier_block *not_blk,
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 4555510d86c4..bdea899a58ac 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -90,7 +90,6 @@ enum rxe_device_param {
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
RXE_MAX_EE_INIT_RD_ATOM = 0,
- RXE_ATOMIC_CAP = 1,
RXE_MAX_EE = 0,
RXE_MAX_RDD = 0,
RXE_MAX_MW = 0,
@@ -139,9 +138,6 @@ enum rxe_device_param {
/* default/initial rxe port parameters */
enum rxe_port_param {
- RXE_PORT_STATE = IB_PORT_DOWN,
- RXE_PORT_MAX_MTU = IB_MTU_4096,
- RXE_PORT_ACTIVE_MTU = IB_MTU_256,
RXE_PORT_GID_TBL_LEN = 1024,
RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index b4a8acc7bb7d..36b53fb94a49 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -207,7 +207,7 @@ int rxe_pool_init(
kref_init(&pool->ref_cnt);
- spin_lock_init(&pool->pool_lock);
+ rwlock_init(&pool->pool_lock);
if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
err = rxe_pool_init_index(pool,
@@ -222,7 +222,7 @@ int rxe_pool_init(
pool->key_size = rxe_type_info[type].key_size;
}
- pool->state = rxe_pool_valid;
+ pool->state = RXE_POOL_STATE_VALID;
out:
return err;
@@ -232,7 +232,7 @@ static void rxe_pool_release(struct kref *kref)
{
struct rxe_pool *pool = container_of(kref, struct rxe_pool, ref_cnt);
- pool->state = rxe_pool_invalid;
+ pool->state = RXE_POOL_STATE_INVALID;
kfree(pool->table);
}
@@ -245,12 +245,12 @@ int rxe_pool_cleanup(struct rxe_pool *pool)
{
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
- pool->state = rxe_pool_invalid;
+ write_lock_irqsave(&pool->pool_lock, flags);
+ pool->state = RXE_POOL_STATE_INVALID;
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
rxe_pool_put(pool);
@@ -336,10 +336,10 @@ void rxe_add_key(void *arg, void *key)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
memcpy((u8 *)elem + pool->key_offset, key, pool->key_size);
insert_key(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_key(void *arg)
@@ -348,9 +348,9 @@ void rxe_drop_key(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_add_index(void *arg)
@@ -359,10 +359,10 @@ void rxe_add_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
elem->index = alloc_index(pool);
insert_index(pool, elem);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void rxe_drop_index(void *arg)
@@ -371,10 +371,10 @@ void rxe_drop_index(void *arg)
struct rxe_pool *pool = elem->pool;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_irqsave(&pool->pool_lock, flags);
clear_bit(elem->index - pool->min_index, pool->table);
rb_erase(&elem->node, &pool->tree);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_irqrestore(&pool->pool_lock, flags);
}
void *rxe_alloc(struct rxe_pool *pool)
@@ -384,13 +384,13 @@ void *rxe_alloc(struct rxe_pool *pool)
might_sleep_if(!(pool->flags & RXE_POOL_ATOMIC));
- spin_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid) {
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
+ if (pool->state != RXE_POOL_STATE_VALID) {
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return NULL;
}
kref_get(&pool->ref_cnt);
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
kref_get(&pool->rxe->ref_cnt);
@@ -436,9 +436,9 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
struct rxe_pool_entry *elem = NULL;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -450,15 +450,14 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
node = node->rb_left;
else if (elem->index < index)
node = node->rb_right;
- else
+ else {
+ kref_get(&elem->ref_cnt);
break;
+ }
}
- if (node)
- kref_get(&elem->ref_cnt);
-
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
@@ -469,9 +468,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
int cmp;
unsigned long flags;
- spin_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_irqsave(&pool->pool_lock, flags);
- if (pool->state != rxe_pool_valid)
+ if (pool->state != RXE_POOL_STATE_VALID)
goto out;
node = pool->tree.rb_node;
@@ -494,6 +493,6 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
kref_get(&elem->ref_cnt);
out:
- spin_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_irqrestore(&pool->pool_lock, flags);
return node ? elem : NULL;
}
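
The conversion above is the classic reader/writer split: lookups that only
walk the tree take the lock shared, while add/remove paths take it
exclusively. A minimal pthread analogue of the pattern (illustrative only,
not kernel code):

	/* Userspace analogue of the pool_lock rwlock conversion. */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t pool_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int pool_state = 1;	/* 1 == VALID, 0 == INVALID */

	static int pool_lookup(void)
	{
		int ok;

		pthread_rwlock_rdlock(&pool_lock);	/* lookups run in parallel */
		ok = (pool_state == 1);
		pthread_rwlock_unlock(&pool_lock);
		return ok;
	}

	static void pool_invalidate(void)
	{
		pthread_rwlock_wrlock(&pool_lock);	/* mutation is exclusive */
		pool_state = 0;
		pthread_rwlock_unlock(&pool_lock);
	}

	int main(void)
	{
		printf("valid=%d\n", pool_lookup());
		pool_invalidate();
		printf("valid=%d\n", pool_lookup());
		return 0;
	}
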
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 47df28e43acf..aa4ba307097b 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -74,8 +74,8 @@ struct rxe_type_info {
extern struct rxe_type_info rxe_type_info[];
enum rxe_pool_state {
- rxe_pool_invalid,
- rxe_pool_valid,
+ RXE_POOL_STATE_INVALID,
+ RXE_POOL_STATE_VALID,
};
struct rxe_pool_entry {
@@ -90,7 +90,7 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
- spinlock_t pool_lock; /* pool spinlock */
+ rwlock_t pool_lock; /* protects pool add/del/search */
size_t elem_size;
struct kref ref_cnt;
void (*cleanup)(struct rxe_pool_entry *obj);
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index c58452daffc7..b9710907dac2 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -34,6 +34,7 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
@@ -227,6 +228,16 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
qp->sk->sk->sk_user_data = qp;
+ /* pick a source UDP port number for this QP based on
+ * the source QPN. this spreads traffic for different QPs
+ * across different NIC RX queues (while using a single
+ * flow for a given QP to maintain packet order).
+ * the port number must be in the Dynamic Ports range
+ * (0xc000 - 0xffff).
+ */
+ qp->src_port = RXE_ROCE_V2_SPORT +
+ (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+
qp->sq.max_wr = init->cap.max_send_wr;
qp->sq.max_sge = init->cap.max_send_sge;
qp->sq.max_inline = init->cap.max_inline_data;
@@ -247,7 +258,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
&qp->sq.queue->ip);
if (err) {
- kvfree(qp->sq.queue->buf);
+ vfree(qp->sq.queue->buf);
kfree(qp->sq.queue);
return err;
}
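
The effect of the source-port hashing above can be seen in a small sketch.
hash32() below is a hypothetical stand-in for the kernel's hash_32() (same
golden-ratio multiply), and 0xc000 is RXE_ROCE_V2_SPORT, the bottom of the
dynamic port range:

	/* Sketch of the per-QP UDP source port selection. */
	#include <stdio.h>
	#include <stdint.h>

	#define RXE_ROCE_V2_SPORT 0xc000u

	static uint32_t hash32(uint32_t val, unsigned int bits)
	{
		return (val * 0x61C88647u) >> (32 - bits);
	}

	int main(void)
	{
		uint32_t qpn;

		/* every result lands in 0xc000..0xffff */
		for (qpn = 17; qpn < 20; qpn++)
			printf("qpn %u -> src port %#x\n", qpn,
			       RXE_ROCE_V2_SPORT + (hash32(qpn, 14) & 0x3fff));
		return 0;
	}
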
@@ -300,7 +311,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- kvfree(qp->rq.queue->buf);
+ vfree(qp->rq.queue->buf);
kfree(qp->rq.queue);
return err;
}
@@ -408,8 +419,7 @@ int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
enum ib_qp_state new_state = (mask & IB_QP_STATE) ?
attr->qp_state : cur_state;
- if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask,
- IB_LINK_LAYER_ETHERNET)) {
+ if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask)) {
pr_warn("invalid mask or state for qp\n");
goto err1;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index d30dbac24583..5c29a1bb575a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -122,7 +122,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
set_bad_pkey_cntr(port);
goto err1;
}
- } else if (qpn != 0) {
+ } else {
if (unlikely(!pkey_match(pkey,
port->pkey_tbl[qp->attr.pkey_index]
))) {
@@ -134,7 +134,7 @@ static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
}
if ((qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) &&
- qpn != 0 && pkt->mask) {
+ pkt->mask) {
u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;
if (unlikely(deth_qkey(pkt) != qkey)) {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 8be27238a86e..6c361d70d7cd 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -73,9 +73,6 @@ static void req_retry(struct rxe_qp *qp)
int npsn;
int first = 1;
- wqe = queue_head(qp->sq.queue);
- npsn = (qp->comp.psn - wqe->first_psn) & BTH_PSN_MASK;
-
qp->req.wqe_index = consumer_index(qp->sq.queue);
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
@@ -107,11 +104,17 @@ static void req_retry(struct rxe_qp *qp)
if (first) {
first = 0;
- if (mask & WR_WRITE_OR_SEND_MASK)
+ if (mask & WR_WRITE_OR_SEND_MASK) {
+ npsn = (qp->comp.psn - wqe->first_psn) &
+ BTH_PSN_MASK;
retry_first_write_send(qp, wqe, mask, npsn);
+ }
- if (mask & WR_READ_MASK)
+ if (mask & WR_READ_MASK) {
+ npsn = (wqe->dma.length - wqe->dma.resid) /
+ qp->mtu;
wqe->iova += npsn * qp->mtu;
+ }
}
wqe->state = wqe_state_posted;
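
For a retried read, the npsn computed above is simply the number of
already-completed MTU-sized chunks, and the iova advances by that many
bytes. A worked example with assumed numbers:

	/* Sketch of the per-WQE read-retry arithmetic in req_retry(). */
	#include <stdio.h>

	int main(void)
	{
		unsigned int dma_length = 16384;  /* total read length */
		unsigned int dma_resid = 4096;    /* bytes still outstanding */
		unsigned int mtu = 4096;          /* path MTU */
		unsigned int npsn = (dma_length - dma_resid) / mtu;
		unsigned long long iova = 0x10000;

		iova += (unsigned long long)npsn * mtu;
		/* 12288 bytes done -> npsn 3, restart iova 0x13000 */
		printf("npsn=%u iova=%#llx\n", npsn, iova);
		return 0;
	}
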
@@ -435,7 +438,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (pkt->mask & RXE_RETH_MASK) {
reth_set_rkey(pkt, ibwr->wr.rdma.rkey);
reth_set_va(pkt, wqe->iova);
- reth_set_len(pkt, wqe->dma.length);
+ reth_set_len(pkt, wqe->dma.resid);
}
if (pkt->mask & RXE_IMMDT_MASK)
@@ -476,7 +479,7 @@ static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
u32 *p;
int err;
- err = rxe_prepare(rxe, pkt, skb, &crc);
+ err = rxe_prepare(pkt, skb, &crc);
if (err)
return err;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index aa5833318372..c962160292f4 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -637,7 +637,7 @@ static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
if (ack->mask & RXE_ATMACK_MASK)
atmack_set_orig(ack, qp->resp.atomic_orig);
- err = rxe_prepare(rxe, ack, skb, &crc);
+ err = rxe_prepare(ack, skb, &crc);
if (err) {
kfree_skb(skb);
return NULL;
@@ -682,6 +682,7 @@ static enum resp_states read_reply(struct rxe_qp *qp,
rxe_advance_resp_resource(qp);
res->type = RXE_READ_MASK;
+ res->replay = 0;
res->read.va = qp->resp.va;
res->read.va_org = qp->resp.va;
@@ -752,7 +753,8 @@ static enum resp_states read_reply(struct rxe_qp *qp,
state = RESPST_DONE;
} else {
qp->resp.res = NULL;
- qp->resp.opcode = -1;
+ if (!res->replay)
+ qp->resp.opcode = -1;
if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
qp->resp.psn = res->cur_psn;
state = RESPST_CLEANUP;
@@ -814,6 +816,7 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
/* next expected psn, read handles this separately */
qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
+ qp->resp.ack_psn = qp->resp.psn;
qp->resp.opcode = pkt->opcode;
qp->resp.status = IB_WC_SUCCESS;
@@ -1065,7 +1068,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
struct rxe_pkt_info *pkt)
{
enum resp_states rc;
- u32 prev_psn = (qp->resp.psn - 1) & BTH_PSN_MASK;
+ u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;
if (pkt->mask & RXE_SEND_MASK ||
pkt->mask & RXE_WRITE_MASK) {
@@ -1108,6 +1111,7 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
res->state = (pkt->psn == res->first_psn) ?
rdatm_res_state_new :
rdatm_res_state_replay;
+ res->replay = 1;
/* Reset the resource, except length. */
res->read.va_org = iova;
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 0d6c04ba7fc3..c41a5fee81f7 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/vmalloc.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
@@ -129,13 +130,18 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context, q->buf,
q->buf_size, &q->ip);
- if (err)
+ if (err) {
+ vfree(q->buf);
+ kfree(q);
return err;
+ }
if (uresp) {
if (copy_to_user(&uresp->srq_num, &srq->srq_num,
- sizeof(uresp->srq_num)))
+ sizeof(uresp->srq_num))) {
+ rxe_queue_cleanup(q);
return -EFAULT;
+ }
}
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
index d5ed7571128f..73a19f808e1b 100644
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ b/drivers/infiniband/sw/rxe/rxe_sysfs.c
@@ -105,7 +105,7 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
}
rxe_set_port_state(ndev);
- pr_info("added %s to %s\n", rxe->ib_dev.name, intf);
+ dev_info(&rxe->ib_dev.dev, "added %s\n", intf);
err:
if (ndev)
dev_put(ndev);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index f5b1e0ad6142..9c19f2027511 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -1148,18 +1148,21 @@ static ssize_t parent_show(struct device *device,
static DEVICE_ATTR_RO(parent);
-static struct device_attribute *rxe_dev_attributes[] = {
- &dev_attr_parent,
+static struct attribute *rxe_dev_attributes[] = {
+ &dev_attr_parent.attr,
+ NULL
+};
+
+static const struct attribute_group rxe_attr_group = {
+ .attrs = rxe_dev_attributes,
};
int rxe_register_device(struct rxe_dev *rxe)
{
int err;
- int i;
struct ib_device *dev = &rxe->ib_dev;
struct crypto_shash *tfm;
- strlcpy(dev->name, "rxe%d", IB_DEVICE_NAME_MAX);
strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));
dev->owner = THIS_MODULE;
@@ -1260,26 +1263,16 @@ int rxe_register_device(struct rxe_dev *rxe)
}
rxe->tfm = tfm;
+ rdma_set_device_sysfs_group(dev, &rxe_attr_group);
dev->driver_id = RDMA_DRIVER_RXE;
- err = ib_register_device(dev, NULL);
+ err = ib_register_device(dev, "rxe%d", NULL);
if (err) {
pr_warn("%s failed with error %d\n", __func__, err);
goto err1;
}
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i) {
- err = device_create_file(&dev->dev, rxe_dev_attributes[i]);
- if (err) {
- pr_warn("%s failed with error %d for attr number %d\n",
- __func__, err, i);
- goto err2;
- }
- }
-
return 0;
-err2:
- ib_unregister_device(dev);
err1:
crypto_free_shash(rxe->tfm);
@@ -1288,12 +1281,8 @@ err1:
int rxe_unregister_device(struct rxe_dev *rxe)
{
- int i;
struct ib_device *dev = &rxe->ib_dev;
- for (i = 0; i < ARRAY_SIZE(rxe_dev_attributes); ++i)
- device_remove_file(&dev->dev, rxe_dev_attributes[i]);
-
ib_unregister_device(dev);
return 0;
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index af1470d29391..82e670d6eeea 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -158,6 +158,7 @@ struct rxe_comp_info {
int opcode;
int timeout;
int timeout_retry;
+ int started_retry;
u32 retry_cnt;
u32 rnr_retry;
struct rxe_task task;
@@ -171,6 +172,7 @@ enum rdatm_res_state {
struct resp_res {
int type;
+ int replay;
u32 first_psn;
u32 last_psn;
u32 cur_psn;
@@ -195,6 +197,7 @@ struct rxe_resp_info {
enum rxe_qp_state state;
u32 msn;
u32 psn;
+ u32 ack_psn;
int opcode;
int drop_msg;
int goto_error;
@@ -248,6 +251,7 @@ struct rxe_qp {
struct socket *sk;
u32 dst_cookie;
+ u16 src_port;
struct rxe_av pri_av;
struct rxe_av alt_av;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3d5424f335cb..0428e01e8f69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1438,11 +1438,15 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
- if (skb->protocol == htons(ETH_P_IP))
+ if (skb->protocol == htons(ETH_P_IP)) {
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
+ }
#if IS_ENABLED(CONFIG_IPV6)
- else if (skb->protocol == htons(ETH_P_IPV6))
+ else if (skb->protocol == htons(ETH_P_IPV6)) {
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ }
#endif
dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8baa75a705c5..8710214594d8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -243,7 +243,8 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
- if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
+ if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
+ new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
@@ -1880,6 +1881,8 @@ static int ipoib_parent_init(struct net_device *ndev)
sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
+ priv->dev->dev_port = priv->port - 1;
+ /* Let's set this one too for backwards compatibility. */
priv->dev->dev_id = priv->port - 1;
return 0;
@@ -2385,6 +2388,35 @@ int ipoib_add_pkey_attr(struct net_device *dev)
return device_create_file(&dev->dev, &dev_attr_pkey);
}
+/*
+ * We erroneously exposed the iface's port number in the dev_id
+ * sysfs field long after dev_port was introduced for that purpose[1],
+ * and we need to stop everyone from relying on that.
+ * Let's overload the show routine for the dev_id file here
+ * to gently bring the issue up.
+ *
+ * [1] https://www.spinics.net/lists/netdev/msg272123.html
+ */
+static ssize_t dev_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *ndev = to_net_dev(dev);
+
+ if (ndev->dev_id == ndev->dev_port)
+ netdev_info_once(ndev,
+ "\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
+ current->comm);
+
+ return sprintf(buf, "%#x\n", ndev->dev_id);
+}
+static DEVICE_ATTR_RO(dev_id);
+
+int ipoib_intercept_dev_id_attr(struct net_device *dev)
+{
+ device_remove_file(&dev->dev, &dev_attr_dev_id);
+ return device_create_file(&dev->dev, &dev_attr_dev_id);
+}
+
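
For userspace the takeaway from the note above is to read dev_port rather
than dev_id. A tiny sketch, assuming an interface named "ib0" (the name and
path are illustrative):

	/* Read the preferred dev_port attribute instead of dev_id. */
	#include <stdio.h>

	int main(void)
	{
		char buf[32];
		FILE *f = fopen("/sys/class/net/ib0/dev_port", "r");

		if (!f) {
			perror("dev_port");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("dev_port: %s", buf);
		fclose(f);
		return 0;
	}
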
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u8 port)
{
@@ -2437,6 +2469,8 @@ static struct net_device *ipoib_add_port(const char *format,
*/
ndev->priv_destructor = ipoib_intf_free;
+ if (ipoib_intercept_dev_id_attr(ndev))
+ goto sysfs_failed;
if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(ndev))
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9f36ca786df8..1e88213459f2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -277,7 +277,7 @@ void ipoib_event(struct ib_event_handler *handler,
return;
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
- record->device->name, record->element.port_num);
+ dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_SM_CHANGE ||
record->event == IB_EVENT_CLIENT_REREGISTER) {
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 2f6388596f88..96af06cfe0af 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -589,13 +589,19 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
ib_conn->post_recv_buf_count--;
}
-static inline void
+static inline int
iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
- if (likely(rkey == desc->rsc.mr->rkey))
+ if (likely(rkey == desc->rsc.mr->rkey)) {
desc->rsc.mr_valid = 0;
- else if (likely(rkey == desc->pi_ctx->sig_mr->rkey))
+ } else if (likely(desc->pi_ctx && rkey == desc->pi_ctx->sig_mr->rkey)) {
desc->pi_ctx->sig_mr_valid = 0;
+ } else {
+ iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
+ return -EINVAL;
+ }
+
+ return 0;
}
static int
@@ -623,12 +629,14 @@ iser_check_remote_inv(struct iser_conn *iser_conn,
if (iser_task->dir[ISER_DIR_IN]) {
desc = iser_task->rdma_reg[ISER_DIR_IN].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
desc = iser_task->rdma_reg[ISER_DIR_OUT].mem_h;
- iser_inv_desc(desc, rkey);
+ if (unlikely(iser_inv_desc(desc, rkey)))
+ return -EINVAL;
}
} else {
iser_err("failed to get task for itt=%d\n", hdr->itt);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b686a4aaffe8..946b623ba5eb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -55,7 +55,7 @@ static void iser_event_handler(struct ib_event_handler *handler,
{
iser_err("async event %s (%d) on device %s port %d\n",
ib_event_msg(event->event), event->event,
- event->device->name, event->element.port_num);
+ dev_name(&event->device->dev), event->element.port_num);
}
/**
@@ -85,7 +85,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
- device->comps_used, ib_dev->name,
+ device->comps_used, dev_name(&ib_dev->dev),
ib_dev->num_comp_vectors, max_cqe);
device->pd = ib_alloc_pd(ib_dev,
@@ -468,7 +468,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
iser_conn->max_cmds =
ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
iser_dbg("device %s supports max_send_wr %d\n",
- device->ib_device->name, ib_dev->attrs.max_qp_wr);
+ dev_name(&device->ib_device->dev),
+ ib_dev->attrs.max_qp_wr);
}
}
@@ -764,7 +765,7 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
IB_DEVICE_SIGNATURE_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
- ib_conn->device->ib_device->name);
+ dev_name(&ib_conn->device->ib_device->dev));
ib_conn->pi_support = false;
} else {
ib_conn->pi_support = true;
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index f39670c5c25c..e3dd13798d79 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -262,7 +262,7 @@ isert_alloc_comps(struct isert_device *device)
isert_info("Using %d CQs, %s supports %d vectors support "
"pi_capable %d\n",
- device->comps_used, device->ib_device->name,
+ device->comps_used, dev_name(&device->ib_device->dev),
device->ib_device->num_comp_vectors,
device->pi_capable);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
index 267da8215e08..31cd361416ac 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c
@@ -351,7 +351,8 @@ static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
if (unlikely(!dlid))
v_warn("Null dlid in MAC address\n");
} else if (def_port != OPA_VNIC_INVALID_PORT) {
- dlid = info->vesw.u_ucast_dlid[def_port];
+ if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
+ dlid = info->vesw.u_ucast_dlid[def_port];
}
}
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 15711dcc6f58..d119d9afa845 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -888,7 +888,8 @@ static void opa_vnic_event(struct ib_event_handler *handler,
return;
c_dbg("OPA_VNIC received event %d on device %s port %d\n",
- record->event, record->device->name, record->element.port_num);
+ record->event, dev_name(&record->device->dev),
+ record->element.port_num);
if (record->event == IB_EVENT_PORT_ERR)
idr_for_each(&port->vport_idr, vema_disable_vport, NULL);
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0b34e909505f..eed0eb3bb04c 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -1330,17 +1330,8 @@ static void srp_terminate_io(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
- struct Scsi_Host *shost = target->scsi_host;
- struct scsi_device *sdev;
int i, j;
- /*
- * Invoking srp_terminate_io() while srp_queuecommand() is running
- * is not safe. Hence the warning statement below.
- */
- shost_for_each_device(sdev, shost)
- WARN_ON_ONCE(sdev->request_queue->request_fn_active);
-
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
@@ -3124,7 +3115,8 @@ static ssize_t show_local_ib_device(struct device *dev,
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
- return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n",
+ dev_name(&target->srp_host->srp_dev->dev->dev));
}
static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
@@ -3987,7 +3979,7 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
- return sprintf(buf, "%s\n", host->srp_dev->dev->name);
+ return sprintf(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
@@ -4019,7 +4011,8 @@ static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
- dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
+ dev_set_name(&host->dev, "srp-%s-%d", dev_name(&device->dev->dev),
+ port);
if (device_register(&host->dev))
goto free_host;
@@ -4095,7 +4088,7 @@ static void srp_add_one(struct ib_device *device)
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
- device->name, mr_page_shift, attr->max_mr_size,
+ dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index f37cbad022a2..2357aa727dcf 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -148,7 +148,7 @@ static void srpt_event_handler(struct ib_event_handler *handler,
return;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
- sdev->device->name);
+ dev_name(&sdev->device->dev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
@@ -1941,7 +1941,8 @@ static void __srpt_close_all_ch(struct srpt_port *sport)
if (srpt_disconnect_ch(ch) >= 0)
pr_info("Closing channel %s because target %s_%d has been disabled\n",
ch->sess_name,
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev),
+ sport->port);
srpt_close_ch(ch);
}
}
@@ -2127,7 +2128,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
if (!sport->enabled) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
- sport->sdev->device->name, port_num);
+ dev_name(&sport->sdev->device->dev), port_num);
goto reject;
}
@@ -2267,7 +2268,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
- sdev->device->name, port_num);
+ dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
goto reject;
}
@@ -2708,7 +2709,7 @@ static void srpt_queue_response(struct se_cmd *cmd)
break;
}
- if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
+ if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
return;
/* For read commands, transfer the data to the initiator. */
@@ -2842,7 +2843,7 @@ static int srpt_release_sport(struct srpt_port *sport)
while (wait_event_timeout(sport->ch_releaseQ,
srpt_ch_list_empty(sport), 5 * HZ) <= 0) {
pr_info("%s_%d: waiting for session unregistration ...\n",
- sport->sdev->device->name, sport->port);
+ dev_name(&sport->sdev->device->dev), sport->port);
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
@@ -2932,7 +2933,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
}
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
- sdev->device->attrs.max_srq_wr, device->name);
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
sdev->ioctx_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
@@ -2965,8 +2966,8 @@ static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
} else if (use_srq && !sdev->srq) {
ret = srpt_alloc_srq(sdev);
}
- pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__, device->name,
- sdev->use_srq, ret);
+ pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
+ dev_name(&device->dev), sdev->use_srq, ret);
return ret;
}
@@ -3052,7 +3053,7 @@ static void srpt_add_one(struct ib_device *device)
if (srpt_refresh_port(sport)) {
pr_err("MAD registration failed for %s-%d.\n",
- sdev->device->name, i);
+ dev_name(&sdev->device->dev), i);
goto err_event;
}
}
@@ -3063,7 +3064,7 @@ static void srpt_add_one(struct ib_device *device)
out:
ib_set_client_data(device, &srpt_client, sdev);
- pr_debug("added %s.\n", device->name);
+ pr_debug("added %s.\n", dev_name(&device->dev));
return;
err_event:
@@ -3078,7 +3079,7 @@ free_dev:
kfree(sdev);
err:
sdev = NULL;
- pr_info("%s(%s) failed.\n", __func__, device->name);
+ pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
goto out;
}
@@ -3093,7 +3094,8 @@ static void srpt_remove_one(struct ib_device *device, void *client_data)
int i;
if (!sdev) {
- pr_info("%s(%s): nothing to do.\n", __func__, device->name);
+ pr_info("%s(%s): nothing to do.\n", __func__,
+ dev_name(&device->dev));
return;
}
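One incidental cleanup in srpt_queue_response(): WARN_ON_ONCE() already evaluates its condition under unlikely(), so wrapping it again is redundant and the outer annotation can simply be dropped:

    /* The two forms compile to the same branch layout: */
    if (unlikely(WARN_ON_ONCE(cond)))    /* redundant hint */
        return;
    if (WARN_ON_ONCE(cond))              /* equivalent, and cleaner */
        return;
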
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index b86c1e5fbc11..9e8684ab48f4 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -27,6 +27,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/sort.h>
+#include <linux/pm_wakeirq.h>
#include <linux/mfd/ti_am335x_tscadc.h>
@@ -46,6 +47,7 @@ static const int config_pins[] = {
struct titsc {
struct input_dev *input;
struct ti_tscadc_dev *mfd_tscadc;
+ struct device *dev;
unsigned int irq;
unsigned int wires;
unsigned int x_plate_resistance;
@@ -276,7 +278,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
if (status & IRQENB_HW_PEN) {
ts_dev->pen_down = true;
irqclr |= IRQENB_HW_PEN;
- pm_stay_awake(ts_dev->mfd_tscadc->dev);
+ pm_stay_awake(ts_dev->dev);
}
if (status & IRQENB_PENUP) {
@@ -286,7 +288,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
input_report_key(input_dev, BTN_TOUCH, 0);
input_report_abs(input_dev, ABS_PRESSURE, 0);
input_sync(input_dev);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(ts_dev->dev);
} else {
ts_dev->pen_down = true;
}
@@ -422,6 +424,7 @@ static int titsc_probe(struct platform_device *pdev)
ts_dev->mfd_tscadc = tscadc_dev;
ts_dev->input = input_dev;
ts_dev->irq = tscadc_dev->irq;
+ ts_dev->dev = &pdev->dev;
err = titsc_parse_dt(pdev, ts_dev);
if (err) {
@@ -436,6 +439,11 @@ static int titsc_probe(struct platform_device *pdev)
goto err_free_mem;
}
+ device_init_wakeup(&pdev->dev, true);
+ err = dev_pm_set_wake_irq(&pdev->dev, ts_dev->irq);
+ if (err)
+ dev_err(&pdev->dev, "irq wake enable failed.\n");
+
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_FIFO0THRES);
titsc_writel(ts_dev, REG_IRQENABLE, IRQENB_EOS);
@@ -467,6 +475,8 @@ static int titsc_probe(struct platform_device *pdev)
return 0;
err_free_irq:
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
err_free_mem:
input_free_device(input_dev);
@@ -479,6 +489,8 @@ static int titsc_remove(struct platform_device *pdev)
struct titsc *ts_dev = platform_get_drvdata(pdev);
u32 steps;
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
free_irq(ts_dev->irq, ts_dev);
/* total steps followed by the enable mask */
@@ -499,7 +511,7 @@ static int __maybe_unused titsc_suspend(struct device *dev)
unsigned int idle;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQSTATUS, TSC_IRQENB_MASK);
idle = titsc_readl(ts_dev, REG_IRQENABLE);
titsc_writel(ts_dev, REG_IRQENABLE,
@@ -515,11 +527,11 @@ static int __maybe_unused titsc_resume(struct device *dev)
struct ti_tscadc_dev *tscadc_dev;
tscadc_dev = ti_tscadc_dev_get(to_platform_device(dev));
- if (device_may_wakeup(tscadc_dev->dev)) {
+ if (device_may_wakeup(dev)) {
titsc_writel(ts_dev, REG_IRQWAKEUP,
0x00);
titsc_writel(ts_dev, REG_IRQCLR, IRQENB_HW_PEN);
- pm_relax(ts_dev->mfd_tscadc->dev);
+ pm_relax(dev);
}
titsc_step_config(ts_dev);
titsc_writel(ts_dev, REG_FIFO0THR,
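The touchscreen conversion moves wakeup accounting onto the platform device itself and registers the interrupt as a dedicated wake IRQ, which lets the PM core arm and disarm it automatically across suspend. The probe-time pattern, reduced to a sketch (error unwinding abbreviated; note the matching dev_pm_clear_wake_irq()/device_init_wakeup(dev, false) in the teardown paths above):

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/pm_wakeirq.h>

    /* Sketch: mark the device wakeup-capable and attach its wake IRQ. */
    static int example_wakeup_setup(struct platform_device *pdev, int irq)
    {
        int err;

        device_init_wakeup(&pdev->dev, true);
        err = dev_pm_set_wake_irq(&pdev->dev, irq);
        if (err)
            dev_err(&pdev->dev, "irq wake enable failed\n");
        return err;
    }
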
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c60395b7470f..d9a25715650e 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -186,6 +186,19 @@ config INTEL_IOMMU
and include PCI device scope covered by these DMA
remapping devices.
+config INTEL_IOMMU_DEBUGFS
+ bool "Export Intel IOMMU internals in Debugfs"
+ depends on INTEL_IOMMU && IOMMU_DEBUGFS
+ help
+ !!!WARNING!!!
+
+ DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!!
+
+ Expose Intel IOMMU internals in Debugfs.
+
+ This option is -NOT- intended for production environments, and should
+ only be enabled when debugging the Intel IOMMU.
+
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
depends on INTEL_IOMMU && X86
@@ -372,6 +385,14 @@ config S390_CCW_IOMMU
Enables bits of IOMMU API required by VFIO. The iommu_ops
is not implemented as it is not necessary for VFIO.
+config S390_AP_IOMMU
+ bool "S390 AP IOMMU Support"
+ depends on S390 && ZCRYPT
+ select IOMMU_API
+ help
+ Enables bits of IOMMU API required by VFIO. The iommu_ops
+ is not implemented as it is not necessary for VFIO.
+
config MTK_IOMMU
bool "MTK IOMMU Support"
depends on ARM || ARM64
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index ab5eba6edf82..a158a68c8ea8 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
+obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel-iommu-debugfs.o
obj-$(CONFIG_INTEL_IOMMU_SVM) += intel-svm.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bee0dfb7b93b..1167ff0416cf 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3083,6 +3083,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
+ default:
+ break;
}
return false;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3931c7de7c69..bb2cd29e1658 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1719,7 +1719,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 5059d09f3202..6947ccf26512 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1,18 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IOMMU API for ARM architected SMMUv3 implementations.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
* Copyright (C) 2015 ARM Limited
*
* Author: Will Deacon <will.deacon@arm.com>
@@ -567,7 +556,8 @@ struct arm_smmu_device {
int gerr_irq;
int combined_irq;
- atomic_t sync_nr;
+ u32 sync_nr;
+ u8 prev_cmd_opcode;
unsigned long ias; /* IPA */
unsigned long oas; /* PA */
@@ -611,6 +601,7 @@ struct arm_smmu_domain {
struct mutex init_mutex; /* Protects smmu pointer */
struct io_pgtable_ops *pgtbl_ops;
+ bool non_strict;
enum arm_smmu_domain_stage stage;
union {
@@ -708,7 +699,7 @@ static void queue_inc_prod(struct arm_smmu_queue *q)
}
/*
- * Wait for the SMMU to consume items. If drain is true, wait until the queue
+ * Wait for the SMMU to consume items. If sync is true, wait until the queue
* is empty. Otherwise, wait until there is at least one free slot.
*/
static int queue_poll_cons(struct arm_smmu_queue *q, bool sync, bool wfe)
@@ -901,6 +892,8 @@ static void arm_smmu_cmdq_insert_cmd(struct arm_smmu_device *smmu, u64 *cmd)
struct arm_smmu_queue *q = &smmu->cmdq.q;
bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
+ smmu->prev_cmd_opcode = FIELD_GET(CMDQ_0_OP, cmd[0]);
+
while (queue_insert_raw(q, cmd) == -ENOSPC) {
if (queue_poll_cons(q, false, wfe))
dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
@@ -948,15 +941,21 @@ static int __arm_smmu_cmdq_issue_sync_msi(struct arm_smmu_device *smmu)
struct arm_smmu_cmdq_ent ent = {
.opcode = CMDQ_OP_CMD_SYNC,
.sync = {
- .msidata = atomic_inc_return_relaxed(&smmu->sync_nr),
.msiaddr = virt_to_phys(&smmu->sync_count),
},
};
- arm_smmu_cmdq_build_cmd(cmd, &ent);
-
spin_lock_irqsave(&smmu->cmdq.lock, flags);
- arm_smmu_cmdq_insert_cmd(smmu, cmd);
+
+ /* Piggy-back on the previous command if it's a SYNC */
+ if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
+ ent.sync.msidata = smmu->sync_nr;
+ } else {
+ ent.sync.msidata = ++smmu->sync_nr;
+ arm_smmu_cmdq_build_cmd(cmd, &ent);
+ arm_smmu_cmdq_insert_cmd(smmu, cmd);
+ }
+
spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
return __arm_smmu_sync_poll_msi(smmu, ent.sync.msidata);
@@ -1372,15 +1371,11 @@ static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev)
}
/* IO_PGTABLE API */
-static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
-{
- arm_smmu_cmdq_issue_sync(smmu);
-}
-
static void arm_smmu_tlb_sync(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
- __arm_smmu_tlb_sync(smmu_domain->smmu);
+
+ arm_smmu_cmdq_issue_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
@@ -1398,8 +1393,14 @@ static void arm_smmu_tlb_inv_context(void *cookie)
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+ /*
+ * NOTE: when io-pgtable is in non-strict mode, we may get here with
+ * PTEs previously cleared by unmaps on the current CPU not yet visible
+ * to the SMMU. We are relying on the DSB implicit in queue_inc_prod()
+ * to guarantee those are observed before the TLBI. Do be careful, 007.
+ */
arm_smmu_cmdq_issue_cmd(smmu, &cmd);
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -1624,6 +1625,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
if (smmu->features & ARM_SMMU_FEAT_COHERENCY)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops)
return -ENOMEM;
@@ -1772,12 +1776,20 @@ arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->smmu)
+ arm_smmu_tlb_inv_context(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
if (smmu)
- __arm_smmu_tlb_sync(smmu);
+ arm_smmu_cmdq_issue_sync(smmu);
}
static phys_addr_t
@@ -1917,15 +1929,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1935,26 +1959,37 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
out_unlock:
@@ -1999,7 +2034,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2180,7 +2215,6 @@ static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
{
int ret;
- atomic_set(&smmu->sync_nr, 0);
ret = arm_smmu_init_queues(smmu);
if (ret)
return ret;
@@ -2353,8 +2387,8 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
irq = smmu->combined_irq;
if (irq) {
/*
- * Cavium ThunderX2 implementation doesn't not support unique
- * irq lines. Use single irq line for all the SMMUv3 interrupts.
+ * Cavium ThunderX2 implementation doesn't support unique irq
+ * lines. Use a single irq line for all the SMMUv3 interrupts.
*/
ret = devm_request_threaded_irq(smmu->dev, irq,
arm_smmu_combined_irq_handler,
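Beyond the non-strict plumbing, the SMMUv3 diff reworks CMD_SYNC accounting: sync_nr no longer needs to be atomic because it is only ever touched under cmdq.lock, and when the previously queued command was itself a CMD_SYNC, no new command is inserted at all; the caller just polls for the MSI data of the sync already in flight. The coalescing logic in sketch form (everything under the command-queue lock):

    spin_lock_irqsave(&smmu->cmdq.lock, flags);
    if (smmu->prev_cmd_opcode == CMDQ_OP_CMD_SYNC) {
        /* Piggy-back on the in-flight sync: reuse its MSI data. */
        ent.sync.msidata = smmu->sync_nr;
    } else {
        ent.sync.msidata = ++smmu->sync_nr;
        arm_smmu_cmdq_build_cmd(cmd, &ent);
        arm_smmu_cmdq_insert_cmd(smmu, cmd);
    }
    spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
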
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index fd1b80ef9490..5a28ae892504 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -52,6 +52,7 @@
#include <linux/spinlock.h>
#include <linux/amba/bus.h>
+#include <linux/fsl/mc.h>
#include "io-pgtable.h"
#include "arm-smmu-regs.h"
@@ -246,6 +247,7 @@ struct arm_smmu_domain {
const struct iommu_gather_ops *tlb_ops;
struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
+ bool non_strict;
struct mutex init_mutex; /* Protects smmu pointer */
spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */
struct iommu_domain domain;
@@ -447,7 +449,11 @@ static void arm_smmu_tlb_inv_context_s1(void *cookie)
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ /*
+ * NOTE: this is not a relaxed write; it needs to guarantee that PTEs
+ * cleared by the current CPU are visible to the SMMU before the TLBI.
+ */
+ writel(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
arm_smmu_tlb_sync_context(cookie);
}
@@ -457,7 +463,8 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
struct arm_smmu_device *smmu = smmu_domain->smmu;
void __iomem *base = ARM_SMMU_GR0(smmu);
- writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
+ /* NOTE: see above */
+ writel(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
arm_smmu_tlb_sync_global(smmu);
}
@@ -469,6 +476,9 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
if (stage1) {
reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
@@ -510,6 +520,9 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
struct arm_smmu_domain *smmu_domain = cookie;
void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);
+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+ wmb();
+
writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}
@@ -863,6 +876,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;
+ if (smmu_domain->non_strict)
+ pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
+
smmu_domain->smmu = smmu;
pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
if (!pgtbl_ops) {
@@ -1252,6 +1268,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ops->unmap(ops, iova, size);
}
+static void arm_smmu_flush_iotlb_all(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_flush_all(smmu_domain);
+}
+
static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1459,6 +1483,8 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev)
if (dev_is_pci(dev))
group = pci_device_group(dev);
+ else if (dev_is_fsl_mc(dev))
+ group = fsl_mc_device_group(dev);
else
group = generic_device_group(dev);
@@ -1470,15 +1496,27 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
- return 0;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ *(int *)data = smmu_domain->non_strict;
+ return 0;
+ default:
+ return -ENODEV;
+ }
+ break;
default:
- return -ENODEV;
+ return -EINVAL;
}
}
@@ -1488,28 +1526,38 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
int ret = 0;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
- if (domain->type != IOMMU_DOMAIN_UNMANAGED)
- return -EINVAL;
-
mutex_lock(&smmu_domain->init_mutex);
- switch (attr) {
- case DOMAIN_ATTR_NESTING:
- if (smmu_domain->smmu) {
- ret = -EPERM;
- goto out_unlock;
+ switch (domain->type) {
+ case IOMMU_DOMAIN_UNMANAGED:
+ switch (attr) {
+ case DOMAIN_ATTR_NESTING:
+ if (smmu_domain->smmu) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ if (*(int *)data)
+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
+ else
+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ break;
+ case IOMMU_DOMAIN_DMA:
+ switch (attr) {
+ case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
+ smmu_domain->non_strict = *(int *)data;
+ break;
+ default:
+ ret = -ENODEV;
}
-
- if (*(int *)data)
- smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
- else
- smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
-
break;
default:
- ret = -ENODEV;
+ ret = -EINVAL;
}
-
out_unlock:
mutex_unlock(&smmu_domain->init_mutex);
return ret;
@@ -1562,7 +1610,7 @@ static struct iommu_ops arm_smmu_ops = {
.attach_dev = arm_smmu_attach_dev,
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device,
@@ -2036,6 +2084,10 @@ static void arm_smmu_bus_init(void)
bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
}
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!iommu_present(&fsl_mc_bus_type))
+ bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
+#endif
}
static int arm_smmu_device_probe(struct platform_device *pdev)
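For SMMUv2 the subtle part is barrier selection: context and VMID invalidations switch from writel_relaxed() to writel(), and the relaxed per-range invalidations gain an explicit wmb() when the SMMU walks page tables coherently, so PTEs cleared by this CPU are observable by the SMMU before the TLBI lands. The reduced pattern:

    /* Sketch: publish prior PTE updates to a coherent-walk SMMU
     * before issuing the invalidation through a relaxed write. */
    if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
        wmb();
    writel_relaxed(cfg->vmid, base + ARM_SMMU_GR0_TLBIVMID);
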
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 511ff9a1d6d9..d1b04753b204 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -55,6 +55,9 @@ struct iommu_dma_cookie {
};
struct list_head msi_page_list;
spinlock_t msi_lock;
+
+ /* Domain for flush queue callback; NULL if flush queue not in use */
+ struct iommu_domain *fq_domain;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -257,6 +260,20 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
+static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
+{
+ struct iommu_dma_cookie *cookie;
+ struct iommu_domain *domain;
+
+ cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
+ domain = cookie->fq_domain;
+ /*
+ * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
+ * implies that ops->flush_iotlb_all must be non-NULL.
+ */
+ domain->ops->flush_iotlb_all(domain);
+}
+
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -275,6 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ int attr;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -308,6 +326,13 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
+
+ if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
+ cookie->fq_domain = domain;
+ init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
+ }
+
if (!dev)
return 0;
@@ -393,6 +418,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
/* The MSI case is only ever cleaning up its most recent allocation */
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
+ else if (cookie->fq_domain) /* non-strict mode */
+ queue_iova(iovad, iova_pfn(iovad, iova),
+ size >> iova_shift(iovad), 0);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
@@ -408,7 +436,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
dma_addr -= iova_off;
size = iova_align(iovad, size + iova_off);
- WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
+ WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
+ if (!cookie->fq_domain)
+ iommu_tlb_sync(domain);
iommu_dma_free_iova(cookie, dma_addr, size);
}
@@ -491,7 +521,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count,
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = IOMMU_MAPPING_ERROR;
}
@@ -518,7 +548,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct page **pages;
@@ -606,9 +636,8 @@ int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot)
+ size_t size, int prot, struct iommu_domain *domain)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
size_t iova_off = 0;
dma_addr_t iova;
@@ -632,13 +661,14 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
/*
@@ -726,7 +756,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, int prot)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
struct scatterlist *s, *prev = NULL;
@@ -811,20 +841,21 @@ void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
}
dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+ iommu_get_dma_domain(dev));
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
+ __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
}
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -850,7 +881,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __iommu_dma_map(dev, msi_addr, size, prot);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
if (iommu_dma_mapping_error(dev, iova))
goto out_free_page;
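The dma-iommu side ties the pieces together: when the domain advertises DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, a flush queue is initialised with iommu_dma_flush_iotlb_all() as its callback, unmaps switch to iommu_unmap_fast() without a synchronous TLB sync, and the freed IOVA is queued instead of released, becoming reusable only after the deferred domain-wide invalidation. Condensed:

    /* Condensed sketch of the unmap path after this change. */
    WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
    if (!cookie->fq_domain)
        iommu_tlb_sync(domain);    /* strict mode: invalidate now */
    /* non-strict: iommu_dma_free_iova() queues the IOVA and the
     * TLBI happens later from the flush-queue callback. */
    iommu_dma_free_iova(cookie, dma_addr, size);
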
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index 8540625796a1..1b955aea44dd 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -543,7 +543,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
return ~(u32)0;
}
- for_each_node_by_type(node, "cpu") {
+ for_each_of_cpu_node(node) {
prop = of_get_property(node, "reg", &len);
for (i = 0; i < len / sizeof(u32); i++) {
if (be32_to_cpup(&prop[i]) == vcpu) {
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index f089136e9c3f..9b528cfcc6c3 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -814,6 +814,55 @@ static int configure_domain_dma_state(struct fsl_dma_domain *dma_domain, bool en
return 0;
}
+static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
+{
+ struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dma_domain->domain_lock, flags);
+ /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
+ if (dma_domain->enabled) {
+ pr_debug("Can't set geometry attributes as domain is active\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EBUSY;
+ }
+
+ /* Ensure that the geometry has been set for the domain */
+ if (!dma_domain->geom_size) {
+ pr_debug("Please configure geometry before setting the number of windows\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ /*
+ * Ensure we have valid window count i.e. it should be less than
+ * maximum permissible limit and should be a power of two.
+ */
+ if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
+ pr_debug("Invalid window count\n");
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -EINVAL;
+ }
+
+ ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
+ w_count > 1 ? w_count : 0);
+ if (!ret) {
+ kfree(dma_domain->win_arr);
+ dma_domain->win_arr = kcalloc(w_count,
+ sizeof(*dma_domain->win_arr),
+ GFP_ATOMIC);
+ if (!dma_domain->win_arr) {
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+ return -ENOMEM;
+ }
+ dma_domain->win_cnt = w_count;
+ }
+ spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
+
+ return ret;
+}
+
static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
enum iommu_attr attr_type, void *data)
{
@@ -830,6 +879,9 @@ static int fsl_pamu_set_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMU_ENABLE:
ret = configure_domain_dma_state(dma_domain, *(int *)data);
break;
+ case DOMAIN_ATTR_WINDOWS:
+ ret = fsl_pamu_set_windows(domain, *(u32 *)data);
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -856,6 +908,9 @@ static int fsl_pamu_get_domain_attr(struct iommu_domain *domain,
case DOMAIN_ATTR_FSL_PAMUV1:
*(int *)data = DOMAIN_ATTR_FSL_PAMUV1;
break;
+ case DOMAIN_ATTR_WINDOWS:
+ *(u32 *)data = dma_domain->win_cnt;
+ break;
default:
pr_debug("Unsupported attribute type\n");
ret = -EINVAL;
@@ -916,13 +971,13 @@ static struct iommu_group *get_shared_pci_device_group(struct pci_dev *pdev)
static struct iommu_group *get_pci_device_group(struct pci_dev *pdev)
{
struct pci_controller *pci_ctl;
- bool pci_endpt_partioning;
+ bool pci_endpt_partitioning;
struct iommu_group *group = NULL;
pci_ctl = pci_bus_to_host(pdev->bus);
- pci_endpt_partioning = check_pci_ctl_endpt_part(pci_ctl);
+ pci_endpt_partitioning = check_pci_ctl_endpt_part(pci_ctl);
/* We can partition PCIe devices so assign device group to the device */
- if (pci_endpt_partioning) {
+ if (pci_endpt_partitioning) {
group = pci_device_group(&pdev->dev);
/*
@@ -994,62 +1049,6 @@ static void fsl_pamu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
}
-static int fsl_pamu_set_windows(struct iommu_domain *domain, u32 w_count)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&dma_domain->domain_lock, flags);
- /* Ensure domain is inactive i.e. DMA should be disabled for the domain */
- if (dma_domain->enabled) {
- pr_debug("Can't set geometry attributes as domain is active\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EBUSY;
- }
-
- /* Ensure that the geometry has been set for the domain */
- if (!dma_domain->geom_size) {
- pr_debug("Please configure geometry before setting the number of windows\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- /*
- * Ensure we have valid window count i.e. it should be less than
- * maximum permissible limit and should be a power of two.
- */
- if (w_count > pamu_get_max_subwin_cnt() || !is_power_of_2(w_count)) {
- pr_debug("Invalid window count\n");
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -EINVAL;
- }
-
- ret = pamu_set_domain_geometry(dma_domain, &domain->geometry,
- w_count > 1 ? w_count : 0);
- if (!ret) {
- kfree(dma_domain->win_arr);
- dma_domain->win_arr = kcalloc(w_count,
- sizeof(*dma_domain->win_arr),
- GFP_ATOMIC);
- if (!dma_domain->win_arr) {
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
- return -ENOMEM;
- }
- dma_domain->win_cnt = w_count;
- }
- spin_unlock_irqrestore(&dma_domain->domain_lock, flags);
-
- return ret;
-}
-
-static u32 fsl_pamu_get_windows(struct iommu_domain *domain)
-{
- struct fsl_dma_domain *dma_domain = to_fsl_dma_domain(domain);
-
- return dma_domain->win_cnt;
-}
-
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
@@ -1058,8 +1057,6 @@ static const struct iommu_ops fsl_pamu_ops = {
.detach_dev = fsl_pamu_detach_device,
.domain_window_enable = fsl_pamu_window_enable,
.domain_window_disable = fsl_pamu_window_disable,
- .domain_get_windows = fsl_pamu_get_windows,
- .domain_set_windows = fsl_pamu_set_windows,
.iova_to_phys = fsl_pamu_iova_to_phys,
.domain_set_attr = fsl_pamu_set_domain_attr,
.domain_get_attr = fsl_pamu_get_domain_attr,
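With domain_set_windows/domain_get_windows gone from iommu_ops, the PAMU window count becomes driver-private state reached through the generic attribute interface; a caller that previously used the dedicated ops would now write something like:

    /* Sketch: configure the PAMU window count via the attribute API. */
    u32 w_count = 8;    /* must be a power of two within the PAMU limit */
    int ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &w_count);
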
diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
new file mode 100644
index 000000000000..7fabf9b1c2dc
--- /dev/null
+++ b/drivers/iommu/intel-iommu-debugfs.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2018 Intel Corporation.
+ *
+ * Authors: Gayatri Kammela <gayatri.kammela@intel.com>
+ * Sohil Mehta <sohil.mehta@intel.com>
+ * Jacob Pan <jacob.jun.pan@linux.intel.com>
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dmar.h>
+#include <linux/intel-iommu.h>
+#include <linux/pci.h>
+
+#include <asm/irq_remapping.h>
+
+struct iommu_regset {
+ int offset;
+ const char *regs;
+};
+
+#define IOMMU_REGSET_ENTRY(_reg_) \
+ { DMAR_##_reg_##_REG, __stringify(_reg_) }
+static const struct iommu_regset iommu_regs[] = {
+ IOMMU_REGSET_ENTRY(VER),
+ IOMMU_REGSET_ENTRY(CAP),
+ IOMMU_REGSET_ENTRY(ECAP),
+ IOMMU_REGSET_ENTRY(GCMD),
+ IOMMU_REGSET_ENTRY(GSTS),
+ IOMMU_REGSET_ENTRY(RTADDR),
+ IOMMU_REGSET_ENTRY(CCMD),
+ IOMMU_REGSET_ENTRY(FSTS),
+ IOMMU_REGSET_ENTRY(FECTL),
+ IOMMU_REGSET_ENTRY(FEDATA),
+ IOMMU_REGSET_ENTRY(FEADDR),
+ IOMMU_REGSET_ENTRY(FEUADDR),
+ IOMMU_REGSET_ENTRY(AFLOG),
+ IOMMU_REGSET_ENTRY(PMEN),
+ IOMMU_REGSET_ENTRY(PLMBASE),
+ IOMMU_REGSET_ENTRY(PLMLIMIT),
+ IOMMU_REGSET_ENTRY(PHMBASE),
+ IOMMU_REGSET_ENTRY(PHMLIMIT),
+ IOMMU_REGSET_ENTRY(IQH),
+ IOMMU_REGSET_ENTRY(IQT),
+ IOMMU_REGSET_ENTRY(IQA),
+ IOMMU_REGSET_ENTRY(ICS),
+ IOMMU_REGSET_ENTRY(IRTA),
+ IOMMU_REGSET_ENTRY(PQH),
+ IOMMU_REGSET_ENTRY(PQT),
+ IOMMU_REGSET_ENTRY(PQA),
+ IOMMU_REGSET_ENTRY(PRS),
+ IOMMU_REGSET_ENTRY(PECTL),
+ IOMMU_REGSET_ENTRY(PEDATA),
+ IOMMU_REGSET_ENTRY(PEADDR),
+ IOMMU_REGSET_ENTRY(PEUADDR),
+ IOMMU_REGSET_ENTRY(MTRRCAP),
+ IOMMU_REGSET_ENTRY(MTRRDEF),
+ IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_80000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX16K_A0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_C8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_D8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_E8000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F0000),
+ IOMMU_REGSET_ENTRY(MTRR_FIX4K_F8000),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK0),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK1),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK2),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK3),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK4),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK5),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK6),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK7),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK8),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSBASE9),
+ IOMMU_REGSET_ENTRY(MTRR_PHYSMASK9),
+ IOMMU_REGSET_ENTRY(VCCAP),
+ IOMMU_REGSET_ENTRY(VCMD),
+ IOMMU_REGSET_ENTRY(VCRSP),
+};
+
+static int iommu_regset_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ unsigned long flag;
+ int i, ret = 0;
+ u64 value;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!drhd->reg_base_addr) {
+ seq_puts(m, "IOMMU: Invalid base address\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ seq_printf(m, "IOMMU: %s Register Base Address: %llx\n",
+ iommu->name, drhd->reg_base_addr);
+ seq_puts(m, "Name\t\t\tOffset\t\tContents\n");
+ /*
+ * Publish the contents of the 64-bit hardware registers
+ * by adding the offset to the pointer (virtual address).
+ */
+ raw_spin_lock_irqsave(&iommu->register_lock, flag);
+ for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
+ value = dmar_readq(iommu->reg + iommu_regs[i].offset);
+ seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
+ iommu_regs[i].regs, iommu_regs[i].offset,
+ value);
+ }
+ raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
+ seq_putc(m, '\n');
+ }
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(iommu_regset);
+
+static void ctx_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu,
+ int bus)
+{
+ struct context_entry *context;
+ int devfn;
+
+ seq_printf(m, " Context Table Entries for Bus: %d\n", bus);
+ seq_puts(m, " Entry\tB:D.F\tHigh\tLow\n");
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ context = iommu_context_addr(iommu, bus, devfn, 0);
+ if (!context)
+ return;
+
+ if (!context_present(context))
+ continue;
+
+ seq_printf(m, " %-5d\t%02x:%02x.%x\t%-6llx\t%llx\n", devfn,
+ bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ context[0].hi, context[0].lo);
+ }
+}
+
+static void root_tbl_entry_show(struct seq_file *m, struct intel_iommu *iommu)
+{
+ unsigned long flags;
+ int bus;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+ seq_printf(m, "IOMMU %s: Root Table Address:%llx\n", iommu->name,
+ (u64)virt_to_phys(iommu->root_entry));
+ seq_puts(m, "Root Table Entries:\n");
+
+ for (bus = 0; bus < 256; bus++) {
+ if (!(iommu->root_entry[bus].lo & 1))
+ continue;
+
+ seq_printf(m, " Bus: %d H: %llx L: %llx\n", bus,
+ iommu->root_entry[bus].hi,
+ iommu->root_entry[bus].lo);
+
+ ctx_tbl_entry_show(m, iommu, bus);
+ seq_putc(m, '\n');
+ }
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
+static int dmar_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ root_tbl_entry_show(m, iommu);
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dmar_translation_struct);
+
+#ifdef CONFIG_IRQ_REMAP
+static void ir_tbl_remap_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *ri_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID DstID Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ ri_entry = &iommu->ir_table->base[idx];
+ if (!ri_entry->present || ri_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(ri_entry->sid),
+ PCI_SLOT(ri_entry->sid), PCI_FUNC(ri_entry->sid),
+ ri_entry->dest_id, ri_entry->vector,
+ ri_entry->high, ri_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+static void ir_tbl_posted_entry_show(struct seq_file *m,
+ struct intel_iommu *iommu)
+{
+ struct irte *pi_entry;
+ unsigned long flags;
+ int idx;
+
+ seq_puts(m, " Entry SrcID PDA_high PDA_low Vct IRTE_high\t\tIRTE_low\n");
+
+ raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+ for (idx = 0; idx < INTR_REMAP_TABLE_ENTRIES; idx++) {
+ pi_entry = &iommu->ir_table->base[idx];
+ if (!pi_entry->present || !pi_entry->p_pst)
+ continue;
+
+ seq_printf(m, " %-5d %02x:%02x.%01x %08x %08x %02x %016llx\t%016llx\n",
+ idx, PCI_BUS_NUM(pi_entry->sid),
+ PCI_SLOT(pi_entry->sid), PCI_FUNC(pi_entry->sid),
+ pi_entry->pda_h, pi_entry->pda_l << 6,
+ pi_entry->vector, pi_entry->high,
+ pi_entry->low);
+ }
+ raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+}
+
+/*
+ * For active IOMMUs go through the Interrupt remapping
+ * table and print valid entries in a table format for
+ * Remapped and Posted Interrupts.
+ */
+static int ir_translation_struct_show(struct seq_file *m, void *unused)
+{
+ struct dmar_drhd_unit *drhd;
+ struct intel_iommu *iommu;
+ u64 irta;
+
+ rcu_read_lock();
+ for_each_active_iommu(iommu, drhd) {
+ if (!ecap_ir_support(iommu->ecap))
+ continue;
+
+ seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_remap_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+
+ seq_puts(m, "****\n\n");
+
+ for_each_active_iommu(iommu, drhd) {
+ if (!cap_pi_support(iommu->cap))
+ continue;
+
+ seq_printf(m, "Posted Interrupt supported on IOMMU: %s\n",
+ iommu->name);
+
+ if (iommu->ir_table) {
+ irta = virt_to_phys(iommu->ir_table->base);
+ seq_printf(m, " IR table address:%llx\n", irta);
+ ir_tbl_posted_entry_show(m, iommu);
+ } else {
+ seq_puts(m, "Interrupt Remapping is not enabled\n");
+ }
+ seq_putc(m, '\n');
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ir_translation_struct);
+#endif
+
+void __init intel_iommu_debugfs_init(void)
+{
+ struct dentry *intel_iommu_debug = debugfs_create_dir("intel",
+ iommu_debugfs_dir);
+
+ debugfs_create_file("iommu_regset", 0444, intel_iommu_debug, NULL,
+ &iommu_regset_fops);
+ debugfs_create_file("dmar_translation_struct", 0444, intel_iommu_debug,
+ NULL, &dmar_translation_struct_fops);
+#ifdef CONFIG_IRQ_REMAP
+ debugfs_create_file("ir_translation_struct", 0444, intel_iommu_debug,
+ NULL, &ir_translation_struct_fops);
+#endif
+}
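With CONFIG_INTEL_IOMMU_DEBUGFS enabled, these files appear under the common IOMMU debugfs directory; assuming debugfs is mounted in the usual place, the dumps can be read directly:

    # cat /sys/kernel/debug/iommu/intel/iommu_regset
    # cat /sys/kernel/debug/iommu/intel/dmar_translation_struct
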
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 76f0a5d16ed3..f3ccf025108b 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -185,16 +185,6 @@ static int rwbf_quirk;
static int force_on = 0;
int intel_iommu_tboot_noforce;
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
- u64 lo;
- u64 hi;
-};
#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
/*
@@ -220,21 +210,6 @@ static phys_addr_t root_entry_uctp(struct root_entry *re)
return re->hi & VTD_PAGE_MASK;
}
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
- u64 lo;
- u64 hi;
-};
static inline void context_clear_pasid_enable(struct context_entry *context)
{
@@ -261,7 +236,7 @@ static inline bool __context_present(struct context_entry *context)
return (context->lo & 1);
}
-static inline bool context_present(struct context_entry *context)
+bool context_present(struct context_entry *context)
{
return context_pasid_enabled(context) ?
__context_present(context) :
@@ -788,8 +763,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->iommu_superpage = domain_update_iommu_superpage(NULL);
}
-static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu,
- u8 bus, u8 devfn, int alloc)
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc)
{
struct root_entry *root = &iommu->root_entry[bus];
struct context_entry *context;
@@ -4860,6 +4835,7 @@ int __init intel_iommu_init(void)
cpuhp_setup_state(CPUHP_IOMMU_INTEL_DEAD, "iommu/intel:dead", NULL,
intel_iommu_cpu_dead);
intel_iommu_enabled = 1;
+ intel_iommu_debugfs_init();
return 0;
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 967450bd421a..c2d6c11431de 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
* in a single-threaded environment with interrupts disabled, so no need to take
* the dmar_global_lock.
*/
-static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
static const struct irq_domain_ops intel_ir_domain_ops;
static void iommu_disable_irq_remapping(struct intel_iommu *iommu);
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index b5948ba6b3b3..445c3bde0480 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -587,6 +587,7 @@ static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
}
io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
return size;
}
@@ -642,6 +643,13 @@ static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte[i], lvl);
__arm_v7s_free_table(ptep, lvl + 1, data);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, blk_size,
blk_size, true);
@@ -712,7 +720,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
IO_PGTABLE_QUIRK_NO_PERMS |
IO_PGTABLE_QUIRK_TLBI_ON_MAP |
IO_PGTABLE_QUIRK_ARM_MTK_4GB |
- IO_PGTABLE_QUIRK_NO_DMA))
+ IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 88641b4560bc..237cacd4a62b 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -574,13 +574,13 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
return 0;
tablep = iopte_deref(pte, data);
+ } else if (unmap_idx >= 0) {
+ io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
+ io_pgtable_tlb_sync(&data->iop);
+ return size;
}
- if (unmap_idx < 0)
- return __arm_lpae_unmap(data, iova, size, lvl, tablep);
-
- io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
- return size;
+ return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
@@ -610,6 +610,13 @@ static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
io_pgtable_tlb_sync(iop);
ptep = iopte_deref(pte, data);
__arm_lpae_free_pgtable(data, lvl + 1, ptep);
+ } else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
+ /*
+ * Order the PTE update against queueing the IOVA, to
+ * guarantee that a flush callback from a different CPU
+ * has observed it before the TLBIALL can be issued.
+ */
+ smp_wmb();
} else {
io_pgtable_tlb_add_flush(iop, iova, size, size, true);
}
@@ -772,7 +779,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
u64 reg;
struct arm_lpae_io_pgtable *data;
- if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
@@ -864,7 +872,8 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
struct arm_lpae_io_pgtable *data;
/* The NS quirk doesn't apply at stage 2 */
- if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
+ if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
+ IO_PGTABLE_QUIRK_NON_STRICT))
return NULL;
data = arm_lpae_alloc_pgtable(cfg);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 2df79093cad9..47d5ae559329 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -71,12 +71,17 @@ struct io_pgtable_cfg {
* be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a
* software-emulated IOMMU), such that pagetable updates need not
* be treated as explicit DMA data.
+ *
+ * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
+ * on unmap, for DMA domains using the flush queue mechanism for
+ * delayed invalidation.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3)
#define IO_PGTABLE_QUIRK_NO_DMA BIT(4)
+ #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
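The quirk is deliberately narrow: only leaf TLBIs on unmap are skipped; freeing an intermediate table still invalidates and syncs eagerly, since walk caches must never see a stale table pointer. A driver opts in per domain before allocating its page-table ops, along the lines of:

    /* Sketch: opt a DMA domain into lazy (non-strict) invalidation. */
    if (smmu_domain->non_strict)
        pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
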
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 8c15c5980299..edbdf5d6962c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -32,6 +32,7 @@
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
+#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -41,6 +42,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
#else
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
#endif
+static bool iommu_dma_strict __read_mostly = true;
struct iommu_callback_data {
const struct iommu_ops *ops;
@@ -131,6 +133,12 @@ static int __init iommu_set_def_domain_type(char *str)
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
+static int __init iommu_dma_setup(char *str)
+{
+ return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
+
static ssize_t iommu_group_attr_show(struct kobject *kobj,
struct attribute *__attr, char *buf)
{
@@ -1024,6 +1032,18 @@ struct iommu_group *pci_device_group(struct device *dev)
return iommu_group_alloc();
}
+/* Get the IOMMU group for device on fsl-mc bus */
+struct iommu_group *fsl_mc_device_group(struct device *dev)
+{
+ struct device *cont_dev = fsl_mc_cont_dev(dev);
+ struct iommu_group *group;
+
+ group = iommu_group_get(cont_dev);
+ if (!group)
+ group = iommu_group_alloc();
+ return group;
+}
+
/**
* iommu_group_get_for_dev - Find or create the IOMMU group for a device
* @dev: target device
@@ -1072,6 +1092,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
group->default_domain = dom;
if (!group->domain)
group->domain = dom;
+
+ if (dom && !iommu_dma_strict) {
+ int attr = 1;
+ iommu_domain_set_attr(dom,
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+ &attr);
+ }
}
ret = iommu_group_add_device(group, dev);
@@ -1416,7 +1443,16 @@ struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
/*
- * IOMMU groups are really the natrual working unit of the IOMMU, but
+ * For IOMMU_DOMAIN_DMA implementations, the group and its default
+ * domain are already guaranteed to be valid and correct.
+ */
+struct iommu_domain *iommu_get_dma_domain(struct device *dev)
+{
+ return dev->iommu_group->default_domain;
+}
+
+/*
+ * IOMMU groups are really the natural working unit of the IOMMU, but
* the IOMMU API works on domains and devices. Bridge that gap by
* iterating over the devices in a group. Ideally we'd have a single
* device which represents the requestor ID of the group, but we also
@@ -1796,7 +1832,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
struct iommu_domain_geometry *geometry;
bool *paging;
int ret = 0;
- u32 *count;
switch (attr) {
case DOMAIN_ATTR_GEOMETRY:
@@ -1808,15 +1843,6 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
paging = data;
*paging = (domain->pgsize_bitmap != 0UL);
break;
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_get_windows != NULL)
- *count = domain->ops->domain_get_windows(domain);
- else
- ret = -ENODEV;
-
- break;
default:
if (!domain->ops->domain_get_attr)
return -EINVAL;
@@ -1832,18 +1858,8 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
int ret = 0;
- u32 *count;
switch (attr) {
- case DOMAIN_ATTR_WINDOWS:
- count = data;
-
- if (domain->ops->domain_set_windows != NULL)
- ret = domain->ops->domain_set_windows(domain, *count);
- else
- ret = -ENODEV;
-
- break;
default:
if (domain->ops->domain_set_attr == NULL)
return -EINVAL;
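
Usage note: with these hooks in place, booting with iommu.strict=0 makes iommu_group_get_for_dev() request DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE on each newly allocated default domain. A minimal sketch of how a driver's set_attr hook might honour that attribute (my_*-prefixed names are illustrative, not from this diff):

    static int my_domain_set_attr(struct iommu_domain *domain,
                                  enum iommu_attr attr, void *data)
    {
            struct my_domain *dom = to_my_domain(domain); /* assumed helper */

            switch (attr) {
            case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
                    /* Too late once the pagetable has been allocated */
                    if (dom->pgtbl_ops)
                            return -EPERM;
                    dom->non_strict = *(int *)data;         /* assumed flag */
                    return 0;
            default:
                    return -ENODEV;
            }
    }
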
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 83fe2621effe..f8d3ba247523 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -56,6 +56,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad->flush_cb = NULL;
iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
@@ -139,8 +140,10 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
if (free->pfn_hi < iovad->dma_32bit_pfn &&
- free->pfn_lo >= cached_iova->pfn_lo)
+ free->pfn_lo >= cached_iova->pfn_lo) {
iovad->cached32_node = rb_next(&free->node);
+ iovad->max32_alloc_size = iovad->dma_32bit_pfn;
+ }
cached_iova = rb_entry(iovad->cached_node, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@@ -190,6 +193,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
/* Walk the tree backwards */
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ if (limit_pfn <= iovad->dma_32bit_pfn &&
+ size >= iovad->max32_alloc_size)
+ goto iova32_full;
+
curr = __get_cached_rbnode(iovad, limit_pfn);
curr_iova = rb_entry(curr, struct iova, node);
do {
@@ -200,10 +207,8 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr_iova = rb_entry(curr, struct iova, node);
} while (curr && new_pfn <= curr_iova->pfn_hi);
- if (limit_pfn < size || new_pfn < iovad->start_pfn) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- return -ENOMEM;
- }
+ if (limit_pfn < size || new_pfn < iovad->start_pfn)
+ goto iova32_full;
/* pfn_lo will point to size aligned address if size_aligned is set */
new->pfn_lo = new_pfn;
@@ -214,9 +219,12 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
__cached_rbnode_insert_update(iovad, new);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
-
-
return 0;
+
+iova32_full:
+ iovad->max32_alloc_size = size;
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return -ENOMEM;
}
static struct kmem_cache *iova_cache;
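
Worked example of the fail-fast path above: if a size-256 allocation below dma_32bit_pfn fails, the iova32_full path records max32_alloc_size = 256, so any later request for 256 or more pfns in the 32-bit range returns -ENOMEM from the early check without walking the rbtree at all. The limit is only reset to the full 32-bit space in __cached_rbnode_delete_update(), i.e. once an IOVA below 4 GiB is freed and space may have opened up again.
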
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 22b94f8a9a04..b98a03189580 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* IPMMU VMSA
*
* Copyright (C) 2014 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
*/
#include <linux/bitmap.h>
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index f7787e757244..c5dd63072529 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -24,6 +24,7 @@
#include <linux/of_iommu.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
+#include <linux/fsl/mc.h>
#define NO_IOMMU 1
@@ -132,9 +133,8 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
struct of_phandle_args iommu_spec = { .args_count = 1 };
int err;
- err = of_pci_map_rid(info->np, alias, "iommu-map",
- "iommu-map-mask", &iommu_spec.np,
- iommu_spec.args);
+ err = of_map_rid(info->np, alias, "iommu-map", "iommu-map-mask",
+ &iommu_spec.np, iommu_spec.args);
if (err)
return err == -ENODEV ? NO_IOMMU : err;
@@ -143,6 +143,23 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
return err;
}
+static int of_fsl_mc_iommu_init(struct fsl_mc_device *mc_dev,
+ struct device_node *master_np)
+{
+ struct of_phandle_args iommu_spec = { .args_count = 1 };
+ int err;
+
+ err = of_map_rid(master_np, mc_dev->icid, "iommu-map",
+ "iommu-map-mask", &iommu_spec.np,
+ iommu_spec.args);
+ if (err)
+ return err == -ENODEV ? NO_IOMMU : err;
+
+ err = of_iommu_xlate(&mc_dev->dev, &iommu_spec);
+ of_node_put(iommu_spec.np);
+ return err;
+}
+
const struct iommu_ops *of_iommu_configure(struct device *dev,
struct device_node *master_np)
{
@@ -174,6 +191,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
err = pci_for_each_dma_alias(to_pci_dev(dev),
of_pci_iommu_init, &info);
+ } else if (dev_is_fsl_mc(dev)) {
+ err = of_fsl_mc_iommu_init(to_fsl_mc_device(dev), master_np);
} else {
struct of_phandle_args iommu_spec;
int idx = 0;
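
In effect, fsl-mc devices now reuse the generic iommu-map machinery: the device's ICID is run through the bus node's iommu-map/iommu-map-mask properties exactly as a PCI requester ID would be, and the resulting specifier is handed to of_iommu_xlate().
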
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 383e7b70221d..96451b581452 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -310,6 +310,9 @@ config MVEBU_ODMI
config MVEBU_PIC
bool
+config MVEBU_SEI
+ bool
+
config LS_SCFG_MSI
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
depends on PCI && PCI_MSI
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index fbd1ec8070ef..b822199445ff 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -76,6 +76,7 @@ obj-$(CONFIG_MVEBU_GICP) += irq-mvebu-gicp.o
obj-$(CONFIG_MVEBU_ICU) += irq-mvebu-icu.o
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
+obj-$(CONFIG_MVEBU_SEI) += irq-mvebu-sei.o
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c2df341ff6fa..db20e992a40f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -19,13 +19,16 @@
#include <linux/acpi_iort.h>
#include <linux/bitmap.h>
#include <linux/cpu.h>
+#include <linux/crash_dump.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
+#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/list_sort.h>
#include <linux/log2.h>
+#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/msi.h>
#include <linux/of.h>
@@ -52,6 +55,7 @@
#define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
+#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
static u32 lpi_id_bits;
@@ -64,7 +68,7 @@ static u32 lpi_id_bits;
#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
-#define LPI_PROP_DEFAULT_PRIO 0xa0
+#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
/*
* Collection structure - just an ID, and a redistributor address to
@@ -173,6 +177,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
+#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
@@ -1028,7 +1033,7 @@ static inline u32 its_get_event_id(struct irq_data *d)
static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
{
irq_hw_number_t hwirq;
- struct page *prop_page;
+ void *va;
u8 *cfg;
if (irqd_is_forwarded_to_vcpu(d)) {
@@ -1036,7 +1041,7 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
u32 event = its_get_event_id(d);
struct its_vlpi_map *map;
- prop_page = its_dev->event_map.vm->vprop_page;
+ va = page_address(its_dev->event_map.vm->vprop_page);
map = &its_dev->event_map.vlpi_maps[event];
hwirq = map->vintid;
@@ -1044,11 +1049,11 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
map->properties &= ~clr;
map->properties |= set | LPI_PROP_GROUP1;
} else {
- prop_page = gic_rdists->prop_page;
+ va = gic_rdists->prop_table_va;
hwirq = d->hwirq;
}
- cfg = page_address(prop_page) + hwirq - 8192;
+ cfg = va + hwirq - 8192;
*cfg &= ~clr;
*cfg |= set | LPI_PROP_GROUP1;
@@ -1597,6 +1602,15 @@ static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
kfree(bitmap);
}
+static void gic_reset_prop_table(void *va)
+{
+ /* Priority 0xa0, Group-1, disabled */
+ memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
+
+ /* Make sure the GIC will observe the written configuration */
+ gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
+}
+
static struct page *its_allocate_prop_table(gfp_t gfp_flags)
{
struct page *prop_page;
@@ -1605,13 +1619,7 @@ static struct page *its_allocate_prop_table(gfp_t gfp_flags)
if (!prop_page)
return NULL;
- /* Priority 0xa0, Group-1, disabled */
- memset(page_address(prop_page),
- LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1,
- LPI_PROPBASE_SZ);
-
- /* Make sure the GIC will observe the written configuration */
- gic_flush_dcache_to_poc(page_address(prop_page), LPI_PROPBASE_SZ);
+ gic_reset_prop_table(page_address(prop_page));
return prop_page;
}
@@ -1622,20 +1630,74 @@ static void its_free_prop_table(struct page *prop_page)
get_order(LPI_PROPBASE_SZ));
}
-static int __init its_alloc_lpi_tables(void)
+static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
{
- phys_addr_t paddr;
+ phys_addr_t start, end, addr_end;
+ u64 i;
- lpi_id_bits = min_t(u32, GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
- ITS_MAX_LPI_NRBITS);
- gic_rdists->prop_page = its_allocate_prop_table(GFP_NOWAIT);
- if (!gic_rdists->prop_page) {
- pr_err("Failed to allocate PROPBASE\n");
- return -ENOMEM;
+ /*
+ * We don't bother checking for a kdump kernel as by
+ * construction, the LPI tables are out of this kernel's
+ * memory map.
+ */
+ if (is_kdump_kernel())
+ return true;
+
+ addr_end = addr + size - 1;
+
+ for_each_reserved_mem_region(i, &start, &end) {
+ if (addr >= start && addr_end <= end)
+ return true;
+ }
+
+ /* Not found, not a good sign... */
+ pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
+ &addr, &addr_end);
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+ return false;
+}
+
+static int gic_reserve_range(phys_addr_t addr, unsigned long size)
+{
+ if (efi_enabled(EFI_CONFIG_TABLES))
+ return efi_mem_reserve_persistent(addr, size);
+
+ return 0;
+}
+
+static int __init its_setup_lpi_prop_table(void)
+{
+ if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
+ u64 val;
+
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
+
+ gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
+ gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ,
+ MEMREMAP_WB);
+ gic_reset_prop_table(gic_rdists->prop_table_va);
+ } else {
+ struct page *page;
+
+ lpi_id_bits = min_t(u32,
+ GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
+ ITS_MAX_LPI_NRBITS);
+ page = its_allocate_prop_table(GFP_NOWAIT);
+ if (!page) {
+ pr_err("Failed to allocate PROPBASE\n");
+ return -ENOMEM;
+ }
+
+ gic_rdists->prop_table_pa = page_to_phys(page);
+ gic_rdists->prop_table_va = page_address(page);
+ WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
+ LPI_PROPBASE_SZ));
}
- paddr = page_to_phys(gic_rdists->prop_page);
- pr_info("GIC: using LPI property table @%pa\n", &paddr);
+ pr_info("GICv3: using LPI property table @%pa\n",
+ &gic_rdists->prop_table_pa);
return its_lpi_init(lpi_id_bits);
}
@@ -1924,12 +1986,9 @@ static int its_alloc_collections(struct its_node *its)
static struct page *its_allocate_pending_table(gfp_t gfp_flags)
{
struct page *pend_page;
- /*
- * The pending pages have to be at least 64kB aligned,
- * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below.
- */
+
pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ get_order(LPI_PENDBASE_SZ));
if (!pend_page)
return NULL;
@@ -1941,36 +2000,103 @@ static struct page *its_allocate_pending_table(gfp_t gfp_flags)
static void its_free_pending_table(struct page *pt)
{
- free_pages((unsigned long)page_address(pt),
- get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
+ free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
+}
+
+/*
+ * Booting with kdump and LPIs enabled is generally fine. Any other
+ * case is wrong in the absence of firmware/EFI support.
+ */
+static bool enabled_lpis_allowed(void)
+{
+ phys_addr_t addr;
+ u64 val;
+
+ /* Check whether the property table is in a reserved region */
+ val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
+ addr = val & GENMASK_ULL(51, 12);
+
+ return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
+}
+
+static int __init allocate_lpi_tables(void)
+{
+ u64 val;
+ int err, cpu;
+
+ /*
+ * If LPIs are enabled while we run this from the boot CPU,
+ * flag the RD tables as pre-allocated if the stars do align.
+ */
+ val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
+ if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
+ gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
+ RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
+ pr_info("GICv3: Using preallocated redistributor tables\n");
+ }
+
+ err = its_setup_lpi_prop_table();
+ if (err)
+ return err;
+
+ /*
+ * We allocate all the pending tables anyway, as we may have a
+ * mix of RDs that have had LPIs enabled, and some that
+ * don't. We'll free the unused ones as each CPU comes online.
+ */
+ for_each_possible_cpu(cpu) {
+ struct page *pend_page;
+
+ pend_page = its_allocate_pending_table(GFP_NOWAIT);
+ if (!pend_page) {
+ pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
+ return -ENOMEM;
+ }
+
+ gic_data_rdist_cpu(cpu)->pend_page = pend_page;
+ }
+
+ return 0;
}
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
struct page *pend_page;
+ phys_addr_t paddr;
u64 val, tmp;
- /* If we didn't allocate the pending table yet, do it now */
- pend_page = gic_data_rdist()->pend_page;
- if (!pend_page) {
- phys_addr_t paddr;
+ if (gic_data_rdist()->lpi_enabled)
+ return;
- pend_page = its_allocate_pending_table(GFP_NOWAIT);
- if (!pend_page) {
- pr_err("Failed to allocate PENDBASE for CPU%d\n",
- smp_processor_id());
- return;
- }
+ val = readl_relaxed(rbase + GICR_CTLR);
+ if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
+ (val & GICR_CTLR_ENABLE_LPIS)) {
+ /*
+ * Check that we get the same property table on all
+ * RDs. If we don't, this is hopeless.
+ */
+ paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
+ paddr &= GENMASK_ULL(51, 12);
+ if (WARN_ON(gic_rdists->prop_table_pa != paddr))
+ add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
+
+ paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
+ paddr &= GENMASK_ULL(51, 16);
- paddr = page_to_phys(pend_page);
- pr_info("CPU%d: using LPI pending table @%pa\n",
- smp_processor_id(), &paddr);
- gic_data_rdist()->pend_page = pend_page;
+ WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
+ its_free_pending_table(gic_data_rdist()->pend_page);
+ gic_data_rdist()->pend_page = NULL;
+
+ goto out;
}
+ pend_page = gic_data_rdist()->pend_page;
+ paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+
/* set PROPBASE */
- val = (page_to_phys(gic_rdists->prop_page) |
+ val = (gic_rdists->prop_table_pa |
GICR_PROPBASER_InnerShareable |
GICR_PROPBASER_RaWaWb |
((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
@@ -2020,6 +2146,12 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
+out:
+ gic_data_rdist()->lpi_enabled = true;
+ pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
+ smp_processor_id(),
+ gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ &paddr);
}
static void its_cpu_init_collection(struct its_node *its)
@@ -3498,16 +3630,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
- /*
- * If coming via a CPU hotplug event, we don't need to disable
- * LPIs before trying to re-enable them. They are already
- * configured and all is well in the world. Detect this case
- * by checking the allocation of the pending table for the
- * current CPU.
- */
- if (gic_data_rdist()->pend_page)
- return 0;
-
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
@@ -3517,7 +3639,21 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
- pr_warn("CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world.
+ *
+ * If running with preallocated tables, there is nothing to do.
+ */
+ if (gic_data_rdist()->lpi_enabled ||
+ (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
+ return 0;
+
+ /*
+ * From that point on, we only try to do some damage control.
+ */
+ pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
@@ -3773,7 +3909,8 @@ int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
}
gic_rdists = rdists;
- err = its_alloc_lpi_tables();
+
+ err = allocate_lpi_tables();
if (err)
return err;
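
The rationale for the RDIST_FLAGS_RD_TABLES_PREALLOCATED flow: a kexec/kdump kernel can inherit redistributors with LPIs already enabled, and the architecture offers no generally usable way to disable them again. Instead of tainting unconditionally, the boot CPU now checks whether GICR_PROPBASER points into a memblock/EFI-reserved range; if so, the inherited tables are reused in place (the property table is memremap()ed and reinitialised), and gic_reserve_range() uses efi_mem_reserve_persistent() so the current kernel's own allocations survive the next kexec in turn.
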
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index d5912f1ec884..8f87f40c9460 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -348,48 +348,45 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
{
u32 irqnr;
- do {
- irqnr = gic_read_iar();
+ irqnr = gic_read_iar();
- if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
- int err;
+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
+ int err;
- if (static_branch_likely(&supports_deactivate_key))
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_eoir(irqnr);
+ else
+ isb();
+
+ err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ if (err) {
+ WARN_ONCE(true, "Unexpected interrupt received!\n");
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
gic_write_eoir(irqnr);
- else
- isb();
-
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
- if (err) {
- WARN_ONCE(true, "Unexpected interrupt received!\n");
- if (static_branch_likely(&supports_deactivate_key)) {
- if (irqnr < 8192)
- gic_write_dir(irqnr);
- } else {
- gic_write_eoir(irqnr);
- }
}
- continue;
}
- if (irqnr < 16) {
- gic_write_eoir(irqnr);
- if (static_branch_likely(&supports_deactivate_key))
- gic_write_dir(irqnr);
+ return;
+ }
+ if (irqnr < 16) {
+ gic_write_eoir(irqnr);
+ if (static_branch_likely(&supports_deactivate_key))
+ gic_write_dir(irqnr);
#ifdef CONFIG_SMP
- /*
- * Unlike GICv2, we don't need an smp_rmb() here.
- * The control dependency from gic_read_iar to
- * the ISB in gic_write_eoir is enough to ensure
- * that any shared data read by handle_IPI will
- * be read after the ACK.
- */
- handle_IPI(irqnr, regs);
+ /*
+ * Unlike GICv2, we don't need an smp_rmb() here.
+ * The control dependency from gic_read_iar to
+ * the ISB in gic_write_eoir is enough to ensure
+ * that any shared data read by handle_IPI will
+ * be read after the ACK.
+ */
+ handle_IPI(irqnr, regs);
#else
- WARN_ONCE(true, "Unexpected SGI received!\n");
+ WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
- continue;
- }
- } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+ }
}
static void __init gic_dist_init(void)
@@ -653,7 +650,9 @@ early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
static int gic_dist_supports_lpis(void)
{
- return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && !gicv3_nolpi;
+ return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+ !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+ !gicv3_nolpi);
}
static void gic_cpu_init(void)
@@ -673,10 +672,6 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
- /* Give LPIs a spin */
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_cpu_init();
-
/* initialise system registers */
gic_cpu_sys_reg_init();
}
@@ -689,6 +684,10 @@ static void gic_cpu_init(void)
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
+
+ if (gic_dist_supports_lpis())
+ its_cpu_init();
+
return 0;
}
@@ -1127,14 +1126,16 @@ static int __init gic_init_bases(void __iomem *dist_base,
gic_update_vlpi_properties();
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
- its_init(handle, &gic_data.rdists, gic_data.domain);
-
gic_smp_init();
gic_dist_init();
gic_cpu_init();
gic_cpu_pm_init();
+ if (gic_dist_supports_lpis()) {
+ its_init(handle, &gic_data.rdists, gic_data.domain);
+ its_cpu_init();
+ }
+
return 0;
out_free:
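
Ordering note: its_init() moves after gic_cpu_init() because allocate_lpi_tables() must read the boot CPU's redistributor registers (GICR_CTLR, GICR_PROPBASER), which are only reachable once the redistributor has been probed; its_cpu_init() correspondingly moves out of gic_cpu_init() into gic_starting_cpu() and the tail of gic_init_bases(), so per-CPU LPI setup always runs after the tables exist.
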
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 13063339b416..547045d89c4b 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -13,6 +13,7 @@
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
+#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_irq.h>
@@ -26,6 +27,10 @@
#define ICU_SETSPI_NSR_AH 0x14
#define ICU_CLRSPI_NSR_AL 0x18
#define ICU_CLRSPI_NSR_AH 0x1c
+#define ICU_SET_SEI_AL 0x50
+#define ICU_SET_SEI_AH 0x54
+#define ICU_CLR_SEI_AL 0x58
+#define ICU_CLR_SEI_AH 0x5C
#define ICU_INT_CFG(x) (0x100 + 4 * (x))
#define ICU_INT_ENABLE BIT(24)
#define ICU_IS_EDGE BIT(28)
@@ -36,12 +41,23 @@
#define ICU_SATA0_ICU_ID 109
#define ICU_SATA1_ICU_ID 107
+struct mvebu_icu_subset_data {
+ unsigned int icu_group;
+ unsigned int offset_set_ah;
+ unsigned int offset_set_al;
+ unsigned int offset_clr_ah;
+ unsigned int offset_clr_al;
+};
+
struct mvebu_icu {
- struct irq_chip irq_chip;
void __iomem *base;
- struct irq_domain *domain;
struct device *dev;
+};
+
+struct mvebu_icu_msi_data {
+ struct mvebu_icu *icu;
atomic_t initialized;
+ const struct mvebu_icu_subset_data *subset_data;
};
struct mvebu_icu_irq_data {
@@ -50,28 +66,40 @@ struct mvebu_icu_irq_data {
unsigned int type;
};
-static void mvebu_icu_init(struct mvebu_icu *icu, struct msi_msg *msg)
+DEFINE_STATIC_KEY_FALSE(legacy_bindings);
+
+static void mvebu_icu_init(struct mvebu_icu *icu,
+ struct mvebu_icu_msi_data *msi_data,
+ struct msi_msg *msg)
{
- if (atomic_cmpxchg(&icu->initialized, false, true))
+ const struct mvebu_icu_subset_data *subset = msi_data->subset_data;
+
+ if (atomic_cmpxchg(&msi_data->initialized, false, true))
return;
- /* Set Clear/Set ICU SPI message address in AP */
- writel_relaxed(msg[0].address_hi, icu->base + ICU_SETSPI_NSR_AH);
- writel_relaxed(msg[0].address_lo, icu->base + ICU_SETSPI_NSR_AL);
- writel_relaxed(msg[1].address_hi, icu->base + ICU_CLRSPI_NSR_AH);
- writel_relaxed(msg[1].address_lo, icu->base + ICU_CLRSPI_NSR_AL);
+ /* Set 'SET' ICU SPI message address in AP */
+ writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah);
+ writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al);
+
+ if (subset->icu_group != ICU_GRP_NSR)
+ return;
+
+ /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */
+ writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah);
+ writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al);
}
static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
{
struct irq_data *d = irq_get_irq_data(desc->irq);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d->domain);
struct mvebu_icu_irq_data *icu_irqd = d->chip_data;
struct mvebu_icu *icu = icu_irqd->icu;
unsigned int icu_int;
if (msg->address_lo || msg->address_hi) {
- /* One off initialization */
- mvebu_icu_init(icu, msg);
+ /* One off initialization per domain */
+ mvebu_icu_init(icu, msi_data, msg);
/* Configure the ICU with irq number & type */
icu_int = msg->data | ICU_INT_ENABLE;
if (icu_irqd->type & IRQ_TYPE_EDGE_RISING)
@@ -101,37 +129,66 @@ static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg)
}
}
+static struct irq_chip mvebu_icu_nsr_chip = {
+ .name = "ICU-NSR",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+static struct irq_chip mvebu_icu_sei_chip = {
+ .name = "ICU-SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
static int
mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
- struct mvebu_icu *icu = d->host_data;
- unsigned int icu_group;
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(d);
+ struct mvebu_icu *icu = platform_msi_get_host_data(d);
+ unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
/* Check the count of the parameters in dt */
- if (WARN_ON(fwspec->param_count < 3)) {
+ if (WARN_ON(fwspec->param_count != param_count)) {
dev_err(icu->dev, "wrong ICU parameter count %d\n",
fwspec->param_count);
return -EINVAL;
}
- /* Only ICU group type is handled */
- icu_group = fwspec->param[0];
- if (icu_group != ICU_GRP_NSR && icu_group != ICU_GRP_SR &&
- icu_group != ICU_GRP_SEI && icu_group != ICU_GRP_REI) {
- dev_err(icu->dev, "wrong ICU group type %x\n", icu_group);
- return -EINVAL;
+ if (static_branch_unlikely(&legacy_bindings)) {
+ *hwirq = fwspec->param[1];
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ if (fwspec->param[0] != ICU_GRP_NSR) {
+ dev_err(icu->dev, "wrong ICU group type %x\n",
+ fwspec->param[0]);
+ return -EINVAL;
+ }
+ } else {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The ICU receives level interrupts. While the NSR are also
+ * level interrupts, SEI are edge interrupts. Force the type
+ * here in this case. Please note that this makes the interrupt
+ * handling unreliable.
+ */
+ if (msi_data->subset_data->icu_group == ICU_GRP_SEI)
+ *type = IRQ_TYPE_EDGE_RISING;
}
- *hwirq = fwspec->param[1];
if (*hwirq >= ICU_MAX_IRQS) {
dev_err(icu->dev, "invalid interrupt number %ld\n", *hwirq);
return -EINVAL;
}
- /* Mask the type to prevent wrong DT configuration */
- *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
-
return 0;
}
@@ -142,8 +199,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int err;
unsigned long hwirq;
struct irq_fwspec *fwspec = args;
- struct mvebu_icu *icu = platform_msi_get_host_data(domain);
+ struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain);
+ struct mvebu_icu *icu = msi_data->icu;
struct mvebu_icu_irq_data *icu_irqd;
+ struct irq_chip *chip = &mvebu_icu_nsr_chip;
icu_irqd = kmalloc(sizeof(*icu_irqd), GFP_KERNEL);
if (!icu_irqd)
@@ -156,7 +215,10 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
goto free_irqd;
}
- icu_irqd->icu_group = fwspec->param[0];
+ if (static_branch_unlikely(&legacy_bindings))
+ icu_irqd->icu_group = fwspec->param[0];
+ else
+ icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
err = platform_msi_domain_alloc(domain, virq, nr_irqs);
@@ -170,8 +232,11 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (err)
goto free_msi;
+ if (icu_irqd->icu_group == ICU_GRP_SEI)
+ chip = &mvebu_icu_sei_chip;
+
err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &icu->irq_chip, icu_irqd);
+ chip, icu_irqd);
if (err) {
dev_err(icu->dev, "failed to set the data to IRQ domain\n");
goto free_msi;
@@ -204,11 +269,84 @@ static const struct irq_domain_ops mvebu_icu_domain_ops = {
.free = mvebu_icu_irq_domain_free,
};
+static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
+ .icu_group = ICU_GRP_NSR,
+ .offset_set_ah = ICU_SETSPI_NSR_AH,
+ .offset_set_al = ICU_SETSPI_NSR_AL,
+ .offset_clr_ah = ICU_CLRSPI_NSR_AH,
+ .offset_clr_al = ICU_CLRSPI_NSR_AL,
+};
+
+static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
+ .icu_group = ICU_GRP_SEI,
+ .offset_set_ah = ICU_SET_SEI_AH,
+ .offset_set_al = ICU_SET_SEI_AL,
+};
+
+static const struct of_device_id mvebu_icu_subset_of_match[] = {
+ {
+ .compatible = "marvell,cp110-icu-nsr",
+ .data = &mvebu_icu_nsr_subset_data,
+ },
+ {
+ .compatible = "marvell,cp110-icu-sei",
+ .data = &mvebu_icu_sei_subset_data,
+ },
+ {},
+};
+
+static int mvebu_icu_subset_probe(struct platform_device *pdev)
+{
+ struct mvebu_icu_msi_data *msi_data;
+ struct device_node *msi_parent_dn;
+ struct device *dev = &pdev->dev;
+ struct irq_domain *irq_domain;
+
+ msi_data = devm_kzalloc(dev, sizeof(*msi_data), GFP_KERNEL);
+ if (!msi_data)
+ return -ENOMEM;
+
+ if (static_branch_unlikely(&legacy_bindings)) {
+ msi_data->icu = dev_get_drvdata(dev);
+ msi_data->subset_data = &mvebu_icu_nsr_subset_data;
+ } else {
+ msi_data->icu = dev_get_drvdata(dev->parent);
+ msi_data->subset_data = of_device_get_match_data(dev);
+ }
+
+ dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ DOMAIN_BUS_PLATFORM_MSI);
+ if (!dev->msi_domain)
+ return -EPROBE_DEFER;
+
+ msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
+ if (!msi_parent_dn)
+ return -ENODEV;
+
+ irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS,
+ mvebu_icu_write_msg,
+ &mvebu_icu_domain_ops,
+ msi_data);
+ if (!irq_domain) {
+ dev_err(dev, "Failed to create ICU MSI domain\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mvebu_icu_subset_driver = {
+ .probe = mvebu_icu_subset_probe,
+ .driver = {
+ .name = "mvebu-icu-subset",
+ .of_match_table = mvebu_icu_subset_of_match,
+ },
+};
+builtin_platform_driver(mvebu_icu_subset_driver);
+
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *gicp_dn;
struct resource *res;
int i;
@@ -226,53 +364,38 @@ static int mvebu_icu_probe(struct platform_device *pdev)
return PTR_ERR(icu->base);
}
- icu->irq_chip.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "ICU.%x",
- (unsigned int)res->start);
- if (!icu->irq_chip.name)
- return -ENOMEM;
-
- icu->irq_chip.irq_mask = irq_chip_mask_parent;
- icu->irq_chip.irq_unmask = irq_chip_unmask_parent;
- icu->irq_chip.irq_eoi = irq_chip_eoi_parent;
- icu->irq_chip.irq_set_type = irq_chip_set_type_parent;
-#ifdef CONFIG_SMP
- icu->irq_chip.irq_set_affinity = irq_chip_set_affinity_parent;
-#endif
-
/*
- * We're probed after MSI domains have been resolved, so force
- * resolution here.
+ * Legacy bindings: the ICU is a single node with one MSI parent, so
+ * manually force the probe of the NSR interrupt side.
+ * New bindings: the ICU node has one child per interrupt controller,
+ * each with its own MSI parent, so call platform_populate().
+ * All ICU instances should use the same bindings.
*/
- pdev->dev.msi_domain = of_msi_get_domain(&pdev->dev, node,
- DOMAIN_BUS_PLATFORM_MSI);
- if (!pdev->dev.msi_domain)
- return -EPROBE_DEFER;
-
- gicp_dn = irq_domain_get_of_node(pdev->dev.msi_domain);
- if (!gicp_dn)
- return -ENODEV;
+ if (!of_get_child_count(pdev->dev.of_node))
+ static_branch_enable(&legacy_bindings);
/*
- * Clean all ICU interrupts with type SPI_NSR, required to
+ * Clean all ICU interrupts of type NSR and SEI, required to
* avoid unpredictable SPI assignments done by firmware.
*/
for (i = 0 ; i < ICU_MAX_IRQS ; i++) {
- u32 icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
- if ((icu_int >> ICU_GROUP_SHIFT) == ICU_GRP_NSR)
+ u32 icu_int, icu_grp;
+
+ icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i));
+ icu_grp = icu_int >> ICU_GROUP_SHIFT;
+
+ if (icu_grp == ICU_GRP_NSR ||
+ (icu_grp == ICU_GRP_SEI &&
+ !static_branch_unlikely(&legacy_bindings)))
writel_relaxed(0x0, icu->base + ICU_INT_CFG(i));
}
- icu->domain =
- platform_msi_create_device_domain(&pdev->dev, ICU_MAX_IRQS,
- mvebu_icu_write_msg,
- &mvebu_icu_domain_ops, icu);
- if (!icu->domain) {
- dev_err(&pdev->dev, "Failed to create ICU domain\n");
- return -ENOMEM;
- }
+ platform_set_drvdata(pdev, icu);
- return 0;
+ if (static_branch_unlikely(&legacy_bindings))
+ return mvebu_icu_subset_probe(pdev);
+ else
+ return devm_of_platform_populate(&pdev->dev);
}
static const struct of_device_id mvebu_icu_of_match[] = {
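
Binding selection happens once at probe time: an ICU node without children is treated as the legacy single-node binding (three interrupt cells, the first being the group), while a node with per-subset children, each carrying its own msi-parent, goes through devm_of_platform_populate(). The static key lets hot paths such as mvebu_icu_irq_domain_translate() test that one-time decision as a patched branch rather than re-checking it per interrupt.
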
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
new file mode 100644
index 000000000000..566d69a2edbc
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define pr_fmt(fmt) "mvebu-sei: " fmt
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/msi.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+/* Cause register */
+#define GICP_SECR(idx) (0x0 + ((idx) * 0x4))
+/* Mask register */
+#define GICP_SEMR(idx) (0x20 + ((idx) * 0x4))
+#define GICP_SET_SEI_OFFSET 0x30
+
+#define SEI_IRQ_COUNT_PER_REG 32
+#define SEI_IRQ_REG_COUNT 2
+#define SEI_IRQ_COUNT (SEI_IRQ_COUNT_PER_REG * SEI_IRQ_REG_COUNT)
+#define SEI_IRQ_REG_IDX(irq_id) ((irq_id) / SEI_IRQ_COUNT_PER_REG)
+#define SEI_IRQ_REG_BIT(irq_id) ((irq_id) % SEI_IRQ_COUNT_PER_REG)
+
+struct mvebu_sei_interrupt_range {
+ u32 first;
+ u32 size;
+};
+
+struct mvebu_sei_caps {
+ struct mvebu_sei_interrupt_range ap_range;
+ struct mvebu_sei_interrupt_range cp_range;
+};
+
+struct mvebu_sei {
+ struct device *dev;
+ void __iomem *base;
+ struct resource *res;
+ struct irq_domain *sei_domain;
+ struct irq_domain *ap_domain;
+ struct irq_domain *cp_domain;
+ const struct mvebu_sei_caps *caps;
+
+ /* Lock on MSI allocations/releases */
+ struct mutex cp_msi_lock;
+ DECLARE_BITMAP(cp_msi_bitmap, SEI_IRQ_COUNT);
+
+ /* Lock on IRQ masking register */
+ raw_spinlock_t mask_lock;
+};
+
+static void mvebu_sei_ack_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+
+ writel_relaxed(BIT(SEI_IRQ_REG_BIT(d->hwirq)),
+ sei->base + GICP_SECR(reg_idx));
+}
+
+static void mvebu_sei_mask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 1 disables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg |= BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static void mvebu_sei_unmask_irq(struct irq_data *d)
+{
+ struct mvebu_sei *sei = irq_data_get_irq_chip_data(d);
+ u32 reg, reg_idx = SEI_IRQ_REG_IDX(d->hwirq);
+ unsigned long flags;
+
+ /* 0 enables the interrupt */
+ raw_spin_lock_irqsave(&sei->mask_lock, flags);
+ reg = readl_relaxed(sei->base + GICP_SEMR(reg_idx));
+ reg &= ~BIT(SEI_IRQ_REG_BIT(d->hwirq));
+ writel_relaxed(reg, sei->base + GICP_SEMR(reg_idx));
+ raw_spin_unlock_irqrestore(&sei->mask_lock, flags);
+}
+
+static int mvebu_sei_set_affinity(struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ return -EINVAL;
+}
+
+static int mvebu_sei_set_irqchip_state(struct irq_data *d,
+ enum irqchip_irq_state which,
+ bool state)
+{
+ /* We can only clear the pending state by acking the interrupt */
+ if (which != IRQCHIP_STATE_PENDING || state)
+ return -EINVAL;
+
+ mvebu_sei_ack_irq(d);
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_irq_chip = {
+ .name = "SEI",
+ .irq_ack = mvebu_sei_ack_irq,
+ .irq_mask = mvebu_sei_mask_irq,
+ .irq_unmask = mvebu_sei_unmask_irq,
+ .irq_set_affinity = mvebu_sei_set_affinity,
+ .irq_set_irqchip_state = mvebu_sei_set_irqchip_state,
+};
+
+static int mvebu_sei_ap_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_ap_irq_chip = {
+ .name = "AP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_ap_set_type,
+};
+
+static void mvebu_sei_cp_compose_msi_msg(struct irq_data *data,
+ struct msi_msg *msg)
+{
+ struct mvebu_sei *sei = data->chip_data;
+ phys_addr_t set = sei->res->start + GICP_SET_SEI_OFFSET;
+
+ msg->data = data->hwirq + sei->caps->cp_range.first;
+ msg->address_lo = lower_32_bits(set);
+ msg->address_hi = upper_32_bits(set);
+}
+
+static int mvebu_sei_cp_set_type(struct irq_data *data, unsigned int type)
+{
+ if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct irq_chip mvebu_sei_cp_irq_chip = {
+ .name = "CP SEI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_set_type = mvebu_sei_cp_set_type,
+ .irq_compose_msi_msg = mvebu_sei_cp_compose_msi_msg,
+};
+
+static int mvebu_sei_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec *fwspec = arg;
+
+ /* Not much to do, just setup the irqdata */
+ irq_domain_set_hwirq_and_chip(domain, virq, fwspec->param[0],
+ &mvebu_sei_irq_chip, sei);
+
+ return 0;
+}
+
+static void mvebu_sei_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ int i;
+
+ for (i = 0; i < nr_irqs; i++) {
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
+ irq_set_handler(virq + i, NULL);
+ irq_domain_reset_irq_data(d);
+ }
+}
+
+static const struct irq_domain_ops mvebu_sei_domain_ops = {
+ .alloc = mvebu_sei_domain_alloc,
+ .free = mvebu_sei_domain_free,
+};
+
+static int mvebu_sei_ap_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_LEVEL_HIGH;
+
+ return 0;
+}
+
+static int mvebu_sei_ap_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ unsigned int type;
+ int err;
+
+ mvebu_sei_ap_translate(domain, arg, &hwirq, &type);
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->ap_range.first;
+
+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (err)
+ return err;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_ap_irq_chip, sei,
+ handle_level_irq, NULL, NULL);
+ irq_set_probe(virq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mvebu_sei_ap_domain_ops = {
+ .translate = mvebu_sei_ap_translate,
+ .alloc = mvebu_sei_ap_alloc,
+ .free = irq_domain_free_irqs_parent,
+};
+
+static void mvebu_sei_cp_release_irq(struct mvebu_sei *sei, unsigned long hwirq)
+{
+ mutex_lock(&sei->cp_msi_lock);
+ clear_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+}
+
+static int mvebu_sei_cp_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_fwspec fwspec;
+ unsigned long hwirq;
+ int ret;
+
+ /* The software only supports single allocations for now */
+ if (nr_irqs != 1)
+ return -ENOTSUPP;
+
+ mutex_lock(&sei->cp_msi_lock);
+ hwirq = find_first_zero_bit(sei->cp_msi_bitmap,
+ sei->caps->cp_range.size);
+ if (hwirq < sei->caps->cp_range.size)
+ set_bit(hwirq, sei->cp_msi_bitmap);
+ mutex_unlock(&sei->cp_msi_lock);
+
+ if (hwirq == sei->caps->cp_range.size)
+ return -ENOSPC;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 1;
+ fwspec.param[0] = hwirq + sei->caps->cp_range.first;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ goto free_irq;
+
+ irq_domain_set_info(domain, virq, hwirq,
+ &mvebu_sei_cp_irq_chip, sei,
+ handle_edge_irq, NULL, NULL);
+
+ return 0;
+
+free_irq:
+ mvebu_sei_cp_release_irq(sei, hwirq);
+ return ret;
+}
+
+static void mvebu_sei_cp_domain_free(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs)
+{
+ struct mvebu_sei *sei = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+ if (nr_irqs != 1 || d->hwirq >= sei->caps->cp_range.size) {
+ dev_err(sei->dev, "Invalid hwirq %lu\n", d->hwirq);
+ return;
+ }
+
+ mvebu_sei_cp_release_irq(sei, d->hwirq);
+ irq_domain_free_irqs_parent(domain, virq, 1);
+}
+
+static const struct irq_domain_ops mvebu_sei_cp_domain_ops = {
+ .alloc = mvebu_sei_cp_domain_alloc,
+ .free = mvebu_sei_cp_domain_free,
+};
+
+static struct irq_chip mvebu_sei_msi_irq_chip = {
+ .name = "SEI pMSI",
+ .irq_ack = irq_chip_ack_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+};
+
+static struct msi_domain_ops mvebu_sei_msi_ops = {
+};
+
+static struct msi_domain_info mvebu_sei_msi_domain_info = {
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
+ .ops = &mvebu_sei_msi_ops,
+ .chip = &mvebu_sei_msi_irq_chip,
+};
+
+static void mvebu_sei_handle_cascade_irq(struct irq_desc *desc)
+{
+ struct mvebu_sei *sei = irq_desc_get_handler_data(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ u32 idx;
+
+ chained_irq_enter(chip, desc);
+
+ for (idx = 0; idx < SEI_IRQ_REG_COUNT; idx++) {
+ unsigned long irqmap;
+ int bit;
+
+ irqmap = readl_relaxed(sei->base + GICP_SECR(idx));
+ for_each_set_bit(bit, &irqmap, SEI_IRQ_COUNT_PER_REG) {
+ unsigned long hwirq;
+ unsigned int virq;
+
+ hwirq = idx * SEI_IRQ_COUNT_PER_REG + bit;
+ virq = irq_find_mapping(sei->sei_domain, hwirq);
+ if (likely(virq)) {
+ generic_handle_irq(virq);
+ continue;
+ }
+
+ dev_warn(sei->dev,
+ "Spurious IRQ detected (hwirq %lu)\n", hwirq);
+ }
+ }
+
+ chained_irq_exit(chip, desc);
+}
+
+static void mvebu_sei_reset(struct mvebu_sei *sei)
+{
+ u32 reg_idx;
+
+ /* Clear IRQ cause registers, mask all interrupts */
+ for (reg_idx = 0; reg_idx < SEI_IRQ_REG_COUNT; reg_idx++) {
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SECR(reg_idx));
+ writel_relaxed(0xFFFFFFFF, sei->base + GICP_SEMR(reg_idx));
+ }
+}
+
+static int mvebu_sei_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct irq_domain *plat_domain;
+ struct mvebu_sei *sei;
+ u32 parent_irq;
+ int ret;
+
+ sei = devm_kzalloc(&pdev->dev, sizeof(*sei), GFP_KERNEL);
+ if (!sei)
+ return -ENOMEM;
+
+ sei->dev = &pdev->dev;
+
+ mutex_init(&sei->cp_msi_lock);
+ raw_spin_lock_init(&sei->mask_lock);
+
+ sei->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sei->base = devm_ioremap_resource(sei->dev, sei->res);
+ if (!sei->base) {
+ dev_err(sei->dev, "Failed to remap SEI resource\n");
+ return -ENODEV;
+ }
+
+ /* Retrieve the SEI capabilities with the interrupt ranges */
+ sei->caps = of_device_get_match_data(&pdev->dev);
+ if (!sei->caps) {
+ dev_err(sei->dev,
+ "Could not retrieve controller capabilities\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Reserve the single (top-level) parent SPI IRQ from which all the
+ * interrupts handled by this driver will be signaled.
+ */
+ parent_irq = irq_of_parse_and_map(node, 0);
+ if (parent_irq <= 0) {
+ dev_err(sei->dev, "Failed to retrieve top-level SPI IRQ\n");
+ return -ENODEV;
+ }
+
+ /* Create the root SEI domain */
+ sei->sei_domain = irq_domain_create_linear(of_node_to_fwnode(node),
+ (sei->caps->ap_range.size +
+ sei->caps->cp_range.size),
+ &mvebu_sei_domain_ops,
+ sei);
+ if (!sei->sei_domain) {
+ dev_err(sei->dev, "Failed to create SEI IRQ domain\n");
+ ret = -ENOMEM;
+ goto dispose_irq;
+ }
+
+ irq_domain_update_bus_token(sei->sei_domain, DOMAIN_BUS_NEXUS);
+
+ /* Create the 'wired' domain */
+ sei->ap_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->ap_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_ap_domain_ops,
+ sei);
+ if (!sei->ap_domain) {
+ dev_err(sei->dev, "Failed to create AP IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_sei_domain;
+ }
+
+ irq_domain_update_bus_token(sei->ap_domain, DOMAIN_BUS_WIRED);
+
+ /* Create the 'MSI' domain */
+ sei->cp_domain = irq_domain_create_hierarchy(sei->sei_domain, 0,
+ sei->caps->cp_range.size,
+ of_node_to_fwnode(node),
+ &mvebu_sei_cp_domain_ops,
+ sei);
+ if (!sei->cp_domain) {
+ pr_err("Failed to create CPs IRQ domain\n");
+ ret = -ENOMEM;
+ goto remove_ap_domain;
+ }
+
+ irq_domain_update_bus_token(sei->cp_domain, DOMAIN_BUS_GENERIC_MSI);
+
+ plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
+ &mvebu_sei_msi_domain_info,
+ sei->cp_domain);
+ if (!plat_domain) {
+ pr_err("Failed to create CPs MSI domain\n");
+ ret = -ENOMEM;
+ goto remove_cp_domain;
+ }
+
+ mvebu_sei_reset(sei);
+
+ irq_set_chained_handler_and_data(parent_irq,
+ mvebu_sei_handle_cascade_irq,
+ sei);
+
+ return 0;
+
+remove_cp_domain:
+ irq_domain_remove(sei->cp_domain);
+remove_ap_domain:
+ irq_domain_remove(sei->ap_domain);
+remove_sei_domain:
+ irq_domain_remove(sei->sei_domain);
+dispose_irq:
+ irq_dispose_mapping(parent_irq);
+
+ return ret;
+}
+
+struct mvebu_sei_caps mvebu_sei_ap806_caps = {
+ .ap_range = {
+ .first = 0,
+ .size = 21,
+ },
+ .cp_range = {
+ .first = 21,
+ .size = 43,
+ },
+};
+
+static const struct of_device_id mvebu_sei_of_match[] = {
+ {
+ .compatible = "marvell,ap806-sei",
+ .data = &mvebu_sei_ap806_caps,
+ },
+ {},
+};
+
+static struct platform_driver mvebu_sei_driver = {
+ .probe = mvebu_sei_probe,
+ .driver = {
+ .name = "mvebu-sei",
+ .of_match_table = mvebu_sei_of_match,
+ },
+};
+builtin_platform_driver(mvebu_sei_driver);
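
For reference, a minimal sketch of a CP-side consumer allocating one of these SEI interrupts through the platform-MSI layer (needs linux/msi.h, linux/interrupt.h and linux/platform_device.h; all my_*-prefixed names and the doorbell programming are illustrative assumptions, not from this driver):

    static irqreturn_t my_handler(int irq, void *data)
    {
            /* Handle the event signalled through the SEI */
            return IRQ_HANDLED;
    }

    static void my_sei_write_msg(struct msi_desc *desc, struct msi_msg *msg)
    {
            /* Program the CP doorbell so the hardware writes msg->data
             * to the SEI's GICP_SET_SEI register at address_hi/lo. */
    }

    static int my_cp_probe(struct platform_device *pdev)
    {
            struct msi_desc *desc;
            int ret;

            ret = platform_msi_domain_alloc_irqs(&pdev->dev, 1,
                                                 my_sei_write_msg);
            if (ret)
                    return ret;

            for_each_msi_entry(desc, &pdev->dev) {
                    ret = request_irq(desc->irq, my_handler, 0,
                                      "my-cp-sei", pdev);
                    if (ret)
                            return ret;
            }

            return 0;
    }
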
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 532e9d68c704..357e9daf94ae 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -15,6 +15,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
+#include <asm/smp.h>
/*
* This driver implements a version of the RISC-V PLIC with the actual layout
@@ -176,7 +177,7 @@ static int plic_find_hart_id(struct device_node *node)
{
for (; node; node = node->parent) {
if (of_device_is_compatible(node, "riscv"))
- return riscv_of_processor_hart(node);
+ return riscv_of_processor_hartid(node);
}
return -1;
@@ -218,7 +219,7 @@ static int __init plic_init(struct device_node *node,
struct of_phandle_args parent;
struct plic_handler *handler;
irq_hw_number_t hwirq;
- int cpu;
+ int cpu, hartid;
if (of_irq_parse_one(node, i, &parent)) {
pr_err("failed to parse parent for context %d.\n", i);
@@ -229,12 +230,13 @@ static int __init plic_init(struct device_node *node,
if (parent.args[0] == -1)
continue;
- cpu = plic_find_hart_id(parent.np);
- if (cpu < 0) {
+ hartid = plic_find_hart_id(parent.np);
+ if (hartid < 0) {
pr_warn("failed to parse hart ID for context %d.\n", i);
continue;
}
+ cpu = riscv_hartid_to_cpuid(hartid);
handler = per_cpu_ptr(&plic_handlers, cpu);
handler->present = true;
handler->ctxid = i;
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index b1b47a40a278..faa7d61b9d6c 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -124,6 +124,7 @@ static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
break;
case IRQ_TYPE_EDGE_BOTH:
pdc_type = PDC_EDGE_DUAL;
+ type = IRQ_TYPE_EDGE_RISING;
break;
case IRQ_TYPE_LEVEL_HIGH:
pdc_type = PDC_LEVEL_HIGH;
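
This fix-up matters because the PDC performs dual-edge detection itself, while its GIC parent can only be programmed for a single polarity; rewriting the type to IRQ_TYPE_EDGE_RISING before it propagates up the hierarchy keeps the parent's configuration valid while PDC_EDGE_DUAL catches both edges.
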
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index ca623e6446e4..fca31640e3ef 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -20,13 +20,13 @@
#include <linux/init.h>
#include <linux/proc_fs.h>
-#include <asm/macintosh.h>
-#include <asm/macints.h>
+#include <asm/macintosh.h>
+#include <asm/macints.h>
#include <asm/mac_iop.h>
#include <asm/mac_oss.h>
#include <asm/adb_iop.h>
-#include <linux/adb.h>
+#include <linux/adb.h>
/*#define DEBUG_ADB_IOP*/
@@ -38,9 +38,9 @@ static unsigned char *reply_ptr;
#endif
static enum adb_iop_state {
- idle,
- sending,
- awaiting_reply
+ idle,
+ sending,
+ awaiting_reply
} adb_iop_state;
static void adb_iop_start(void);
@@ -66,7 +66,8 @@ static void adb_iop_end_req(struct adb_request *req, int state)
{
req->complete = 1;
current_req = req->next;
- if (req->done) (*req->done)(req);
+ if (req->done)
+ (*req->done)(req);
adb_iop_state = state;
}
@@ -100,7 +101,7 @@ static void adb_iop_complete(struct iop_msg *msg)
static void adb_iop_listen(struct iop_msg *msg)
{
- struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
+ struct adb_iopmsg *amsg = (struct adb_iopmsg *)msg->message;
struct adb_request *req;
unsigned long flags;
#ifdef DEBUG_ADB_IOP
@@ -113,9 +114,9 @@ static void adb_iop_listen(struct iop_msg *msg)
#ifdef DEBUG_ADB_IOP
printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req,
- (uint) amsg->count + 2, (uint) amsg->flags, (uint) amsg->cmd);
+ (uint)amsg->count + 2, (uint)amsg->flags, (uint)amsg->cmd);
for (i = 0; i < amsg->count; i++)
- printk(" %02X", (uint) amsg->data[i]);
+ printk(" %02X", (uint)amsg->data[i]);
printk("\n");
#endif
@@ -168,14 +169,15 @@ static void adb_iop_start(void)
/* get the packet to send */
req = current_req;
- if (!req) return;
+ if (!req)
+ return;
local_irq_save(flags);
#ifdef DEBUG_ADB_IOP
printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes);
- for (i = 0 ; i < req->nbytes ; i++)
- printk(" %02X", (uint) req->data[i]);
+ for (i = 0; i < req->nbytes; i++)
+ printk(" %02X", (uint)req->data[i]);
printk("\n");
#endif
@@ -196,19 +198,20 @@ static void adb_iop_start(void)
/* Now send it. The IOP manager will call adb_iop_complete */
/* when the packet has been sent. */
- iop_send_message(ADB_IOP, ADB_CHAN, req,
- sizeof(amsg), (__u8 *) &amsg, adb_iop_complete);
+ iop_send_message(ADB_IOP, ADB_CHAN, req, sizeof(amsg), (__u8 *)&amsg,
+ adb_iop_complete);
}
int adb_iop_probe(void)
{
- if (!iop_ism_present) return -ENODEV;
+ if (!iop_ism_present)
+ return -ENODEV;
return 0;
}
int adb_iop_init(void)
{
- printk("adb: IOP ISM driver v0.4 for Unified ADB.\n");
+ pr_info("adb: IOP ISM driver v0.4 for Unified ADB\n");
iop_listen(ADB_IOP, ADB_CHAN, adb_iop_listen, "ADB");
return 0;
}
@@ -218,10 +221,12 @@ int adb_iop_send_request(struct adb_request *req, int sync)
int err;
err = adb_iop_write(req);
- if (err) return err;
+ if (err)
+ return err;
if (sync) {
- while (!req->complete) adb_iop_poll();
+ while (!req->complete)
+ adb_iop_poll();
}
return 0;
}
@@ -251,7 +256,9 @@ static int adb_iop_write(struct adb_request *req)
}
local_irq_restore(flags);
- if (adb_iop_state == idle) adb_iop_start();
+
+ if (adb_iop_state == idle)
+ adb_iop_start();
return 0;
}
@@ -263,7 +270,8 @@ int adb_iop_autopoll(int devs)
void adb_iop_poll(void)
{
- if (adb_iop_state == idle) adb_iop_start();
+ if (adb_iop_state == idle)
+ adb_iop_start();
iop_ism_irq_poll(ADB_IOP);
}
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 76e98f0f7a3e..e49d1f287a17 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -203,15 +203,15 @@ static int adb_scan_bus(void)
}
/* Now fill in the handler_id field of the adb_handler entries. */
- pr_debug("adb devices:\n");
for (i = 1; i < 16; i++) {
if (adb_handler[i].original_address == 0)
continue;
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
(i << 4) | 0xf);
adb_handler[i].handler_id = req.reply[2];
- pr_debug(" [%d]: %d %x\n", i, adb_handler[i].original_address,
- adb_handler[i].handler_id);
+ printk(KERN_DEBUG "adb device [%d]: %d 0x%X\n", i,
+ adb_handler[i].original_address,
+ adb_handler[i].handler_id);
devmask |= 1 << i;
}
return devmask;
@@ -579,6 +579,8 @@ adb_try_handler_change(int address, int new_id)
mutex_lock(&adb_handler_mutex);
ret = try_handler_change(address, new_id);
mutex_unlock(&adb_handler_mutex);
+ if (ret)
+ pr_debug("adb handler change: [%d] 0x%X\n", address, new_id);
return ret;
}
EXPORT_SYMBOL(adb_try_handler_change);
diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c
index a261892c03b3..75482eeab2c4 100644
--- a/drivers/macintosh/adbhid.c
+++ b/drivers/macintosh/adbhid.c
@@ -757,6 +757,7 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
struct input_dev *input_dev;
int err;
int i;
+ char *keyboard_type;
if (adbhid[id]) {
pr_err("Trying to reregister ADB HID on ID %d\n", id);
@@ -798,24 +799,23 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
memcpy(hid->keycode, adb_to_linux_keycodes, sizeof(adb_to_linux_keycodes));
- pr_info("Detected ADB keyboard, type ");
switch (original_handler_id) {
default:
- pr_cont("<unknown>.\n");
+ keyboard_type = "<unknown>";
input_dev->id.version = ADB_KEYBOARD_UNKNOWN;
break;
case 0x01: case 0x02: case 0x03: case 0x06: case 0x08:
case 0x0C: case 0x10: case 0x18: case 0x1B: case 0x1C:
case 0xC0: case 0xC3: case 0xC6:
- pr_cont("ANSI.\n");
+ keyboard_type = "ANSI";
input_dev->id.version = ADB_KEYBOARD_ANSI;
break;
case 0x04: case 0x05: case 0x07: case 0x09: case 0x0D:
case 0x11: case 0x14: case 0x19: case 0x1D: case 0xC1:
case 0xC4: case 0xC7:
- pr_cont("ISO, swapping keys.\n");
+ keyboard_type = "ISO, swapping keys";
input_dev->id.version = ADB_KEYBOARD_ISO;
i = hid->keycode[10];
hid->keycode[10] = hid->keycode[50];
@@ -824,10 +824,11 @@ adbhid_input_register(int id, int default_id, int original_handler_id,
case 0x12: case 0x15: case 0x16: case 0x17: case 0x1A:
case 0x1E: case 0xC2: case 0xC5: case 0xC8: case 0xC9:
- pr_cont("JIS.\n");
+ keyboard_type = "JIS";
input_dev->id.version = ADB_KEYBOARD_JIS;
break;
}
+ pr_info("Detected ADB keyboard, type %s.\n", keyboard_type);
for (i = 0; i < 128; i++)
if (hid->keycode[i])
@@ -972,16 +973,13 @@ adbhid_probe(void)
->get it to send separate codes for left and right shift,
control, option keys */
#if 0 /* handler 5 doesn't send separate codes for R modifiers */
- if (adb_try_handler_change(id, 5))
- printk("ADB keyboard at %d, handler set to 5\n", id);
- else
+ if (!adb_try_handler_change(id, 5))
#endif
- if (adb_try_handler_change(id, 3))
- printk("ADB keyboard at %d, handler set to 3\n", id);
- else
- printk("ADB keyboard at %d, handler 1\n", id);
+ adb_try_handler_change(id, 3);
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB keyboard at %d has handler 0x%X\n",
+ id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, 0);
}
@@ -999,48 +997,44 @@ adbhid_probe(void)
for (i = 0; i < mouse_ids.nids; i++) {
int id = mouse_ids.id[i];
int mouse_kind;
+ char *desc = "standard";
adb_get_infos(id, &default_id, &org_handler_id);
if (adb_try_handler_change(id, 4)) {
- printk("ADB mouse at %d, handler set to 4", id);
mouse_kind = ADBMOUSE_EXTENDED;
}
else if (adb_try_handler_change(id, 0x2F)) {
- printk("ADB mouse at %d, handler set to 0x2F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x42)) {
- printk("ADB mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_TRACKBALLPRO;
}
else if (adb_try_handler_change(id, 0x66)) {
- printk("ADB mouse at %d, handler set to 0x66", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 0x5F)) {
- printk("ADB mouse at %d, handler set to 0x5F", id);
mouse_kind = ADBMOUSE_MICROSPEED;
}
else if (adb_try_handler_change(id, 3)) {
- printk("ADB mouse at %d, handler set to 3", id);
mouse_kind = ADBMOUSE_MS_A3;
}
else if (adb_try_handler_change(id, 2)) {
- printk("ADB mouse at %d, handler set to 2", id);
mouse_kind = ADBMOUSE_STANDARD_200;
}
else {
- printk("ADB mouse at %d, handler 1", id);
mouse_kind = ADBMOUSE_STANDARD_100;
}
if ((mouse_kind == ADBMOUSE_TRACKBALLPRO)
|| (mouse_kind == ADBMOUSE_MICROSPEED)) {
+ desc = "Microspeed/MacPoint or compatible";
init_microspeed(id);
} else if (mouse_kind == ADBMOUSE_MS_A3) {
+ desc = "Mouse Systems A3 Mouse or compatible";
init_ms_a3(id);
} else if (mouse_kind == ADBMOUSE_EXTENDED) {
+ desc = "extended";
/*
* Register 1 is usually used for device
* identification. Here, we try to identify
@@ -1054,32 +1048,36 @@ adbhid_probe(void)
(req.reply[1] == 0x9a) && ((req.reply[2] == 0x21)
|| (req.reply[2] == 0x20))) {
mouse_kind = ADBMOUSE_TRACKBALL;
+ desc = "trackman/mouseman";
init_trackball(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x74) && (req.reply[2] == 0x70) &&
(req.reply[3] == 0x61) && (req.reply[4] == 0x64)) {
mouse_kind = ADBMOUSE_TRACKPAD;
+ desc = "trackpad";
init_trackpad(id);
}
else if ((req.reply_len >= 4) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) &&
(req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) {
mouse_kind = ADBMOUSE_TURBOMOUSE5;
+ desc = "TurboMouse 5";
init_turbomouse(id);
}
else if ((req.reply_len == 9) &&
(req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) &&
(req.reply[3] == 0x49) && (req.reply[4] == 0x54)) {
if (adb_try_handler_change(id, 0x42)) {
- pr_cont("\nADB MacAlly 2-button mouse at %d, handler set to 0x42", id);
mouse_kind = ADBMOUSE_MACALLY2;
+ desc = "MacAlly 2-button";
}
}
}
- pr_cont("\n");
adb_get_infos(id, &default_id, &cur_handler_id);
+ printk(KERN_DEBUG "ADB mouse (%s) at %d has handler 0x%X\n",
+ desc, id, cur_handler_id);
reg |= adbhid_input_reregister(id, default_id, org_handler_id,
cur_handler_id, mouse_kind);
}
@@ -1092,12 +1090,10 @@ init_trackpad(int id)
struct adb_request req;
unsigned char r1_buffer[8];
- pr_cont(" (trackpad)");
-
adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
ADB_READREG(id,1));
if (req.reply_len < 8)
- pr_cont("bad length for reg. 1\n");
+ pr_err("%s: bad length for reg. 1\n", __func__);
else
{
memcpy(r1_buffer, &req.reply[1], 8);
@@ -1145,8 +1141,6 @@ init_trackball(int id)
{
struct adb_request req;
- pr_cont(" (trackman/mouseman)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id,1), 00,0x81);
@@ -1177,8 +1171,6 @@ init_turbomouse(int id)
{
struct adb_request req;
- pr_cont(" (TurboMouse 5)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(3));
@@ -1213,8 +1205,6 @@ init_microspeed(int id)
{
struct adb_request req;
- pr_cont(" (Microspeed/MacPoint or compatible)");
-
adb_request(&req, NULL, ADBREQ_SYNC, 1, ADB_FLUSH(id));
/* This will initialize mice using the Microspeed, MacPoint and
@@ -1253,7 +1243,6 @@ init_ms_a3(int id)
{
struct adb_request req;
- pr_cont(" (Mouse Systems A3 Mouse, or compatible)");
adb_request(&req, NULL, ADBREQ_SYNC, 3,
ADB_WRITEREG(id, 0x2),
0x00,
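
[Note] The adbhid changes above replace pr_cont() continuation output with one complete message per line: on SMP, continuation fragments from concurrent printk() callers can interleave, so the variable part is collected first and printed once. A minimal sketch of the pattern (hypothetical handler IDs, not the driver's full table):

#include <linux/printk.h>

static void report_keyboard_type(int handler_id)
{
	const char *type = "<unknown>";		/* fallback, as in the driver */

	switch (handler_id) {
	case 0x01:
		type = "ANSI";
		break;
	case 0x04:
		type = "ISO";
		break;
	}
	/* a single complete line; nothing for another CPU to interleave */
	pr_info("Detected ADB keyboard, type %s.\n", type);
}
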
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 07074820a167..17d3bc917562 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -360,9 +360,10 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
struct macio_dev *in_bay,
struct resource *parent_res)
{
+ char name[MAX_NODE_NAME_SIZE + 1];
struct macio_dev *dev;
const u32 *reg;
-
+
if (np == NULL)
return NULL;
@@ -402,6 +403,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#endif
/* MacIO itself has a different reg, we use its PCI base */
+ snprintf(name, sizeof(name), "%pOFn", np);
if (np == chip->of_node) {
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
@@ -410,12 +412,12 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
#else
0, /* NuBus may want to do something better here */
#endif
- MAX_NODE_NAME_SIZE, np->name);
+ MAX_NODE_NAME_SIZE, name);
} else {
reg = of_get_property(np, "reg", NULL);
dev_set_name(&dev->ofdev.dev, "%1d.%08x:%.*s",
chip->lbus.index,
- reg ? *reg : 0, MAX_NODE_NAME_SIZE, np->name);
+ reg ? *reg : 0, MAX_NODE_NAME_SIZE, name);
}
/* Setup interrupts & resources */
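
[Note] %pOFn is the vsprintf extension that prints a device-tree node's name (%pOF prints its full path), which is what lets the code above stop touching np->name directly. The name can also be captured into a buffer first, as done here; a short sketch assuming np is a valid struct device_node pointer:

	char name[MAX_NODE_NAME_SIZE + 1];

	snprintf(name, sizeof(name), "%pOFn", np);	/* e.g. "mac-io" */
	pr_debug("adding node %pOFn (path %pOF)\n", np, np);
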
diff --git a/drivers/macintosh/macio_sysfs.c b/drivers/macintosh/macio_sysfs.c
index ca4fcffe454b..d2451e58acb9 100644
--- a/drivers/macintosh/macio_sysfs.c
+++ b/drivers/macintosh/macio_sysfs.c
@@ -58,7 +58,13 @@ static ssize_t devspec_show(struct device *dev,
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RO(devspec);
-macio_config_of_attr (name, "%s\n");
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%pOFn\n", dev->of_node);
+}
+static DEVICE_ATTR_RO(name);
+
macio_config_of_attr (type, "%s\n");
static struct attribute *macio_dev_attrs[] = {
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 98dd702eb867..bbec6ac0a966 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -766,3 +766,38 @@ cuda_input(unsigned char *buf, int nb)
buf, nb, false);
}
}
+
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t cuda_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+ return 0;
+ while (!req.complete)
+ cuda_poll();
+ if (req.reply_len != 7)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[3] << 24) + (req.reply[4] << 16) +
+ (req.reply[5] << 8) + req.reply[6];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int cuda_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ while (!req.complete)
+ cuda_poll();
+ if ((req.reply_len != 3) && (req.reply_len != 7))
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
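
[Note] The RTC_OFFSET constant checks out by hand: 1904-01-01 to 1970-01-01 spans 66 years, 17 of them leap years (1904, 1908, ..., 1968), so the offset is (66 * 365 + 17) * 86400 = 24107 * 86400 = 2082844800 seconds. As a compile-time sanity check (sketch):

#define SECS_PER_DAY	86400
#define DAYS_1904_1970	(66 * 365 + 17)		/* 24107 days */
_Static_assert((long long)DAYS_1904_1970 * SECS_PER_DAY == 2082844800LL,
	       "RTC_OFFSET arithmetic check");
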
diff --git a/drivers/macintosh/via-macii.c b/drivers/macintosh/via-macii.c
index cf6f7d52d6be..ac824d7b2dcf 100644
--- a/drivers/macintosh/via-macii.c
+++ b/drivers/macintosh/via-macii.c
@@ -12,7 +12,7 @@
*
* 1999-08-02 (jmt) - Initial rewrite for Unified ADB.
* 2000-03-29 Tony Mantler <tonym@mac.linux-m68k.org>
- * - Big overhaul, should actually work now.
+ * - Big overhaul, should actually work now.
* 2006-12-31 Finn Thain - Another overhaul.
*
* Suggested reading:
@@ -23,7 +23,7 @@
* Apple's "ADB Analyzer" bus sniffer is invaluable:
* ftp://ftp.apple.com/developer/Tool_Chest/Devices_-_Hardware/Apple_Desktop_Bus/
*/
-
+
#include <stdarg.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -77,7 +77,7 @@ static volatile unsigned char *via;
#define ST_ODD 0x20 /* ADB state: odd data byte */
#define ST_IDLE 0x30 /* ADB state: idle, nothing to send */
-static int macii_init_via(void);
+static int macii_init_via(void);
static void macii_start(void);
static irqreturn_t macii_interrupt(int irq, void *arg);
static void macii_queue_poll(void);
@@ -120,31 +120,15 @@ static int srq_asserted; /* have to poll for the device that asserted it */
static int command_byte; /* the most recent command byte transmitted */
static int autopoll_devs; /* bits set are device addresses to be polled */
-/* Sanity check for request queue. Doesn't check for cycles. */
-static int request_is_queued(struct adb_request *req) {
- struct adb_request *cur;
- unsigned long flags;
- local_irq_save(flags);
- cur = current_req;
- while (cur) {
- if (cur == req) {
- local_irq_restore(flags);
- return 1;
- }
- cur = cur->next;
- }
- local_irq_restore(flags);
- return 0;
-}
-
/* Check for MacII style ADB */
static int macii_probe(void)
{
- if (macintosh_config->adb_type != MAC_ADB_II) return -ENODEV;
+ if (macintosh_config->adb_type != MAC_ADB_II)
+ return -ENODEV;
via = via1;
- printk("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
+ pr_info("adb: Mac II ADB Driver v1.0 for Unified ADB\n");
return 0;
}
@@ -153,15 +137,17 @@ int macii_init(void)
{
unsigned long flags;
int err;
-
+
local_irq_save(flags);
-
+
err = macii_init_via();
- if (err) goto out;
+ if (err)
+ goto out;
err = request_irq(IRQ_MAC_ADB, macii_interrupt, 0, "ADB",
macii_interrupt);
- if (err) goto out;
+ if (err)
+ goto out;
macii_state = idle;
out:
@@ -169,7 +155,7 @@ out:
return err;
}
-/* initialize the hardware */
+/* initialize the hardware */
static int macii_init_via(void)
{
unsigned char x;
@@ -179,7 +165,7 @@ static int macii_init_via(void)
/* Set up state: idle */
via[B] |= ST_IDLE;
- last_status = via[B] & (ST_MASK|CTLR_IRQ);
+ last_status = via[B] & (ST_MASK | CTLR_IRQ);
/* Shift register on input */
via[ACR] = (via[ACR] & ~SR_CTRL) | SR_EXT;
@@ -205,7 +191,8 @@ static void macii_queue_poll(void)
int next_device;
static struct adb_request req;
- if (!autopoll_devs) return;
+ if (!autopoll_devs)
+ return;
device_mask = (1 << (((command_byte & 0xF0) >> 4) + 1)) - 1;
if (autopoll_devs & ~device_mask)
@@ -213,10 +200,7 @@ static void macii_queue_poll(void)
else
next_device = ffs(autopoll_devs) - 1;
- BUG_ON(request_is_queued(&req));
-
- adb_request(&req, NULL, ADBREQ_NOSEND, 1,
- ADB_READREG(next_device, 0));
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_READREG(next_device, 0));
req.sent = 0;
req.complete = 0;
@@ -235,45 +219,47 @@ static void macii_queue_poll(void)
static int macii_send_request(struct adb_request *req, int sync)
{
int err;
- unsigned long flags;
- BUG_ON(request_is_queued(req));
-
- local_irq_save(flags);
err = macii_write(req);
- local_irq_restore(flags);
+ if (err)
+ return err;
- if (!err && sync) {
- while (!req->complete) {
+ if (sync)
+ while (!req->complete)
macii_poll();
- }
- BUG_ON(request_is_queued(req));
- }
- return err;
+ return 0;
}
/* Send an ADB request (append to request queue) */
static int macii_write(struct adb_request *req)
{
+ unsigned long flags;
+
if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) {
req->complete = 1;
return -EINVAL;
}
-
+
req->next = NULL;
req->sent = 0;
req->complete = 0;
req->reply_len = 0;
+ local_irq_save(flags);
+
if (current_req != NULL) {
last_req->next = req;
last_req = req;
} else {
current_req = req;
last_req = req;
- if (macii_state == idle) macii_start();
+ if (macii_state == idle)
+ macii_start();
}
+
+ local_irq_restore(flags);
+
return 0;
}
@@ -287,7 +273,8 @@ static int macii_autopoll(int devs)
/* bit 1 == device 1, and so on. */
autopoll_devs = devs & 0xFFFE;
- if (!autopoll_devs) return 0;
+ if (!autopoll_devs)
+ return 0;
local_irq_save(flags);
@@ -304,7 +291,8 @@ static int macii_autopoll(int devs)
return err;
}
-static inline int need_autopoll(void) {
+static inline int need_autopoll(void)
+{
/* Was the last command Talk Reg 0
* and is the target on the autopoll list?
*/
@@ -317,21 +305,17 @@ static inline int need_autopoll(void) {
/* Prod the chip without interrupts */
static void macii_poll(void)
{
- disable_irq(IRQ_MAC_ADB);
macii_interrupt(0, NULL);
- enable_irq(IRQ_MAC_ADB);
}
/* Reset the bus */
static int macii_reset_bus(void)
{
static struct adb_request req;
-
- if (request_is_queued(&req))
- return 0;
/* Command = 0, Address = ignored */
- adb_request(&req, NULL, 0, 1, ADB_BUSRESET);
+ adb_request(&req, NULL, ADBREQ_NOSEND, 1, ADB_BUSRESET);
+ macii_send_request(&req, 1);
/* Don't want any more requests during the Global Reset low time. */
udelay(3000);
@@ -346,10 +330,6 @@ static void macii_start(void)
req = current_req;
- BUG_ON(req == NULL);
-
- BUG_ON(macii_state != idle);
-
/* Now send it. Be careful though, that first byte of the request
* is actually ADB_PACKET; the real data begins at index 1!
* And req->nbytes is the number of bytes of real data plus one.
@@ -375,7 +355,7 @@ static void macii_start(void)
* to be activity on the ADB bus. The chip will poll to achieve this.
*
* The basic ADB state machine was left unchanged from the original MacII code
- * by Alan Cox, which was based on the CUDA driver for PowerMac.
+ * by Alan Cox, which was based on the CUDA driver for PowerMac.
* The syntax of the ADB status lines is totally different on MacII,
* though. MacII uses the states Command -> Even -> Odd -> Even ->...-> Idle
* for sending and Idle -> Even -> Odd -> Even ->...-> Idle for receiving.
@@ -387,164 +367,166 @@ static void macii_start(void)
static irqreturn_t macii_interrupt(int irq, void *arg)
{
int x;
- static int entered;
struct adb_request *req;
+ unsigned long flags;
+
+ local_irq_save(flags);
if (!arg) {
/* Clear the SR IRQ flag when polling. */
if (via[IFR] & SR_INT)
via[IFR] = SR_INT;
- else
+ else {
+ local_irq_restore(flags);
return IRQ_NONE;
+ }
}
- BUG_ON(entered++);
-
last_status = status;
- status = via[B] & (ST_MASK|CTLR_IRQ);
+ status = via[B] & (ST_MASK | CTLR_IRQ);
switch (macii_state) {
- case idle:
- if (reading_reply) {
- reply_ptr = current_req->reply;
- } else {
- BUG_ON(current_req != NULL);
- reply_ptr = reply_buf;
- }
+ case idle:
+ if (reading_reply) {
+ reply_ptr = current_req->reply;
+ } else {
+ WARN_ON(current_req);
+ reply_ptr = reply_buf;
+ }
- x = via[SR];
+ x = via[SR];
- if ((status & CTLR_IRQ) && (x == 0xFF)) {
- /* Bus timeout without SRQ sequence:
- * data is "FF" while CTLR_IRQ is "H"
- */
- reply_len = 0;
- srq_asserted = 0;
- macii_state = read_done;
- } else {
- macii_state = reading;
- *reply_ptr = x;
- reply_len = 1;
- }
+ if ((status & CTLR_IRQ) && (x == 0xFF)) {
+ /* Bus timeout without SRQ sequence:
+ * data is "FF" while CTLR_IRQ is "H"
+ */
+ reply_len = 0;
+ srq_asserted = 0;
+ macii_state = read_done;
+ } else {
+ macii_state = reading;
+ *reply_ptr = x;
+ reply_len = 1;
+ }
- /* set ADB state = even for first data byte */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- break;
+ /* set ADB state = even for first data byte */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ break;
- case sending:
- req = current_req;
- if (data_index >= req->nbytes) {
- req->sent = 1;
- macii_state = idle;
-
- if (req->reply_expected) {
- reading_reply = 1;
- } else {
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
-
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
- }
+ case sending:
+ req = current_req;
+ if (data_index >= req->nbytes) {
+ req->sent = 1;
+ macii_state = idle;
- if (macii_state == idle) {
- /* reset to shift in */
- via[ACR] &= ~SR_OUT;
- x = via[SR];
- /* set ADB state idle - might get SRQ */
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- }
+ if (req->reply_expected) {
+ reading_reply = 1;
} else {
- via[SR] = req->data[data_index++];
-
- if ( (via[B] & ST_MASK) == ST_CMD ) {
- /* just sent the command byte, set to EVEN */
- via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
- } else {
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- }
- }
- break;
-
- case reading:
- x = via[SR];
- BUG_ON((status & ST_MASK) == ST_CMD ||
- (status & ST_MASK) == ST_IDLE);
-
- /* Bus timeout with SRQ sequence:
- * data is "XX FF" while CTLR_IRQ is "L L"
- * End of packet without SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
- * End of packet SRQ sequence:
- * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
- * (where XX is the first response byte and
- * YY is the last byte of valid response data.)
- */
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
- srq_asserted = 0;
- if (!(status & CTLR_IRQ)) {
- if (x == 0xFF) {
- if (!(last_status & CTLR_IRQ)) {
- macii_state = read_done;
- reply_len = 0;
- srq_asserted = 1;
- }
- } else if (x == 0x00) {
- macii_state = read_done;
- if (!(last_status & CTLR_IRQ))
- srq_asserted = 1;
- }
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
}
- if (macii_state == reading) {
- BUG_ON(reply_len > 15);
- reply_ptr++;
- *reply_ptr = x;
- reply_len++;
+ if (macii_state == idle) {
+ /* reset to shift in */
+ via[ACR] &= ~SR_OUT;
+ x = via[SR];
+ /* set ADB state idle - might get SRQ */
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
}
+ } else {
+ via[SR] = req->data[data_index++];
- /* invert state bits, toggle ODD/EVEN */
- via[B] ^= ST_MASK;
- break;
+ if ((via[B] & ST_MASK) == ST_CMD) {
+ /* just sent the command byte, set to EVEN */
+ via[B] = (via[B] & ~ST_MASK) | ST_EVEN;
+ } else {
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ }
+ }
+ break;
- case read_done:
- x = via[SR];
+ case reading:
+ x = via[SR];
+ WARN_ON((status & ST_MASK) == ST_CMD ||
+ (status & ST_MASK) == ST_IDLE);
+
+ /* Bus timeout with SRQ sequence:
+ * data is "XX FF" while CTLR_IRQ is "L L"
+ * End of packet without SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...H L"
+ * End of packet SRQ sequence:
+ * data is "XX...YY 00" while CTLR_IRQ is "L...L L"
+ * (where XX is the first response byte and
+ * YY is the last byte of valid response data.)
+ */
- if (reading_reply) {
- reading_reply = 0;
- req = current_req;
- req->reply_len = reply_len;
- req->complete = 1;
- current_req = req->next;
- if (req->done) (*req->done)(req);
- } else if (reply_len && autopoll_devs)
- adb_input(reply_buf, reply_len, 0);
+ srq_asserted = 0;
+ if (!(status & CTLR_IRQ)) {
+ if (x == 0xFF) {
+ if (!(last_status & CTLR_IRQ)) {
+ macii_state = read_done;
+ reply_len = 0;
+ srq_asserted = 1;
+ }
+ } else if (x == 0x00) {
+ macii_state = read_done;
+ if (!(last_status & CTLR_IRQ))
+ srq_asserted = 1;
+ }
+ }
- macii_state = idle;
+ if (macii_state == reading &&
+ reply_len < ARRAY_SIZE(reply_buf)) {
+ reply_ptr++;
+ *reply_ptr = x;
+ reply_len++;
+ }
- /* SRQ seen before, initiate poll now */
- if (srq_asserted)
- macii_queue_poll();
+ /* invert state bits, toggle ODD/EVEN */
+ via[B] ^= ST_MASK;
+ break;
- if (current_req)
- macii_start();
- else
- if (need_autopoll())
- macii_autopoll(autopoll_devs);
+ case read_done:
+ x = via[SR];
- if (macii_state == idle)
- via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
- break;
+ if (reading_reply) {
+ reading_reply = 0;
+ req = current_req;
+ req->reply_len = reply_len;
+ req->complete = 1;
+ current_req = req->next;
+ if (req->done)
+ (*req->done)(req);
+ } else if (reply_len && autopoll_devs)
+ adb_input(reply_buf, reply_len, 0);
+
+ macii_state = idle;
+
+ /* SRQ seen before, initiate poll now */
+ if (srq_asserted)
+ macii_queue_poll();
+
+ if (current_req)
+ macii_start();
+ else if (need_autopoll())
+ macii_autopoll(autopoll_devs);
+
+ if (macii_state == idle)
+ via[B] = (via[B] & ~ST_MASK) | ST_IDLE;
+ break;
- default:
+ default:
break;
}
- entered--;
+ local_irq_restore(flags);
return IRQ_HANDLED;
}
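
[Note] Two hardening points in the via-macii rewrite: the interrupt handler now serializes itself with local_irq_save() instead of a BUG_ON()-backed reentrancy counter, and reply accumulation is bounds-checked instead of trusting the bus. The bounded-append idiom in isolation (simplified; the driver also handles the case where the destination is the request's own reply buffer):

	/* never write past the buffer, even if the bus keeps sending */
	if (reply_len < ARRAY_SIZE(reply_buf)) {
		reply_buf[reply_len] = x;
		reply_len++;
	}
	/* excess bytes are dropped rather than corrupting memory */
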
diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
index d72c450aebe5..60f57e2abf21 100644
--- a/drivers/macintosh/via-pmu.c
+++ b/drivers/macintosh/via-pmu.c
@@ -1737,6 +1737,39 @@ pmu_enable_irled(int on)
pmu_wait_complete(&req);
}
+/* Offset between Unix time (1970-based) and Mac time (1904-based) */
+#define RTC_OFFSET 2082844800
+
+time64_t pmu_get_time(void)
+{
+ struct adb_request req;
+ u32 now;
+
+ if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+ return 0;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 4)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ now = (req.reply[0] << 24) + (req.reply[1] << 16) +
+ (req.reply[2] << 8) + req.reply[3];
+ return (time64_t)now - RTC_OFFSET;
+}
+
+int pmu_set_rtc_time(struct rtc_time *tm)
+{
+ u32 now;
+ struct adb_request req;
+
+ now = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
+ if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
+ now >> 24, now >> 16, now >> 8, now) < 0)
+ return -ENXIO;
+ pmu_wait_complete(&req);
+ if (req.reply_len != 0)
+ pr_err("%s: got %d byte reply\n", __func__, req.reply_len);
+ return 0;
+}
+
void
pmu_restart(void)
{
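
[Note] The PMU and Cuda RTC helpers share the same limitation: the Mac RTC is an unsigned 32-bit seconds counter from 1904, so it wraps around early 2040. lower_32_bits() makes that truncation explicit rather than accidental; the last representable moment, as a sketch:

	/* ~2040-02-06: last second the 32-bit Mac RTC can represent */
	time64_t mac_rtc_max = (time64_t)U32_MAX - RTC_OFFSET;
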
diff --git a/drivers/macintosh/windfarm_smu_controls.c b/drivers/macintosh/windfarm_smu_controls.c
index d174c7437337..86d65462a61c 100644
--- a/drivers/macintosh/windfarm_smu_controls.c
+++ b/drivers/macintosh/windfarm_smu_controls.c
@@ -277,7 +277,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 0);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "RPM fan %s\n", fan->name);
+ "RPM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
@@ -296,7 +296,7 @@ static int __init smu_controls_init(void)
fct = smu_fan_create(fan, 1);
if (fct == NULL) {
printk(KERN_WARNING "windfarm: Failed to create SMU "
- "PWM fan %s\n", fan->name);
+ "PWM fan %pOFn\n", fan);
continue;
}
list_add(&fct->link, &smu_fans);
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index da7f4fc1a51d..a0f61eb853c5 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -22,14 +22,6 @@
#define VERSION "1.0"
-#define DEBUG
-
-#ifdef DEBUG
-#define DBG(args...) printk(args)
-#else
-#define DBG(args...) do { } while(0)
-#endif
-
/* If the cache is older than 800ms we'll refetch it */
#define MAX_AGE msecs_to_jiffies(800)
@@ -106,13 +98,10 @@ struct smu_sdbp_header *smu_sat_get_sdb_partition(unsigned int sat_id, int id,
buf[i+2] = data[3];
buf[i+3] = data[2];
}
-#ifdef DEBUG
- DBG(KERN_DEBUG "sat %d partition %x:", sat_id, id);
- for (i = 0; i < len; ++i)
- DBG(" %x", buf[i]);
- DBG("\n");
-#endif
+ printk(KERN_DEBUG "sat %d partition %x:", sat_id, id);
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, buf, len, false);
if (size)
*size = len;
return (struct smu_sdbp_header *) buf;
@@ -132,13 +121,13 @@ static int wf_sat_read_cache(struct wf_sat *sat)
if (err < 0)
return err;
sat->last_read = jiffies;
+
#ifdef LOTSA_DEBUG
{
int i;
- DBG(KERN_DEBUG "wf_sat_get: data is");
- for (i = 0; i < 16; ++i)
- DBG(" %.2x", sat->cache[i]);
- DBG("\n");
+ printk(KERN_DEBUG "wf_sat_get: data is");
+ print_hex_dump(KERN_DEBUG, " ", DUMP_PREFIX_OFFSET,
+ 16, 1, sat->cache, 16, false);
}
#endif
return 0;
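
[Note] print_hex_dump() replaces the hand-rolled DBG loops; its arguments are (level, prefix string, prefix type, row size, group size, buffer, length, include-ASCII). The calls above print 16 one-byte groups per row with an offset prefix and no ASCII column; for comparison, a variant with an address prefix and ASCII column would be:

	print_hex_dump(KERN_DEBUG, "sat: ", DUMP_PREFIX_ADDRESS,
		       16, 1, buf, len, true);
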
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 8b8c123cae66..3db222509e44 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -215,17 +215,6 @@ config BLK_DEV_DM
If unsure, say N.
-config DM_MQ_DEFAULT
- bool "request-based DM: use blk-mq I/O path by default"
- depends on BLK_DEV_DM
- ---help---
- This option enables the blk-mq based I/O path for request-based
- DM devices by default. With the option the dm_mod.use_blk_mq
- module/boot option defaults to Y, without it to N, but it can
- still be overriden either way.
-
- If unsure say N.
-
config DM_DEBUG
bool "Device mapper debugging support"
depends on BLK_DEV_DM
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 1b5b9ad9e492..b61aac00ff40 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1200,7 +1200,7 @@ static void queue_demotion(struct smq_policy *mq)
struct policy_work work;
struct entry *e;
- if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
+ if (WARN_ON_ONCE(!mq->migrations_allowed))
return;
e = q_peek(&mq->clean, mq->clean.nr_levels / 2, true);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c930eaf..224d44503a06 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -112,18 +112,8 @@ struct mapped_device {
struct dm_stats stats;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
-
- /* for request-based merge heuristic in dm_request_fn() */
- unsigned seq_rq_merge_deadline_usecs;
- int last_rq_rw;
- sector_t last_rq_pos;
- ktime_t last_rq_start_time;
-
/* for blk-mq request-based DM support */
struct blk_mq_tag_set *tag_set;
- bool use_blk_mq:1;
bool init_tio_pdu:1;
struct srcu_struct io_barrier;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 0481223b1deb..b8eec515a003 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2661,6 +2661,7 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
struct crypt_config *cc;
+ const char *devname = dm_table_device_name(ti->table);
int key_size;
unsigned int align_mask;
unsigned long long tmpll;
@@ -2806,18 +2807,22 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ret = -ENOMEM;
- cc->io_queue = alloc_workqueue("kcryptd_io", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->io_queue = alloc_workqueue("kcryptd_io/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
if (!cc->io_queue) {
ti->error = "Couldn't create kcryptd io queue";
goto bad;
}
if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
- cc->crypt_queue = alloc_workqueue("kcryptd", WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
+ WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+ 1, devname);
else
- cc->crypt_queue = alloc_workqueue("kcryptd",
+ cc->crypt_queue = alloc_workqueue("kcryptd/%s",
WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
- num_online_cpus());
+ num_online_cpus(), devname);
if (!cc->crypt_queue) {
ti->error = "Couldn't create kcryptd queue";
goto bad;
@@ -2826,7 +2831,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&cc->write_thread_lock);
cc->write_tree = RB_ROOT;
- cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+ cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write/%s", devname);
if (IS_ERR(cc->write_thread)) {
ret = PTR_ERR(cc->write_thread);
cc->write_thread = NULL;
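
[Note] alloc_workqueue() and kthread_create() both take printf-style name formats, which is why per-device naming is a one-line change here: with a device named dm-3 the workers show up as e.g. kcryptd/dm-3 and dmcrypt_write/dm-3 instead of one anonymous pool. In isolation (devname assumed to come from dm_table_device_name()):

	struct workqueue_struct *wq;

	wq = alloc_workqueue("kcryptd_io/%s",
			     WQ_HIGHPRI | WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
			     1, devname);
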
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 32aabe27b37c..3cb97fa4c11d 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -315,10 +315,6 @@ static int flakey_map(struct dm_target *ti, struct bio *bio)
if (bio_op(bio) == REQ_OP_ZONE_RESET)
goto map_bio;
- /* We need to remap reported zones, so remember the BIO iter */
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- goto map_bio;
-
/* Are we alive ? */
elapsed = (jiffies - fc->start_time) / HZ;
if (elapsed % (fc->up_interval + fc->down_interval) >= fc->up_interval) {
@@ -380,11 +376,6 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
if (bio_op(bio) == REQ_OP_ZONE_RESET)
return DM_ENDIO_DONE;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT) {
- dm_remap_zone_report(ti, bio, fc->start);
- return DM_ENDIO_DONE;
- }
-
if (!*error && pb->bio_submitted && (bio_data_dir(bio) == READ)) {
if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc)) {
@@ -457,6 +448,26 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int flakey_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct flakey_c *fc = ti->private;
+ int ret;
+
+ /* Do report and remap it */
+ ret = blkdev_report_zones(fc->dev->bdev, flakey_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, fc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
{
struct flakey_c *fc = ti->private;
@@ -469,6 +480,7 @@ static struct target_type flakey_target = {
.version = {1, 5, 0},
#ifdef CONFIG_BLK_DEV_ZONED
.features = DM_TARGET_ZONED_HM,
+ .report_zones = flakey_report_zones,
#endif
.module = THIS_MODULE,
.ctr = flakey_ctr,
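
[Note] The new .report_zones hook replaces the old REQ_OP_ZONE_REPORT BIO remapping: the target queries the underlying device, then dm_remap_zone_report() shifts the returned descriptors into the dm device's sector space. Roughly, per zone (a simplified sketch; the real helper in dm.c also clips to the target's length and fixes up write pointers per zone condition):

	/* start = target's offset into the underlying device (fc->start) */
	zone->start = zone->start + ti->begin - start;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
		zone->wp = zone->wp + ti->begin - start;
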
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index e1fa6baf4e8e..bb3096bf2cc6 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -559,7 +559,12 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result
}
memset(result + size, 0, JOURNAL_MAC_SIZE - size);
} else {
- __u8 digest[size];
+ __u8 digest[HASH_MAX_DIGESTSIZE];
+
+ if (WARN_ON(size > sizeof(digest))) {
+ dm_integrity_io_error(ic, "digest_size", -EINVAL);
+ goto err;
+ }
r = crypto_shash_final(desc, digest);
if (unlikely(r)) {
dm_integrity_io_error(ic, "crypto_shash_final", r);
@@ -1324,7 +1329,7 @@ static void integrity_metadata(struct work_struct *w)
struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
char *checksums;
unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
- char checksums_onstack[ic->tag_size + extra_space];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
unsigned sectors_to_process = dio->range.n_sectors;
sector_t sector = dio->range.logical_sector;
@@ -1333,8 +1338,14 @@ static void integrity_metadata(struct work_struct *w)
checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
- if (!checksums)
+ if (!checksums) {
checksums = checksums_onstack;
+ if (WARN_ON(extra_space &&
+ digest_size > sizeof(checksums_onstack))) {
+ r = -EINVAL;
+ goto error;
+ }
+ }
__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
unsigned pos;
@@ -1546,7 +1557,7 @@ retry_kmap:
} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
if (ic->internal_hash) {
- char checksums_onstack[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
@@ -1596,7 +1607,7 @@ retry_kmap:
if (ic->internal_hash) {
unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
if (unlikely(digest_size > ic->tag_size)) {
- char checksums_onstack[digest_size];
+ char checksums_onstack[HASH_MAX_DIGESTSIZE];
integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
} else
@@ -2023,7 +2034,7 @@ static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
unlikely(from_replay) &&
#endif
ic->internal_hash) {
- char test_tag[max(crypto_shash_digestsize(ic->internal_hash), ic->tag_size)];
+ char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
(char *)access_journal_data(ic, i, l), test_tag);
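
[Note] The dm-integrity hunks are all instances of the same VLA-elimination pattern: size the stack buffer at the compile-time maximum (HASH_MAX_DIGESTSIZE) and reject anything larger at runtime, since variable-length arrays defeat stack-depth analysis and are being removed tree-wide. The pattern in isolation (tfm assumed to be a valid shash handle):

	u8 digest[HASH_MAX_DIGESTSIZE];
	unsigned int size = crypto_shash_digestsize(tfm);

	if (WARN_ON(size > sizeof(digest)))
		return -EINVAL;	/* should be impossible, but fail safe */
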
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index b810ea77e6b1..f666778ad237 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1720,8 +1720,7 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
}
static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
- int ioctl_flags,
- struct dm_ioctl **param, int *param_flags)
+ int ioctl_flags, struct dm_ioctl **param, int *param_flags)
{
struct dm_ioctl *dmi;
int secure_data;
@@ -1762,18 +1761,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
*param_flags |= DM_PARAMS_MALLOC;
- if (copy_from_user(dmi, user, param_kernel->data_size))
- goto bad;
+ /* Copy from param_kernel (which was already copied from user) */
+ memcpy(dmi, param_kernel, minimum_data_size);
-data_copied:
- /*
- * Abort if something changed the ioctl data while it was being copied.
- */
- if (dmi->data_size != param_kernel->data_size) {
- DMERR("rejecting ioctl: data size modified while processing parameters");
+ if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
+ param_kernel->data_size - minimum_data_size))
goto bad;
- }
-
+data_copied:
/* Wipe the user buffer so we do not return it to userspace */
if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
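
[Note] The copy_params() change closes a double-fetch window: the ioctl header was already copied and validated once, so re-reading it from user memory would let a concurrent writer change data_size between validation and use. Instead the validated header is reused and only the tail is fetched, as in this condensed sketch (minimum_data_size being the vetted header size):

	memcpy(dmi, param_kernel, minimum_data_size);	/* trusted copy */
	if (copy_from_user((char *)dmi + minimum_data_size,
			   (char __user *)user + minimum_data_size,
			   param_kernel->data_size - minimum_data_size))
		goto bad;
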
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 2f7c44a006c4..8d7ddee6ac4d 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -102,19 +102,6 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-#ifdef CONFIG_BLK_DEV_ZONED
-static int linear_end_io(struct dm_target *ti, struct bio *bio,
- blk_status_t *error)
-{
- struct linear_c *lc = ti->private;
-
- if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
- dm_remap_zone_report(ti, bio, lc->start);
-
- return DM_ENDIO_DONE;
-}
-#endif
-
static void linear_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
{
@@ -148,6 +135,26 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
return 0;
}
+#ifdef CONFIG_BLK_DEV_ZONED
+static int linear_report_zones(struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+ struct linear_c *lc = (struct linear_c *) ti->private;
+ int ret;
+
+ /* Do report and remap it */
+ ret = blkdev_report_zones(lc->dev->bdev, linear_map_sector(ti, sector),
+ zones, nr_zones, gfp_mask);
+ if (ret != 0)
+ return ret;
+
+ if (*nr_zones)
+ dm_remap_zone_report(ti, lc->start, zones, nr_zones);
+ return 0;
+}
+#endif
+
static int linear_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
{
@@ -211,8 +218,8 @@ static struct target_type linear_target = {
.name = "linear",
.version = {1, 4, 0},
#ifdef CONFIG_BLK_DEV_ZONED
- .end_io = linear_end_io,
.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
+ .report_zones = linear_report_zones,
#else
.features = DM_TARGET_PASSES_INTEGRITY,
#endif
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 419362c2d8ac..d6a66921daf4 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -203,14 +203,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
if (m->queue_mode == DM_TYPE_NONE) {
- /*
- * Default to request-based.
- */
- if (dm_use_blk_mq(dm_table_get_md(ti->table)))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
- else
- m->queue_mode = DM_TYPE_REQUEST_BASED;
-
+ m->queue_mode = DM_TYPE_REQUEST_BASED;
} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
/*
@@ -537,10 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
* get the queue busy feedback (via BLK_STS_RESOURCE),
* otherwise I/O merging can suffer.
*/
- if (q->mq_ops)
- return DM_MAPIO_REQUEUE;
- else
- return DM_MAPIO_DELAY_REQUEUE;
+ return DM_MAPIO_REQUEUE;
}
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
@@ -668,7 +658,7 @@ static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
static void process_queued_io_list(struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_MQ_REQUEST_BASED)
+ if (m->queue_mode == DM_TYPE_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
@@ -1089,10 +1079,9 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
- else if (!strcasecmp(queue_mode_name, "rq"))
+ else if (!strcasecmp(queue_mode_name, "rq") ||
+ !strcasecmp(queue_mode_name, "mq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
- else if (!strcasecmp(queue_mode_name, "mq"))
- m->queue_mode = DM_TYPE_MQ_REQUEST_BASED;
else {
ti->error = "Unknown 'queue_mode' requested";
r = -EINVAL;
@@ -1726,9 +1715,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
break;
- case DM_TYPE_MQ_REQUEST_BASED:
- DMEMIT("queue_mode mq ");
- break;
default:
WARN_ON_ONCE(true);
break;
@@ -1972,7 +1958,7 @@ static int multipath_busy(struct dm_target *ti)
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths) && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
- return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED);
+ return (m->queue_mode != DM_TYPE_REQUEST_BASED);
/* Guess which priority_group will be used at next mapping time */
pg = READ_ONCE(m->current_pg);
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e4e481..e1dd1622a290 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2475,7 +2475,7 @@ static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
}
/* Enable bitmap creation for RAID levels != 0 */
- mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096);
+ mddev->bitmap_info.offset = (rt_is_raid0(rs->raid_type) || rs->journal_dev.dev) ? 0 : to_sector(4096);
mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
if (!test_and_clear_bit(FirstUse, &rdev->flags)) {
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8dd298..7cd36e4d1310 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -23,19 +23,6 @@ static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
#define RESERVED_REQUEST_BASED_IOS 256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
-static bool use_blk_mq = IS_ENABLED(CONFIG_DM_MQ_DEFAULT);
-
-bool dm_use_blk_mq_default(void)
-{
- return use_blk_mq;
-}
-
-bool dm_use_blk_mq(struct mapped_device *md)
-{
- return md->use_blk_mq;
-}
-EXPORT_SYMBOL_GPL(dm_use_blk_mq);
-
unsigned dm_get_reserved_rq_based_ios(void)
{
return __dm_get_module_param(&reserved_rq_based_ios,
@@ -59,41 +46,13 @@ int dm_request_based(struct mapped_device *md)
return queue_is_rq_based(md->queue);
}
-static void dm_old_start_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_start_queue(struct request_queue *q)
+void dm_start_queue(struct request_queue *q)
{
blk_mq_unquiesce_queue(q);
blk_mq_kick_requeue_list(q);
}
-void dm_start_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_start_queue(q);
- else
- dm_mq_start_queue(q);
-}
-
-static void dm_old_stop_queue(struct request_queue *q)
-{
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- if (!blk_queue_stopped(q))
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-static void dm_mq_stop_queue(struct request_queue *q)
+void dm_stop_queue(struct request_queue *q)
{
if (blk_mq_queue_stopped(q))
return;
@@ -101,14 +60,6 @@ static void dm_mq_stop_queue(struct request_queue *q)
blk_mq_quiesce_queue(q);
}
-void dm_stop_queue(struct request_queue *q)
-{
- if (!q->mq_ops)
- dm_old_stop_queue(q);
- else
- dm_mq_stop_queue(q);
-}
-
/*
* Partial completion handling for request-based dm
*/
@@ -179,9 +130,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
- struct request_queue *q = md->queue;
- unsigned long flags;
-
atomic_dec(&md->pending[rw]);
/* nudge anyone waiting on suspend queue */
@@ -189,18 +137,6 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
wake_up(&md->wait);
/*
- * Run this off this callpath, as drivers could invoke end_io while
- * inside their request_fn (and holding the queue lock). Calling
- * back into ->request_fn() could deadlock attempting to grab the
- * queue lock again.
- */
- if (!q->mq_ops && run_queue) {
- spin_lock_irqsave(q->queue_lock, flags);
- blk_run_queue_async(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
- }
-
- /*
* dm_put() must be at the end of this function. See the comment above
*/
dm_put(md);
@@ -222,27 +158,10 @@ static void dm_end_request(struct request *clone, blk_status_t error)
tio->ti->type->release_clone_rq(clone);
rq_end_stats(md, rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, error);
- else
- blk_mq_end_request(rq, error);
+ blk_mq_end_request(rq, error);
rq_completed(md, rw, true);
}
-/*
- * Requeue the original request of a clone.
- */
-static void dm_old_requeue_request(struct request *rq, unsigned long delay_ms)
-{
- struct request_queue *q = rq->q;
- unsigned long flags;
-
- spin_lock_irqsave(q->queue_lock, flags);
- blk_requeue_request(q, rq);
- blk_delay_queue(q, delay_ms);
- spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
blk_mq_delay_kick_requeue_list(q, msecs);
@@ -273,11 +192,7 @@ static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_
tio->ti->type->release_clone_rq(tio->clone);
}
- if (!rq->q->mq_ops)
- dm_old_requeue_request(rq, delay_ms);
- else
- dm_mq_delay_requeue_request(rq, delay_ms);
-
+ dm_mq_delay_requeue_request(rq, delay_ms);
rq_completed(md, rw, false);
}
@@ -340,10 +255,7 @@ static void dm_softirq_done(struct request *rq)
rq_end_stats(md, rq);
rw = rq_data_dir(rq);
- if (!rq->q->mq_ops)
- blk_end_request_all(rq, tio->error);
- else
- blk_mq_end_request(rq, tio->error);
+ blk_mq_end_request(rq, tio->error);
rq_completed(md, rw, false);
return;
}
@@ -363,17 +275,14 @@ static void dm_complete_request(struct request *rq, blk_status_t error)
struct dm_rq_target_io *tio = tio_from_request(rq);
tio->error = error;
- if (!rq->q->mq_ops)
- blk_complete_request(rq);
- else
- blk_mq_complete_request(rq);
+ blk_mq_complete_request(rq);
}
/*
* Complete the not-mapped clone and the original request with the error status
* through softirq context.
* Target's rq_end_io() function isn't called.
- * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
+ * This may be used when the target's clone_and_map_rq() function fails.
*/
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
@@ -381,21 +290,10 @@ static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
dm_complete_request(rq, error);
}
-/*
- * Called with the clone's queue lock held (in the case of .request_fn)
- */
static void end_clone_request(struct request *clone, blk_status_t error)
{
struct dm_rq_target_io *tio = clone->end_io_data;
- /*
- * Actual request completion is done in a softirq context which doesn't
- * hold the clone's queue lock. Otherwise, deadlock could occur because:
- * - another request may be submitted by the upper level driver
- * of the stacking during the completion
- * - the submission which requires queue lock may be done
- * against this clone's queue
- */
dm_complete_request(tio->orig, error);
}
@@ -446,8 +344,6 @@ static int setup_clone(struct request *clone, struct request *rq,
return 0;
}
-static void map_tio_request(struct kthread_work *work);
-
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
struct mapped_device *md)
{
@@ -464,8 +360,6 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
*/
if (!md->init_tio_pdu)
memset(&tio->info, 0, sizeof(tio->info));
- if (md->kworker_task)
- kthread_init_work(&tio->work, map_tio_request);
}
/*
@@ -504,10 +398,7 @@ check_again:
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone);
tio->clone = NULL;
- if (!rq->q->mq_ops)
- r = DM_MAPIO_DELAY_REQUEUE;
- else
- r = DM_MAPIO_REQUEUE;
+ r = DM_MAPIO_REQUEUE;
goto check_again;
}
break;
@@ -530,20 +421,23 @@ check_again:
return r;
}
+/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
+{
+ return sprintf(buf, "%u\n", 0);
+}
+
+ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
+ const char *buf, size_t count)
+{
+ return count;
+}
+
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
- if (!orig->q->mq_ops)
- blk_start_request(orig);
- else
- blk_mq_start_request(orig);
+ blk_mq_start_request(orig);
atomic_inc(&md->pending[rq_data_dir(orig)]);
- if (md->seq_rq_merge_deadline_usecs) {
- md->last_rq_pos = rq_end_sector(orig);
- md->last_rq_rw = rq_data_dir(orig);
- md->last_rq_start_time = ktime_get();
- }
-
if (unlikely(dm_stats_used(&md->stats))) {
struct dm_rq_target_io *tio = tio_from_request(orig);
tio->duration_jiffies = jiffies;
@@ -563,8 +457,10 @@ static void dm_start_request(struct mapped_device *md, struct request *orig)
dm_get(md);
}
-static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
+static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
+ struct mapped_device *md = set->driver_data;
struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
/*
@@ -581,163 +477,6 @@ static int __dm_rq_init_rq(struct mapped_device *md, struct request *rq)
return 0;
}
-static int dm_rq_init_rq(struct request_queue *q, struct request *rq, gfp_t gfp)
-{
- return __dm_rq_init_rq(q->rq_alloc_data, rq);
-}
-
-static void map_tio_request(struct kthread_work *work)
-{
- struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
-
- if (map_request(tio) == DM_MAPIO_REQUEUE)
- dm_requeue_original_request(tio, false);
-}
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
-{
- return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
-}
-
-#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000
-
-ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
- const char *buf, size_t count)
-{
- unsigned deadline;
-
- if (dm_get_md_type(md) != DM_TYPE_REQUEST_BASED)
- return count;
-
- if (kstrtouint(buf, 10, &deadline))
- return -EINVAL;
-
- if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
- deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;
-
- md->seq_rq_merge_deadline_usecs = deadline;
-
- return count;
-}
-
-static bool dm_old_request_peeked_before_merge_deadline(struct mapped_device *md)
-{
- ktime_t kt_deadline;
-
- if (!md->seq_rq_merge_deadline_usecs)
- return false;
-
- kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
- kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);
-
- return !ktime_after(ktime_get(), kt_deadline);
-}
-
-/*
- * q->request_fn for old request-based dm.
- * Called with the queue lock held.
- */
-static void dm_old_request_fn(struct request_queue *q)
-{
- struct mapped_device *md = q->queuedata;
- struct dm_target *ti = md->immutable_target;
- struct request *rq;
- struct dm_rq_target_io *tio;
- sector_t pos = 0;
-
- if (unlikely(!ti)) {
- int srcu_idx;
- struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-
- if (unlikely(!map)) {
- dm_put_live_table(md, srcu_idx);
- return;
- }
- ti = dm_table_find_target(map, pos);
- dm_put_live_table(md, srcu_idx);
- }
-
- /*
- * For suspend, check blk_queue_stopped() and increment
- * ->pending within a single queue_lock not to increment the
- * number of in-flight I/Os after the queue is stopped in
- * dm_suspend().
- */
- while (!blk_queue_stopped(q)) {
- rq = blk_peek_request(q);
- if (!rq)
- return;
-
- /* always use block 0 to find the target for flushes for now */
- pos = 0;
- if (req_op(rq) != REQ_OP_FLUSH)
- pos = blk_rq_pos(rq);
-
- if ((dm_old_request_peeked_before_merge_deadline(md) &&
- md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
- md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
- (ti->type->busy && ti->type->busy(ti))) {
- blk_delay_queue(q, 10);
- return;
- }
-
- dm_start_request(md, rq);
-
- tio = tio_from_request(rq);
- init_tio(tio, rq, md);
- /* Establish tio->ti before queuing work (map_tio_request) */
- tio->ti = ti;
- kthread_queue_work(&md->kworker, &tio->work);
- BUG_ON(!irqs_disabled());
- }
-}
-
-/*
- * Fully initialize a .request_fn request-based queue.
- */
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t)
-{
- struct dm_target *immutable_tgt;
-
- /* Fully initialize the queue */
- md->queue->cmd_size = sizeof(struct dm_rq_target_io);
- md->queue->rq_alloc_data = md;
- md->queue->request_fn = dm_old_request_fn;
- md->queue->init_rq_fn = dm_rq_init_rq;
-
- immutable_tgt = dm_table_get_immutable_target(t);
- if (immutable_tgt && immutable_tgt->per_io_data_size) {
- /* any target-specific per-io data is immediately after the tio */
- md->queue->cmd_size += immutable_tgt->per_io_data_size;
- md->init_tio_pdu = true;
- }
- if (blk_init_allocated_queue(md->queue) < 0)
- return -EINVAL;
-
- /* disable dm_old_request_fn's merge heuristic by default */
- md->seq_rq_merge_deadline_usecs = 0;
-
- blk_queue_softirq_done(md->queue, dm_softirq_done);
-
- /* Initialize the request-based DM worker thread */
- kthread_init_worker(&md->kworker);
- md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
- "kdmwork-%s", dm_device_name(md));
- if (IS_ERR(md->kworker_task)) {
- int error = PTR_ERR(md->kworker_task);
- md->kworker_task = NULL;
- return error;
- }
-
- return 0;
-}
-
-static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- return __dm_rq_init_rq(set->driver_data, rq);
-}
-
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -790,11 +529,6 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
struct dm_target *immutable_tgt;
int err;
- if (!dm_table_all_blk_mq_devices(t)) {
- DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
- return -EINVAL;
- }
-
md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
if (!md->tag_set)
return -ENOMEM;
@@ -845,6 +579,8 @@ void dm_mq_cleanup_mapped_device(struct mapped_device *md)
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
+/* Unused, but preserved for userspace compatibility */
+static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
diff --git a/drivers/md/dm-rq.h b/drivers/md/dm-rq.h
index f43c45460aac..b39245545229 100644
--- a/drivers/md/dm-rq.h
+++ b/drivers/md/dm-rq.h
@@ -46,10 +46,6 @@ struct dm_rq_clone_bio_info {
struct bio clone;
};
-bool dm_use_blk_mq_default(void);
-bool dm_use_blk_mq(struct mapped_device *md);
-
-int dm_old_init_request_queue(struct mapped_device *md, struct dm_table *t);
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t);
void dm_mq_cleanup_mapped_device(struct mapped_device *md);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index c209b8a19b84..a05fcd50e1b9 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -92,7 +92,8 @@ static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_use_blk_mq(md));
+ /* Purely for userspace compatibility */
+ sprintf(buf, "%d\n", true);
return strlen(buf);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3d0e2c198f06..9038c302d5c2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -47,7 +47,6 @@ struct dm_table {
bool integrity_supported:1;
bool singleton:1;
- bool all_blk_mq:1;
unsigned integrity_added:1;
/*
@@ -872,8 +871,7 @@ static bool __table_type_bio_based(enum dm_queue_mode table_type)
static bool __table_type_request_based(enum dm_queue_mode table_type)
{
- return (table_type == DM_TYPE_REQUEST_BASED ||
- table_type == DM_TYPE_MQ_REQUEST_BASED);
+ return table_type == DM_TYPE_REQUEST_BASED;
}
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
@@ -999,10 +997,6 @@ verify_bio_based:
BUG_ON(!request_based); /* No targets in this table */
- /*
- * The only way to establish DM_TYPE_MQ_REQUEST_BASED is by
- * having a compatible target use dm_table_set_type.
- */
t->type = DM_TYPE_REQUEST_BASED;
verify_rq_based:
@@ -1022,11 +1016,9 @@ verify_rq_based:
int srcu_idx;
struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);
- /* inherit live table's type and all_blk_mq */
- if (live_table) {
+ /* inherit live table's type */
+ if (live_table)
t->type = live_table->type;
- t->all_blk_mq = live_table->all_blk_mq;
- }
dm_put_live_table(t->md, srcu_idx);
return 0;
}
@@ -1046,17 +1038,10 @@ verify_rq_based:
DMERR("table load rejected: including non-request-stackable devices");
return -EINVAL;
}
- if (v.sq_count && v.mq_count) {
+ if (v.sq_count > 0) {
DMERR("table load rejected: not all devices are blk-mq request-stackable");
return -EINVAL;
}
- t->all_blk_mq = v.mq_count > 0;
-
- if (!t->all_blk_mq &&
- (t->type == DM_TYPE_MQ_REQUEST_BASED || t->type == DM_TYPE_NVME_BIO_BASED)) {
- DMERR("table load rejected: all devices are not blk-mq request-stackable");
- return -EINVAL;
- }
return 0;
}
@@ -1105,11 +1090,6 @@ bool dm_table_request_based(struct dm_table *t)
return __table_type_request_based(dm_table_get_type(t));
}
-bool dm_table_all_blk_mq_devices(struct dm_table *t)
-{
- return t->all_blk_mq;
-}
-
static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
enum dm_queue_mode type = dm_table_get_type(t);
@@ -1937,6 +1917,16 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
*/
if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
+
+ /*
+ * For a zoned target, the number of zones should be updated for the
+ * correct value to be exposed in sysfs queue/nr_zones. For a BIO based
+ * target, this is all that is needed. For a request based target, the
+ * queue zone bitmaps must also be updated.
+ * Use blk_revalidate_disk_zones() to handle this.
+ */
+ if (blk_queue_is_zoned(q))
+ blk_revalidate_disk_zones(t->md->disk);
}
unsigned int dm_table_get_num_targets(struct dm_table *t)
@@ -2079,26 +2069,24 @@ struct mapped_device *dm_table_get_md(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_get_md);
+const char *dm_table_device_name(struct dm_table *t)
+{
+ return dm_device_name(t->md);
+}
+EXPORT_SYMBOL_GPL(dm_table_device_name);
+
void dm_table_run_md_queue_async(struct dm_table *t)
{
struct mapped_device *md;
struct request_queue *queue;
- unsigned long flags;
if (!dm_table_request_based(t))
return;
md = dm_table_get_md(t);
queue = dm_get_md_queue(md);
- if (queue) {
- if (queue->mq_ops)
- blk_mq_run_hw_queues(queue, true);
- else {
- spin_lock_irqsave(queue->queue_lock, flags);
- blk_run_queue_async(queue);
- spin_unlock_irqrestore(queue->queue_lock, flags);
- }
- }
+ if (queue)
+ blk_mq_run_hw_queues(queue, true);
}
EXPORT_SYMBOL(dm_table_run_md_queue_async);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index aaf1ad481ee8..0bd8d498b3b9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -325,7 +325,7 @@ struct thin_c {
* Ensures the thin is not destroyed until the worker has finished
* iterating the active_thins list.
*/
- atomic_t refcount;
+ refcount_t refcount;
struct completion can_destroy;
};
@@ -4044,12 +4044,12 @@ static struct target_type pool_target = {
*--------------------------------------------------------------*/
static void thin_get(struct thin_c *tc)
{
- atomic_inc(&tc->refcount);
+ refcount_inc(&tc->refcount);
}
static void thin_put(struct thin_c *tc)
{
- if (atomic_dec_and_test(&tc->refcount))
+ if (refcount_dec_and_test(&tc->refcount))
complete(&tc->can_destroy);
}
@@ -4193,7 +4193,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
- atomic_set(&tc->refcount, 1);
+ refcount_set(&tc->refcount, 1);
init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
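
[Note] refcount_t is a drop-in for atomic_t reference counts with overflow/underflow protection: it saturates and WARNs instead of wrapping, turning a potential use-after-free into a loud leak. The conversion above maps one-to-one:

	refcount_set(&tc->refcount, 1);			/* ctr: initial ref */
	refcount_inc(&tc->refcount);			/* thin_get() */
	if (refcount_dec_and_test(&tc->refcount))	/* thin_put() */
		complete(&tc->can_destroy);
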
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 684af08d0747..0ce04e5b4afb 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -212,12 +212,15 @@ static int fec_read_bufs(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio = fec_io(io);
u64 block, ileaved;
u8 *bbuf, *rs_block;
- u8 want_digest[v->digest_size];
+ u8 want_digest[HASH_MAX_DIGESTSIZE];
unsigned n, k;
if (neras)
*neras = 0;
+ if (WARN_ON(v->digest_size > sizeof(want_digest)))
+ return -EINVAL;
+
/*
* read each of the rsn data blocks that are part of the RS block, and
* interleave contents to available bufs
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5f1f80d424dd..2d50eec94cd7 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -350,10 +350,7 @@ static struct wc_memory_superblock *sb(struct dm_writecache *wc)
static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
- if (is_power_of_2(sizeof(struct wc_entry)) && 0)
- return &sb(wc)->entries[e - wc->entries];
- else
- return &sb(wc)->entries[e->index];
+ return &sb(wc)->entries[e->index];
}
static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 969954915566..fa68336560c3 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -99,7 +99,7 @@ struct dmz_mblock {
struct rb_node node;
struct list_head link;
sector_t no;
- atomic_t ref;
+ unsigned int ref;
unsigned long state;
struct page *page;
void *data;
@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
RB_CLEAR_NODE(&mblk->node);
INIT_LIST_HEAD(&mblk->link);
- atomic_set(&mblk->ref, 0);
+ mblk->ref = 0;
mblk->state = 0;
mblk->no = mblk_no;
mblk->data = page_address(mblk->page);
@@ -339,10 +339,11 @@ static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
}
/*
- * Lookup a metadata block in the rbtree.
+ * Lookup a metadata block in the rbtree. If the block is found, increment
+ * its reference count.
*/
-static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
struct rb_root *root = &zmd->mblk_rbtree;
struct rb_node *node = root->rb_node;
@@ -350,8 +351,17 @@ static struct dmz_mblock *dmz_lookup_mblock(struct dmz_metadata *zmd,
while (node) {
mblk = container_of(node, struct dmz_mblock, node);
- if (mblk->no == mblk_no)
+ if (mblk->no == mblk_no) {
+ /*
+ * If this is the first reference to the block,
+ * remove it from the LRU list.
+ */
+ mblk->ref++;
+ if (mblk->ref == 1 &&
+ !test_bit(DMZ_META_DIRTY, &mblk->state))
+ list_del_init(&mblk->link);
return mblk;
+ }
node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
}
@@ -382,32 +392,47 @@ static void dmz_mblock_bio_end_io(struct bio *bio)
}
/*
- * Read a metadata block from disk.
+ * Read an uncached metadata block from disk and add it to the cache.
*/
-static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
- sector_t mblk_no)
+static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
+ sector_t mblk_no)
{
- struct dmz_mblock *mblk;
+ struct dmz_mblock *mblk, *m;
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
- /* Get block and insert it */
+ /* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
return NULL;
- spin_lock(&zmd->mblk_lock);
- atomic_inc(&mblk->ref);
- set_bit(DMZ_META_READING, &mblk->state);
- dmz_insert_mblock(zmd, mblk);
- spin_unlock(&zmd->mblk_lock);
-
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
return NULL;
}
+ spin_lock(&zmd->mblk_lock);
+
+ /*
+ * Make sure that another context did not start reading
+ * the block already.
+ */
+ m = dmz_get_mblock_fast(zmd, mblk_no);
+ if (m) {
+ spin_unlock(&zmd->mblk_lock);
+ dmz_free_mblock(zmd, mblk);
+ bio_put(bio);
+ return m;
+ }
+
+ mblk->ref++;
+ set_bit(DMZ_META_READING, &mblk->state);
+ dmz_insert_mblock(zmd, mblk);
+
+ spin_unlock(&zmd->mblk_lock);
+
+ /* Submit read BIO */
bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk;
@@ -484,7 +509,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
spin_lock(&zmd->mblk_lock);
- if (atomic_dec_and_test(&mblk->ref)) {
+ mblk->ref--;
+ if (mblk->ref == 0) {
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -508,18 +534,12 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
/* Check rbtree */
spin_lock(&zmd->mblk_lock);
- mblk = dmz_lookup_mblock(zmd, mblk_no);
- if (mblk) {
- /* Cache hit: remove block from LRU list */
- if (atomic_inc_return(&mblk->ref) == 1 &&
- !test_bit(DMZ_META_DIRTY, &mblk->state))
- list_del_init(&mblk->link);
- }
+ mblk = dmz_get_mblock_fast(zmd, mblk_no);
spin_unlock(&zmd->mblk_lock);
if (!mblk) {
/* Cache miss: read the block from disk */
- mblk = dmz_fetch_mblock(zmd, mblk_no);
+ mblk = dmz_get_mblock_slow(zmd, mblk_no);
if (!mblk)
return ERR_PTR(-ENOMEM);
}
@@ -753,7 +773,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
spin_lock(&zmd->mblk_lock);
clear_bit(DMZ_META_DIRTY, &mblk->state);
- if (atomic_read(&mblk->ref) == 0)
+ if (mblk->ref == 0)
list_add_tail(&mblk->link, &zmd->mblk_lru_list);
spin_unlock(&zmd->mblk_lock);
}
@@ -2308,7 +2328,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link);
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
- (u64)mblk->no, atomic_read(&mblk->ref));
+ (u64)mblk->no, mblk->ref);
list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk);
@@ -2326,8 +2346,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
- (u64)mblk->no, atomic_read(&mblk->ref));
- atomic_set(&mblk->ref, 0);
+ (u64)mblk->no, mblk->ref);
+ mblk->ref = 0;
dmz_free_mblock(zmd, mblk);
}
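The dm-zoned rework above splits the lookup into a fast path (rbtree hit under mblk_lock) and a slow path that allocates with no lock held and then re-checks for a racing winner; it also demotes ref from atomic_t to a plain unsigned int, which is safe because every ref manipulation now happens under mblk_lock. The shape of the pattern, with hypothetical names:

    static struct entry *cache_get(struct cache *c, u64 key)
    {
            struct entry *e, *dup;

            e = entry_alloc(key);           /* may sleep; no lock held */
            if (!e)
                    return NULL;

            spin_lock(&c->lock);
            dup = cache_lookup_fast(c, key);  /* bumps ref on a hit */
            if (dup) {
                    spin_unlock(&c->lock);
                    entry_free(e);          /* lost the race; reuse dup */
                    return dup;
            }
            e->ref = 1;                     /* plain int: only touched
                                             * under c->lock */
            cache_insert(c, e);
            spin_unlock(&c->lock);
            return e;
    }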
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index a44183ff4be0..981154e59461 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -19,7 +19,7 @@ struct dmz_bioctx {
struct dmz_target *target;
struct dm_zone *zone;
struct bio *bio;
- atomic_t ref;
+ refcount_t ref;
blk_status_t status;
};
@@ -28,7 +28,7 @@ struct dmz_bioctx {
*/
struct dm_chunk_work {
struct work_struct work;
- atomic_t refcount;
+ refcount_t refcount;
struct dmz_target *target;
unsigned int chunk;
struct bio_list bio_list;
@@ -115,7 +115,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
if (nr_blocks == dmz_bio_blocks(bio)) {
/* Setup and submit the BIO */
bio->bi_iter.bi_sector = sector;
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
return 0;
}
@@ -134,7 +134,7 @@ static int dmz_submit_read_bio(struct dmz_target *dmz, struct dm_zone *zone,
bio_advance(bio, clone->bi_iter.bi_size);
/* Submit the clone */
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(clone);
return 0;
@@ -240,7 +240,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
/* Setup and submit the BIO */
bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
- atomic_inc(&bioctx->ref);
+ refcount_inc(&bioctx->ref);
generic_make_request(bio);
if (dmz_is_seq(zone))
@@ -456,7 +456,7 @@ out:
*/
static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
{
- atomic_inc(&cw->refcount);
+ refcount_inc(&cw->refcount);
}
/*
@@ -465,7 +465,7 @@ static inline void dmz_get_chunk_work(struct dm_chunk_work *cw)
*/
static void dmz_put_chunk_work(struct dm_chunk_work *cw)
{
- if (atomic_dec_and_test(&cw->refcount)) {
+ if (refcount_dec_and_test(&cw->refcount)) {
WARN_ON(!bio_list_empty(&cw->bio_list));
radix_tree_delete(&cw->target->chunk_rxtree, cw->chunk);
kfree(cw);
@@ -546,7 +546,7 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
goto out;
INIT_WORK(&cw->work, dmz_chunk_work);
- atomic_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 0);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -599,7 +599,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
bioctx->target = dmz;
bioctx->zone = NULL;
bioctx->bio = bio;
- atomic_set(&bioctx->ref, 1);
+ refcount_set(&bioctx->ref, 1);
bioctx->status = BLK_STS_OK;
/* Set the BIO pending in the flush list */
@@ -633,7 +633,7 @@ static int dmz_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *error
if (bioctx->status == BLK_STS_OK && *error)
bioctx->status = *error;
- if (!atomic_dec_and_test(&bioctx->ref))
+ if (!refcount_dec_and_test(&bioctx->ref))
return DM_ENDIO_INCOMPLETE;
/* Done */
@@ -702,8 +702,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path)
dev->zone_nr_blocks = dmz_sect2blk(dev->zone_nr_sectors);
dev->zone_nr_blocks_shift = ilog2(dev->zone_nr_blocks);
- dev->nr_zones = (dev->capacity + dev->zone_nr_sectors - 1)
- >> dev->zone_nr_sectors_shift;
+ dev->nr_zones = blkdev_nr_zones(dev->bdev);
dmz->dev = dev;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 45abb54037fc..c510179a7f84 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -458,6 +458,57 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return dm_get_geometry(md, geo);
}
+static int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
+{
+#ifdef CONFIG_BLK_DEV_ZONED
+ struct mapped_device *md = disk->private_data;
+ struct dm_target *tgt;
+ struct dm_table *map;
+ int srcu_idx, ret;
+
+ if (dm_suspended_md(md))
+ return -EAGAIN;
+
+ map = dm_get_live_table(md, &srcu_idx);
+ if (!map)
+ return -EIO;
+
+ tgt = dm_table_find_target(map, sector);
+ if (!dm_target_is_valid(tgt)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * If we are executing this, we already know that the block device
+ * is a zoned device and so each target should have support for that
+ * type of drive. A missing report_zones method means that the target
+ * driver has a problem.
+ */
+ if (WARN_ON(!tgt->type->report_zones)) {
+ ret = -EIO;
+ goto out;
+ }
+
+ /*
+ * blkdev_report_zones() will loop and call this again to cover all the
+ * zones of the target, eventually moving on to the next target.
+ * So there is no need to loop here trying to fill the entire array
+ * of zones.
+ */
+ ret = tgt->type->report_zones(tgt, sector, zones,
+ nr_zones, gfp_mask);
+
+out:
+ dm_put_live_table(md, srcu_idx);
+ return ret;
+#else
+ return -ENOTSUPP;
+#endif
+}
+
static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
struct block_device **bdev)
__acquires(md->io_barrier)
@@ -1155,93 +1206,49 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
/*
- * The zone descriptors obtained with a zone report indicate zone positions
- * within the target backing device, regardless of that device is a partition
- * and regardless of the target mapping start sector on the device or partition.
- * The zone descriptors start sector and write pointer position must be adjusted
- * to match their relative position within the dm device.
- * A target may call dm_remap_zone_report() after completion of a
- * REQ_OP_ZONE_REPORT bio to remap the zone descriptors obtained from the
- * backing device.
+ * The zone descriptors obtained with a zone report indicate
+ * zone positions within the underlying device of the target. The zone
+ * descriptors must be remapped to match their position within the dm device.
+ * The caller target should obtain the zone information using
+ * blkdev_report_zones() to ensure that remapping for the partition
+ * offset is already handled.
*/
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio, sector_t start)
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ struct blk_zone *zones, unsigned int *nr_zones)
{
#ifdef CONFIG_BLK_DEV_ZONED
- struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
- struct bio *report_bio = tio->io->orig_bio;
- struct blk_zone_report_hdr *hdr = NULL;
struct blk_zone *zone;
- unsigned int nr_rep = 0;
- unsigned int ofst;
- sector_t part_offset;
- struct bio_vec bvec;
- struct bvec_iter iter;
- void *addr;
-
- if (bio->bi_status)
- return;
-
- /*
- * bio sector was incremented by the request size on completion. Taking
- * into account the original request sector, the target start offset on
- * the backing device and the target mapping offset (ti->begin), the
- * start sector of the backing device. The partition offset is always 0
- * if the target uses a whole device.
- */
- part_offset = bio->bi_iter.bi_sector + ti->begin - (start + bio_end_sector(report_bio));
+ unsigned int nrz = *nr_zones;
+ int i;
/*
- * Remap the start sector of the reported zones. For sequential zones,
- * also remap the write pointer position.
+ * Remap the start sector and write pointer position of the zones in
+ * the array. Since we may have obtained more zones from the target's
+ * underlying device than the target size covers, also adjust the
+ * number of zones.
*/
- bio_for_each_segment(bvec, report_bio, iter) {
- addr = kmap_atomic(bvec.bv_page);
-
- /* Remember the report header in the first page */
- if (!hdr) {
- hdr = addr;
- ofst = sizeof(struct blk_zone_report_hdr);
- } else
- ofst = 0;
-
- /* Set zones start sector */
- while (hdr->nr_zones && ofst < bvec.bv_len) {
- zone = addr + ofst;
- zone->start -= part_offset;
- if (zone->start >= start + ti->len) {
- hdr->nr_zones = 0;
- break;
- }
- zone->start = zone->start + ti->begin - start;
- if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
- if (zone->cond == BLK_ZONE_COND_FULL)
- zone->wp = zone->start + zone->len;
- else if (zone->cond == BLK_ZONE_COND_EMPTY)
- zone->wp = zone->start;
- else
- zone->wp = zone->wp + ti->begin - start - part_offset;
- }
- ofst += sizeof(struct blk_zone);
- hdr->nr_zones--;
- nr_rep++;
+ for (i = 0; i < nrz; i++) {
+ zone = zones + i;
+ if (zone->start >= start + ti->len) {
+ memset(zone, 0, sizeof(struct blk_zone) * (nrz - i));
+ break;
}
- if (addr != hdr)
- kunmap_atomic(addr);
-
- if (!hdr->nr_zones)
- break;
- }
+ zone->start = zone->start + ti->begin - start;
+ if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+ continue;
- if (hdr) {
- hdr->nr_zones = nr_rep;
- kunmap_atomic(hdr);
+ if (zone->cond == BLK_ZONE_COND_FULL)
+ zone->wp = zone->start + zone->len;
+ else if (zone->cond == BLK_ZONE_COND_EMPTY)
+ zone->wp = zone->start;
+ else
+ zone->wp = zone->wp + ti->begin - start;
}
- bio_advance(report_bio, report_bio->bi_iter.bi_size);
-
+ *nr_zones = i;
#else /* !CONFIG_BLK_DEV_ZONED */
- bio->bi_status = BLK_STS_NOTSUPP;
+ *nr_zones = 0;
#endif
}
EXPORT_SYMBOL_GPL(dm_remap_zone_report);
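The remapping arithmetic in the new dm_remap_zone_report() is easier to follow with concrete numbers (illustrative values, not from the patch):

    /* Target mapped at start = 1024 on the backing device and at
     * ti->begin = 4096 in the dm device; the backing device reports
     * a zone starting at sector 1536: */
    sector_t start = 1024, begin = 4096, zone_start = 1536;
    sector_t remapped = zone_start + begin - start;     /* = 4608 */

    /* Zones past the end of the target (zone_start >= start + ti->len)
     * are zeroed and trimmed from *nr_zones rather than remapped. */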
@@ -1327,8 +1334,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio,
return r;
}
- if (bio_op(bio) != REQ_OP_ZONE_REPORT)
- bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
+ bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
clone->bi_iter.bi_size = to_bytes(len);
if (unlikely(bio_integrity(bio) != NULL))
@@ -1541,7 +1547,6 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
*/
static int __split_and_process_non_flush(struct clone_info *ci)
{
- struct bio *bio = ci->bio;
struct dm_target *ti;
unsigned len;
int r;
@@ -1553,11 +1558,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
if (unlikely(__process_abnormal_io(ci, ti, &r)))
return r;
- if (bio_op(bio) == REQ_OP_ZONE_REPORT)
- len = ci->sector_count;
- else
- len = min_t(sector_t, max_io_len(ci->sector, ti),
- ci->sector_count);
+ len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
if (r < 0)
@@ -1616,9 +1617,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* We take a clone of the original to store in
* ci.io->orig_bio to be used by end_io_acct() and
* for dec_pending to use for completion handling.
- * As this path is not used for REQ_OP_ZONE_REPORT,
- * the usage of io->orig_bio in dm_remap_zone_report()
- * won't be affected by this reassignment.
*/
struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
GFP_NOIO, &md->queue->bio_split);
@@ -1666,7 +1664,7 @@ static blk_qc_t __process_bio(struct mapped_device *md,
* Defend against IO still getting in during teardown
* - as was seen for a time with nvme-fcloop
*/
- if (unlikely(WARN_ON_ONCE(!ti || !dm_target_is_valid(ti)))) {
+ if (WARN_ON_ONCE(!ti || !dm_target_is_valid(ti))) {
error = -EIO;
goto out;
}
@@ -1808,8 +1806,6 @@ static void dm_wq_work(struct work_struct *work);
static void dm_init_normal_md_queue(struct mapped_device *md)
{
- md->use_blk_mq = false;
-
/*
* Initialize aspects of queue that aren't relevant for blk-mq
*/
@@ -1820,8 +1816,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
destroy_workqueue(md->wq);
- if (md->kworker_task)
- kthread_stop(md->kworker_task);
bioset_exit(&md->bs);
bioset_exit(&md->io_bs);
@@ -1888,7 +1882,6 @@ static struct mapped_device *alloc_dev(int minor)
goto bad_io_barrier;
md->numa_node_id = numa_node_id;
- md->use_blk_mq = dm_use_blk_mq_default();
md->init_tio_pdu = false;
md->type = DM_TYPE_NONE;
mutex_init(&md->suspend_lock);
@@ -1919,7 +1912,6 @@ static struct mapped_device *alloc_dev(int minor)
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
init_completion(&md->kobj_holder.completion);
- md->kworker_task = NULL;
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2219,14 +2211,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
switch (type) {
case DM_TYPE_REQUEST_BASED:
- dm_init_normal_md_queue(md);
- r = dm_old_init_request_queue(md, t);
- if (r) {
- DMERR("Cannot initialize queue for request-based mapped device");
- return r;
- }
- break;
- case DM_TYPE_MQ_REQUEST_BASED:
r = dm_mq_init_request_queue(md, t);
if (r) {
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
@@ -2331,9 +2315,6 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
blk_set_queue_dying(md->queue);
- if (dm_request_based(md) && md->kworker_task)
- kthread_flush_worker(&md->kworker);
-
/*
* Take suspend_lock so that presuspend and postsuspend methods
* do not race with internal suspend.
@@ -2586,11 +2567,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* Stop md->queue before flushing md->wq in case request-based
* dm defers requests to md->wq from md->queue.
*/
- if (dm_request_based(md)) {
+ if (dm_request_based(md))
dm_stop_queue(md->queue);
- if (md->kworker_task)
- kthread_flush_worker(&md->kworker);
- }
flush_workqueue(md->wq);
@@ -2965,7 +2943,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
goto out;
break;
case DM_TYPE_REQUEST_BASED:
- case DM_TYPE_MQ_REQUEST_BASED:
pool_size = max(dm_get_reserved_rq_based_ios(), min_pool_size);
front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
/* per_io_data_size is used for blk-mq pdu at queue allocation */
@@ -3167,6 +3144,7 @@ static const struct block_device_operations dm_blk_dops = {
.release = dm_blk_close,
.ioctl = dm_blk_ioctl,
.getgeo = dm_blk_getgeo,
+ .report_zones = dm_blk_report_zones,
.pr_ops = &dm_pr_ops,
.owner = THIS_MODULE
};
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 114a81b27c37..2d539b82ec08 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -70,7 +70,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
-bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index 2fc8c113977f..1cd4f991792c 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -2288,9 +2288,9 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
goto out;
}
if (mddev->pers) {
- mddev->pers->quiesce(mddev, 1);
+ mddev_suspend(mddev);
md_bitmap_destroy(mddev);
- mddev->pers->quiesce(mddev, 0);
+ mddev_resume(mddev);
}
mddev->bitmap_info.offset = 0;
if (mddev->bitmap_info.file) {
@@ -2327,8 +2327,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
mddev->bitmap_info.offset = offset;
if (mddev->pers) {
struct bitmap *bitmap;
- mddev->pers->quiesce(mddev, 1);
bitmap = md_bitmap_create(mddev, -1);
+ mddev_suspend(mddev);
if (IS_ERR(bitmap))
rv = PTR_ERR(bitmap);
else {
@@ -2337,11 +2337,12 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
if (rv)
mddev->bitmap_info.offset = 0;
}
- mddev->pers->quiesce(mddev, 0);
if (rv) {
md_bitmap_destroy(mddev);
+ mddev_resume(mddev);
goto out;
}
+ mddev_resume(mddev);
}
}
}
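The md-bitmap hunks swap raw ->quiesce(mddev, 1/0) calls for the mddev_suspend()/mddev_resume() pair, which also blocks new I/O from entering. The resulting shape, as a sketch of the pattern rather than the exact location_store() flow:

    bitmap = md_bitmap_create(mddev, -1);   /* allocate outside the
                                             * suspended window */
    mddev_suspend(mddev);                   /* drain and block all I/O */
    /* ... install or destroy the bitmap with nothing in flight ... */
    mddev_resume(mddev);                    /* let I/O flow again */

Note how the hunk deliberately calls md_bitmap_create() before mddev_suspend(), so the allocation cannot stall a suspended array.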
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 0b2af6e74fc3..8dff19d5502e 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -33,13 +33,6 @@ struct dlm_lock_resource {
int mode;
};
-struct suspend_info {
- int slot;
- sector_t lo;
- sector_t hi;
- struct list_head list;
-};
-
struct resync_info {
__le64 lo;
__le64 hi;
@@ -80,7 +73,13 @@ struct md_cluster_info {
struct dlm_lock_resource **other_bitmap_lockres;
struct dlm_lock_resource *resync_lockres;
struct list_head suspend_list;
+
spinlock_t suspend_lock;
+ /* record the region in which writes should be suspended */
+ sector_t suspend_lo;
+ sector_t suspend_hi;
+ int suspend_from; /* the slot which broadcast suspend_lo/hi */
+
struct md_thread *recovery_thread;
unsigned long recovery_map;
/* communication lock resources */
@@ -105,6 +104,7 @@ enum msg_type {
RE_ADD,
BITMAP_NEEDS_SYNC,
CHANGE_CAPACITY,
+ BITMAP_RESIZE,
};
struct cluster_msg {
@@ -270,25 +270,22 @@ static void add_resync_info(struct dlm_lock_resource *lockres,
ri->hi = cpu_to_le64(hi);
}
-static struct suspend_info *read_resync_info(struct mddev *mddev, struct dlm_lock_resource *lockres)
+static int read_resync_info(struct mddev *mddev,
+ struct dlm_lock_resource *lockres)
{
struct resync_info ri;
- struct suspend_info *s = NULL;
- sector_t hi = 0;
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ int ret = 0;
dlm_lock_sync(lockres, DLM_LOCK_CR);
memcpy(&ri, lockres->lksb.sb_lvbptr, sizeof(struct resync_info));
- hi = le64_to_cpu(ri.hi);
- if (hi > 0) {
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- goto out;
- s->hi = hi;
- s->lo = le64_to_cpu(ri.lo);
+ if (le64_to_cpu(ri.hi) > 0) {
+ cinfo->suspend_hi = le64_to_cpu(ri.hi);
+ cinfo->suspend_lo = le64_to_cpu(ri.lo);
+ ret = 1;
}
dlm_unlock_sync(lockres);
-out:
- return s;
+ return ret;
}
static void recover_bitmaps(struct md_thread *thread)
@@ -298,7 +295,6 @@ static void recover_bitmaps(struct md_thread *thread)
struct dlm_lock_resource *bm_lockres;
char str[64];
int slot, ret;
- struct suspend_info *s, *tmp;
sector_t lo, hi;
while (cinfo->recovery_map) {
@@ -325,13 +321,17 @@ static void recover_bitmaps(struct md_thread *thread)
/* Clear suspend_area associated with the bitmap */
spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
+ cinfo->suspend_from = -1;
spin_unlock_irq(&cinfo->suspend_lock);
+ /* Kick off a reshape if needed */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector)
+ md_wakeup_thread(mddev->sync_thread);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
@@ -434,34 +434,23 @@ static void ack_bast(void *arg, int mode)
}
}
-static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
-{
- struct suspend_info *s, *tmp;
-
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- break;
- }
-}
-
static void remove_suspend_info(struct mddev *mddev, int slot)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- __remove_suspend_info(cinfo, slot);
+ cinfo->suspend_hi = 0;
+ cinfo->suspend_lo = 0;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
-
static void process_suspend_info(struct mddev *mddev,
int slot, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
- struct suspend_info *s;
+ struct mdp_superblock_1 *sb = NULL;
+ struct md_rdev *rdev;
if (!hi) {
/*
@@ -475,6 +464,12 @@ static void process_suspend_info(struct mddev *mddev,
return;
}
+ rdev_for_each(rdev, mddev)
+ if (rdev->raid_disk > -1 && !test_bit(Faulty, &rdev->flags)) {
+ sb = page_address(rdev->sb_page);
+ break;
+ }
+
/*
* The bitmaps are not the same for different nodes;
* if RESYNCING is happening in one node, then
@@ -487,26 +482,26 @@ static void process_suspend_info(struct mddev *mddev,
* sync_low/hi is used to record the region which
* arrived in the previous RESYNCING message,
*
- * Call bitmap_sync_with_cluster to clear
- * NEEDED_MASK and set RESYNC_MASK since
- * resync thread is running in another node,
- * so we don't need to do the resync again
- * with the same section */
- md_bitmap_sync_with_cluster(mddev, cinfo->sync_low, cinfo->sync_hi, lo, hi);
+ * Call md_bitmap_sync_with_cluster to clear NEEDED_MASK
+ * and set RESYNC_MASK since resync thread is running
+ * in another node, so we don't need to do the resync
+ * again with the same section.
+ *
+ * Skip md_bitmap_sync_with_cluster in case a reshape is
+ * happening, because the reshaping region is small and
+ * we don't want to trigger lots of WARNs.
+ */
+ if (sb && !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE))
+ md_bitmap_sync_with_cluster(mddev, cinfo->sync_low,
+ cinfo->sync_hi, lo, hi);
cinfo->sync_low = lo;
cinfo->sync_hi = hi;
- s = kzalloc(sizeof(struct suspend_info), GFP_KERNEL);
- if (!s)
- return;
- s->slot = slot;
- s->lo = lo;
- s->hi = hi;
mddev->pers->quiesce(mddev, 1);
spin_lock_irq(&cinfo->suspend_lock);
- /* Remove existing entry (if exists) before adding */
- __remove_suspend_info(cinfo, slot);
- list_add(&s->list, &cinfo->suspend_list);
+ cinfo->suspend_from = slot;
+ cinfo->suspend_lo = lo;
+ cinfo->suspend_hi = hi;
spin_unlock_irq(&cinfo->suspend_lock);
mddev->pers->quiesce(mddev, 0);
}
@@ -612,6 +607,11 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
case BITMAP_NEEDS_SYNC:
__recover_slot(mddev, le32_to_cpu(msg->slot));
break;
+ case BITMAP_RESIZE:
+ if (le64_to_cpu(msg->high) != mddev->pers->size(mddev, 0, 0))
+ ret = md_bitmap_resize(mddev->bitmap,
+ le64_to_cpu(msg->high), 0, 0);
+ break;
default:
ret = -1;
pr_warn("%s:%d Received unknown message from %d\n",
@@ -800,7 +800,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
struct md_cluster_info *cinfo = mddev->cluster_info;
int i, ret = 0;
struct dlm_lock_resource *bm_lockres;
- struct suspend_info *s;
char str[64];
sector_t lo, hi;
@@ -819,16 +818,13 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- s = read_resync_info(mddev, bm_lockres);
- if (s) {
+ if (read_resync_info(mddev, bm_lockres)) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
__func__, __LINE__,
- (unsigned long long) s->lo,
- (unsigned long long) s->hi, i);
- spin_lock_irq(&cinfo->suspend_lock);
- s->slot = i;
- list_add(&s->list, &cinfo->suspend_list);
- spin_unlock_irq(&cinfo->suspend_lock);
+ (unsigned long long) cinfo->suspend_lo,
+ (unsigned long long) cinfo->suspend_hi,
+ i);
+ cinfo->suspend_from = i;
}
ret = 0;
lockres_free(bm_lockres);
@@ -1001,10 +997,17 @@ static int leave(struct mddev *mddev)
if (!cinfo)
return 0;
- /* BITMAP_NEEDS_SYNC message should be sent when node
+ /*
+ * BITMAP_NEEDS_SYNC message should be sent when node
* is leaving the cluster with dirty bitmap, also we
- * can only deliver it when dlm connection is available */
- if (cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector)
+ * can only deliver it when dlm connection is available.
+ *
+ * Also, we should send BITMAP_NEEDS_SYNC message in
+ * case reshaping is interrupted.
+ */
+ if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
+ (mddev->reshape_position != MaxSector &&
+ test_bit(MD_CLOSING, &mddev->flags)))
resync_bitmap(mddev);
set_bit(MD_CLUSTER_HOLDING_MUTEX_FOR_RECVD, &cinfo->state);
@@ -1102,6 +1105,80 @@ static void metadata_update_cancel(struct mddev *mddev)
unlock_comm(cinfo);
}
+static int update_bitmap_size(struct mddev *mddev, sector_t size)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+ struct cluster_msg cmsg = {0};
+ int ret;
+
+ cmsg.type = cpu_to_le32(BITMAP_RESIZE);
+ cmsg.high = cpu_to_le64(size);
+ ret = sendmsg(cinfo, &cmsg, 0);
+ if (ret)
+ pr_err("%s:%d: failed to send BITMAP_RESIZE message (%d)\n",
+ __func__, __LINE__, ret);
+ return ret;
+}
+
+static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsize)
+{
+ struct bitmap_counts *counts;
+ char str[64];
+ struct dlm_lock_resource *bm_lockres;
+ struct bitmap *bitmap = mddev->bitmap;
+ unsigned long my_pages = bitmap->counts.pages;
+ int i, rv;
+
+ /*
+ * We need to ensure all the nodes can grow to a larger
+ * bitmap size before starting the reshape.
+ */
+ rv = update_bitmap_size(mddev, newsize);
+ if (rv)
+ return rv;
+
+ for (i = 0; i < mddev->bitmap_info.nodes; i++) {
+ if (i == md_cluster_ops->slot_number(mddev))
+ continue;
+
+ bitmap = get_bitmap_from_slot(mddev, i);
+ if (IS_ERR(bitmap)) {
+ pr_err("can't get bitmap from slot %d\n", i);
+ goto out;
+ }
+ counts = &bitmap->counts;
+
+ /*
+ * If we can take the bitmap lock of a node, the slot
+ * is not occupied, so update the pages.
+ */
+ snprintf(str, 64, "bitmap%04d", i);
+ bm_lockres = lockres_init(mddev, str, NULL, 1);
+ if (!bm_lockres) {
+ pr_err("Cannot initialize %s lock\n", str);
+ goto out;
+ }
+ bm_lockres->flags |= DLM_LKF_NOQUEUE;
+ rv = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
+ if (!rv)
+ counts->pages = my_pages;
+ lockres_free(bm_lockres);
+
+ if (my_pages != counts->pages)
+ /*
+ * Revert the bitmap size if one node
+ * can't resize its bitmap.
+ */
+ goto out;
+ }
+
+ return 0;
+out:
+ md_bitmap_free(bitmap);
+ update_bitmap_size(mddev, oldsize);
+ return -1;
+}
+
/*
* return 0 if all the bitmaps have the same sync_size
*/
@@ -1243,6 +1320,16 @@ static int resync_start(struct mddev *mddev)
return dlm_lock_sync_interruptible(cinfo->resync_lockres, DLM_LOCK_EX, mddev);
}
+static void resync_info_get(struct mddev *mddev, sector_t *lo, sector_t *hi)
+{
+ struct md_cluster_info *cinfo = mddev->cluster_info;
+
+ spin_lock_irq(&cinfo->suspend_lock);
+ *lo = cinfo->suspend_lo;
+ *hi = cinfo->suspend_hi;
+ spin_unlock_irq(&cinfo->suspend_lock);
+}
+
static int resync_info_update(struct mddev *mddev, sector_t lo, sector_t hi)
{
struct md_cluster_info *cinfo = mddev->cluster_info;
@@ -1295,21 +1382,14 @@ static int area_resyncing(struct mddev *mddev, int direction,
{
struct md_cluster_info *cinfo = mddev->cluster_info;
int ret = 0;
- struct suspend_info *s;
if ((direction == READ) &&
test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
return 1;
spin_lock_irq(&cinfo->suspend_lock);
- if (list_empty(&cinfo->suspend_list))
- goto out;
- list_for_each_entry(s, &cinfo->suspend_list, list)
- if (hi > s->lo && lo < s->hi) {
- ret = 1;
- break;
- }
-out:
+ if (hi > cinfo->suspend_lo && lo < cinfo->suspend_hi)
+ ret = 1;
spin_unlock_irq(&cinfo->suspend_lock);
return ret;
}
@@ -1482,6 +1562,7 @@ static struct md_cluster_operations cluster_ops = {
.resync_start = resync_start,
.resync_finish = resync_finish,
.resync_info_update = resync_info_update,
+ .resync_info_get = resync_info_get,
.metadata_update_start = metadata_update_start,
.metadata_update_finish = metadata_update_finish,
.metadata_update_cancel = metadata_update_cancel,
@@ -1492,6 +1573,7 @@ static struct md_cluster_operations cluster_ops = {
.remove_disk = remove_disk,
.load_bitmaps = load_bitmaps,
.gather_bitmaps = gather_bitmaps,
+ .resize_bitmaps = resize_bitmaps,
.lock_all_bitmaps = lock_all_bitmaps,
.unlock_all_bitmaps = unlock_all_bitmaps,
.update_size = update_size,
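With the suspend_list gone, area_resyncing() reduces to the textbook half-open interval overlap test against the single recorded range. Spelled out with illustrative numbers:

    /* [lo, hi) overlaps [slo, shi) iff hi > slo && lo < shi.
     *   lo = 100, hi = 200 vs slo = 150, shi = 300 -> overlap, wait
     *   lo = 300, hi = 400 vs slo = 150, shi = 300 -> no overlap   */
    static bool range_busy(sector_t lo, sector_t hi,
                           sector_t slo, sector_t shi)
    {
            return hi > slo && lo < shi;
    }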
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index c0240708f443..a78e3021775d 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -14,6 +14,7 @@ struct md_cluster_operations {
int (*leave)(struct mddev *mddev);
int (*slot_number)(struct mddev *mddev);
int (*resync_info_update)(struct mddev *mddev, sector_t lo, sector_t hi);
+ void (*resync_info_get)(struct mddev *mddev, sector_t *lo, sector_t *hi);
int (*metadata_update_start)(struct mddev *mddev);
int (*metadata_update_finish)(struct mddev *mddev);
void (*metadata_update_cancel)(struct mddev *mddev);
@@ -26,6 +27,7 @@ struct md_cluster_operations {
int (*remove_disk)(struct mddev *mddev, struct md_rdev *rdev);
void (*load_bitmaps)(struct mddev *mddev, int total_slots);
int (*gather_bitmaps)(struct md_rdev *rdev);
+ int (*resize_bitmaps)(struct mddev *mddev, sector_t newsize, sector_t oldsize);
int (*lock_all_bitmaps)(struct mddev *mddev);
void (*unlock_all_bitmaps)(struct mddev *mddev);
void (*update_size)(struct mddev *mddev, sector_t old_dev_sectors);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 63ceabb4e020..fc488cb30a94 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,10 +452,11 @@ static void md_end_flush(struct bio *fbio)
rdev_dec_pending(rdev, mddev);
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -509,10 +510,11 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
rcu_read_unlock();
if (atomic_dec_and_test(&fi->flush_pending)) {
- if (bio->bi_iter.bi_size == 0)
+ if (bio->bi_iter.bi_size == 0) {
/* an empty barrier - all done */
bio_endio(bio);
- else {
+ mempool_free(fi, mddev->flush_pool);
+ } else {
INIT_WORK(&fi->flush_work, submit_flushes);
queue_work(md_wq, &fi->flush_work);
}
@@ -5904,14 +5906,6 @@ static void __md_stop(struct mddev *mddev)
mddev->to_remove = &md_redundancy_group;
module_put(pers->owner);
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-}
-
-void md_stop(struct mddev *mddev)
-{
- /* stop the array and free an attached data structures.
- * This is called from dm-raid
- */
- __md_stop(mddev);
if (mddev->flush_bio_pool) {
mempool_destroy(mddev->flush_bio_pool);
mddev->flush_bio_pool = NULL;
@@ -5920,6 +5914,14 @@ void md_stop(struct mddev *mddev)
mempool_destroy(mddev->flush_pool);
mddev->flush_pool = NULL;
}
+}
+
+void md_stop(struct mddev *mddev)
+{
+ /* stop the array and free any attached data structures.
+ * This is called from dm-raid.
+ */
+ __md_stop(mddev);
bioset_exit(&mddev->bio_set);
bioset_exit(&mddev->sync_set);
}
@@ -8370,9 +8372,17 @@ void md_do_sync(struct md_thread *thread)
else if (!mddev->bitmap)
j = mddev->recovery_cp;
- } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
max_sectors = mddev->resync_max_sectors;
- else {
+ /*
+ * If the original node aborts reshaping then we continue the
+ * reshaping, so set j again to avoid restarting the reshape
+ * from the very beginning.
+ */
+ if (mddev_is_clustered(mddev) &&
+ mddev->reshape_position != MaxSector)
+ j = mddev->reshape_position;
+ } else {
/* recovery follows the physical size of devices */
max_sectors = mddev->dev_sectors;
j = MaxSector;
@@ -8623,8 +8633,10 @@ void md_do_sync(struct md_thread *thread)
mddev_lock_nointr(mddev);
md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
mddev_unlock(mddev);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
+ if (!mddev_is_clustered(mddev)) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
}
spin_lock(&mddev->lock);
@@ -8790,6 +8802,18 @@ static void md_start_sync(struct work_struct *ws)
*/
void md_check_recovery(struct mddev *mddev)
{
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
+ /* Write superblock - thread that called mddev_suspend()
+ * holds reconfig_mutex for us.
+ */
+ set_bit(MD_UPDATING_SB, &mddev->flags);
+ smp_mb__after_atomic();
+ if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
+ md_update_sb(mddev, 0);
+ clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
+ wake_up(&mddev->sb_wait);
+ }
+
if (mddev->suspended)
return;
@@ -8949,16 +8973,6 @@ void md_check_recovery(struct mddev *mddev)
unlock:
wake_up(&mddev->sb_wait);
mddev_unlock(mddev);
- } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
- /* Write superblock - thread that called mddev_suspend()
- * holds reconfig_mutex for us.
- */
- set_bit(MD_UPDATING_SB, &mddev->flags);
- smp_mb__after_atomic();
- if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
- md_update_sb(mddev, 0);
- clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
- wake_up(&mddev->sb_wait);
}
}
EXPORT_SYMBOL(md_check_recovery);
@@ -8966,6 +8980,8 @@ EXPORT_SYMBOL(md_check_recovery);
void md_reap_sync_thread(struct mddev *mddev)
{
struct md_rdev *rdev;
+ sector_t old_dev_sectors = mddev->dev_sectors;
+ bool is_reshaped = false;
/* resync has finished, collect result */
md_unregister_thread(&mddev->sync_thread);
@@ -8980,8 +8996,11 @@ void md_reap_sync_thread(struct mddev *mddev)
}
}
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- mddev->pers->finish_reshape)
+ mddev->pers->finish_reshape) {
mddev->pers->finish_reshape(mddev);
+ if (mddev_is_clustered(mddev))
+ is_reshaped = true;
+ }
/* If array is no-longer degraded, then any saved_raid_disk
* information must be scrapped.
@@ -9002,6 +9021,14 @@ void md_reap_sync_thread(struct mddev *mddev)
clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+ /*
+ * We call md_cluster_ops->update_size here because sync_size could
+ * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared,
+ * so it is time to update the size across the cluster.
+ */
+ if (mddev_is_clustered(mddev) && is_reshaped
+ && !test_bit(MD_CLOSING, &mddev->flags))
+ md_cluster_ops->update_size(mddev, old_dev_sectors);
wake_up(&resync_wait);
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
@@ -9201,8 +9228,12 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
}
if (role != rdev2->raid_disk) {
- /* got activated */
- if (rdev2->raid_disk == -1 && role != 0xffff) {
+ /*
+ * got activated, except when a reshape is happening.
+ */
+ if (rdev2->raid_disk == -1 && role != 0xffff &&
+ !(le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) {
rdev2->saved_raid_disk = role;
ret = remove_and_add_spares(mddev, rdev2);
pr_info("Activated spare: %s\n",
@@ -9228,6 +9259,30 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
if (mddev->raid_disks != le32_to_cpu(sb->raid_disks))
update_raid_disks(mddev, le32_to_cpu(sb->raid_disks));
+ /*
+ * Since mddev->delta_disks has already been updated in
+ * update_raid_disks, it is time to check for a reshape.
+ */
+ if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /*
+ * A reshape is happening on the remote node; we need to
+ * update reshape_position and call start_reshape.
+ */
+ mddev->reshape_position = sb->reshape_position;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ if (mddev->pers->start_reshape)
+ mddev->pers->start_reshape(mddev);
+ } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) &&
+ mddev->reshape_position != MaxSector &&
+ !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
+ /* reshape is just done in another node. */
+ mddev->reshape_position = MaxSector;
+ if (mddev->pers->update_reshape_pos)
+ mddev->pers->update_reshape_pos(mddev);
+ }
+
/* Finally set the event to be up to date */
mddev->events = le64_to_cpu(sb->events);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 8afd6bfdbfb9..c52afb52c776 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -557,6 +557,7 @@ struct md_personality
int (*check_reshape) (struct mddev *mddev);
int (*start_reshape) (struct mddev *mddev);
void (*finish_reshape) (struct mddev *mddev);
+ void (*update_reshape_pos) (struct mddev *mddev);
/* quiesce suspends or resumes internal processing.
* 1 - stop new actions and wait for action io to complete
* 0 - return to normal behaviour
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4e990246225e..1d54109071cc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1734,6 +1734,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
*/
if (rdev->saved_raid_disk >= 0 &&
rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
first = last = rdev->saved_raid_disk;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index d6f7978b4449..b98e746e7fc4 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -25,6 +25,7 @@
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <linux/raid/md_p.h>
#include <trace/events/block.h>
#include "md.h"
#include "raid10.h"
@@ -1808,6 +1809,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
first = last = rdev->raid_disk;
if (rdev->saved_raid_disk >= first &&
+ rdev->saved_raid_disk < conf->geo.raid_disks &&
conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
mirror = rdev->saved_raid_disk;
else
@@ -3079,6 +3081,8 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
+ int need_recover = 0;
+ int need_replace = 0;
struct raid10_info *mirror = &conf->mirrors[i];
struct md_rdev *mrdev, *mreplace;
@@ -3086,11 +3090,15 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
mrdev = rcu_dereference(mirror->rdev);
mreplace = rcu_dereference(mirror->replacement);
- if ((mrdev == NULL ||
- test_bit(Faulty, &mrdev->flags) ||
- test_bit(In_sync, &mrdev->flags)) &&
- (mreplace == NULL ||
- test_bit(Faulty, &mreplace->flags))) {
+ if (mrdev != NULL &&
+ !test_bit(Faulty, &mrdev->flags) &&
+ !test_bit(In_sync, &mrdev->flags))
+ need_recover = 1;
+ if (mreplace != NULL &&
+ !test_bit(Faulty, &mreplace->flags))
+ need_replace = 1;
+
+ if (!need_recover && !need_replace) {
rcu_read_unlock();
continue;
}
@@ -3213,7 +3221,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
- if (!test_bit(In_sync, &mrdev->flags)) {
+ if (need_recover) {
bio = r10_bio->devs[1].bio;
bio->bi_next = biolist;
biolist = bio;
@@ -3230,16 +3238,11 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r10_bio->devs[1].repl_bio;
if (bio)
bio->bi_end_io = NULL;
- /* Note: if mreplace != NULL, then bio
+ /* Note: if need_replace, then bio
* cannot be NULL as r10buf_pool_alloc will
* have allocated it.
- * So the second test here is pointless.
- * But it keeps semantic-checkers happy, and
- * this comment keeps human reviewers
- * happy.
*/
- if (mreplace == NULL || bio == NULL ||
- test_bit(Faulty, &mreplace->flags))
+ if (!need_replace)
break;
bio->bi_next = biolist;
biolist = bio;
@@ -4286,12 +4289,46 @@ static int raid10_start_reshape(struct mddev *mddev)
spin_unlock_irq(&conf->device_lock);
if (mddev->delta_disks && mddev->bitmap) {
- ret = md_bitmap_resize(mddev->bitmap,
- raid10_size(mddev, 0, conf->geo.raid_disks),
- 0, 0);
+ struct mdp_superblock_1 *sb = NULL;
+ sector_t oldsize, newsize;
+
+ oldsize = raid10_size(mddev, 0, 0);
+ newsize = raid10_size(mddev, 0, conf->geo.raid_disks);
+
+ if (!mddev_is_clustered(mddev)) {
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
+ if (ret)
+ goto abort;
+ else
+ goto out;
+ }
+
+ rdev_for_each(rdev, mddev) {
+ if (rdev->raid_disk > -1 &&
+ !test_bit(Faulty, &rdev->flags))
+ sb = page_address(rdev->sb_page);
+ }
+
+ /*
+ * Some node is already performing a reshape; there is no need
+ * to call md_bitmap_resize again since it will be called upon
+ * receiving the BITMAP_RESIZE msg
+ */
+ if ((sb && (le32_to_cpu(sb->feature_map) &
+ MD_FEATURE_RESHAPE_ACTIVE)) || (oldsize == newsize))
+ goto out;
+
+ ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
if (ret)
goto abort;
+
+ ret = md_cluster_ops->resize_bitmaps(mddev, newsize, oldsize);
+ if (ret) {
+ md_bitmap_resize(mddev->bitmap, oldsize, 0, 0);
+ goto abort;
+ }
}
+out:
if (mddev->delta_disks > 0) {
rdev_for_each(rdev, mddev)
if (rdev->raid_disk < 0 &&
@@ -4568,6 +4605,32 @@ read_more:
r10_bio->master_bio = read_bio;
r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
+ /*
+ * Broadcast the RESYNC message to the other nodes so that none
+ * of them writes to the region, avoiding conflicts.
+ */
+ if (mddev_is_clustered(mddev) && conf->cluster_sync_high <= sector_nr) {
+ struct mdp_superblock_1 *sb = NULL;
+ int sb_reshape_pos = 0;
+
+ conf->cluster_sync_low = sector_nr;
+ conf->cluster_sync_high = sector_nr + CLUSTER_RESYNC_WINDOW_SECTORS;
+ sb = page_address(rdev->sb_page);
+ if (sb) {
+ sb_reshape_pos = le64_to_cpu(sb->reshape_position);
+ /*
+ * Set cluster_sync_low again if the next address for the
+ * array reshape is less than cluster_sync_low, since we
+ * can't update cluster_sync_low until the reshape finishes.
+ */
+ if (sb_reshape_pos < conf->cluster_sync_low)
+ conf->cluster_sync_low = sb_reshape_pos;
+ }
+
+ md_cluster_ops->resync_info_update(mddev, conf->cluster_sync_low,
+ conf->cluster_sync_high);
+ }
+
/* Now find the locations in the new layout */
__raid10_find_phys(&conf->geo, r10_bio);
@@ -4719,6 +4782,19 @@ static void end_reshape(struct r10conf *conf)
conf->fullsync = 0;
}
+static void raid10_update_reshape_pos(struct mddev *mddev)
+{
+ struct r10conf *conf = mddev->private;
+ sector_t lo, hi;
+
+ md_cluster_ops->resync_info_get(mddev, &lo, &hi);
+ if (((mddev->reshape_position <= hi) && (mddev->reshape_position >= lo))
+ || mddev->reshape_position == MaxSector)
+ conf->reshape_progress = mddev->reshape_position;
+ else
+ WARN_ON_ONCE(1);
+}
+
static int handle_reshape_read_error(struct mddev *mddev,
struct r10bio *r10_bio)
{
@@ -4887,6 +4963,7 @@ static struct md_personality raid10_personality =
.check_reshape = raid10_check_reshape,
.start_reshape = raid10_start_reshape,
.finish_reshape = raid10_finish_reshape,
+ .update_reshape_pos = raid10_update_reshape_pos,
.congested = raid10_congested,
};
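The one-line raid1 and raid10 guards above protect saved_raid_disk against arrays that have shrunk since the disk last ran. A plausible failure mode, with hypothetical numbers reconstructed from the check itself:

    /* A disk from a 4-device array remembers saved_raid_disk == 3.
     * If raid_disks has since been reduced to 2 and the disk is
     * re-added, the unguarded code indexes conf->mirrors[3], past
     * the live array. The added bound falls through to the normal
     * free-slot scan instead: */
    if (rdev->saved_raid_disk >= first &&
        rdev->saved_raid_disk < conf->raid_disks &&     /* new guard */
        conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
            first = last = rdev->saved_raid_disk;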
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index e6e925add700..ec3a5ef7fee0 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -3151,8 +3151,6 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
- rcu_assign_pointer(conf->log, NULL);
- md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_exit(&log->meta_pool);
out_mempool:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4e98f47865d..4990f0319f6c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2681,6 +2681,18 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
pr_debug("raid456: error called\n");
spin_lock_irqsave(&conf->device_lock, flags);
+
+ if (test_bit(In_sync, &rdev->flags) &&
+ mddev->degraded == conf->max_degraded) {
+ /*
+ * Don't allow the array to reach the failed state,
+ * and don't try to recover this device.
+ */
+ conf->recovery_disabled = mddev->recovery_disabled;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ return;
+ }
+
set_bit(Faulty, &rdev->flags);
clear_bit(In_sync, &rdev->flags);
mddev->degraded = raid5_calc_degraded(conf);
diff --git a/drivers/media/usb/em28xx/em28xx-audio.c b/drivers/media/usb/em28xx/em28xx-audio.c
index 8e799ae1df69..67481fc82445 100644
--- a/drivers/media/usb/em28xx/em28xx-audio.c
+++ b/drivers/media/usb/em28xx/em28xx-audio.c
@@ -116,6 +116,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
stride = runtime->frame_bits >> 3;
for (i = 0; i < urb->number_of_packets; i++) {
+ unsigned long flags;
int length =
urb->iso_frame_desc[i].actual_length / stride;
cp = (unsigned char *)urb->transfer_buffer +
@@ -137,7 +138,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
length * stride);
}
- snd_pcm_stream_lock(substream);
+ snd_pcm_stream_lock_irqsave(substream, flags);
dev->adev.hwptr_done_capture += length;
if (dev->adev.hwptr_done_capture >=
@@ -153,7 +154,7 @@ static void em28xx_audio_isocirq(struct urb *urb)
period_elapsed = 1;
}
- snd_pcm_stream_unlock(substream);
+ snd_pcm_stream_unlock_irqrestore(substream, flags);
}
if (period_elapsed)
snd_pcm_period_elapsed(substream);
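This hunk and the two that follow fix the same bug class: a USB completion handler may run in hard-IRQ, softirq, or task context depending on the host controller driver, so a plain spin_lock() or snd_pcm_stream_lock() inside one can deadlock against an interrupt taken on the same CPU while the lock is held. The _irqsave variants are safe in any of those contexts. A generic sketch with a hypothetical device struct:

    static void my_urb_complete(struct urb *urb)
    {
            struct my_dev *dev = urb->context;
            unsigned long flags;

            spin_lock_irqsave(&dev->lock, flags);   /* IRQ-safe */
            /* ... copy urb->transfer_buffer into driver state ... */
            spin_unlock_irqrestore(&dev->lock, flags);
    }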
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 5657f8710ca6..2b8c84a5c9a8 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -777,6 +777,7 @@ EXPORT_SYMBOL_GPL(em28xx_set_mode);
static void em28xx_irq_callback(struct urb *urb)
{
struct em28xx *dev = urb->context;
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -793,9 +794,9 @@ static void em28xx_irq_callback(struct urb *urb)
}
/* Copy data from URB */
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
dev->usb_ctl.urb_data_copy(dev, urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 96055de6e8ce..7d268f2404e1 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -419,6 +419,7 @@ static void tm6000_irq_callback(struct urb *urb)
{
struct tm6000_dmaqueue *dma_q = urb->context;
struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq);
+ unsigned long flags;
int i;
switch (urb->status) {
@@ -436,9 +437,9 @@ static void tm6000_irq_callback(struct urb *urb)
break;
}
- spin_lock(&dev->slock);
+ spin_lock_irqsave(&dev->slock, flags);
tm6000_isoc_copy(urb);
- spin_unlock(&dev->slock);
+ spin_unlock_irqrestore(&dev->slock, flags);
/* Reset urb buffers */
for (i = 0; i < urb->number_of_packets; i++) {
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 059997f8ebce..178f414ea8f9 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -2004,7 +2004,7 @@ typedef struct _CONFIG_PAGE_FC_PORT_6
U64 LinkFailureCount; /* 50h */
U64 LossOfSyncCount; /* 58h */
U64 LossOfSignalCount; /* 60h */
- U64 PrimativeSeqErrCount; /* 68h */
+ U64 PrimitiveSeqErrCount; /* 68h */
U64 InvalidTxWordCount; /* 70h */
U64 InvalidCrcCount; /* 78h */
U64 FcpInitiatorIoCount; /* 80h */
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e6b4ae558767..ba551d8dfba4 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -335,11 +335,11 @@ static int mpt_remove_dead_ioc_func(void *arg)
MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pcidev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
@@ -7570,11 +7570,11 @@ mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
u8 phy_num = (u8)(evData0);
u8 port_num = (u8)(evData0 >> 8);
u8 port_width = (u8)(evData0 >> 16);
- u8 primative = (u8)(evData0 >> 24);
+ u8 primitive = (u8)(evData0 >> 24);
snprintf(evStr, EVENT_DESCR_STR_SZ,
- "SAS Broadcase Primative: phy=%d port=%d "
- "width=%d primative=0x%02x",
- phy_num, port_num, port_width, primative);
+ "SAS Broadcast Primitive: phy=%d port=%d "
+ "width=%d primitive=0x%02x",
+ phy_num, port_num, port_width, primitive);
break;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index b8cf2658649e..9b404fc69c90 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -129,7 +129,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
static void mptsas_send_expander_event(struct fw_event_work *fw_event);
static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
-static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_broadcast_primitive_work(struct fw_event_work *fw_event);
static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
void mptsas_schedule_target_reset(void *ioc);
@@ -1665,7 +1665,7 @@ mptsas_firmware_event_work(struct work_struct *work)
mptsas_free_fw_event(ioc, fw_event);
break;
case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
- mptsas_broadcast_primative_work(fw_event);
+ mptsas_broadcast_primitive_work(fw_event);
break;
case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
mptsas_send_expander_event(fw_event);
@@ -4826,13 +4826,13 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
}
/**
- * mptsas_broadcast_primative_work - Handle broadcast primitives
+ * mptsas_broadcast_primitive_work - Handle broadcast primitives
* @work: work queue payload containing info describing the event
*
* this will be handled in workqueue context.
*/
static void
-mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
{
MPT_ADAPTER *ioc = fw_event->ioc;
MPT_FRAME_HDR *mf;
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 11841f4b7b2b..8c5dfdce4326 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -99,6 +99,15 @@ config MFD_AAT2870_CORE
additional drivers must be enabled in order to use the
functionality of the device.
+config MFD_AT91_USART
+ tristate "AT91 USART Driver"
+ select MFD_CORE
+ help
+ Select this to get support for the AT91 USART IP. This is a wrapper
+ over the at91-usart-serial driver and the usart-spi driver. Only one
+ function can be used at a time; the choice is made at boot time by
+ the probe function of this MFD driver according to a device tree
+ property.
+
config MFD_ATMEL_FLEXCOM
tristate "Atmel Flexcom (Flexible Serial Communication Unit)"
select MFD_CORE
@@ -1023,16 +1032,23 @@ config MFD_RN5T618
functionality of the device.
config MFD_SEC_CORE
- bool "SAMSUNG Electronics PMIC Series Support"
+ tristate "SAMSUNG Electronics PMIC Series Support"
depends on I2C=y
select MFD_CORE
select REGMAP_I2C
select REGMAP_IRQ
help
- Support for the Samsung Electronics MFD series.
- This driver provides common support for accessing the device,
- additional drivers must be enabled in order to use the functionality
- of the device
+ Support for the Samsung Electronics PMIC devices that usually
+ accompany Samsung Exynos SoC chipsets.
+ This driver provides common support for accessing the device;
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sec-core.
+ Keep in mind that important core drivers (such as regulators) depend
+ on this driver, so building it as a module might require a proper
+ initial ramdisk or might fail to boot in certain scenarios.
config MFD_SI476X_CORE
tristate "Silicon Laboratories 4761/64/68 AM/FM radio."
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 5856a9489cbd..12980a4ad460 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -196,6 +196,7 @@ obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_TPS65090) += tps65090.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
+obj-$(CONFIG_MFD_AT91_USART) += at91-usart.o
obj-$(CONFIG_MFD_ATMEL_FLEXCOM) += atmel-flexcom.o
obj-$(CONFIG_MFD_ATMEL_HLCDC) += atmel-hlcdc.o
obj-$(CONFIG_MFD_ATMEL_SMC) += atmel-smc.o
diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c
index d817f202da5b..be0497b96720 100644
--- a/drivers/mfd/adp5520.c
+++ b/drivers/mfd/adp5520.c
@@ -360,6 +360,6 @@ static struct i2c_driver adp5520_driver = {
module_i2c_driver(adp5520_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) PMIC-MFD Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5f1e37d23943..27b61639cdc7 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -52,8 +52,10 @@ int arizona_clk32k_enable(struct arizona *arizona)
if (ret != 0)
goto err_ref;
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK1]);
- if (ret != 0)
- goto err_pm;
+ if (ret != 0) {
+ pm_runtime_put_sync(arizona->dev);
+ goto err_ref;
+ }
break;
case ARIZONA_32KZ_MCLK2:
ret = clk_prepare_enable(arizona->mclk[ARIZONA_MCLK2]);
@@ -67,8 +69,6 @@ int arizona_clk32k_enable(struct arizona *arizona)
ARIZONA_CLK_32K_ENA);
}
-err_pm:
- pm_runtime_put_sync(arizona->dev);
err_ref:
if (ret != 0)
arizona->clk32k_ref--;
@@ -990,7 +990,7 @@ static const struct mfd_cell wm8998_devs[] = {
int arizona_dev_init(struct arizona *arizona)
{
- const char * const mclk_name[] = { "mclk1", "mclk2" };
+ static const char * const mclk_name[] = { "mclk1", "mclk2" };
struct device *dev = arizona->dev;
const char *type_name = NULL;
unsigned int reg, val;
diff --git a/drivers/mfd/at91-usart.c b/drivers/mfd/at91-usart.c
new file mode 100644
index 000000000000..d20747f612c1
--- /dev/null
+++ b/drivers/mfd/at91-usart.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for AT91 USART
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#include <dt-bindings/mfd/at91-usart.h>
+
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of.h>
+#include <linux/property.h>
+
+static struct mfd_cell at91_usart_spi_subdev = {
+ .name = "at91_usart_spi",
+ .of_compatible = "microchip,at91sam9g45-usart-spi",
+ };
+
+static struct mfd_cell at91_usart_serial_subdev = {
+ .name = "atmel_usart_serial",
+ .of_compatible = "atmel,at91rm9200-usart-serial",
+ };
+
+static int at91_usart_mode_probe(struct platform_device *pdev)
+{
+ struct mfd_cell cell;
+ u32 opmode = AT91_USART_MODE_SERIAL;
+
+ device_property_read_u32(&pdev->dev, "atmel,usart-mode", &opmode);
+
+ switch (opmode) {
+ case AT91_USART_MODE_SPI:
+ cell = at91_usart_spi_subdev;
+ break;
+ case AT91_USART_MODE_SERIAL:
+ cell = at91_usart_serial_subdev;
+ break;
+ default:
+ dev_err(&pdev->dev, "atmel,usart-mode has an invalid value %u\n",
+ opmode);
+ return -EINVAL;
+ }
+
+ return devm_mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO, &cell, 1,
+ NULL, 0, NULL);
+}
+
+static const struct of_device_id at91_usart_mode_of_match[] = {
+ { .compatible = "atmel,at91rm9200-usart" },
+ { .compatible = "atmel,at91sam9260-usart" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_mode_of_match);
+
+static struct platform_driver at91_usart_mfd = {
+ .probe = at91_usart_mode_probe,
+ .driver = {
+ .name = "at91_usart_mode",
+ .of_match_table = at91_usart_mode_of_match,
+ },
+};
+
+module_platform_driver(at91_usart_mfd);
+
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_DESCRIPTION("AT91 USART MFD driver");
+MODULE_LICENSE("GPL v2");
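For reference, the "atmel,usart-mode" property consumed by the probe function
above comes from the board device tree, using the macros from
dt-bindings/mfd/at91-usart.h included at the top of the file. A minimal
sketch of such a node; the usart0 label and the rest of the board setup are
assumptions:

    #include <dt-bindings/mfd/at91-usart.h>

    &usart0 {
            /* Select the SPI personality; omit the property to get the
             * serial default used by at91_usart_mode_probe(). */
            atmel,usart-mode = <AT91_USART_MODE_SPI>;
            status = "okay";
    };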
diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c
index 65a9757a6d21..fe6f83766144 100644
--- a/drivers/mfd/cros_ec.c
+++ b/drivers/mfd/cros_ec.c
@@ -218,7 +218,8 @@ EXPORT_SYMBOL(cros_ec_suspend);
static void cros_ec_report_events_during_suspend(struct cros_ec_device *ec_dev)
{
- while (cros_ec_get_next_event(ec_dev, NULL) > 0)
+ while (ec_dev->mkbp_event_supported &&
+ cros_ec_get_next_event(ec_dev, NULL) > 0)
blocking_notifier_call_chain(&ec_dev->event_notifier,
1, ec_dev);
}
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 999dac752bcc..8f9d6964173e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -546,6 +546,7 @@ static struct platform_driver cros_ec_dev_driver = {
.name = DRV_NAME,
.pm = &cros_ec_dev_pm_ops,
},
+ .id_table = cros_ec_id,
.probe = ec_device_probe,
.remove = ec_device_remove,
.shutdown = ec_device_shutdown,
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 2017446c5b4b..bb24c2a07900 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
@@ -54,68 +51,44 @@ struct intel_msic {
};
static struct resource msic_touch_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_adc_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_battery_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_gpio_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_audio_resources[] = {
- {
- .name = "IRQ",
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(0, "IRQ"),
/*
* We will pass IRQ_BASE to the driver now but this can be removed
* when/if the driver starts to use intel_msic_irq_read().
*/
- {
- .name = "IRQ_BASE",
- .flags = IORESOURCE_MEM,
- .start = MSIC_IRQ_STATUS_ACCDET,
- .end = MSIC_IRQ_STATUS_ACCDET,
- },
+ DEFINE_RES_MEM_NAMED(MSIC_IRQ_STATUS_ACCDET, 1, "IRQ_BASE"),
};
static struct resource msic_hdmi_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_thermal_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_power_btn_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
static struct resource msic_ocd_resources[] = {
- {
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ(0),
};
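For reference, the DEFINE_RES_IRQ() helpers adopted above come from
<linux/ioport.h>; a paraphrased sketch of their expansion (some fields
omitted):

    #define DEFINE_RES_NAMED(_start, _size, _name, _flags)	\
    	{						\
    		.start = (_start),			\
    		.end   = (_start) + (_size) - 1,	\
    		.name  = (_name),			\
    		.flags = (_flags),			\
    	}

    #define DEFINE_RES_IRQ_NAMED(_irq, _name)	\
    	DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)

    #define DEFINE_RES_IRQ(_irq)	\
    	DEFINE_RES_IRQ_NAMED((_irq), NULL)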
/*
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c
index 15bc052704a6..6310c3bdb991 100644
--- a/drivers/mfd/intel_soc_pmic_bxtwc.c
+++ b/drivers/mfd/intel_soc_pmic_bxtwc.c
@@ -1,27 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
-#include <linux/module.h>
#include <linux/acpi.h>
-#include <linux/err.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
#include <linux/mfd/intel_soc_pmic_bxtwc.h>
+#include <linux/module.h>
+
#include <asm/intel_pmc_ipc.h>
/* PMIC device registers */
@@ -31,8 +24,8 @@
/* Interrupt Status Registers */
#define BXTWC_IRQLVL1 0x4E02
-#define BXTWC_PWRBTNIRQ 0x4E03
+#define BXTWC_PWRBTNIRQ 0x4E03
#define BXTWC_THRM0IRQ 0x4E04
#define BXTWC_THRM1IRQ 0x4E05
#define BXTWC_THRM2IRQ 0x4E06
@@ -47,10 +40,9 @@
/* Interrupt MASK Registers */
#define BXTWC_MIRQLVL1 0x4E0E
-#define BXTWC_MPWRTNIRQ 0x4E0F
-
#define BXTWC_MIRQLVL1_MCHGR BIT(5)
+#define BXTWC_MPWRBTNIRQ 0x4E0F
#define BXTWC_MTHRM0IRQ 0x4E12
#define BXTWC_MTHRM1IRQ 0x4E13
#define BXTWC_MTHRM2IRQ 0x4E14
@@ -66,9 +58,7 @@
/* Whiskey Cove PMIC share same ACPI ID between different platforms */
#define BROXTON_PMIC_WC_HRV 4
-/* Manage in two IRQ chips since mask registers are not consecutive */
enum bxtwc_irqs {
- /* Level 1 */
BXTWC_PWRBTN_LVL1_IRQ = 0,
BXTWC_TMU_LVL1_IRQ,
BXTWC_THRM_LVL1_IRQ,
@@ -77,9 +67,11 @@ enum bxtwc_irqs {
BXTWC_CHGR_LVL1_IRQ,
BXTWC_GPIO_LVL1_IRQ,
BXTWC_CRIT_LVL1_IRQ,
+};
- /* Level 2 */
- BXTWC_PWRBTN_IRQ,
+enum bxtwc_irqs_pwrbtn {
+ BXTWC_PWRBTN_IRQ = 0,
+ BXTWC_UIBTN_IRQ,
};
enum bxtwc_irqs_bcu {
@@ -113,7 +105,10 @@ static const struct regmap_irq bxtwc_regmap_irqs[] = {
REGMAP_IRQ_REG(BXTWC_CHGR_LVL1_IRQ, 0, BIT(5)),
REGMAP_IRQ_REG(BXTWC_GPIO_LVL1_IRQ, 0, BIT(6)),
REGMAP_IRQ_REG(BXTWC_CRIT_LVL1_IRQ, 0, BIT(7)),
- REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 1, 0x03),
+};
+
+static const struct regmap_irq bxtwc_regmap_irqs_pwrbtn[] = {
+ REGMAP_IRQ_REG(BXTWC_PWRBTN_IRQ, 0, 0x01),
};
static const struct regmap_irq bxtwc_regmap_irqs_bcu[] = {
@@ -125,7 +120,7 @@ static const struct regmap_irq bxtwc_regmap_irqs_adc[] = {
};
static const struct regmap_irq bxtwc_regmap_irqs_chgr[] = {
- REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, BIT(5)),
+ REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 0, 0x20),
REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 0, 0x1f),
REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 1, 0x1f),
};
@@ -144,7 +139,16 @@ static struct regmap_irq_chip bxtwc_regmap_irq_chip = {
.mask_base = BXTWC_MIRQLVL1,
.irqs = bxtwc_regmap_irqs,
.num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs),
- .num_regs = 2,
+ .num_regs = 1,
+};
+
+static struct regmap_irq_chip bxtwc_regmap_irq_chip_pwrbtn = {
+ .name = "bxtwc_irq_chip_pwrbtn",
+ .status_base = BXTWC_PWRBTNIRQ,
+ .mask_base = BXTWC_MPWRBTNIRQ,
+ .irqs = bxtwc_regmap_irqs_pwrbtn,
+ .num_irqs = ARRAY_SIZE(bxtwc_regmap_irqs_pwrbtn),
+ .num_regs = 1,
};
static struct regmap_irq_chip bxtwc_regmap_irq_chip_tmu = {
@@ -473,6 +477,16 @@ static int bxtwc_probe(struct platform_device *pdev)
}
ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
+ BXTWC_PWRBTN_LVL1_IRQ,
+ IRQF_ONESHOT,
+ &bxtwc_regmap_irq_chip_pwrbtn,
+ &pmic->irq_chip_data_pwrbtn);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to add PWRBTN IRQ chip\n");
+ return ret;
+ }
+
+ ret = bxtwc_add_chained_irq_chip(pmic, pmic->irq_chip_data,
BXTWC_TMU_LVL1_IRQ,
IRQF_ONESHOT,
&bxtwc_regmap_irq_chip_tmu,
diff --git a/drivers/mfd/intel_soc_pmic_chtdc_ti.c b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
index 861277c6580a..64b5c3cc30e7 100644
--- a/drivers/mfd/intel_soc_pmic_chtdc_ti.c
+++ b/drivers/mfd/intel_soc_pmic_chtdc_ti.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Device access for Dollar Cove TI PMIC
*
@@ -6,10 +7,6 @@
*
* Cleanup and forward-ported
* Copyright (c) 2017 Takashi Iwai <tiwai@suse.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_chtwc.c b/drivers/mfd/intel_soc_pmic_chtwc.c
index b35da01d5bcf..64a3aece9c5e 100644
--- a/drivers/mfd/intel_soc_pmic_chtwc.c
+++ b/drivers/mfd/intel_soc_pmic_chtwc.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* MFD core driver for Intel Cherrytrail Whiskey Cove PMIC
*
@@ -5,10 +6,6 @@
*
* Based on various non upstream patches to support the CHT Whiskey Cove PMIC:
* Copyright (C) 2013-2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/acpi.h>
diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c
index 274306d98ac1..c9f35378d391 100644
--- a/drivers/mfd/intel_soc_pmic_core.c
+++ b/drivers/mfd/intel_soc_pmic_core.c
@@ -1,31 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_core.c - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/module.h>
-#include <linux/mfd/core.h>
+#include <linux/acpi.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/machine.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/gpio/consumer.h>
-#include <linux/acpi.h>
-#include <linux/regmap.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
-#include <linux/gpio/machine.h>
#include <linux/pwm.h>
+#include <linux/regmap.h>
+
#include "intel_soc_pmic_core.h"
/* Crystal Cove PMIC shares same ACPI ID between different platforms */
diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h
index 90a1416d4dac..d490685845eb 100644
--- a/drivers/mfd/intel_soc_pmic_core.h
+++ b/drivers/mfd/intel_soc_pmic_core.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic_core.h - Intel SoC PMIC MFD Driver
+ * Intel SoC PMIC MFD Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c
index 6d19a6d0fb97..b6ab72fa0569 100644
--- a/drivers/mfd/intel_soc_pmic_crc.c
+++ b/drivers/mfd/intel_soc_pmic_crc.c
@@ -1,25 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
/*
- * intel_soc_pmic_crc.c - Device access for Crystal Cove PMIC
+ * Device access for Crystal Cove PMIC
*
* Copyright (C) 2013, 2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
-#include <linux/mfd/core.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/intel_soc_pmic.h>
+
#include "intel_soc_pmic_core.h"
#define CRYSTAL_COVE_MAX_REGISTER 0xC6
@@ -36,48 +29,23 @@
#define CRYSTAL_COVE_IRQ_VHDMIOCP 6
static struct resource gpio_resources[] = {
- {
- .name = "GPIO",
- .start = CRYSTAL_COVE_IRQ_GPIO,
- .end = CRYSTAL_COVE_IRQ_GPIO,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_GPIO, "GPIO"),
};
static struct resource pwrsrc_resources[] = {
- {
- .name = "PWRSRC",
- .start = CRYSTAL_COVE_IRQ_PWRSRC,
- .end = CRYSTAL_COVE_IRQ_PWRSRC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_PWRSRC, "PWRSRC"),
};
static struct resource adc_resources[] = {
- {
- .name = "ADC",
- .start = CRYSTAL_COVE_IRQ_ADC,
- .end = CRYSTAL_COVE_IRQ_ADC,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_ADC, "ADC"),
};
static struct resource thermal_resources[] = {
- {
- .name = "THERMAL",
- .start = CRYSTAL_COVE_IRQ_THRM,
- .end = CRYSTAL_COVE_IRQ_THRM,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_THRM, "THERMAL"),
};
static struct resource bcu_resources[] = {
- {
- .name = "BCU",
- .start = CRYSTAL_COVE_IRQ_BCU,
- .end = CRYSTAL_COVE_IRQ_BCU,
- .flags = IORESOURCE_IRQ,
- },
+ DEFINE_RES_IRQ_NAMED(CRYSTAL_COVE_IRQ_BCU, "BCU"),
};
static struct mfd_cell crystal_cove_byt_dev[] = {
@@ -134,27 +102,13 @@ static const struct regmap_config crystal_cove_regmap_config = {
};
static const struct regmap_irq crystal_cove_irqs[] = {
- [CRYSTAL_COVE_IRQ_PWRSRC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_PWRSRC),
- },
- [CRYSTAL_COVE_IRQ_THRM] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_THRM),
- },
- [CRYSTAL_COVE_IRQ_BCU] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_BCU),
- },
- [CRYSTAL_COVE_IRQ_ADC] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_ADC),
- },
- [CRYSTAL_COVE_IRQ_CHGR] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_CHGR),
- },
- [CRYSTAL_COVE_IRQ_GPIO] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_GPIO),
- },
- [CRYSTAL_COVE_IRQ_VHDMIOCP] = {
- .mask = BIT(CRYSTAL_COVE_IRQ_VHDMIOCP),
- },
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_PWRSRC, 0, BIT(CRYSTAL_COVE_IRQ_PWRSRC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_THRM, 0, BIT(CRYSTAL_COVE_IRQ_THRM)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_BCU, 0, BIT(CRYSTAL_COVE_IRQ_BCU)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_ADC, 0, BIT(CRYSTAL_COVE_IRQ_ADC)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_CHGR, 0, BIT(CRYSTAL_COVE_IRQ_CHGR)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_GPIO, 0, BIT(CRYSTAL_COVE_IRQ_GPIO)),
+ REGMAP_IRQ_REG(CRYSTAL_COVE_IRQ_VHDMIOCP, 0, BIT(CRYSTAL_COVE_IRQ_VHDMIOCP)),
};
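The REGMAP_IRQ_REG() macro used in the table above is the regmap core's
designated-initializer shorthand; paraphrased from <linux/regmap.h>:

    #define REGMAP_IRQ_REG(_irq, _off, _mask)		\
    	[_irq] = { .reg_offset = (_off), .mask = (_mask) }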
static const struct regmap_irq_chip crystal_cove_irq_chip = {
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 8cfea969b060..440030cecbbd 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -132,32 +132,39 @@ const char *madera_name_from_type(enum madera_type type)
}
EXPORT_SYMBOL_GPL(madera_name_from_type);
-#define MADERA_BOOT_POLL_MAX_INTERVAL_US 5000
-#define MADERA_BOOT_POLL_TIMEOUT_US 25000
+#define MADERA_BOOT_POLL_INTERVAL_USEC 5000
+#define MADERA_BOOT_POLL_TIMEOUT_USEC 25000
static int madera_wait_for_boot(struct madera *madera)
{
+ ktime_t timeout;
unsigned int val;
- int ret;
+ int ret = 0;
/*
* We can't use an interrupt as we need to runtime resume to do so,
* so we poll the status bit. This won't race with the interrupt
* handler because it will be blocked on runtime resume.
+ * The chip could NAK a read request while it is booting, so ignore
+ * errors from regmap_read.
*/
- ret = regmap_read_poll_timeout(madera->regmap,
- MADERA_IRQ1_RAW_STATUS_1,
- val,
- (val & MADERA_BOOT_DONE_STS1),
- MADERA_BOOT_POLL_MAX_INTERVAL_US,
- MADERA_BOOT_POLL_TIMEOUT_US);
-
- if (ret)
- dev_err(madera->dev, "Polling BOOT_DONE_STS failed: %d\n", ret);
+ timeout = ktime_add_us(ktime_get(), MADERA_BOOT_POLL_TIMEOUT_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+ while (!(val & MADERA_BOOT_DONE_STS1) &&
+ !ktime_after(ktime_get(), timeout)) {
+ usleep_range(MADERA_BOOT_POLL_INTERVAL_USEC / 2,
+ MADERA_BOOT_POLL_INTERVAL_USEC);
+ regmap_read(madera->regmap, MADERA_IRQ1_RAW_STATUS_1, &val);
+ }
+
+ if (!(val & MADERA_BOOT_DONE_STS1)) {
+ dev_err(madera->dev, "Polling BOOT_DONE_STS timed out\n");
+ ret = -ETIMEDOUT;
+ }
/*
* BOOT_DONE defaults to unmasked on boot so we must ack it.
- * Do this unconditionally to avoid interrupt storms.
+ * Do this even after a timeout to avoid interrupt storms.
*/
regmap_write(madera->regmap, MADERA_IRQ1_STATUS_1,
MADERA_BOOT_DONE_EINT1);
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index 6cbe96b28f42..ebb13d5de530 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -1,22 +1,12 @@
-/*
- * max14577.c - mfd core driver for the Maxim 14577/77836
- *
- * Copyright (C) 2014 Samsung Electronics
- * Chanwoo Choi <cw00.choi@samsung.com>
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577.c - mfd core driver for the Maxim 14577/77836
+//
+// Copyright (C) 2014 Samsung Electronics
+// Chanwoo Choi <cw00.choi@samsung.com>
+// Krzysztof Kozlowski <krzk@kernel.org>
+//
+// This driver is based on max8997.c
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/mfd/max77620.c b/drivers/mfd/max77620.c
index b1700b5fa640..d8217366ed36 100644
--- a/drivers/mfd/max77620.c
+++ b/drivers/mfd/max77620.c
@@ -285,7 +285,7 @@ static int max77620_config_fps(struct max77620_chip *chip,
}
if (fps_id == MAX77620_FPS_COUNT) {
- dev_err(dev, "FPS node name %s is not valid\n", fps_np->name);
+ dev_err(dev, "FPS node name %pOFn is not valid\n", fps_np);
return -EINVAL;
}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index b0e8e13c0049..71faf503844b 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -1,26 +1,12 @@
-/*
- * max77686.c - mfd core driver for the Maxim 77686/802
- *
- * Copyright (C) 2012 Samsung Electronics
- * Chiwoong Byun <woong.byun@samsung.com>
- * Jonghwa Lee <jonghwa3.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77686.c - mfd core driver for the Maxim 77686/802
+//
+// Copyright (C) 2012 Samsung Electronics
+// Chiwoong Byun <woong.byun@samsung.com>
+// Jonghwa Lee <jonghwa3.lee@samsung.com>
+//
+// This driver is based on max8997.c
#include <linux/export.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index 1c05ea0cba61..901d99d65924 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -1,27 +1,13 @@
-/*
- * max77693.c - mfd core driver for the MAX 77693
- *
- * Copyright (C) 2012 Samsung Electronics
- * SangYoung Son <hello.son@samsung.com>
- *
- * This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8997.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693.c - mfd core driver for the MAX 77693
+//
+// Copyright (C) 2012 Samsung Electronics
+// SangYoung Son <hello.son@samsung.com>
+//
+// This program is not provided / owned by Maxim Integrated Products.
+//
+// This driver is based on max8997.c
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
index da9612dbb222..25cbb2242b26 100644
--- a/drivers/mfd/max77843.c
+++ b/drivers/mfd/max77843.c
@@ -1,15 +1,10 @@
-/*
- * MFD core driver for the Maxim MAX77843
- *
- * Copyright (C) 2015 Samsung Electronics
- * Author: Jaewon Kim <jaewon02.kim@samsung.com>
- * Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// MFD core driver for the Maxim MAX77843
+//
+// Copyright (C) 2015 Samsung Electronics
+// Author: Jaewon Kim <jaewon02.kim@samsung.com>
+// Author: Beomho Seo <beomho.seo@samsung.com>
#include <linux/err.h>
#include <linux/i2c.h>
diff --git a/drivers/mfd/max8997-irq.c b/drivers/mfd/max8997-irq.c
index 326f17b632a7..93a3b1698d9c 100644
--- a/drivers/mfd/max8997-irq.c
+++ b/drivers/mfd/max8997-irq.c
@@ -1,25 +1,11 @@
-/*
- * max8997-irq.c - Interrupt controller support for MAX8997
- *
- * Copyright (C) 2011 Samsung Electronics Co.Ltd
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998-irq.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997-irq.c - Interrupt controller support for MAX8997
+//
+// Copyright (C) 2011 Samsung Electronics Co.Ltd
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998-irq.c
#include <linux/err.h>
#include <linux/irq.h>
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 3f554c447521..8c06c09e36d1 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -1,25 +1,11 @@
-/*
- * max8997.c - mfd core driver for the Maxim 8966 and 8997
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max8998.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997.c - mfd core driver for the Maxim 8966 and 8997
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max8998.c
#include <linux/err.h>
#include <linux/slab.h>
@@ -153,12 +139,6 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
pd->ono = irq_of_parse_and_map(dev->of_node, 1);
- /*
- * ToDo: the 'wakeup' member in the platform data is more of a linux
- * specfic information. Hence, there is no binding for that yet and
- * not parsed here.
- */
-
return pd;
}
@@ -246,7 +226,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c,
*/
/* MAX8997 has a power button input. */
- device_init_wakeup(max8997->dev, pdata->wakeup);
+ device_init_wakeup(max8997->dev, true);
return ret;
@@ -468,6 +448,7 @@ static int max8997_suspend(struct device *dev)
struct i2c_client *i2c = to_i2c_client(dev);
struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
+ disable_irq(max8997->irq);
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 1);
return 0;
@@ -480,6 +461,7 @@ static int max8997_resume(struct device *dev)
if (device_may_wakeup(dev))
irq_set_irq_wake(max8997->irq, 0);
+ enable_irq(max8997->irq);
return max8997_irq_resume(max8997);
}
diff --git a/drivers/mfd/max8998-irq.c b/drivers/mfd/max8998-irq.c
index 90bad9ffa7e2..83b6f510bc05 100644
--- a/drivers/mfd/max8998-irq.c
+++ b/drivers/mfd/max8998-irq.c
@@ -1,15 +1,9 @@
-/*
- * Interrupt controller support for MAX8998
- *
- * Copyright (C) 2010 Samsung Electronics Co.Ltd
- * Author: Joonyoung Shim <jy0922.shim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Interrupt controller support for MAX8998
+//
+// Copyright (C) 2010 Samsung Electronics Co.Ltd
+// Author: Joonyoung Shim <jy0922.shim@samsung.com>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index b1d3f70782d9..56409df120f8 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -1,24 +1,10 @@
-/*
- * max8998.c - mfd core driver for the Maxim 8998
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * Kyungmin Park <kyungmin.park@samsung.com>
- * Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998.c - mfd core driver for the Maxim 8998
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// Kyungmin Park <kyungmin.park@samsung.com>
+// Marek Szyprowski <m.szyprowski@samsung.com>
#include <linux/err.h>
#include <linux/init.h>
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index c63e331738c1..f475e848252f 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -276,7 +276,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
- adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
+ adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2 |
+ MC13XXX_ADC0_CHRGRAWDIV;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
/*
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index 29b7164a823b..d28ebe7ecd21 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1094,6 +1094,7 @@ static void menelaus_rtc_alarm_work(struct menelaus_chip *m)
static inline void menelaus_rtc_init(struct menelaus_chip *m)
{
int alarm = (m->client->irq > 0);
+ int err;
/* assume 32KDETEN pin is pulled high */
if (!(menelaus_read_reg(MENELAUS_OSC_CTRL) & 0x80)) {
@@ -1101,6 +1102,12 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
return;
}
+ m->rtc = devm_rtc_allocate_device(&m->client->dev);
+ if (IS_ERR(m->rtc))
+ return;
+
+ m->rtc->ops = &menelaus_rtc_ops;
+
/* support RTC alarm; it can issue wakeups */
if (alarm) {
if (menelaus_add_irq_work(MENELAUS_RTCALM_IRQ,
@@ -1125,10 +1132,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
- m->rtc = rtc_device_register(DRIVER_NAME,
- &m->client->dev,
- &menelaus_rtc_ops, THIS_MODULE);
- if (IS_ERR(m->rtc)) {
+ err = rtc_register_device(m->rtc);
+ if (err) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
device_init_wakeup(&m->client->dev, 0);
diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c
index 5276911caaec..20d9692640e1 100644
--- a/drivers/mfd/motorola-cpcap.c
+++ b/drivers/mfd/motorola-cpcap.c
@@ -18,6 +18,7 @@
#include <linux/regmap.h>
#include <linux/sysfs.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/motorola-cpcap.h>
#include <linux/spi/spi.h>
@@ -216,6 +217,53 @@ static const struct regmap_config cpcap_regmap_config = {
.val_format_endian = REGMAP_ENDIAN_LITTLE,
};
+static const struct mfd_cell cpcap_mfd_devices[] = {
+ {
+ .name = "cpcap_adc",
+ .of_compatible = "motorola,mapphone-cpcap-adc",
+ }, {
+ .name = "cpcap_battery",
+ .of_compatible = "motorola,cpcap-battery",
+ }, {
+ .name = "cpcap-charger",
+ .of_compatible = "motorola,mapphone-cpcap-charger",
+ }, {
+ .name = "cpcap-regulator",
+ .of_compatible = "motorola,mapphone-cpcap-regulator",
+ }, {
+ .name = "cpcap-rtc",
+ .of_compatible = "motorola,cpcap-rtc",
+ }, {
+ .name = "cpcap-pwrbutton",
+ .of_compatible = "motorola,cpcap-pwrbutton",
+ }, {
+ .name = "cpcap-usb-phy",
+ .of_compatible = "motorola,mapphone-cpcap-usb-phy",
+ }, {
+ .name = "cpcap-led",
+ .id = 0,
+ .of_compatible = "motorola,cpcap-led-red",
+ }, {
+ .name = "cpcap-led",
+ .id = 1,
+ .of_compatible = "motorola,cpcap-led-green",
+ }, {
+ .name = "cpcap-led",
+ .id = 2,
+ .of_compatible = "motorola,cpcap-led-blue",
+ }, {
+ .name = "cpcap-led",
+ .id = 3,
+ .of_compatible = "motorola,cpcap-led-adl",
+ }, {
+ .name = "cpcap-led",
+ .id = 4,
+ .of_compatible = "motorola,cpcap-led-cp",
+ }, {
+ .name = "cpcap-codec",
+ }
+};
+
static int cpcap_probe(struct spi_device *spi)
{
const struct of_device_id *match;
@@ -260,7 +308,8 @@ static int cpcap_probe(struct spi_device *spi)
if (ret)
return ret;
- return devm_of_platform_populate(&cpcap->spi->dev);
+ return devm_mfd_add_devices(&spi->dev, 0, cpcap_mfd_devices,
+ ARRAY_SIZE(cpcap_mfd_devices), NULL, 0, NULL);
}
static struct spi_driver cpcap_driver = {
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 9613b4257302..e0835c9df7a1 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -1,15 +1,7 @@
-/*
- * sec-core.c
- *
- * Copyright (c) 2012 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2012 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/module.h>
#include <linux/moduleparam.h>
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 5eb59c233d52..ad0099077e7e 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -1,19 +1,12 @@
-/*
- * sec-irq.c
- *
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
- * http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
+// http://www.samsung.com
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/mfd/samsung/core.h>
@@ -501,3 +494,10 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
return 0;
}
+EXPORT_SYMBOL_GPL(sec_irq_init);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_DESCRIPTION("Interrupt support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/ti-lmu.c b/drivers/mfd/ti-lmu.c
index cfb411cde51c..37d0bdb291c3 100644
--- a/drivers/mfd/ti-lmu.c
+++ b/drivers/mfd/ti-lmu.c
@@ -12,7 +12,7 @@
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/mfd/core.h>
@@ -21,28 +21,18 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
struct ti_lmu_data {
- struct mfd_cell *cells;
+ const struct mfd_cell *cells;
int num_cells;
unsigned int max_register;
};
static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
{
- int ret;
-
- if (gpio_is_valid(lmu->en_gpio)) {
- ret = devm_gpio_request_one(lmu->dev, lmu->en_gpio,
- GPIOF_OUT_INIT_HIGH, "lmu_hwen");
- if (ret) {
- dev_err(lmu->dev, "Can not request enable GPIO: %d\n",
- ret);
- return ret;
- }
- }
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 1);
/* Delay about 1ms after HW enable pin control */
usleep_range(1000, 1500);
@@ -57,13 +47,14 @@ static int ti_lmu_enable_hw(struct ti_lmu *lmu, enum ti_lmu_id id)
return 0;
}
-static void ti_lmu_disable_hw(struct ti_lmu *lmu)
+static void ti_lmu_disable_hw(void *data)
{
- if (gpio_is_valid(lmu->en_gpio))
- gpio_set_value(lmu->en_gpio, 0);
+ struct ti_lmu *lmu = data;
+ if (lmu->en_gpio)
+ gpiod_set_value(lmu->en_gpio, 0);
}
-static struct mfd_cell lm3532_devices[] = {
+static const struct mfd_cell lm3532_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3532,
@@ -78,7 +69,7 @@ static struct mfd_cell lm3532_devices[] = {
.of_compatible = "ti,lm363x-regulator", \
} \
-static struct mfd_cell lm3631_devices[] = {
+static const struct mfd_cell lm3631_devices[] = {
LM363X_REGULATOR(LM3631_BOOST),
LM363X_REGULATOR(LM3631_LDO_CONT),
LM363X_REGULATOR(LM3631_LDO_OREF),
@@ -91,7 +82,7 @@ static struct mfd_cell lm3631_devices[] = {
},
};
-static struct mfd_cell lm3632_devices[] = {
+static const struct mfd_cell lm3632_devices[] = {
LM363X_REGULATOR(LM3632_BOOST),
LM363X_REGULATOR(LM3632_LDO_POS),
LM363X_REGULATOR(LM3632_LDO_NEG),
@@ -102,7 +93,7 @@ static struct mfd_cell lm3632_devices[] = {
},
};
-static struct mfd_cell lm3633_devices[] = {
+static const struct mfd_cell lm3633_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3633,
@@ -120,7 +111,7 @@ static struct mfd_cell lm3633_devices[] = {
},
};
-static struct mfd_cell lm3695_devices[] = {
+static const struct mfd_cell lm3695_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3695,
@@ -128,7 +119,7 @@ static struct mfd_cell lm3695_devices[] = {
},
};
-static struct mfd_cell lm3697_devices[] = {
+static const struct mfd_cell lm3697_devices[] = {
{
.name = "ti-lmu-backlight",
.id = LM3697,
@@ -157,34 +148,21 @@ TI_LMU_DATA(lm3633, LM3633_MAX_REG);
TI_LMU_DATA(lm3695, LM3695_MAX_REG);
TI_LMU_DATA(lm3697, LM3697_MAX_REG);
-static const struct of_device_id ti_lmu_of_match[] = {
- { .compatible = "ti,lm3532", .data = &lm3532_data },
- { .compatible = "ti,lm3631", .data = &lm3631_data },
- { .compatible = "ti,lm3632", .data = &lm3632_data },
- { .compatible = "ti,lm3633", .data = &lm3633_data },
- { .compatible = "ti,lm3695", .data = &lm3695_data },
- { .compatible = "ti,lm3697", .data = &lm3697_data },
- { }
-};
-MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
-
static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct device *dev = &cl->dev;
- const struct of_device_id *match;
const struct ti_lmu_data *data;
struct regmap_config regmap_cfg;
struct ti_lmu *lmu;
int ret;
- match = of_match_device(ti_lmu_of_match, dev);
- if (!match)
- return -ENODEV;
/*
* Get device specific data from of_match table.
* This data is defined by using TI_LMU_DATA() macro.
*/
- data = (struct ti_lmu_data *)match->data;
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -ENODEV;
lmu = devm_kzalloc(dev, sizeof(*lmu), GFP_KERNEL);
if (!lmu)
@@ -204,11 +182,21 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
return PTR_ERR(lmu->regmap);
/* HW enable pin control and additional power up sequence if required */
- lmu->en_gpio = of_get_named_gpio(dev->of_node, "enable-gpios", 0);
+ lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(lmu->en_gpio)) {
+ ret = PTR_ERR(lmu->en_gpio);
+ dev_err(dev, "Can not request enable GPIO: %d\n", ret);
+ return ret;
+ }
+
ret = ti_lmu_enable_hw(lmu, id->driver_data);
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, ti_lmu_disable_hw, lmu);
+ if (ret)
+ return ret;
+
/*
* Fault circuit(open/short) can be detected by ti-lmu-fault-monitor.
* After fault detection is done, some devices should re-initialize
@@ -218,18 +206,20 @@ static int ti_lmu_probe(struct i2c_client *cl, const struct i2c_device_id *id)
i2c_set_clientdata(cl, lmu);
- return mfd_add_devices(lmu->dev, 0, data->cells,
- data->num_cells, NULL, 0, NULL);
+ return devm_mfd_add_devices(lmu->dev, 0, data->cells,
+ data->num_cells, NULL, 0, NULL);
}
-static int ti_lmu_remove(struct i2c_client *cl)
-{
- struct ti_lmu *lmu = i2c_get_clientdata(cl);
-
- ti_lmu_disable_hw(lmu);
- mfd_remove_devices(lmu->dev);
- return 0;
-}
+static const struct of_device_id ti_lmu_of_match[] = {
+ { .compatible = "ti,lm3532", .data = &lm3532_data },
+ { .compatible = "ti,lm3631", .data = &lm3631_data },
+ { .compatible = "ti,lm3632", .data = &lm3632_data },
+ { .compatible = "ti,lm3633", .data = &lm3633_data },
+ { .compatible = "ti,lm3695", .data = &lm3695_data },
+ { .compatible = "ti,lm3697", .data = &lm3697_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ti_lmu_of_match);
static const struct i2c_device_id ti_lmu_ids[] = {
{ "lm3532", LM3532 },
@@ -244,7 +234,6 @@ MODULE_DEVICE_TABLE(i2c, ti_lmu_ids);
static struct i2c_driver ti_lmu_driver = {
.probe = ti_lmu_probe,
- .remove = ti_lmu_remove,
.driver = {
.name = "ti-lmu",
.of_match_table = ti_lmu_of_match,
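The .remove callback could be dropped above because devm_add_action_or_reset()
registers the power-off handler with the device's managed-resource list. A
minimal sketch of that pattern in a hypothetical driver (the foo names are
assumptions):

    #include <linux/device.h>
    #include <linux/gpio/consumer.h>

    struct foo {
    	struct gpio_desc *en_gpio;
    };

    /* Undo action: runs on probe failure and again on driver unbind */
    static void foo_power_off(void *data)
    {
    	struct foo *foo = data;

    	gpiod_set_value(foo->en_gpio, 0);
    }

    static int foo_power_on(struct device *dev, struct foo *foo)
    {
    	gpiod_set_value(foo->en_gpio, 1);

    	/* On registration failure the action is invoked immediately,
    	 * so the GPIO is never left driven high on an error path. */
    	return devm_add_action_or_reset(dev, foo_power_off, foo);
    }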
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 7a30546880a4..c2d47d78705b 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -269,7 +269,6 @@ static int ti_tscadc_probe(struct platform_device *pdev)
if (err < 0)
goto err_disable_clk;
- device_init_wakeup(&pdev->dev, true);
platform_set_drvdata(pdev, tscadc);
return 0;
@@ -294,11 +293,24 @@ static int ti_tscadc_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused ti_tscadc_can_wakeup(struct device *dev, void *data)
+{
+ return device_may_wakeup(dev);
+}
+
static int __maybe_unused tscadc_suspend(struct device *dev)
{
struct ti_tscadc_dev *tscadc = dev_get_drvdata(dev);
regmap_write(tscadc->regmap, REG_SE, 0x00);
+ if (device_for_each_child(dev, NULL, ti_tscadc_can_wakeup)) {
+ u32 ctrl;
+
+ regmap_read(tscadc->regmap, REG_CTRL, &ctrl);
+ ctrl &= ~(CNTRLREG_POWERDOWN);
+ ctrl |= CNTRLREG_TSCSSENB;
+ regmap_write(tscadc->regmap, REG_CTRL, ctrl);
+ }
pm_runtime_put_sync(dev);
return 0;
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index 4f832002d116..1827c69959fb 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -114,6 +114,6 @@ static struct i2c_driver ad_dpot_i2c_driver = {
module_i2c_driver(ad_dpot_i2c_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 39a7f517ee7e..0383ec153725 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -140,7 +140,7 @@ static struct spi_driver ad_dpot_spi_driver = {
module_spi_driver(ad_dpot_spi_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ad_dpot");
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index bc591b7168db..a0afadefcc49 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -1,7 +1,7 @@
/*
* ad525x_dpot: Driver for the Analog Devices digital potentiometers
* Copyright (c) 2009-2010 Analog Devices, Inc.
- * Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Author: Michael Hennerich <michael.hennerich@analog.com>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
* AD5258 1 64 1, 10, 50, 100
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <michael.hennerich@analog.com>
*
* Licensed under the GPL-2 or later.
*/
@@ -760,6 +760,6 @@ EXPORT_SYMBOL(ad_dpot_remove);
MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
- "Michael Hennerich <hennerich@blackfin.uclinux.org>");
+ "Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index ed9412d750b7..24876c615c3c 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -188,7 +188,6 @@ struct apds990x_chip {
#define APDS_LUX_DEFAULT_RATE 200
static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
-static const u8 ir_currents[] = {100, 50, 25, 12}; /* IRled currents in mA */
/* Following two tables must match i.e 10Hz rate means 1 as persistence value */
static const u16 arates_hz[] = {10, 5, 2, 1};
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index 9c62bf064f77..17e81ce9925b 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -180,9 +180,6 @@ static const char reg_vleds[] = "Vleds";
static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
-/* Supported IR-led currents in mA */
-static const u8 prox_curr_ma[] = {5, 10, 20, 50, 100, 150, 200};
-
/*
* Supported stand alone rates in ms from chip data sheet
* {100, 200, 500, 1000, 2000};
diff --git a/drivers/misc/cxl/flash.c b/drivers/misc/cxl/flash.c
index 43917898fb9a..4d6836f19489 100644
--- a/drivers/misc/cxl/flash.c
+++ b/drivers/misc/cxl/flash.c
@@ -92,8 +92,8 @@ static int update_property(struct device_node *dn, const char *name,
val = (u32 *)new_prop->value;
rc = cxl_update_properties(dn, new_prop);
- pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
- dn->name, name, vd, be32_to_cpu(*val));
+ pr_devel("%pOFn: update property (%s, length: %i, value: %#x)\n",
+ dn, name, vd, be32_to_cpu(*val));
if (rc) {
kfree(new_prop->name);
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 3bc0c15d4d85..5d28d9e454f5 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -1018,8 +1018,6 @@ err1:
void cxl_guest_remove_afu(struct cxl_afu *afu)
{
- pr_devel("in %s - AFU(%d)\n", __func__, afu->slice);
-
if (!afu)
return;
diff --git a/drivers/misc/echo/echo.c b/drivers/misc/echo/echo.c
index 8a5adc0d2e88..3ebe5d75ad6a 100644
--- a/drivers/misc/echo/echo.c
+++ b/drivers/misc/echo/echo.c
@@ -381,7 +381,7 @@ int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
*/
ec->factor = 0;
ec->shift = 0;
- if ((ec->nonupdate_dwell == 0)) {
+ if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac929917..fe7a1d27a017 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -111,4 +111,15 @@ config EEPROM_IDT_89HPESX
This driver can also be built as a module. If so, the module
will be called idt_89hpesx.
+config EEPROM_EE1004
+ tristate "SPD EEPROMs on DDR4 memory modules"
+ depends on I2C && SYSFS
+ help
+ Enable this driver to get read support for SPD EEPROMs following
+ the JEDEC EE1004 standard. These are typically found on DDR4
+ SDRAM memory modules.
+
+ This driver can also be built as a module. If so, the module
+ will be called ee1004.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 2aab60ef3e3e..a9b4b6579b75 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
+obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 840afb398f9e..99de6939cd5a 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,7 +366,7 @@ static int at25_probe(struct spi_device *spi)
at25->nvmem_config.word_size = 1;
at25->nvmem_config.size = chip.byte_len;
- at25->nvmem = nvmem_register(&at25->nvmem_config);
+ at25->nvmem = devm_nvmem_register(&spi->dev, &at25->nvmem_config);
if (IS_ERR(at25->nvmem))
return PTR_ERR(at25->nvmem);
@@ -379,16 +379,6 @@ static int at25_probe(struct spi_device *spi)
return 0;
}
-static int at25_remove(struct spi_device *spi)
-{
- struct at25_data *at25;
-
- at25 = spi_get_drvdata(spi);
- nvmem_unregister(at25->nvmem);
-
- return 0;
-}
-
/*-------------------------------------------------------------------------*/
static const struct of_device_id at25_of_match[] = {
@@ -403,7 +393,6 @@ static struct spi_driver at25_driver = {
.of_match_table = at25_of_match,
},
.probe = at25_probe,
- .remove = at25_remove,
};
module_spi_driver(at25_driver);
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
new file mode 100644
index 000000000000..276c1690ea1b
--- /dev/null
+++ b/drivers/misc/eeprom/ee1004.c
@@ -0,0 +1,281 @@
+/*
+ * ee1004 - driver for DDR4 SPD EEPROMs
+ *
+ * Copyright (C) 2017 Jean Delvare
+ *
+ * Based on the at24 driver:
+ * Copyright (C) 2005-2007 David Brownell
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+
+/*
+ * DDR4 memory modules use special EEPROMs following the JEDEC EE1004
+ * specification. These are 512-byte EEPROMs using a single I2C address
+ * in the 0x50-0x57 range for data. One of two 256-byte pages is selected
+ * by writing a command to I2C address 0x36 or 0x37 on the same I2C bus.
+ *
+ * Therefore we need to request these 2 additional addresses, and serialize
+ * access to all such EEPROMs with a single mutex.
+ *
+ * We assume it is safe to read up to 32 bytes at once from these EEPROMs.
+ * We use SMBus access even if I2C is available; these EEPROMs are small
+ * enough, and reads from them infrequent enough, that we favor simplicity
+ * over performance.
+ */
+
+#define EE1004_ADDR_SET_PAGE 0x36
+#define EE1004_EEPROM_SIZE 512
+#define EE1004_PAGE_SIZE 256
+#define EE1004_PAGE_SHIFT 8
+
+/*
+ * Mutex protects ee1004_set_page and ee1004_dev_count, and must be held
+ * from page selection to end of read.
+ */
+static DEFINE_MUTEX(ee1004_bus_lock);
+static struct i2c_client *ee1004_set_page[2];
+static unsigned int ee1004_dev_count;
+static int ee1004_current_page;
+
+static const struct i2c_device_id ee1004_ids[] = {
+ { "ee1004", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ee1004_ids);
+
+/*-------------------------------------------------------------------------*/
+
+static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
+ unsigned int offset, size_t count)
+{
+ int status;
+
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+ /* Can't cross page boundaries */
+ if (unlikely(offset + count > EE1004_PAGE_SIZE))
+ count = EE1004_PAGE_SIZE - offset;
+
+ status = i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
+ count, buf);
+ dev_dbg(&client->dev, "read %zu@%d --> %d\n", count, offset, status);
+
+ return status;
+}
+
+static ssize_t ee1004_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ size_t requested = count;
+ int page;
+
+ if (unlikely(!count))
+ return count;
+
+ page = off >> EE1004_PAGE_SHIFT;
+ if (unlikely(page > 1))
+ return 0;
+ off &= (1 << EE1004_PAGE_SHIFT) - 1;
+
+ /*
+ * Read data from chip, protecting against concurrent access to
+ * other EE1004 SPD EEPROMs on the same adapter.
+ */
+ mutex_lock(&ee1004_bus_lock);
+
+ while (count) {
+ int status;
+
+ /* Select page */
+ if (page != ee1004_current_page) {
+ /* Data is ignored */
+ status = i2c_smbus_write_byte(ee1004_set_page[page],
+ 0x00);
+ if (status < 0) {
+ dev_err(dev, "Failed to select page %d (%d)\n",
+ page, status);
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ dev_dbg(dev, "Selected page %d\n", page);
+ ee1004_current_page = page;
+ }
+
+ status = ee1004_eeprom_read(client, buf, off, count);
+ if (status < 0) {
+ mutex_unlock(&ee1004_bus_lock);
+ return status;
+ }
+ buf += status;
+ off += status;
+ count -= status;
+
+ if (off == EE1004_PAGE_SIZE) {
+ page++;
+ off = 0;
+ }
+ }
+
+ mutex_unlock(&ee1004_bus_lock);
+
+ return requested;
+}
+
+static const struct bin_attribute eeprom_attr = {
+ .attr = {
+ .name = "eeprom",
+ .mode = 0444,
+ },
+ .size = EE1004_EEPROM_SIZE,
+ .read = ee1004_read,
+};
+
+static int ee1004_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int err, cnr = 0;
+ const char *slow = NULL;
+
+ /* Make sure we can operate on this adapter */
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_WORD_DATA))
+ slow = "word";
+ else if (i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_BYTE |
+ I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ slow = "byte";
+ else
+ return -EPFNOSUPPORT;
+ }
+
+ /* Use 2 dummy devices for page select command */
+ mutex_lock(&ee1004_bus_lock);
+ if (++ee1004_dev_count == 1) {
+ for (cnr = 0; cnr < 2; cnr++) {
+ ee1004_set_page[cnr] = i2c_new_dummy(client->adapter,
+ EE1004_ADDR_SET_PAGE + cnr);
+ if (!ee1004_set_page[cnr]) {
+ dev_err(&client->dev,
+ "address 0x%02x unavailable\n",
+ EE1004_ADDR_SET_PAGE + cnr);
+ err = -EADDRINUSE;
+ goto err_clients;
+ }
+ }
+ } else if (i2c_adapter_id(client->adapter) !=
+ i2c_adapter_id(ee1004_set_page[0]->adapter)) {
+ dev_err(&client->dev,
+ "Driver only supports devices on a single I2C bus\n");
+ err = -EOPNOTSUPP;
+ goto err_clients;
+ }
+
+ /* Remember current page to avoid unneeded page select */
+ err = i2c_smbus_read_byte(ee1004_set_page[0]);
+ if (err == -ENXIO) {
+ /* Nack means page 1 is selected */
+ ee1004_current_page = 1;
+ } else if (err < 0) {
+ /* Anything else is a real error, bail out */
+ goto err_clients;
+ } else {
+ /* Ack means page 0 is selected, returned value meaningless */
+ ee1004_current_page = 0;
+ }
+ dev_dbg(&client->dev, "Currently selected page: %d\n",
+ ee1004_current_page);
+ mutex_unlock(&ee1004_bus_lock);
+
+ /* Create the sysfs eeprom file */
+ err = sysfs_create_bin_file(&client->dev.kobj, &eeprom_attr);
+ if (err)
+ goto err_clients_lock;
+
+ dev_info(&client->dev,
+ "%u byte EE1004-compliant SPD EEPROM, read-only\n",
+ EE1004_EEPROM_SIZE);
+ if (slow)
+ dev_notice(&client->dev,
+ "Falling back to %s reads, performance will suffer\n",
+ slow);
+
+ return 0;
+
+ err_clients_lock:
+ mutex_lock(&ee1004_bus_lock);
+ err_clients:
+ if (--ee1004_dev_count == 0) {
+ for (cnr--; cnr >= 0; cnr--) {
+ i2c_unregister_device(ee1004_set_page[cnr]);
+ ee1004_set_page[cnr] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return err;
+}
+
+static int ee1004_remove(struct i2c_client *client)
+{
+ int i;
+
+ sysfs_remove_bin_file(&client->dev.kobj, &eeprom_attr);
+
+ /* Remove page select clients if this is the last device */
+ mutex_lock(&ee1004_bus_lock);
+ if (--ee1004_dev_count == 0) {
+ for (i = 0; i < 2; i++) {
+ i2c_unregister_device(ee1004_set_page[i]);
+ ee1004_set_page[i] = NULL;
+ }
+ }
+ mutex_unlock(&ee1004_bus_lock);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static struct i2c_driver ee1004_driver = {
+ .driver = {
+ .name = "ee1004",
+ },
+ .probe = ee1004_probe,
+ .remove = ee1004_remove,
+ .id_table = ee1004_ids,
+};
+
+static int __init ee1004_init(void)
+{
+ return i2c_add_driver(&ee1004_driver);
+}
+module_init(ee1004_init);
+
+static void __exit ee1004_exit(void)
+{
+ i2c_del_driver(&ee1004_driver);
+}
+module_exit(ee1004_exit);
+
+MODULE_DESCRIPTION("Driver for EE1004-compliant DDR4 SPD EEPROMs");
+MODULE_AUTHOR("Jean Delvare");
+MODULE_LICENSE("GPL");
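Since ee1004 has no device tree or ACPI matching, the device is normally
instantiated from user space, e.g. by writing "ee1004 0x50" to the i2c
adapter's new_device sysfs file. A user-space sketch reading the resulting
eeprom attribute; the 0-0050 bus/address path is an assumption:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
    	unsigned char spd[512];
    	FILE *f = fopen("/sys/bus/i2c/devices/0-0050/eeprom", "rb");

    	if (!f)
    		return EXIT_FAILURE;
    	if (fread(spd, 1, sizeof(spd), f) != sizeof(spd)) {
    		fclose(f);
    		return EXIT_FAILURE;
    	}
    	fclose(f);

    	/* SPD byte 2 is the DRAM device type; 0x0C means DDR4 SDRAM */
    	printf("DRAM device type: 0x%02x\n", spd[2]);
    	return 0;
    }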
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 38766968bfa2..c6dd9ad9bf7b 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -439,7 +439,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
return -ENODEV;
}
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
if (!edev)
return -ENOMEM;
@@ -449,8 +449,7 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->addrlen = 6;
else {
dev_err(&spi->dev, "unspecified address type\n");
- err = -EINVAL;
- goto fail;
+ return -EINVAL;
}
mutex_init(&edev->lock);
@@ -473,11 +472,9 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
edev->nvmem_config.word_size = 1;
edev->nvmem_config.size = edev->size;
- edev->nvmem = nvmem_register(&edev->nvmem_config);
- if (IS_ERR(edev->nvmem)) {
- err = PTR_ERR(edev->nvmem);
- goto fail;
- }
+ edev->nvmem = devm_nvmem_register(&spi->dev, &edev->nvmem_config);
+ if (IS_ERR(edev->nvmem))
+ return PTR_ERR(edev->nvmem);
dev_info(&spi->dev, "%d-bit eeprom %s\n",
(pd->flags & EE_ADDR8) ? 8 : 16,
@@ -490,21 +487,15 @@ static int eeprom_93xx46_probe(struct spi_device *spi)
spi_set_drvdata(spi, edev);
return 0;
-fail:
- kfree(edev);
- return err;
}
static int eeprom_93xx46_remove(struct spi_device *spi)
{
struct eeprom_93xx46_dev *edev = spi_get_drvdata(spi);
- nvmem_unregister(edev->nvmem);
-
if (!(edev->pdata->flags & EE_READONLY))
device_remove_file(&spi->dev, &dev_attr_erase);
- kfree(edev);
return 0;
}
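The conversion above moves the driver to device-managed resources: devm_kzalloc() and devm_nvmem_register() tie the allocation and the nvmem device to the lifetime of the struct device, so the probe error paths and the remove path lose their manual cleanup. The pattern in isolation, a sketch with illustrative names:

    /* struct example_dev stands in for the driver's private state */
    static int example_probe(struct spi_device *spi)
    {
        struct example_dev *edev;

        edev = devm_kzalloc(&spi->dev, sizeof(*edev), GFP_KERNEL);
        if (!edev)
            return -ENOMEM; /* nothing to unwind by hand */

        spi_set_drvdata(spi, edev);
        return 0; /* no matching kfree() needed in remove() */
    }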
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index c7cd3675bcd1..d137d0fab9bf 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -24,7 +24,6 @@
* controlled from here.
*/
-#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/err.h>
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 656449cb4476..9a65bd9d6152 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -27,7 +27,6 @@
*/
#include <linux/types.h>
-#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/pci.h>
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 8679e0bd8ec2..3fcb9a2fe1c9 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -23,14 +23,12 @@
*/
#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/page-flags.h>
#include <linux/scatterlist.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
-#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
@@ -298,7 +296,7 @@ static int genwqe_sgl_size(int num_pages)
int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
void __user *user_addr, size_t user_size, int write)
{
- int rc;
+ int ret = -ENOMEM;
struct pci_dev *pci_dev = cd->pci_dev;
sgl->fpage_offs = offset_in_page((unsigned long)user_addr);
@@ -318,7 +316,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (get_order(sgl->sgl_size) > MAX_ORDER) {
dev_err(&pci_dev->dev,
"[%s] err: too much memory requested!\n", __func__);
- return -ENOMEM;
+ return ret;
}
sgl->sgl = __genwqe_alloc_consistent(cd, sgl->sgl_size,
@@ -326,7 +324,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
if (sgl->sgl == NULL) {
dev_err(&pci_dev->dev,
"[%s] err: no memory available!\n", __func__);
- return -ENOMEM;
+ return ret;
}
/* Only use buffering on incomplete pages */
@@ -339,7 +337,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->fpage + sgl->fpage_offs,
user_addr, sgl->fpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out;
}
}
@@ -352,7 +350,7 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
/* Sync with user memory */
if (copy_from_user(sgl->lpage, user_addr + user_size -
sgl->lpage_size, sgl->lpage_size)) {
- rc = -EFAULT;
+ ret = -EFAULT;
goto err_out2;
}
}
@@ -374,7 +372,8 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
sgl->sgl = NULL;
sgl->sgl_dma_addr = 0;
sgl->sgl_size = 0;
- return -ENOMEM;
+
+ return ret;
}
int genwqe_setup_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
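The genwqe hunks above collapse three identical -ENOMEM returns into one return variable that is pre-initialized to the most common error and overridden only where a path fails differently, a common kernel idiom. In isolation, with illustrative names:

    static int example_copy(void __user *ubuf, size_t len)
    {
        void *buf;
        int ret = -ENOMEM; /* most common failure, set once */

        buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
            return ret;

        if (copy_from_user(buf, ubuf, len)) {
            ret = -EFAULT; /* override only where it differs */
            goto err_free;
        }
        ret = 0;

    err_free:
        kfree(buf);
        return ret;
    }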
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 6193270e7b3d..de20bdaa148d 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -985,6 +985,12 @@ static void kgdbts_run_tests(void)
int nmi_sleep = 0;
int i;
+ verbose = 0;
+ if (strstr(config, "V1"))
+ verbose = 1;
+ if (strstr(config, "V2"))
+ verbose = 2;
+
ptr = strchr(config, 'F');
if (ptr)
fork_test = simple_strtol(ptr + 1, NULL, 10);
@@ -1068,13 +1074,6 @@ static int kgdbts_option_setup(char *opt)
return -ENOSPC;
}
strcpy(config, opt);
-
- verbose = 0;
- if (strstr(config, "V1"))
- verbose = 1;
- if (strstr(config, "V2"))
- verbose = 2;
-
return 0;
}
@@ -1086,9 +1085,6 @@ static int configure_kgdbts(void)
if (!strlen(config) || isspace(config[0]))
goto noconfig;
- err = kgdbts_option_setup(config);
- if (err)
- goto noconfig;
final_ack = 0;
run_plant_and_detach_test(1);
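Moving the V1/V2 scan from kgdbts_option_setup() into kgdbts_run_tests() means the verbosity is re-derived from the current config string every time the tests run, not only when the boot parameter is first parsed. The config format itself is unchanged; for example, a boot line such as

    kgdbts=V1F100

would select verbose level 1 and 100 fork-test iterations, matching the 'V' and 'F' tokens parsed above.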
diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 389475b25bb7..d5a0e7f1813b 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -18,7 +18,7 @@
* hardened usercopy checks by added "unconst" to all the const copies,
* and making sure "cache_size" isn't optimized into a const.
*/
-static volatile size_t unconst = 0;
+static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index a6f41f96f2a1..80215c312f0e 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -17,7 +17,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 4d77a6ae183a..87281b3695e6 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -599,10 +599,10 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
mei_cl_read_start(cl, mei_cl_mtu(cl), file);
}
- if (req_events & (POLLOUT | POLLWRNORM)) {
+ if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
poll_wait(file, &cl->tx_wait, wait);
if (cl->tx_cb_queued < dev->tx_queue_limit)
- mask |= POLLOUT | POLLWRNORM;
+ mask |= EPOLLOUT | EPOLLWRNORM;
}
out:
diff --git a/drivers/misc/mic/scif/scif_dma.c b/drivers/misc/mic/scif/scif_dma.c
index 6369aeaa7056..18b8ed57c4ac 100644
--- a/drivers/misc/mic/scif/scif_dma.c
+++ b/drivers/misc/mic/scif/scif_dma.c
@@ -1035,8 +1035,6 @@ scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
}
dma_async_issue_pending(chan);
}
- if (ret < 0)
- goto err;
offset += loop_len;
temp += loop_len;
temp_phys += loop_len;
@@ -1553,9 +1551,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
int src_cache_off, dst_cache_off;
s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
u8 *temp = NULL;
- bool src_local = true, dst_local = false;
+ bool src_local = true;
struct scif_dma_comp_cb *comp_cb;
- dma_addr_t src_dma_addr, dst_dma_addr;
int err;
if (is_dma_copy_aligned(chan->device, 1, 1, 1))
@@ -1569,12 +1566,8 @@ static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
if (work->loopback)
return scif_rma_list_cpu_copy(work);
- src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
- dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
src_local = work->src_window->type == SCIF_WINDOW_SELF;
- dst_local = work->dst_window->type == SCIF_WINDOW_SELF;
- dst_local = dst_local;
/* Allocate dma_completion cb */
comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
if (!comp_cb)
diff --git a/drivers/misc/mic/scif/scif_fence.c b/drivers/misc/mic/scif/scif_fence.c
index cac3bcc308a7..7bb929f05d85 100644
--- a/drivers/misc/mic/scif/scif_fence.c
+++ b/drivers/misc/mic/scif/scif_fence.c
@@ -272,7 +272,7 @@ static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
dma_fail:
if (!x100)
dma_pool_free(ep->remote_dev->signal_pool, status,
- status->src_dma_addr);
+ src - offsetof(struct scif_status, val));
alloc_fail:
return err;
}
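The scif_fence fix stops trusting status->src_dma_addr, which has not been populated yet on this error path, and instead recovers the DMA handle of the allocation from src. Since src holds the DMA address of the val member, subtracting that member's offset inside struct scif_status yields the handle that the pool allocation returned; the expression above, spelled out:

    /* DMA address of the struct == DMA address of ->val minus its offset */
    dma_addr_t status_dma = src - offsetof(struct scif_status, val);

    dma_pool_free(ep->remote_dev->signal_pool, status, status_dma);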
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 2e30de9c694a..57a6bb1fd3c9 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -280,7 +280,9 @@ int ocxl_config_check_afu_index(struct pci_dev *dev,
u32 val;
int rc, templ_major, templ_minor, len;
- pci_write_config_word(dev, fn->dvsec_afu_info_pos, afu_idx);
+ pci_write_config_byte(dev,
+ fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX,
+ afu_idx);
rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val);
if (rc)
return rc;
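The ocxl fix narrows the config-space access to the single byte that holds the AFU index, at its defined offset within the AFU-information DVSEC; the old code issued a 16-bit write at the DVSEC base, which was both the wrong width and the wrong offset. As a reminder of the accessor granularity:

    pci_write_config_byte(dev, pos, val8);   /* writes exactly one byte */
    pci_write_config_word(dev, pos, val16);  /* writes two bytes at pos */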
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 030769018461..4b23d586fc3f 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -634,7 +634,7 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_noop_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
@@ -792,7 +792,7 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
break;
case CBSS_PAGE_OVERFLOW:
STAT(mesq_page_overflow);
- /* fallthru */
+ /* fall through */
default:
BUG();
}
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 05a890ce2ab8..8e6607fc8a67 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -28,7 +28,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags)
{
enum xp_retval ret;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_OPENREQUEST) ||
!(ch->flags & XPC_C_ROPENREQUEST)) {
@@ -82,7 +82,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
struct xpc_partition *part = &xpc_partitions[ch->partid];
u32 channel_was_connected = (ch->flags & XPC_C_WASCONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (!(ch->flags & XPC_C_DISCONNECTING))
return;
@@ -755,7 +755,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
{
u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED);
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED))
return;
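These conversions, here and in xpc_sn2.c and xpc_uv.c below, replace a check that only asks whether anyone holds the lock with an assertion that the current context holds it; lockdep_assert_held() also compiles away entirely when CONFIG_LOCKDEP is off. A sketch of the idiom:

    static void example_update(struct xpc_channel *ch)
    {
        /* with lockdep enabled, verifies that *we* hold ch->lock */
        lockdep_assert_held(&ch->lock);

        ch->flags |= XPC_C_CONNECTED;
    }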
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index 0c3ef6f1df54..3eba1c420cc0 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -98,8 +98,7 @@ xpc_get_rsvd_page_pa(int nasid)
len = L1_CACHE_ALIGN(len);
if (len > buf_len) {
- if (buf_base != NULL)
- kfree(buf_base);
+ kfree(buf_base);
buf_len = L1_CACHE_ALIGN(len);
buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
&buf_base);
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index 5a12d2a54049..0ae69b9390ce 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -1671,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch)
{
struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
ch_sn2->remote_msgqueue_pa = 0;
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 340b44d9e8cf..0441abe87880 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -1183,7 +1183,7 @@ xpc_teardown_msg_structures_uv(struct xpc_channel *ch)
{
struct xpc_channel_uv *ch_uv = &ch->sn.uv;
- DBUG_ON(!spin_is_locked(&ch->lock));
+ lockdep_assert_held(&ch->lock);
kfree(ch_uv->cached_notify_gru_mq_desc);
ch_uv->cached_notify_gru_mq_desc = NULL;
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 74b183baf044..80d8cbe8c01a 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -323,10 +323,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
cur_start = block->start + block->size;
}
- err_chunks:
- if (child)
- of_node_put(child);
-
+err_chunks:
+ of_node_put(child);
kfree(rblocks);
return ret;
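The sram cleanup leans on the convention that of_node_put(), like kfree(), accepts a NULL argument as an explicit no-op, so callers need no guard:

    of_node_put(NULL); /* defined to do nothing */
    kfree(NULL);       /* same convention */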
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 2543ef1ece17..9b0b3fa4f836 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -25,6 +25,9 @@
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
@@ -37,20 +40,20 @@ MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
/*
- * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
- * allow wait (__GFP_RECLAIM) for NOSLEEP page allocations. Use
- * __GFP_NOWARN, to suppress page allocation failure warnings.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't allow wait
+ * (__GFP_RECLAIM) for huge page allocations. Use __GFP_NOWARN, to suppress page
+ * allocation failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_NOSLEEP (__GFP_HIGHMEM|__GFP_NOWARN)
+#define VMW_HUGE_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC)
/*
- * Use GFP_HIGHUSER when executing in a separate kernel thread
- * context and allocation can sleep. This is less stressful to
- * the guest memory system, since it allows the thread to block
- * while memory is reclaimed, and won't take pages from emergency
- * low-memory pools.
+ * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We allow lightweight
+ * reclamation (__GFP_NORETRY). Use __GFP_NOWARN, to suppress page allocation
+ * failure warnings. Disallow access to emergency low-memory pools.
*/
-#define VMW_PAGE_ALLOC_CANSLEEP (GFP_HIGHUSER)
+#define VMW_PAGE_ALLOC_FLAGS (__GFP_HIGHMEM|__GFP_NOWARN| \
+ __GFP_NOMEMALLOC|__GFP_NORETRY)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16
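The two flag sets above split the allocation policy by page size: huge-page allocations may not reclaim at all (no __GFP_RECLAIM bits), 4k allocations allow only lightweight reclaim (__GFP_NORETRY), and neither may dip into the emergency reserves (__GFP_NOMEMALLOC). A sketch of how they pair up, using the enum and order names introduced further down in this patch; the helper itself is illustrative:

    static struct page *balloon_alloc(enum vmballoon_page_size_type size)
    {
        if (size == VMW_BALLOON_2M_PAGE)
            return alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
                               VMW_BALLOON_2M_ORDER);
        return alloc_page(VMW_PAGE_ALLOC_FLAGS);
    }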
@@ -77,225 +80,420 @@ enum vmwballoon_capabilities {
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
-#define VMW_BALLOON_2M_SHIFT (9)
-#define VMW_BALLOON_NUM_PAGE_SIZES (2)
+#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
-/*
- * Backdoor commands availability:
- *
- * START, GET_TARGET and GUEST_ID are always available,
- *
- * VMW_BALLOON_BASIC_CMDS:
- * LOCK and UNLOCK commands,
- * VMW_BALLOON_BATCHED_CMDS:
- * BATCHED_LOCK and BATCHED_UNLOCK commands.
- * VMW BALLOON_BATCHED_2M_CMDS:
- * BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands,
- * VMW VMW_BALLOON_SIGNALLED_WAKEUP_CMD:
- * VMW_BALLOON_CMD_VMCI_DOORBELL_SET command.
- */
-#define VMW_BALLOON_CMD_START 0
-#define VMW_BALLOON_CMD_GET_TARGET 1
-#define VMW_BALLOON_CMD_LOCK 2
-#define VMW_BALLOON_CMD_UNLOCK 3
-#define VMW_BALLOON_CMD_GUEST_ID 4
-#define VMW_BALLOON_CMD_BATCHED_LOCK 6
-#define VMW_BALLOON_CMD_BATCHED_UNLOCK 7
-#define VMW_BALLOON_CMD_BATCHED_2M_LOCK 8
-#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK 9
-#define VMW_BALLOON_CMD_VMCI_DOORBELL_SET 10
-
-
-/* error codes */
-#define VMW_BALLOON_SUCCESS 0
-#define VMW_BALLOON_FAILURE -1
-#define VMW_BALLOON_ERROR_CMD_INVALID 1
-#define VMW_BALLOON_ERROR_PPN_INVALID 2
-#define VMW_BALLOON_ERROR_PPN_LOCKED 3
-#define VMW_BALLOON_ERROR_PPN_UNLOCKED 4
-#define VMW_BALLOON_ERROR_PPN_PINNED 5
-#define VMW_BALLOON_ERROR_PPN_NOTNEEDED 6
-#define VMW_BALLOON_ERROR_RESET 7
-#define VMW_BALLOON_ERROR_BUSY 8
+enum vmballoon_page_size_type {
+ VMW_BALLOON_4K_PAGE,
+ VMW_BALLOON_2M_PAGE,
+ VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
+};
-#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
+#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
-/* Batch page description */
+static const char * const vmballoon_page_size_names[] = {
+ [VMW_BALLOON_4K_PAGE] = "4k",
+ [VMW_BALLOON_2M_PAGE] = "2M"
+};
-/*
- * Layout of a page in the batch page:
+enum vmballoon_op {
+ VMW_BALLOON_INFLATE,
+ VMW_BALLOON_DEFLATE
+};
+
+enum vmballoon_op_stat_type {
+ VMW_BALLOON_OP_STAT,
+ VMW_BALLOON_OP_FAIL_STAT
+};
+
+#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
+
+/**
+ * enum vmballoon_cmd_type - backdoor commands.
*
- * +-------------+----------+--------+
- * | | | |
- * | Page number | Reserved | Status |
- * | | | |
- * +-------------+----------+--------+
- * 64 PAGE_SHIFT 6 0
+ * Availability of the commands is as follows:
*
- * The reserved field should be set to 0.
+ * %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
+ * %VMW_BALLOON_CMD_GUEST_ID are always available.
+ *
+ * If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
+ * %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
+ * %VMW_BALLOON_CMD_BATCHED_LOCK and %VMW_BALLOON_CMD_BATCHED_UNLOCK commands
+ * are available.
+ *
+ * If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
+ * %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
+ * are supported.
+ *
+ * If the host reports %VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
+ * the %VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
+ *
+ * @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
+ * @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
+ * @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
+ * @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
+ * to be deflated from the balloon.
+ * @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
+ * runs in the VM.
+ * @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
+ * ballooned pages (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
+ * pages that are about to be deflated from the
+ * balloon (up to 512).
+ * @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
+ * for 2MB pages.
+ * @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
+ * @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
+ * pages.
+ * @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
+ * that would be invoked when the balloon
+ * size changes.
+ * @VMW_BALLOON_CMD_LAST: Value of the last command.
*/
-#define VMW_BALLOON_BATCH_MAX_PAGES (PAGE_SIZE / sizeof(u64))
-#define VMW_BALLOON_BATCH_STATUS_MASK ((1UL << 5) - 1)
-#define VMW_BALLOON_BATCH_PAGE_MASK (~((1UL << PAGE_SHIFT) - 1))
-
-struct vmballoon_batch_page {
- u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
+enum vmballoon_cmd_type {
+ VMW_BALLOON_CMD_START,
+ VMW_BALLOON_CMD_GET_TARGET,
+ VMW_BALLOON_CMD_LOCK,
+ VMW_BALLOON_CMD_UNLOCK,
+ VMW_BALLOON_CMD_GUEST_ID,
+ /* No command 5 */
+ VMW_BALLOON_CMD_BATCHED_LOCK = 6,
+ VMW_BALLOON_CMD_BATCHED_UNLOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK,
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
+ VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};
-static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
-{
- return batch->pages[idx] & VMW_BALLOON_BATCH_PAGE_MASK;
-}
+#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
+
+enum vmballoon_error_codes {
+ VMW_BALLOON_SUCCESS,
+ VMW_BALLOON_ERROR_CMD_INVALID,
+ VMW_BALLOON_ERROR_PPN_INVALID,
+ VMW_BALLOON_ERROR_PPN_LOCKED,
+ VMW_BALLOON_ERROR_PPN_UNLOCKED,
+ VMW_BALLOON_ERROR_PPN_PINNED,
+ VMW_BALLOON_ERROR_PPN_NOTNEEDED,
+ VMW_BALLOON_ERROR_RESET,
+ VMW_BALLOON_ERROR_BUSY
+};
-static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
- int idx)
-{
- return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
-}
+#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
-static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
- u64 pa)
-{
- batch->pages[idx] = pa;
-}
+#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
+ ((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
+ (1UL << VMW_BALLOON_CMD_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
+ (1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
+
+static const char * const vmballoon_cmd_names[] = {
+ [VMW_BALLOON_CMD_START] = "start",
+ [VMW_BALLOON_CMD_GET_TARGET] = "target",
+ [VMW_BALLOON_CMD_LOCK] = "lock",
+ [VMW_BALLOON_CMD_UNLOCK] = "unlock",
+ [VMW_BALLOON_CMD_GUEST_ID] = "guestType",
+ [VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
+ [VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
+ [VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
+ [VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
+ [VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
+};
+enum vmballoon_stat_page {
+ VMW_BALLOON_PAGE_STAT_ALLOC,
+ VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ VMW_BALLOON_PAGE_STAT_FREE,
+ VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
+};
-#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result) \
-({ \
- unsigned long __status, __dummy1, __dummy2, __dummy3; \
- __asm__ __volatile__ ("inl %%dx" : \
- "=a"(__status), \
- "=c"(__dummy1), \
- "=d"(__dummy2), \
- "=b"(result), \
- "=S" (__dummy3) : \
- "0"(VMW_BALLOON_HV_MAGIC), \
- "1"(VMW_BALLOON_CMD_##cmd), \
- "2"(VMW_BALLOON_HV_PORT), \
- "3"(arg1), \
- "4" (arg2) : \
- "memory"); \
- if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START) \
- result = __dummy1; \
- result &= -1UL; \
- __status & -1UL; \
-})
+#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
-#ifdef CONFIG_DEBUG_FS
-struct vmballoon_stats {
- unsigned int timer;
- unsigned int doorbell;
-
- /* allocation statistics */
- unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int sleep_alloc;
- unsigned int sleep_alloc_fail;
- unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
-
- /* monitor operations */
- unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
- unsigned int target;
- unsigned int target_fail;
- unsigned int start;
- unsigned int start_fail;
- unsigned int guest_type;
- unsigned int guest_type_fail;
- unsigned int doorbell_set;
- unsigned int doorbell_unset;
+enum vmballoon_stat_general {
+ VMW_BALLOON_STAT_TIMER,
+ VMW_BALLOON_STAT_DOORBELL,
+ VMW_BALLOON_STAT_RESET,
+ VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_RESET
};
-#define STATS_INC(stat) (stat)++
-#else
-#define STATS_INC(stat)
-#endif
+#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
-struct vmballoon;
-struct vmballoon_ops {
- void (*add_page)(struct vmballoon *b, int idx, struct page *p);
- int (*lock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
- int (*unlock)(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target);
+static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
+static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
+
+struct vmballoon_ctl {
+ struct list_head pages;
+ struct list_head refused_pages;
+ unsigned int n_refused_pages;
+ unsigned int n_pages;
+ enum vmballoon_page_size_type page_size;
+ enum vmballoon_op op;
};
struct vmballoon_page_size {
/* list of reserved physical pages */
struct list_head pages;
-
- /* transient list of non-balloonable pages */
- struct list_head refused_pages;
- unsigned int n_refused_pages;
};
+/**
+ * struct vmballoon_batch_entry - a batch entry for lock or unlock.
+ *
+ * @status: the status of the operation, which is written by the hypervisor.
+ * @reserved: reserved for future use. Must be set to zero.
+ * @pfn: the physical frame number of the page to be locked or unlocked.
+ */
+struct vmballoon_batch_entry {
+ u64 status : 5;
+ u64 reserved : PAGE_SHIFT - 5;
+ u64 pfn : 52;
+} __packed;
+
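The packed bitfield above replaces the open-coded mask arithmetic that the patch removes; each 64-bit entry still carries a 5-bit status in the low bits and the PFN above PAGE_SHIFT. For a 4k page size (PAGE_SHIFT of 12) the layout and its equivalence to the old encoding look like this (sketch, with page standing in for the struct page being ballooned):

    /*
     * bits  0..4   status   (was VMW_BALLOON_BATCH_STATUS_MASK)
     * bits  5..11  reserved, must be zero
     * bits 12..63  pfn      (was pa & VMW_BALLOON_BATCH_PAGE_MASK)
     */
    struct vmballoon_batch_entry e = { .pfn = page_to_pfn(page) };
    u64 pa = (u64)e.pfn << PAGE_SHIFT; /* old-style "pa" value */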
struct vmballoon {
struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];
- /* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
- unsigned supported_page_sizes;
+ /**
+ * @max_page_size: maximum supported page size for ballooning.
+ *
+ * Protected by @conf_sem
+ */
+ enum vmballoon_page_size_type max_page_size;
+
+ /**
+ * @size: balloon actual size in basic page size (frames).
+ *
+ * While we currently do not support a size bigger than 32 bits, use
+ * 64 bits in preparation for future support.
+ */
+ atomic64_t size;
- /* balloon size in pages */
- unsigned int size;
- unsigned int target;
+ /**
+ * @target: balloon target size in basic page size (frames).
+ *
+ * We do not protect the target under the assumption that setting the
+ * value is always done through a single write. If this assumption ever
+ * breaks, we would have to use X_ONCE for accesses, and suffer the less
+ * optimized code. Although we may read stale target value if multiple
+ * accesses happen at once, the performance impact should be minor.
+ */
+ unsigned long target;
- /* reset flag */
+ /**
+ * @reset_required: reset flag
+ *
+ * Setting this flag may introduce races, but the code is expected to
+ * handle them gracefully. In the worst case, another operation will
+ * fail as reset did not take place. Clearing the flag is done while
+ * holding @conf_sem for write.
+ */
bool reset_required;
+ /**
+ * @capabilities: hypervisor balloon capabilities.
+ *
+ * Protected by @conf_sem.
+ */
unsigned long capabilities;
- struct vmballoon_batch_page *batch_page;
+ /**
+ * @batch_page: pointer to communication batch page.
+ *
+ * When batching is used, batch_page points to a page, which holds up to
+ * %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
+ */
+ struct vmballoon_batch_entry *batch_page;
+
+ /**
+ * @batch_max_pages: maximum pages that can be locked/unlocked.
+ *
+ * Indicates the number of pages that the hypervisor can lock or unlock
+ * at once, according to whether batching is enabled. If batching is
+ * disabled, only a single page can be locked/unlocked on each operation.
+ *
+ * Protected by @conf_sem.
+ */
unsigned int batch_max_pages;
- struct page *page;
- const struct vmballoon_ops *ops;
+ /**
+ * @page: page to be locked/unlocked by the hypervisor
+ *
+ * @page is only used when batching is disabled and a single page is
+ * reclaimed on each iteration.
+ *
+ * Protected by @comm_lock.
+ */
+ struct page *page;
-#ifdef CONFIG_DEBUG_FS
/* statistics */
- struct vmballoon_stats stats;
+ struct vmballoon_stats *stats;
+#ifdef CONFIG_DEBUG_FS
/* debugfs file exporting statistics */
struct dentry *dbg_entry;
#endif
- struct sysinfo sysinfo;
-
struct delayed_work dwork;
+ /**
+ * @vmci_doorbell: doorbell handle for host notifications on size changes.
+ *
+ * Protected by @conf_sem.
+ */
struct vmci_handle vmci_doorbell;
+
+ /**
+ * @conf_sem: semaphore to protect the configuration and the statistics.
+ */
+ struct rw_semaphore conf_sem;
+
+ /**
+ * @comm_lock: lock to protect the communication with the host.
+ *
+ * Lock ordering: @conf_sem -> @comm_lock.
+ */
+ spinlock_t comm_lock;
};
static struct vmballoon balloon;
+struct vmballoon_stats {
+ /* timer / doorbell operations */
+ atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
+
+ /* allocation statistics for huge and small pages */
+ atomic64_t
+ page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
+
+ /* Monitor operations: total operations, and failures */
+ atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
+};
+
+static inline bool is_vmballoon_stats_on(void)
+{
+ return IS_ENABLED(CONFIG_DEBUG_FS) &&
+ static_branch_unlikely(&balloon_stat_enabled);
+}
+
+static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
+ enum vmballoon_op_stat_type type)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->ops[op][type]);
+}
+
+static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
+ enum vmballoon_stat_general stat)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_gen_add(struct vmballoon *b,
+ enum vmballoon_stat_general stat,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->general_stat[stat]);
+}
+
+static inline void vmballoon_stats_page_inc(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_inc(&b->stats->page_stat[stat][size]);
+}
+
+static inline void vmballoon_stats_page_add(struct vmballoon *b,
+ enum vmballoon_stat_page stat,
+ enum vmballoon_page_size_type size,
+ unsigned int val)
+{
+ if (is_vmballoon_stats_on())
+ atomic64_add(val, &b->stats->page_stat[stat][size]);
+}
+
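All of the stats helpers above funnel through a static branch, so with statistics disabled each counter costs a patched-out jump rather than an atomic increment. The pattern in isolation, with illustrative names:

    static DEFINE_STATIC_KEY_FALSE(example_stats_on);

    static inline void example_count(atomic64_t *ctr)
    {
        /* a NOP in the instruction stream until the key is enabled */
        if (static_branch_unlikely(&example_stats_on))
            atomic64_inc(ctr);
    }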
+static inline unsigned long
+__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2, unsigned long *result)
+{
+ unsigned long status, dummy1, dummy2, dummy3, local_result;
+
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
+
+ asm volatile ("inl %%dx" :
+ "=a"(status),
+ "=c"(dummy1),
+ "=d"(dummy2),
+ "=b"(local_result),
+ "=S"(dummy3) :
+ "0"(VMW_BALLOON_HV_MAGIC),
+ "1"(cmd),
+ "2"(VMW_BALLOON_HV_PORT),
+ "3"(arg1),
+ "4"(arg2) :
+ "memory");
+
+ /* update the result if needed */
+ if (result)
+ *result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
+ local_result;
+
+ /* update target when applicable */
+ if (status == VMW_BALLOON_SUCCESS &&
+ ((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
+ WRITE_ONCE(b->target, local_result);
+
+ if (status != VMW_BALLOON_SUCCESS &&
+ status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
+ vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
+ pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
+ __func__, vmballoon_cmd_names[cmd], arg1, arg2,
+ status);
+ }
+
+ /* mark reset required accordingly */
+ if (status == VMW_BALLOON_ERROR_RESET)
+ b->reset_required = true;
+
+ return status;
+}
+
+static __always_inline unsigned long
+vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
+ unsigned long arg2)
+{
+ unsigned long dummy;
+
+ return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
+}
+
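__vmballoon_cmd() centralizes everything the old VMWARE_BALLOON_CMD macro left to its callers: statistics, target updates, failure logging and setting the reset flag. The register contract of the inl backdoor, as encoded in the asm constraints above, and a typical call:

    /*
     * in:  EAX = VMW_BALLOON_HV_MAGIC     out: EAX = status
     *      ECX = cmd                           ECX = capabilities
     *      EDX = VMW_BALLOON_HV_PORT                (START only)
     *      EBX = arg1                          EBX = result/target
     *      ESI = arg2
     */
    status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);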
/*
* Send "start" command to the host, communicating supported version
* of the protocol.
*/
-static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
+static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
- unsigned long status, capabilities, dummy = 0;
- bool success;
-
- STATS_INC(b->stats.start);
+ unsigned long status, capabilities;
- status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);
+ status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
+ &capabilities);
switch (status) {
case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
b->capabilities = capabilities;
- success = true;
break;
case VMW_BALLOON_SUCCESS:
b->capabilities = VMW_BALLOON_BASIC_CMDS;
- success = true;
break;
default:
- success = false;
+ return -EIO;
}
/*
@@ -303,626 +501,693 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
* reason disabled, do not use 2MB pages, since otherwise the legacy
* mechanism is used with 2MB pages, causing a failure.
*/
+ b->max_page_size = VMW_BALLOON_4K_PAGE;
if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
(b->capabilities & VMW_BALLOON_BATCHED_CMDS))
- b->supported_page_sizes = 2;
- else
- b->supported_page_sizes = 1;
-
- if (!success) {
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.start_fail);
- }
- return success;
-}
+ b->max_page_size = VMW_BALLOON_2M_PAGE;
-static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
-{
- switch (status) {
- case VMW_BALLOON_SUCCESS:
- return true;
- case VMW_BALLOON_ERROR_RESET:
- b->reset_required = true;
- /* fall through */
-
- default:
- return false;
- }
+ return 0;
}
-/*
+/**
+ * vmballoon_send_guest_id - communicate guest type to the host.
+ *
+ * @b: pointer to the balloon.
+ *
* Communicate guest type to the host so that it can adjust ballooning
* algorithm to the one most appropriate for the guest. This command
* is normally issued after sending "start" command and is part of
* standard reset sequence.
+ *
+ * Return: zero on success or appropriate error code.
*/
-static bool vmballoon_send_guest_id(struct vmballoon *b)
+static int vmballoon_send_guest_id(struct vmballoon *b)
{
- unsigned long status, dummy = 0;
-
- status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
- dummy);
-
- STATS_INC(b->stats.guest_type);
+ unsigned long status;
- if (vmballoon_check_status(b, status))
- return true;
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
+ VMW_BALLOON_GUEST_ID, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.guest_type_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-static u16 vmballoon_page_size(bool is_2m_page)
+/**
+ * vmballoon_page_order() - return the order of the page
+ * @page_size: the size of the page.
+ *
+ * Return: the allocation order.
+ */
+static inline
+unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
- if (is_2m_page)
- return 1 << VMW_BALLOON_2M_SHIFT;
+ return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
+}
- return 1;
+/**
+ * vmballoon_page_in_frames() - returns the number of frames in a page.
+ * @page_size: the size of the page.
+ *
+ * Return: the number of 4k frames.
+ */
+static inline unsigned int
+vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
+{
+ return 1 << vmballoon_page_order(page_size);
}
-/*
- * Retrieve desired balloon size from the host.
+/**
+ * vmballoon_send_get_target() - Retrieve desired balloon size from the host.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success; -EINVAL if the limit does not fit in 32 bits, as
+ * required by the host-guest protocol; -EIO if an error occurred in
+ * communicating with the host.
*/
-static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
+static int vmballoon_send_get_target(struct vmballoon *b)
{
unsigned long status;
- unsigned long target;
unsigned long limit;
- unsigned long dummy = 0;
- u32 limit32;
- /*
- * si_meminfo() is cheap. Moreover, we want to provide dynamic
- * max balloon size later. So let us call si_meminfo() every
- * iteration.
- */
- si_meminfo(&b->sysinfo);
- limit = b->sysinfo.totalram;
+ limit = totalram_pages;
/* Ensure limit fits in 32-bits */
- limit32 = (u32)limit;
- if (limit != limit32)
- return false;
-
- /* update stats */
- STATS_INC(b->stats.target);
+ if (limit != (u32)limit)
+ return -EINVAL;
- status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
- if (vmballoon_check_status(b, status)) {
- *new_target = target;
- return true;
- }
+ status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
- pr_debug("%s - failed, hv returns %ld\n", __func__, status);
- STATS_INC(b->stats.target_fail);
- return false;
+ return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-/*
- * Notify the host about allocated page so that host can use it without
- * fear that guest will need it. Host may reject some pages, we need to
- * check the return value and maybe submit a different page.
+/**
+ * vmballoon_alloc_page_list - allocates a list of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ * @req_n_pages: the number of requested pages.
+ *
+ * Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
+ * @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
+ *
+ * Return: zero on success or error code otherwise.
*/
-static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *hv_status, unsigned int *target)
+static int vmballoon_alloc_page_list(struct vmballoon *b,
+ struct vmballoon_ctl *ctl,
+ unsigned int req_n_pages)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return -EINVAL;
-
- STATS_INC(b->stats.lock[false]);
+ struct page *page;
+ unsigned int i;
- *hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return 0;
+ for (i = 0; i < req_n_pages; i++) {
+ if (ctl->page_size == VMW_BALLOON_2M_PAGE)
+ page = alloc_pages(VMW_HUGE_PAGE_ALLOC_FLAGS,
+ VMW_BALLOON_2M_ORDER);
+ else
+ page = alloc_page(VMW_PAGE_ALLOC_FLAGS);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[false]);
- return -EIO;
-}
+ /* Update statistics */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
+ ctl->page_size);
-static int vmballoon_send_batched_lock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ if (page) {
+ /* Success. Add the page to the list and continue. */
+ list_add(&page->lru, &ctl->pages);
+ continue;
+ }
- STATS_INC(b->stats.lock[is_2m_pages]);
+ /* Allocation failed. Update statistics and stop. */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
+ ctl->page_size);
+ break;
+ }
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
- *target);
+ ctl->n_pages = i;
- if (vmballoon_check_status(b, status))
- return 0;
-
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.lock_fail[is_2m_pages]);
- return 1;
+ return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}
-/*
- * Notify the host that guest intends to release given page back into
- * the pool of available (to the guest) pages.
+/**
+ * vmballoon_handle_one_result - Handle lock/unlock result for a single page.
+ *
+ * @b: pointer for %struct vmballoon.
+ * @page: pointer for the page whose result should be handled.
+ * @page_size: size of the page.
+ * @status: status of the operation as provided by the hypervisor.
+ *
+ * Return: zero on success, -EIO if the hypervisor reported an error for the page.
*/
-static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
- unsigned int *target)
+static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
+ enum vmballoon_page_size_type page_size,
+ unsigned long status)
{
- unsigned long status, dummy = 0;
- u32 pfn32;
-
- pfn32 = (u32)pfn;
- if (pfn32 != pfn)
- return false;
+ /* On success do nothing. The page is already on the balloon list. */
+ if (likely(status == VMW_BALLOON_SUCCESS))
+ return 0;
- STATS_INC(b->stats.unlock[false]);
+ pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
+ page_to_pfn(page), status,
+ vmballoon_page_size_names[page_size]);
- status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
- if (vmballoon_check_status(b, status))
- return true;
+ /* Error occurred */
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
+ page_size);
- pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[false]);
- return false;
+ return -EIO;
}
-static bool vmballoon_send_batched_unlock(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
+/**
+ * vmballoon_status_page - returns the status of (un)lock operation
+ *
+ * @b: pointer to the balloon.
+ * @idx: index for the page for which the operation is performed.
+ * @p: pointer to where the page struct is returned.
+ *
+ * Following a lock or unlock operation, returns the status of the operation for
+ * an individual page. Provides the page that the operation was performed on in
+ * the @p argument.
+ *
+ * Return: the status of a lock or unlock operation for an individual page.
+ */
+static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
+ struct page **p)
{
- unsigned long status;
- unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
-
- STATS_INC(b->stats.unlock[is_2m_pages]);
-
- if (is_2m_pages)
- status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
- *target);
- else
- status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
- *target);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ /* batching mode */
+ *p = pfn_to_page(b->batch_page[idx].pfn);
+ return b->batch_page[idx].status;
+ }
- if (vmballoon_check_status(b, status))
- return true;
+ /* non-batching mode */
+ *p = b->page;
- pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
- STATS_INC(b->stats.unlock_fail[is_2m_pages]);
- return false;
+ /*
+ * If a failure occurs, the indication will be provided in the status
+ * of the entire operation, which is considered before the individual
+ * page status. So for non-batching mode, the indication is always of
+ * success.
+ */
+ return VMW_BALLOON_SUCCESS;
}
-static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
+/**
+ * vmballoon_lock_op - notifies the host about inflated/deflated pages.
+ * @b: pointer to the balloon.
+ * @num_pages: number of inflated/deflated pages.
+ * @page_size: size of the page.
+ * @op: the type of operation (lock or unlock).
+ *
+ * Notify the host about page(s) that were ballooned (or removed from the
+ * balloon) so that the host can use them without fear that the guest will need
+ * them (or, on deflation, stop using them since the guest does). The host may
+ * reject some pages; we need to check the return value and maybe submit a
+ * different page. The pages that are inflated/deflated are pointed to by
+ * @b->page.
+ *
+ * Return: result as provided by the hypervisor.
+ */
+static unsigned long vmballoon_lock_op(struct vmballoon *b,
+ unsigned int num_pages,
+ enum vmballoon_page_size_type page_size,
+ enum vmballoon_op op)
{
- if (is_2m_page)
- return alloc_pages(flags, VMW_BALLOON_2M_SHIFT);
+ unsigned long cmd, pfn;
- return alloc_page(flags);
-}
+ lockdep_assert_held(&b->comm_lock);
-static void vmballoon_free_page(struct page *page, bool is_2m_page)
-{
- if (is_2m_page)
- __free_pages(page, VMW_BALLOON_2M_SHIFT);
- else
- __free_page(page);
+ if (static_branch_likely(&vmw_balloon_batching)) {
+ if (op == VMW_BALLOON_INFLATE)
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_LOCK :
+ VMW_BALLOON_CMD_BATCHED_LOCK;
+ else
+ cmd = page_size == VMW_BALLOON_2M_PAGE ?
+ VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
+ VMW_BALLOON_CMD_BATCHED_UNLOCK;
+
+ pfn = PHYS_PFN(virt_to_phys(b->batch_page));
+ } else {
+ cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
+ VMW_BALLOON_CMD_UNLOCK;
+ pfn = page_to_pfn(b->page);
+
+ /* In non-batching mode, PFNs must fit in 32-bit */
+ if (unlikely(pfn != (u32)pfn))
+ return VMW_BALLOON_ERROR_PPN_INVALID;
+ }
+
+ return vmballoon_cmd(b, cmd, pfn, num_pages);
}
-/*
- * Quickly release all pages allocated for the balloon. This function is
- * called when host decides to "reset" balloon for one reason or another.
- * Unlike normal "deflate" we do not (shall not) notify host of the pages
- * being released.
+/**
+ * vmballoon_add_page - adds a page towards lock/unlock operation.
+ *
+ * @b: pointer to the balloon.
+ * @idx: index of the page to be ballooned in this batch.
+ * @p: pointer to the page that is about to be ballooned.
+ *
+ * Adds the page to be ballooned. Must be called while holding @comm_lock.
*/
-static void vmballoon_pop(struct vmballoon *b)
+static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
+ struct page *p)
{
- struct page *page, *next;
- unsigned is_2m_pages;
-
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
- b->size -= size_per_page;
- cond_resched();
- }
- }
+ lockdep_assert_held(&b->comm_lock);
- /* Clearing the batch_page unconditionally has no adverse effect */
- free_page((unsigned long)b->batch_page);
- b->batch_page = NULL;
+ if (static_branch_likely(&vmw_balloon_batching))
+ b->batch_page[idx] = (struct vmballoon_batch_entry)
+ { .pfn = page_to_pfn(p) };
+ else
+ b->page = p;
}
-/*
- * Notify the host of a ballooned page. If host rejects the page put it on the
- * refuse list, those refused page are then released at the end of the
- * inflation cycle.
+/**
+ * vmballoon_lock - lock or unlock a batch of pages.
+ *
+ * @b: pointer to the balloon.
+ * @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
+ *
+ * Notifies the host about ballooned pages (after inflation or deflation,
+ * according to @ctl). If the host rejects a page, it is put on the @ctl
+ * refused-pages list. These refused pages are then released when moving to the
+ * next page size.
+ *
+ * Note that we neither free any pages here nor put them back on the ballooned
+ * pages list. Instead we queue them for later processing. We do that for several
+ * reasons. First, we do not want to free the page under the lock. Second, it
+ * allows us to unify the handling of lock and unlock. In the inflate case, the
+ * caller will check if there are too many refused pages and release them.
+ * Although it is not identical to the past behavior, it should not affect
+ * performance.
*/
-static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
- int locked, hv_status;
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
-
- locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
- target);
- if (locked) {
- STATS_INC(b->stats.refused_alloc[false]);
-
- if (locked == -EIO &&
- (hv_status == VMW_BALLOON_ERROR_RESET ||
- hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
- vmballoon_free_page(page, false);
- return -EIO;
- }
-
- /*
- * Place page on the list of non-balloonable pages
- * and retry allocation, unless we already accumulated
- * too many of them, in which case take a breather.
- */
- if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
- page_size->n_refused_pages++;
- list_add(&page->lru, &page_size->refused_pages);
- } else {
- vmballoon_free_page(page, false);
- }
- return locked;
- }
-
- /* track allocated page */
- list_add(&page->lru, &page_size->pages);
+ unsigned long batch_status;
+ struct page *page;
+ unsigned int i, num_pages;
- /* update balloon size */
- b->size++;
+ num_pages = ctl->n_pages;
+ if (num_pages == 0)
+ return 0;
- return 0;
-}
+ /* communication with the host is done under the communication lock */
+ spin_lock(&b->comm_lock);
-static int vmballoon_lock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages, unsigned int *target)
-{
- int locked, i;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ i = 0;
+ list_for_each_entry(page, &ctl->pages, lru)
+ vmballoon_add_page(b, i++, page);
- locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
- target);
- if (locked > 0) {
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
+ batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
+ ctl->op);
- vmballoon_free_page(p, is_2m_pages);
- }
+ /*
+ * Iterate over the pages in the provided list. Since we are changing
+ * @ctl->n_pages we are saving the original value in @num_pages and
+ * use this value to bound the loop.
+ */
+ for (i = 0; i < num_pages; i++) {
+ unsigned long status;
- return -EIO;
- }
+ status = vmballoon_status_page(b, i, &page);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+ * Failure of the whole batch overrides a single operation
+ * results.
+ */
+ if (batch_status != VMW_BALLOON_SUCCESS)
+ status = batch_status;
- locked = vmballoon_batch_get_status(b->batch_page, i);
+ /* Continue if no error happened */
+ if (!vmballoon_handle_one_result(b, page, ctl->page_size,
+ status))
+ continue;
- switch (locked) {
- case VMW_BALLOON_SUCCESS:
- list_add(&p->lru, &page_size->pages);
- b->size += size_per_page;
- break;
- case VMW_BALLOON_ERROR_PPN_PINNED:
- case VMW_BALLOON_ERROR_PPN_INVALID:
- if (page_size->n_refused_pages
- < VMW_BALLOON_MAX_REFUSED) {
- list_add(&p->lru, &page_size->refused_pages);
- page_size->n_refused_pages++;
- break;
- }
- /* Fallthrough */
- case VMW_BALLOON_ERROR_RESET:
- case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
- vmballoon_free_page(p, is_2m_pages);
- break;
- default:
- /* This should never happen */
- WARN_ON_ONCE(true);
- }
+ /*
+ * Error happened. Move the pages to the refused list and update
+ * the pages number.
+ */
+ list_move(&page->lru, &ctl->refused_pages);
+ ctl->n_pages--;
+ ctl->n_refused_pages++;
}
- return 0;
+ spin_unlock(&b->comm_lock);
+
+ return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
-/*
- * Release the page allocated for the balloon. Note that we first notify
- * the host so it can make sure the page will be available for the guest
- * to use, if needed.
+/**
+ * vmballoon_release_page_list() - Releases a page list
+ *
+ * @page_list: list of pages to release.
+ * @n_pages: pointer to the number of pages.
+ * @page_size: whether the pages in the list are 2MB (or else 4KB).
+ *
+ * Releases the list of pages and zeros the number of pages.
*/
-static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
- bool is_2m_pages, unsigned int *target)
+static void vmballoon_release_page_list(struct list_head *page_list,
+ int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page = b->page;
- struct vmballoon_page_size *page_size = &b->page_sizes[false];
-
- /* is_2m_pages can never happen as 2m pages support implies batching */
+ struct page *page, *tmp;
- if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
- list_add(&page->lru, &page_size->pages);
- return -EIO;
+ list_for_each_entry_safe(page, tmp, page_list, lru) {
+ list_del(&page->lru);
+ __free_pages(page, vmballoon_page_order(page_size));
}
- /* deallocate page */
- vmballoon_free_page(page, false);
- STATS_INC(b->stats.free[false]);
+ *n_pages = 0;
+}
- /* update balloon size */
- b->size--;
- return 0;
+/*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another.
+ */
+static void vmballoon_release_refused_pages(struct vmballoon *b,
+ struct vmballoon_ctl *ctl)
+{
+ vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
+ ctl->page_size);
+
+ vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
+ ctl->page_size);
}
-static int vmballoon_unlock_batched_page(struct vmballoon *b,
- unsigned int num_pages, bool is_2m_pages,
- unsigned int *target)
+/**
+ * vmballoon_change - retrieve the required balloon change
+ *
+ * @b: pointer for the balloon.
+ *
+ * Return: the required change for the balloon size. A positive number
+ * indicates inflation, a negative number indicates deflation.
+ */
+static int64_t vmballoon_change(struct vmballoon *b)
{
- int locked, i, ret = 0;
- bool hv_success;
- u16 size_per_page = vmballoon_page_size(is_2m_pages);
+ int64_t size, target;
- hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
- target);
- if (!hv_success)
- ret = -EIO;
+ size = atomic64_read(&b->size);
+ target = READ_ONCE(b->target);
- for (i = 0; i < num_pages; i++) {
- u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
- struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ /*
+ * We must cast first because of int sizes;
+ * otherwise we might get huge positives instead of negatives.
+ */
- locked = vmballoon_batch_get_status(b->batch_page, i);
- if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
- /*
- * That page wasn't successfully unlocked by the
- * hypervisor, re-add it to the list of pages owned by
- * the balloon driver.
- */
- list_add(&p->lru, &page_size->pages);
- } else {
- /* deallocate page */
- vmballoon_free_page(p, is_2m_pages);
- STATS_INC(b->stats.free[is_2m_pages]);
-
- /* update balloon size */
- b->size -= size_per_page;
- }
- }
+ if (b->reset_required)
+ return 0;
+
+ /* consider a 2MB slack on deflate, unless the balloon is emptied */
+ if (target < size && target != 0 &&
+ size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
+ return 0;
- return ret;
+ return target - size;
}
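To make the 2MB slack concrete: with 4k base pages a 2MB page spans 512 frames, so a balloon of 10240 frames with a target of 10000 (a gap of 240 frames) is left alone, while a target of zero always deflates the balloon completely.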
-/*
- * Release pages that were allocated while attempting to inflate the
- * balloon but were refused by the host for one reason or another.
+/**
+ * vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list of pages to enqueue.
+ * @n_pages: pointer to number of pages in list. The value is zeroed.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ *
+ * Enqueues the provided list of pages on the ballooned page list, clears the
+ * list and zeroes the number of pages that was provided.
*/
-static void vmballoon_release_refused_pages(struct vmballoon *b,
- bool is_2m_pages)
+static void vmballoon_enqueue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size)
{
- struct page *page, *next;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
- list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
- list_del(&page->lru);
- vmballoon_free_page(page, is_2m_pages);
- STATS_INC(b->stats.refused_free[is_2m_pages]);
- }
-
- page_size->n_refused_pages = 0;
+ list_splice_init(pages, &page_size_info->pages);
+ *n_pages = 0;
}
-static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
+/**
+ * vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
+ *
+ * @b: pointer to balloon.
+ * @pages: list onto which the dequeued pages are moved.
+ * @n_pages: pointer to the number of dequeued pages, set by this function.
+ * @page_size: whether the pages are 2MB or 4KB pages.
+ * @n_req_pages: the number of requested pages.
+ *
+ * Dequeues the number of requested pages from the balloon for deflation. The
+ * number of dequeued pages may be lower, if not enough pages in the requested
+ * size are available.
+ */
+static void vmballoon_dequeue_page_list(struct vmballoon *b,
+ struct list_head *pages,
+ unsigned int *n_pages,
+ enum vmballoon_page_size_type page_size,
+ unsigned int n_req_pages)
{
- b->page = p;
-}
+ struct vmballoon_page_size *page_size_info = &b->page_sizes[page_size];
+ struct page *page, *tmp;
+ unsigned int i = 0;
-static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
- struct page *p)
-{
- vmballoon_batch_set_pa(b->batch_page, idx,
- (u64)page_to_pfn(p) << PAGE_SHIFT);
+ list_for_each_entry_safe(page, tmp, &page_size_info->pages, lru) {
+ list_move(&page->lru, pages);
+ if (++i == n_req_pages)
+ break;
+ }
+ *n_pages = i;
}
-/*
- * Inflate the balloon towards its target size. Note that we try to limit
- * the rate of allocation to make sure we are not choking the rest of the
- * system.
+/**
+ * vmballoon_inflate() - Inflate the balloon towards its target size.
+ *
+ * @b: pointer to the balloon.
*/
static void vmballoon_inflate(struct vmballoon *b)
{
- unsigned int num_pages = 0;
- int error = 0;
- gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
- bool is_2m_pages;
+ int64_t to_inflate_frames;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = b->max_page_size,
+ .op = VMW_BALLOON_INFLATE
+ };
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ while ((to_inflate_frames = vmballoon_change(b)) > 0) {
+ unsigned int to_inflate_pages, page_in_frames;
+ int alloc_error, lock_error = 0;
- /*
- * First try NOSLEEP page allocations to inflate balloon.
- *
- * If we do not throttle nosleep allocations, we can drain all
- * free pages in the guest quickly (if the balloon target is high).
- * As a side-effect, draining free pages helps to inform (force)
- * the guest to start swapping if balloon target is not met yet,
- * which is a desired behavior. However, balloon driver can consume
- * all available CPU cycles if too many pages are allocated in a
- * second. Therefore, we throttle nosleep allocations even when
- * the guest is not under memory pressure. OTOH, if we have already
- * predicted that the guest is under memory pressure, then we
- * slowdown page allocations considerably.
- */
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages != 0);
- /*
- * Start with no sleep allocation rate which may be higher
- * than sleeping allocation rate.
- */
- is_2m_pages = b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
- pr_debug("%s - goal: %d", __func__, b->target - b->size);
+ to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_inflate_frames,
+ page_in_frames));
- while (!b->reset_required &&
- b->size + num_pages * vmballoon_page_size(is_2m_pages)
- < b->target) {
- struct page *page;
+ /* Start by allocating */
+ alloc_error = vmballoon_alloc_page_list(b, &ctl,
+ to_inflate_pages);
- if (flags == VMW_PAGE_ALLOC_NOSLEEP)
- STATS_INC(b->stats.alloc[is_2m_pages]);
- else
- STATS_INC(b->stats.sleep_alloc);
-
- page = vmballoon_alloc_page(flags, is_2m_pages);
- if (!page) {
- STATS_INC(b->stats.alloc_fail[is_2m_pages]);
-
- if (is_2m_pages) {
- b->ops->lock(b, num_pages, true, &b->target);
-
- /*
- * ignore errors from locking as we now switch
- * to 4k pages and we might get different
- * errors.
- */
-
- num_pages = 0;
- is_2m_pages = false;
- continue;
- }
-
- if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
- /*
- * CANSLEEP page allocation failed, so guest
- * is under severe memory pressure. We just log
- * the event, but do not stop the inflation
- * due to its negative impact on performance.
- */
- STATS_INC(b->stats.sleep_alloc_fail);
+ /* Actually lock the pages by telling the hypervisor */
+ lock_error = vmballoon_lock(b, &ctl);
+
+ /*
+ * A locking error indicates that something serious went wrong,
+ * so stop the inflation.
+ */
+ if (lock_error)
+ break;
+
+ /* Update the balloon size */
+ atomic64_add(ctl.n_pages * page_in_frames, &b->size);
+
+ vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size);
+
+ /*
+ * If allocation failed or the number of refused pages exceeds
+ * the maximum allowed, move to the next page size.
+ */
+ if (alloc_error ||
+ ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
+ if (ctl.page_size == VMW_BALLOON_4K_PAGE)
break;
- }
/*
- * NOSLEEP page allocation failed, so the guest is
- * under memory pressure. Slowing down page alloctions
- * seems to be reasonable, but doing so might actually
- * cause the hypervisor to throttle us down, resulting
- * in degraded performance. We will count on the
- * scheduler and standard memory management mechanisms
- * for now.
+ * Ignore errors from locking as we now switch to 4k
+ * pages and we might get different errors.
*/
- flags = VMW_PAGE_ALLOC_CANSLEEP;
- continue;
- }
-
- b->ops->add_page(b, num_pages++, page);
- if (num_pages == b->batch_max_pages) {
- error = b->ops->lock(b, num_pages, is_2m_pages,
- &b->target);
- num_pages = 0;
- if (error)
- break;
+ vmballoon_release_refused_pages(b, &ctl);
+ ctl.page_size--;
}
cond_resched();
}
- if (num_pages > 0)
- b->ops->lock(b, num_pages, is_2m_pages, &b->target);
-
- vmballoon_release_refused_pages(b, true);
- vmballoon_release_refused_pages(b, false);
+ /*
+ * Release pages that were allocated while attempting to inflate the
+ * balloon but were refused by the host for one reason or another,
+ * and update the statistics.
+ */
+ if (ctl.n_refused_pages != 0)
+ vmballoon_release_refused_pages(b, &ctl);
}
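
Each pass of the rewritten inflate loop converts the outstanding target, expressed in basic 4 KiB frames, into a page count for the current page size, capped at one communication batch. A small standalone sketch of that arithmetic; the 512-frames-per-2MB-page and batch-size values are assumptions chosen for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

    int main(void)
    {
    	uint64_t to_inflate_frames = 1000;	/* frames still short of target */
    	unsigned int page_in_frames = 512;	/* one 2MB page = 512 4KB frames */
    	unsigned int batch_max_pages = 512;	/* pages per hypervisor call */
    	unsigned int to_inflate_pages;

    	to_inflate_pages = DIV_ROUND_UP(to_inflate_frames, page_in_frames);
    	if (to_inflate_pages > batch_max_pages)
    		to_inflate_pages = batch_max_pages;

    	/* 1000 frames => 2 huge pages (rounded up), well under one batch */
    	printf("inflate %u pages of %u frames each\n",
    	       to_inflate_pages, page_in_frames);
    	return 0;
    }
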
-/*
+/**
+ * vmballoon_deflate() - Decrease the size of the balloon.
+ *
+ * @b: pointer to the balloon
+ * @n_frames: the number of frames to deflate. If zero, automatically
+ * calculated according to the target size.
+ * @coordinated: whether to coordinate with the host
+ *
* Decrease the size of the balloon allowing guest to use more memory.
+ *
+ * Return: The number of deflated frames (i.e., basic page size units).
*/
-static void vmballoon_deflate(struct vmballoon *b)
+static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
+ bool coordinated)
{
- unsigned is_2m_pages;
-
- pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);
+ unsigned long deflated_frames = 0;
+ unsigned long tried_frames = 0;
+ struct vmballoon_ctl ctl = {
+ .pages = LIST_HEAD_INIT(ctl.pages),
+ .refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
+ .page_size = VMW_BALLOON_4K_PAGE,
+ .op = VMW_BALLOON_DEFLATE
+ };
/* free pages to reach target */
- for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
- is_2m_pages++) {
- struct page *page, *next;
- unsigned int num_pages = 0;
- struct vmballoon_page_size *page_size =
- &b->page_sizes[is_2m_pages];
-
- list_for_each_entry_safe(page, next, &page_size->pages, lru) {
- if (b->reset_required ||
- (b->target > 0 &&
- b->size - num_pages
- * vmballoon_page_size(is_2m_pages)
- < b->target + vmballoon_page_size(true)))
- break;
+ while (true) {
+ unsigned int to_deflate_pages, n_unlocked_frames;
+ unsigned int page_in_frames;
+ int64_t to_deflate_frames;
+ bool deflated_all;
+
+ page_in_frames = vmballoon_page_in_frames(ctl.page_size);
+
+ VM_BUG_ON(!list_empty(&ctl.pages));
+ VM_BUG_ON(ctl.n_pages);
+ VM_BUG_ON(!list_empty(&ctl.refused_pages));
+ VM_BUG_ON(ctl.n_refused_pages);
+
+ /*
+ * If a specific number of frames was requested, we try to deflate
+ * that many frames. Otherwise, deflation is performed according to
+ * the target and the current balloon size.
+ */
+ to_deflate_frames = n_frames ? n_frames - tried_frames :
+ -vmballoon_change(b);
+
+ /* break if no work to do */
+ if (to_deflate_frames <= 0)
+ break;
+
+ /*
+ * Calculate the number of pages to deflate based on the current
+ * page size, but limit the deflated pages to a single batch.
+ */
+ to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
+ DIV_ROUND_UP_ULL(to_deflate_frames,
+ page_in_frames));
+
+ /* First take the pages from the balloon pages. */
+ vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
+ ctl.page_size, to_deflate_pages);
+
+ /*
+ * Before any pages are moved to the refused list, count their
+ * frames as frames that we tried to deflate.
+ */
+ tried_frames += ctl.n_pages * page_in_frames;
+
+ /*
+ * Unlock the pages by communicating with the hypervisor if the
+ * communication is coordinated (i.e., not pop). We ignore the
+ * return code. Instead we check whether we managed to unlock all
+ * the pages. If we failed, we move to the next page size and will
+ * eventually try again later.
+ */
+ if (coordinated)
+ vmballoon_lock(b, &ctl);
+
+ /*
+ * Check if we deflated enough. We will move to the next page
+ * size if we did not manage to do so. This calculation takes
+ * place now, as once the pages are released, the number of
+ * pages is zeroed.
+ */
+ deflated_all = (ctl.n_pages == to_deflate_pages);
+
+ /* Update local and global counters */
+ n_unlocked_frames = ctl.n_pages * page_in_frames;
+ atomic64_sub(n_unlocked_frames, &b->size);
+ deflated_frames += n_unlocked_frames;
- list_del(&page->lru);
- b->ops->add_page(b, num_pages++, page);
+ vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
+ ctl.page_size, ctl.n_pages);
- if (num_pages == b->batch_max_pages) {
- int error;
+ /* free the ballooned pages */
+ vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
+ ctl.page_size);
- error = b->ops->unlock(b, num_pages,
- is_2m_pages, &b->target);
- num_pages = 0;
- if (error)
- return;
- }
+ /* Return the refused pages to the ballooned list. */
+ vmballoon_enqueue_page_list(b, &ctl.refused_pages,
+ &ctl.n_refused_pages,
+ ctl.page_size);
- cond_resched();
+ /* If we failed to unlock all the pages, move to next size. */
+ if (!deflated_all) {
+ if (ctl.page_size == b->max_page_size)
+ break;
+ ctl.page_size++;
}
- if (num_pages > 0)
- b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
+ cond_resched();
}
-}
-static const struct vmballoon_ops vmballoon_basic_ops = {
- .add_page = vmballoon_add_page,
- .lock = vmballoon_lock_page,
- .unlock = vmballoon_unlock_page
-};
+ return deflated_frames;
+}
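
Note that the two rewritten loops walk the page-size ladder in opposite directions: inflation starts at the largest supported size and steps down (ctl.page_size--) when allocations fail or too many pages are refused, while deflation starts at 4 KB and steps up (ctl.page_size++) when it cannot unlock a full batch. A compact sketch of that policy, with an assumed two-level enum mirroring VMW_BALLOON_4K_PAGE and the 2 MB size:

    #include <stdio.h>

    enum page_size_type { SIZE_4K, SIZE_2M, LAST_SIZE = SIZE_2M };

    int main(void)
    {
    	enum page_size_type s;

    	/* Inflation: start big, fall back when the host refuses pages. */
    	s = SIZE_2M;
    	if (s != SIZE_4K)
    		s--;			/* mirrors ctl.page_size-- */
    	printf("inflate falls back to %s\n", s == SIZE_4K ? "4K" : "2M");

    	/* Deflation: start small, escalate on a partially unlocked batch. */
    	s = SIZE_4K;
    	if (s != LAST_SIZE)
    		s++;			/* mirrors ctl.page_size++ */
    	printf("deflate escalates to %s\n", s == SIZE_2M ? "2M" : "4K");
    	return 0;
    }
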
-static const struct vmballoon_ops vmballoon_batched_ops = {
- .add_page = vmballoon_add_batched_page,
- .lock = vmballoon_lock_batched_page,
- .unlock = vmballoon_unlock_batched_page
-};
+/**
+ * vmballoon_deinit_batching - disables batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Disables batching by freeing the page used for communication with the
+ * hypervisor and disabling the static key to indicate that batching is off.
+ */
+static void vmballoon_deinit_batching(struct vmballoon *b)
+{
+ free_page((unsigned long)b->batch_page);
+ b->batch_page = NULL;
+ static_branch_disable(&vmw_balloon_batching);
+ b->batch_max_pages = 1;
+}
-static bool vmballoon_init_batching(struct vmballoon *b)
+/**
+ * vmballoon_init_batching - enable batching mode.
+ *
+ * @b: pointer to &struct vmballoon.
+ *
+ * Enables batching by allocating a page for communication with the hypervisor
+ * and enabling the static key that indicates batching is in use.
+ *
+ * Return: zero on success or an appropriate error-code.
+ */
+static int vmballoon_init_batching(struct vmballoon *b)
{
struct page *page;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
- return false;
+ return -ENOMEM;
b->batch_page = page_address(page);
- return true;
+ b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
+
+ static_branch_enable(&vmw_balloon_batching);
+
+ return 0;
}
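
With batching enabled, the number of pages per hypervisor call is simply how many batch entries fit in the single communication page; with batching off it drops back to 1 (see vmballoon_deinit_batching() above). A sketch of the capacity computation, assuming an 8-byte batch entry as the batched-PFN layout suggests; the real sizeof(struct vmballoon_batch_entry) is defined elsewhere in the driver:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Assumed layout: one 64-bit word holding a PFN plus status bits. */
    struct vmballoon_batch_entry_sketch {
    	uint64_t pfn_and_flags;
    };

    int main(void)
    {
    	unsigned int batch_max_pages =
    		PAGE_SIZE / sizeof(struct vmballoon_batch_entry_sketch);

    	/* 4096 / 8 = 512 pages per batched hypervisor call */
    	printf("batch_max_pages = %u\n", batch_max_pages);
    	return 0;
    }
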
/*
@@ -932,7 +1197,7 @@ static void vmballoon_doorbell(void *client_data)
{
struct vmballoon *b = client_data;
- STATS_INC(b->stats.doorbell);
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
@@ -942,11 +1207,8 @@ static void vmballoon_doorbell(void *client_data)
*/
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
- int error;
-
- VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, VMCI_INVALID_ID,
- VMCI_INVALID_ID, error);
- STATS_INC(b->stats.doorbell_unset);
+ vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ VMCI_INVALID_ID, VMCI_INVALID_ID);
if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
vmci_doorbell_destroy(b->vmci_doorbell);
@@ -954,12 +1216,19 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
}
}
-/*
- * Initialize vmci doorbell, to get notified as soon as balloon changes
+/**
+ * vmballoon_vmci_init - Initialize vmci doorbell.
+ *
+ * @b: pointer to the balloon.
+ *
+ * Return: zero on success, or when the wakeup command is not supported;
+ * error-code otherwise.
+ *
+ * Initialize vmci doorbell, to get notified as soon as balloon changes.
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
- unsigned long error, dummy;
+ unsigned long error;
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
return 0;
@@ -971,10 +1240,9 @@ static int vmballoon_vmci_init(struct vmballoon *b)
if (error != VMCI_SUCCESS)
goto fail;
- error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
- b->vmci_doorbell.resource, dummy);
-
- STATS_INC(b->stats.doorbell_set);
+ error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
+ b->vmci_doorbell.context,
+ b->vmci_doorbell.resource, NULL);
if (error != VMW_BALLOON_SUCCESS)
goto fail;
@@ -985,6 +1253,23 @@ fail:
return -EIO;
}
+/**
+ * vmballoon_pop - Quickly release all pages allocated for the balloon.
+ *
+ * @b: pointer to the balloon.
+ *
+ * This function is called when the host decides to "reset" the balloon for
+ * one reason or another. Unlike a normal "deflate", we do not (and must not)
+ * notify the host of the pages being released.
+ */
+static void vmballoon_pop(struct vmballoon *b)
+{
+ unsigned long size;
+
+ while ((size = atomic64_read(&b->size)))
+ vmballoon_deflate(b, size, false);
+}
+
/*
* Perform standard reset sequence by popping the balloon (in case it
* is not empty) and then restarting protocol. This operation normally
@@ -994,18 +1279,18 @@ static void vmballoon_reset(struct vmballoon *b)
{
int error;
+ down_write(&b->conf_sem);
+
vmballoon_vmci_cleanup(b);
/* free all pages, skipping monitor unlock */
vmballoon_pop(b);
- if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+ if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
return;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
- b->ops = &vmballoon_batched_ops;
- b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
- if (!vmballoon_init_batching(b)) {
+ if (vmballoon_init_batching(b)) {
/*
* We failed to initialize batching, inform the monitor
* about it by sending a null capability.
@@ -1016,52 +1301,70 @@ static void vmballoon_reset(struct vmballoon *b)
return;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
- b->ops = &vmballoon_basic_ops;
- b->batch_max_pages = 1;
+ vmballoon_deinit_batching(b);
}
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
b->reset_required = false;
error = vmballoon_vmci_init(b);
if (error)
pr_err("failed to initialize vmci doorbell\n");
- if (!vmballoon_send_guest_id(b))
+ if (vmballoon_send_guest_id(b))
pr_err("failed to send guest ID to the host\n");
+
+ up_write(&b->conf_sem);
}
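
The reset path now takes conf_sem for writing while it tears down and renegotiates the protocol; the periodic worker (below) only takes it for reading, so ordinary inflate/deflate passes can run concurrently with one another but never with a reconfiguration. The same reader/writer discipline in a freestanding pthreads sketch (compile with -lpthread; names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t conf_sem = PTHREAD_RWLOCK_INITIALIZER;

    static void reset_config(void)
    {
    	pthread_rwlock_wrlock(&conf_sem);  /* like down_write(&b->conf_sem) */
    	/* tear down doorbell, renegotiate capabilities, re-init batching */
    	puts("reconfiguring (exclusive)");
    	pthread_rwlock_unlock(&conf_sem);
    }

    static void periodic_work(void)
    {
    	pthread_rwlock_rdlock(&conf_sem);  /* like down_read(&b->conf_sem) */
    	/* inflate/deflate against a stable configuration */
    	puts("adjusting balloon (shared)");
    	pthread_rwlock_unlock(&conf_sem);
    }

    int main(void)
    {
    	reset_config();
    	periodic_work();
    	return 0;
    }
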
-/*
- * Balloon work function: reset protocol, if needed, get the new size and
- * adjust balloon as needed. Repeat in 1 sec.
+/**
+ * vmballoon_work - periodic balloon worker for reset, inflation and deflation.
+ *
+ * @work: pointer to the &work_struct which is provided by the workqueue.
+ *
+ * Resets the protocol if needed, gets the new size and adjusts balloon as
+ * needed. Repeat in 1 sec.
*/
static void vmballoon_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
- unsigned int target;
-
- STATS_INC(b->stats.timer);
+ int64_t change = 0;
if (b->reset_required)
vmballoon_reset(b);
- if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
- /* update target, adjust size */
- b->target = target;
+ down_read(&b->conf_sem);
+
+ /*
+ * Update the stats while holding the semaphore to ensure that
+ * @stats_enabled is consistent with whether the stats are actually
+ * enabled.
+ */
+ vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
+
+ if (!vmballoon_send_get_target(b))
+ change = vmballoon_change(b);
+
+ if (change != 0) {
+ pr_debug("%s - size: %llu, target %lu\n", __func__,
+ atomic64_read(&b->size), READ_ONCE(b->target));
- if (b->size < target)
+ if (change > 0)
vmballoon_inflate(b);
- else if (target == 0 ||
- b->size > target + vmballoon_page_size(true))
- vmballoon_deflate(b);
+ else /* (change < 0) */
+ vmballoon_deflate(b, 0, true);
}
+ up_read(&b->conf_sem);
+
/*
* We are using a freezable workqueue so that balloon operations are
* stopped while the system transitions to/from sleep/hibernation.
*/
queue_delayed_work(system_freezable_wq,
dwork, round_jiffies_relative(HZ));
+
}
/*
@@ -1069,64 +1372,100 @@ static void vmballoon_work(struct work_struct *work)
*/
#ifdef CONFIG_DEBUG_FS
+static const char * const vmballoon_stat_page_names[] = {
+ [VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
+ [VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
+ [VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
+ [VMW_BALLOON_PAGE_STAT_FREE] = "free"
+};
+
+static const char * const vmballoon_stat_names[] = {
+ [VMW_BALLOON_STAT_TIMER] = "timer",
+ [VMW_BALLOON_STAT_DOORBELL] = "doorbell",
+ [VMW_BALLOON_STAT_RESET] = "reset",
+};
+
+static int vmballoon_enable_stats(struct vmballoon *b)
+{
+ int r = 0;
+
+ down_write(&b->conf_sem);
+
+ /* did we somehow race with another reader which enabled stats? */
+ if (b->stats)
+ goto out;
+
+ b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
+
+ if (!b->stats) {
+ /* allocation failed */
+ r = -ENOMEM;
+ goto out;
+ }
+ static_key_enable(&balloon_stat_enabled.key);
+out:
+ up_write(&b->conf_sem);
+ return r;
+}
+
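
vmballoon_enable_stats() is a classic check/lock/re-check allocation: the caller tests b->stats without the lock, and the function re-tests it under the write semaphore so that two racing debugfs readers cannot both allocate. A userspace sketch of the same idiom:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    struct stats { unsigned long counters[4]; };

    static pthread_rwlock_t conf_sem = PTHREAD_RWLOCK_INITIALIZER;
    static struct stats *stats;

    static int enable_stats(void)
    {
    	int r = 0;

    	pthread_rwlock_wrlock(&conf_sem);
    	if (stats)		/* lost the race: someone else enabled them */
    		goto out;
    	stats = calloc(1, sizeof(*stats));
    	if (!stats)
    		r = -ENOMEM;
    out:
    	pthread_rwlock_unlock(&conf_sem);
    	return r;
    }

    int main(void)
    {
    	if (!stats && enable_stats())	/* unlocked fast-path check */
    		return 1;
    	puts("stats enabled");
    	return 0;
    }
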
+/**
+ * vmballoon_debug_show - shows statistics of balloon operations.
+ * @f: pointer to the &struct seq_file.
+ * @offset: ignored.
+ *
+ * Provides the statistics that can be accessed in vmmemctl in the debugfs.
+ * To avoid the overhead of collecting statistics, mainly the memory overhead,
+ * we only start collecting statistics the first time the counters are read.
+ *
+ * Return: zero on success or an error code.
+ */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
struct vmballoon *b = f->private;
- struct vmballoon_stats *stats = &b->stats;
+ int i, j;
+
+ /* enables stats if they are disabled */
+ if (!b->stats) {
+ int r = vmballoon_enable_stats(b);
+
+ if (r)
+ return r;
+ }
/* format capabilities info */
- seq_printf(f,
- "balloon capabilities: %#4x\n"
- "used capabilities: %#4lx\n"
- "is resetting: %c\n",
- VMW_BALLOON_CAPABILITIES, b->capabilities,
- b->reset_required ? 'y' : 'n');
+ seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
+ VMW_BALLOON_CAPABILITIES);
+ seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
+ seq_printf(f, "%-22s: %16s\n", "is resetting",
+ b->reset_required ? "y" : "n");
/* format size info */
- seq_printf(f,
- "target: %8d pages\n"
- "current: %8d pages\n",
- b->target, b->size);
-
- seq_printf(f,
- "\n"
- "timer: %8u\n"
- "doorbell: %8u\n"
- "start: %8u (%4u failed)\n"
- "guestType: %8u (%4u failed)\n"
- "2m-lock: %8u (%4u failed)\n"
- "lock: %8u (%4u failed)\n"
- "2m-unlock: %8u (%4u failed)\n"
- "unlock: %8u (%4u failed)\n"
- "target: %8u (%4u failed)\n"
- "prim2mAlloc: %8u (%4u failed)\n"
- "primNoSleepAlloc: %8u (%4u failed)\n"
- "primCanSleepAlloc: %8u (%4u failed)\n"
- "prim2mFree: %8u\n"
- "primFree: %8u\n"
- "err2mAlloc: %8u\n"
- "errAlloc: %8u\n"
- "err2mFree: %8u\n"
- "errFree: %8u\n"
- "doorbellSet: %8u\n"
- "doorbellUnset: %8u\n",
- stats->timer,
- stats->doorbell,
- stats->start, stats->start_fail,
- stats->guest_type, stats->guest_type_fail,
- stats->lock[true], stats->lock_fail[true],
- stats->lock[false], stats->lock_fail[false],
- stats->unlock[true], stats->unlock_fail[true],
- stats->unlock[false], stats->unlock_fail[false],
- stats->target, stats->target_fail,
- stats->alloc[true], stats->alloc_fail[true],
- stats->alloc[false], stats->alloc_fail[false],
- stats->sleep_alloc, stats->sleep_alloc_fail,
- stats->free[true],
- stats->free[false],
- stats->refused_alloc[true], stats->refused_alloc[false],
- stats->refused_free[true], stats->refused_free[false],
- stats->doorbell_set, stats->doorbell_unset);
+ seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
+ seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
+
+ for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
+ if (vmballoon_cmd_names[i] == NULL)
+ continue;
+
+ seq_printf(f, "%-22s: %16llu (%llu failed)\n",
+ vmballoon_cmd_names[i],
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
+ atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
+ }
+
+ for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
+ seq_printf(f, "%-22s: %16llu\n",
+ vmballoon_stat_names[i],
+ atomic64_read(&b->stats->general_stat[i]));
+
+ for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
+ for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
+ seq_printf(f, "%-18s(%s): %16llu\n",
+ vmballoon_stat_page_names[i],
+ vmballoon_page_size_names[j],
+ atomic64_read(&b->stats->page_stat[i][j]));
+ }
return 0;
}
@@ -1161,7 +1500,10 @@ static int __init vmballoon_debugfs_init(struct vmballoon *b)
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
+ static_key_disable(&balloon_stat_enabled.key);
debugfs_remove(b->dbg_entry);
+ kfree(b->stats);
+ b->stats = NULL;
}
#else
@@ -1179,8 +1521,9 @@ static inline void vmballoon_debugfs_exit(struct vmballoon *b)
static int __init vmballoon_init(void)
{
+ enum vmballoon_page_size_type page_size;
int error;
- unsigned is_2m_pages;
+
/*
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
@@ -1188,11 +1531,10 @@ static int __init vmballoon_init(void)
if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
- for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
- is_2m_pages++) {
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
- INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
- }
+ for (page_size = VMW_BALLOON_4K_PAGE;
+ page_size <= VMW_BALLOON_LAST_SIZE; page_size++)
+ INIT_LIST_HEAD(&balloon.page_sizes[page_size].pages);
+
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
@@ -1200,6 +1542,8 @@ static int __init vmballoon_init(void)
if (error)
return error;
+ spin_lock_init(&balloon.comm_lock);
+ init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
balloon.batch_page = NULL;
balloon.page = NULL;
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index d7eaf1eb11e7..003bfba40758 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.1.5.0-k");
+MODULE_VERSION("1.1.6.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_host.c b/drivers/misc/vmw_vmci/vmci_host.c
index 83e0c95d20a4..edfffc9699ba 100644
--- a/drivers/misc/vmw_vmci/vmci_host.c
+++ b/drivers/misc/vmw_vmci/vmci_host.c
@@ -15,7 +15,6 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
@@ -448,15 +447,12 @@ static int vmci_host_do_alloc_queuepair(struct vmci_host_dev *vmci_host_dev,
struct vmci_handle handle;
int vmci_status;
int __user *retptr;
- u32 cid;
if (vmci_host_dev->ct_type != VMCIOBJ_CONTEXT) {
vmci_ioctl_err("only valid for contexts\n");
return -EINVAL;
}
- cid = vmci_ctx_get_id(vmci_host_dev->context);
-
if (vmci_host_dev->user_version < VMCI_VERSION_NOVMVM) {
struct vmci_qp_alloc_info_vmvm alloc_info;
struct vmci_qp_alloc_info_vmvm __user *info = uptr;
diff --git a/drivers/misc/vmw_vmci/vmci_resource.c b/drivers/misc/vmw_vmci/vmci_resource.c
index 1ab6e8737a5f..da1ee2e1ba99 100644
--- a/drivers/misc/vmw_vmci/vmci_resource.c
+++ b/drivers/misc/vmw_vmci/vmci_resource.c
@@ -57,7 +57,8 @@ static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
if (r->type == type &&
rid == handle.resource &&
- (cid == handle.context || cid == VMCI_INVALID_ID)) {
+ (cid == handle.context || cid == VMCI_INVALID_ID ||
+ handle.context == VMCI_INVALID_ID)) {
resource = r;
break;
}
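
The vmci_resource.c hunk widens the lookup predicate: previously a resource matched only when the context IDs were equal or the registered resource used the wildcard; now a queried handle carrying VMCI_INVALID_ID also matches any context. The predicate in isolation, with simplified types:

    #include <stdio.h>

    #define VMCI_INVALID_ID 0xffffffffu

    /* Matches when either side is the wildcard or the IDs agree. */
    static int context_matches(unsigned int registered, unsigned int queried)
    {
    	return registered == queried ||
    	       registered == VMCI_INVALID_ID ||
    	       queried == VMCI_INVALID_ID;
    }

    int main(void)
    {
    	printf("%d %d %d\n",
    	       context_matches(5, 5),			/* 1: exact match */
    	       context_matches(VMCI_INVALID_ID, 5),	/* 1: old wildcard */
    	       context_matches(5, VMCI_INVALID_ID));	/* 1: newly accepted */
    	return 0;
    }
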
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6d3221134927..7968c644ad86 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1964,8 +1964,6 @@ static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
if (!alx_reset_mac(hw))
rc = PCI_ERS_RESULT_RECOVERED;
out:
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
rtnl_unlock();
return rc;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 122fdb80a789..bbb247116045 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -8793,13 +8793,6 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
return result;
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 40093d88353f..95309b27c7d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14380,14 +14380,6 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- /* If AER, perform cleanup of the PCIe registers */
- if (bp->flags & AER_ENABLED) {
- if (pci_cleanup_aer_uncorrect_error_status(pdev))
- BNX2X_ERR("pci_cleanup_aer_uncorrect_error_status failed\n");
- else
- DP(NETIF_MSG_HW, "pci_cleanup_aer_uncorrect_error_status succeeded\n");
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index de987ccdcf1f..dd85d790f638 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -10354,13 +10354,6 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
rtnl_unlock();
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err); /* non-fatal, continue */
- }
-
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 35564a8a48f9..a6cbaca37e94 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -341,7 +341,7 @@ static struct device_node *bcmgenet_mii_of_find_mdio(struct bcmgenet_priv *priv)
if (!compat)
return NULL;
- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ priv->mdio_dn = of_get_compatible_child(dn, compat);
kfree(compat);
if (!priv->mdio_dn) {
dev_err(kdev, "unable to find MDIO bus node\n");
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2de0590a62c4..05a46926016a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4767,7 +4767,6 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 267322693ed5..9a6065a3fa46 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -520,10 +520,20 @@ setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
if (!txq_info)
return -ENOMEM;
+ if (uld_type == CXGB4_ULD_CRYPTO) {
+ i = min_t(int, adap->vres.ncrypto_fc,
+ num_online_cpus());
+ txq_info->ntxq = rounddown(i, adap->params.nports);
+ if (txq_info->ntxq <= 0) {
+ dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
+ kfree(txq_info);
+ return -EINVAL;
+ }
- i = min_t(int, uld_info->ntxq, num_online_cpus());
- txq_info->ntxq = roundup(i, adap->params.nports);
-
+ } else {
+ i = min_t(int, uld_info->ntxq, num_online_cpus());
+ txq_info->ntxq = roundup(i, adap->params.nports);
+ }
txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
GFP_KERNEL);
if (!txq_info->uldtxq) {
@@ -546,11 +556,14 @@ static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
struct cxgb4_lld_info *lli)
{
struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+ int tx_uld_type = TX_ULD(uld_type);
+ struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];
lli->rxq_ids = rxq_info->rspq_id;
lli->nrxq = rxq_info->nrxq;
lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
lli->nciq = rxq_info->nciq;
+ lli->ntxq = txq_info->ntxq;
}
int t4_uld_mem_alloc(struct adapter *adap)
@@ -634,7 +647,6 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
lld->ports = adap->port;
lld->vr = &adap->vres;
lld->mtus = adap->params.mtus;
- lld->ntxq = adap->sge.ofldqsets;
lld->nchan = adap->params.nports;
lld->nports = adap->params.nports;
lld->wr_cred = adap->params.ofldq_wr_cred;
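
For the crypto ULD the transmit-queue count is now derived from the firmware-provided ncrypto_fc budget and rounded down to a multiple of the port count (other ULDs keep rounding up), with a zero result treated as a configuration error. The sizing rule on its own, with assumed example values:

    #include <stdio.h>

    static unsigned int rounddown(unsigned int x, unsigned int y)
    {
    	return x - (x % y);
    }

    int main(void)
    {
    	unsigned int ncrypto_fc = 30;	/* assumed firmware queue budget */
    	unsigned int ncpus = 8;
    	unsigned int nports = 4;
    	unsigned int i = ncrypto_fc < ncpus ? ncrypto_fc : ncpus;
    	unsigned int ntxq = rounddown(i, nports);

    	if (ntxq == 0) {
    		fprintf(stderr, "Crypto Tx queues can't be zero\n");
    		return 1;
    	}
    	/* min(30, 8) = 8, rounded down to a multiple of 4 ports => 8 */
    	printf("ntxq = %u\n", ntxq);
    	return 0;
    }
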
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bff74752cef1..c5ad7a4f4d83 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -6146,7 +6146,6 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
if (status)
return PCI_ERS_RESULT_DISCONNECT;
- pci_cleanup_aer_uncorrect_error_status(pdev);
be_clear_error(adapter, BE_CLEAR_ALL);
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index c0f9faca70c4..16a73bd9f4cb 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6854,8 +6854,6 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index c859ababeed5..02345d381303 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2440,8 +2440,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
return result;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 330bafe6a689..bc71a21c1dc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -14552,7 +14552,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
@@ -14573,14 +14572,6 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_DISCONNECT;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_info(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0d29df8accd8..5df88ad8ac81 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -9086,7 +9086,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
dev_err(&pdev->dev,
@@ -9110,14 +9109,6 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- dev_err(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
- err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 51268772a999..0049a2becd7e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11322,8 +11322,6 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
/* Free device reference count */
pci_dev_put(vfdev);
}
-
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/*
@@ -11373,7 +11371,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
pci_ers_result_t result;
- int err;
if (pci_enable_device_mem(pdev)) {
e_err(probe, "Cannot re-enable PCI device after reset.\n");
@@ -11393,13 +11390,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
result = PCI_ERS_RESULT_RECOVERED;
}
- err = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (err) {
- e_dev_err("pci_cleanup_aer_uncorrect_error_status "
- "failed 0x%0x\n", err);
- /* non-fatal, continue */
- }
-
return result;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 59c70be22a84..7d9819d80e44 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1784,11 +1784,6 @@ static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-static void netxen_io_resume(struct pci_dev *pdev)
-{
- pci_cleanup_aer_uncorrect_error_status(pdev);
-}
-
static void netxen_nic_shutdown(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
@@ -3465,7 +3460,6 @@ netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
static const struct pci_error_handlers netxen_err_handler = {
.error_detected = netxen_io_error_detected,
.slot_reset = netxen_io_slot_reset,
- .resume = netxen_io_resume,
};
static struct pci_driver netxen_driver = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index a79d84f99102..2a533280b124 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -4233,7 +4233,6 @@ static void qlcnic_83xx_io_resume(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
qlcnic_83xx_aer_start_poll_work(adapter);
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index dbd48012224f..d42ba2293d8c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3930,7 +3930,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 3d0dd39c289e..98fe7e762e17 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3821,7 +3821,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3829,13 +3828,6 @@ static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 03e2455c502e..8b1f94d7a6c5 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -3160,7 +3160,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
{
struct ef4_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
- int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
@@ -3168,13 +3167,6 @@ static pci_ers_result_t ef4_io_slot_reset(struct pci_dev *pdev)
status = PCI_ERS_RESULT_DISCONNECT;
}
- rc = pci_cleanup_aer_uncorrect_error_status(pdev);
- if (rc) {
- netif_err(efx, hw, efx->net_dev,
- "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
- /* Non-fatal error. Continue. */
- }
-
return status;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index f9a61f90cfbc..0f660af01a4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -714,8 +714,9 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
return -ENODEV;
}
- mdio_internal = of_find_compatible_node(mdio_mux, NULL,
+ mdio_internal = of_get_compatible_child(mdio_mux,
"allwinner,sun8i-h3-mdio-internal");
+ of_node_put(mdio_mux);
if (!mdio_internal) {
dev_err(priv->device, "Cannot get internal_mdio node\n");
return -ENODEV;
@@ -729,13 +730,20 @@ static int get_ephy_nodes(struct stmmac_priv *priv)
gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
if (IS_ERR(gmac->rst_ephy)) {
ret = PTR_ERR(gmac->rst_ephy);
- if (ret == -EPROBE_DEFER)
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return ret;
+ }
continue;
}
dev_info(priv->device, "Found internal PHY node\n");
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
return 0;
}
+
+ of_node_put(mdio_internal);
return -ENODEV;
}
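
Both the bcmgenet and dwmac-sun8i hunks swap of_find_compatible_node() for of_get_compatible_child(), which searches only direct children and, unlike the _find_ variant, does not consume the starting node's reference; the dwmac-sun8i change therefore also adds of_node_put() on every exit path. A hedged kernel-context sketch of the resulting discipline (not a standalone program; error handling trimmed, and the ownership of mdio_mux passing to the callee is an illustrative choice):

    #include <linux/of.h>

    /* Every of_get_*() reference must be balanced by of_node_put()
     * on all return paths. */
    static int find_internal_mdio(struct device_node *mdio_mux)
    {
    	struct device_node *mdio_internal;

    	mdio_internal = of_get_compatible_child(mdio_mux,
    				"allwinner,sun8i-h3-mdio-internal");
    	of_node_put(mdio_mux);		/* done with the parent either way */
    	if (!mdio_internal)
    		return -ENODEV;

    	/* ... use mdio_internal ... */

    	of_node_put(mdio_internal);	/* balance the get before returning */
    	return 0;
    }
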
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index a205750b431b..7ccdc62c6052 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -95,7 +95,7 @@ static inline void sha_pad_init(struct sha_pad *shapad)
* State for an MPPE (de)compressor.
*/
struct ppp_mppe_state {
- struct crypto_skcipher *arc4;
+ struct crypto_sync_skcipher *arc4;
struct shash_desc *sha1;
unsigned char *sha1_digest;
unsigned char master_key[MPPE_MAX_KEY_LEN];
@@ -155,15 +155,15 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
struct scatterlist sg_in[1], sg_out[1];
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
get_new_key_from_sha(state);
if (!initial_key) {
- crypto_skcipher_setkey(state->arc4, state->sha1_digest,
- state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->sha1_digest,
+ state->keylen);
sg_init_table(sg_in, 1);
sg_init_table(sg_out, 1);
setup_sg(sg_in, state->sha1_digest, state->keylen);
@@ -181,7 +181,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
state->session_key[1] = 0x26;
state->session_key[2] = 0x9e;
}
- crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+ crypto_sync_skcipher_setkey(state->arc4, state->session_key,
+ state->keylen);
skcipher_request_zero(req);
}
@@ -203,7 +204,7 @@ static void *mppe_alloc(unsigned char *options, int optlen)
goto out;
- state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ state->arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(state->arc4)) {
state->arc4 = NULL;
goto out_free;
@@ -250,7 +251,7 @@ out_free:
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
}
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
out:
return NULL;
@@ -266,7 +267,7 @@ static void mppe_free(void *arg)
kfree(state->sha1_digest);
crypto_free_shash(state->sha1->tfm);
kzfree(state->sha1);
- crypto_free_skcipher(state->arc4);
+ crypto_free_sync_skcipher(state->arc4);
kfree(state);
}
}
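
The ppp_mppe conversion moves ARC4 from the general skcipher API to the sync variant, which guarantees a synchronous software implementation and keeps the on-stack request legal (SYNC_SKCIPHER_REQUEST_ON_STACK would be unsafe with an async tfm). The allocate/key/use/free shape, condensed into one hedged kernel-context sketch built only from calls the patch itself uses:

    #include <crypto/skcipher.h>

    /* Kernel-context sketch, not a standalone program. */
    static int arc4_crypt_sketch(const u8 *key, unsigned int keylen,
    			     struct scatterlist *sg, unsigned int len)
    {
    	struct crypto_sync_skcipher *arc4;
    	int err;

    	arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
    	if (IS_ERR(arc4))
    		return PTR_ERR(arc4);

    	crypto_sync_skcipher_setkey(arc4, key, keylen);

    	{
    		SYNC_SKCIPHER_REQUEST_ON_STACK(req, arc4);

    		skcipher_request_set_sync_tfm(req, arc4);
    		skcipher_request_set_callback(req, 0, NULL, NULL);
    		skcipher_request_set_crypt(req, sg, sg, len, NULL);
    		err = crypto_skcipher_encrypt(req);
    		skcipher_request_zero(req);
    	}

    	crypto_free_sync_skcipher(arc4);
    	return err;
    }
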
@@ -366,7 +367,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
int isize, int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
int proto;
int err;
struct scatterlist sg_in[1], sg_out[1];
@@ -426,7 +427,7 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
setup_sg(sg_in, ibuf, isize);
setup_sg(sg_out, obuf, osize);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
err = crypto_skcipher_encrypt(req);
@@ -480,7 +481,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
int osize)
{
struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
- SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
struct scatterlist sg_in[1], sg_out[1];
@@ -615,7 +616,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
setup_sg(sg_in, ibuf, 1);
setup_sg(sg_out, obuf, 1);
- skcipher_request_set_tfm(req, state->arc4);
+ skcipher_request_set_sync_tfm(req, state->arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
if (crypto_skcipher_decrypt(req)) {
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 91162f8e0366..9a22056e8d9e 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node,
struct device_node *matched_node;
int ret;
- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart");
+ matched_node = of_get_compatible_child(node, "marvell,nfc-uart");
if (!matched_node) {
- matched_node = of_find_compatible_node(node, NULL,
- "mrvl,nfc-uart");
+ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart");
if (!matched_node)
return -ENODEV;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 8aae6dcc839f..f1fb39921236 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -54,12 +54,6 @@ static int to_nd_device_type(struct device *dev)
static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
- /*
- * Ensure that region devices always have their numa node set as
- * early as possible.
- */
- if (is_nd_region(dev))
- set_dev_node(dev, to_nd_region(dev)->numa_node);
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
to_nd_device_type(dev));
}
@@ -488,6 +482,8 @@ static void nd_async_device_register(void *d, async_cookie_t cookie)
put_device(dev);
}
put_device(dev);
+ if (dev->parent)
+ put_device(dev->parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
@@ -506,7 +502,19 @@ void __nd_device_register(struct device *dev)
{
if (!dev)
return;
+
+ /*
+ * Ensure that region devices always have their NUMA node set as
+ * early as possible. This way we are able to make certain that
+ * any memory associated with the creation and the creation
+ * itself of the region is associated with the correct node.
+ */
+ if (is_nd_region(dev))
+ set_dev_node(dev, to_nd_region(dev)->numa_node);
+
dev->bus = &nvdimm_bus_type;
+ if (dev->parent)
+ get_device(dev->parent);
get_device(dev);
async_schedule_domain(nd_async_device_register, dev,
&nd_async_domain);
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 6c8fb7590838..9899c97138a3 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -75,7 +75,7 @@ static int nvdimm_probe(struct device *dev)
* DIMM capacity. We fail the dimm probe to prevent regions from
* attempting to parse the label area.
*/
- rc = nvdimm_init_config_data(ndd);
+ rc = nd_label_data_init(ndd);
if (rc == -EACCES)
nvdimm_set_locked(dev);
if (rc)
@@ -84,10 +84,6 @@ static int nvdimm_probe(struct device *dev)
dev_dbg(dev, "config data size: %d\n", ndd->nsarea.config_size);
nvdimm_bus_lock(dev);
- ndd->ns_current = nd_label_validate(ndd);
- ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
- nd_label_copy(ndd, to_next_namespace_index(ndd),
- to_current_namespace_index(ndd));
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 863cabc35215..6c3de2317390 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -85,56 +85,48 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
return cmd_rc;
}
-int nvdimm_init_config_data(struct nvdimm_drvdata *ndd)
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
+ struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
int rc = validate_dimm(ndd), cmd_rc = 0;
struct nd_cmd_get_config_data_hdr *cmd;
- struct nvdimm_bus_descriptor *nd_desc;
- u32 max_cmd_size, config_size;
- size_t offset;
+ size_t max_cmd_size, buf_offset;
if (rc)
return rc;
- if (ndd->data)
- return 0;
-
- if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0
- || ndd->nsarea.config_size < ND_LABEL_MIN_SIZE) {
- dev_dbg(ndd->dev, "failed to init config data area: (%d:%d)\n",
- ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- }
- ndd->data = kvmalloc(ndd->nsarea.config_size, GFP_KERNEL);
- if (!ndd->data)
- return -ENOMEM;
-
- max_cmd_size = min_t(u32, PAGE_SIZE, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
- nd_desc = nvdimm_bus->nd_desc;
- for (config_size = ndd->nsarea.config_size, offset = 0;
- config_size; config_size -= cmd->in_length,
- offset += cmd->in_length) {
- cmd->in_length = min(config_size, max_cmd_size);
- cmd->in_offset = offset;
+ for (buf_offset = 0; len;
+ len -= cmd->in_length, buf_offset += cmd->in_length) {
+ size_t cmd_size;
+
+ cmd->in_offset = offset + buf_offset;
+ cmd->in_length = min(max_cmd_size, len);
+
+ cmd_size = sizeof(*cmd) + cmd->in_length;
+
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_DATA, cmd,
- cmd->in_length + sizeof(*cmd), &cmd_rc);
+ ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
rc = cmd_rc;
break;
}
- memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length);
+
+ /* out_buf should be valid, copy it into our output buffer */
+ memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
}
- dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
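
nvdimm_get_config_data() now streams an arbitrary (offset, len) window in max_xfer-sized commands instead of always slurping the whole label area. The chunking loop in isolation, as a runnable sketch with a fake transport; the 8-byte max_xfer is an assumption chosen to force several iterations:

    #include <stdio.h>
    #include <string.h>

    #define CONFIG_SIZE 64u

    static const char config_area[CONFIG_SIZE] =
    	"0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcde";

    /* Stand-in for the ND_CMD_GET_CONFIG_DATA round trip. */
    static void transport_read(char *out, size_t off, size_t len)
    {
    	memcpy(out, config_area + off, len);
    }

    static int get_config_data(char *buf, size_t offset, size_t len)
    {
    	size_t max_xfer = 8;		/* assumed transfer limit */
    	size_t buf_offset, in_length;

    	if (offset + len > CONFIG_SIZE)
    		return -1;
    	for (buf_offset = 0; len; len -= in_length, buf_offset += in_length) {
    		in_length = len < max_xfer ? len : max_xfer;
    		transport_read(buf + buf_offset, offset + buf_offset,
    			       in_length);
    	}
    	return 0;
    }

    int main(void)
    {
    	char buf[20] = { 0 };

    	if (get_config_data(buf, 10, 19))
    		return 1;
    	printf("%s\n", buf);	/* 19 bytes starting at offset 10 */
    	return 0;
    }
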
@@ -151,15 +143,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
if (rc)
return rc;
- if (!ndd->data)
- return -ENXIO;
-
if (offset + len > ndd->nsarea.config_size)
return -ENXIO;
- max_cmd_size = min_t(u32, PAGE_SIZE, len);
- max_cmd_size = min_t(u32, max_cmd_size, ndd->nsarea.max_xfer);
- cmd = kzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
+ max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
+ cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -183,7 +171,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
break;
}
}
- kfree(cmd);
+ kvfree(cmd);
return rc;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 1d28cd656536..750dbaa6ce82 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -75,7 +75,8 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
/*
* Per UEFI 2.7, the minimum size of the Label Storage Area is large
* enough to hold 2 index blocks and 2 labels. The minimum index
- * block size is 256 bytes, and the minimum label size is 256 bytes.
+ * block size is 256 bytes. The label size is 128 for namespaces
+ * prior to version 1.2 and at minimum 256 for version 1.2 and later.
*/
nslot = nvdimm_num_label_slots(ndd);
space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
@@ -183,6 +184,13 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
__le64_to_cpu(nsindex[i]->otheroff));
continue;
}
+ if (__le64_to_cpu(nsindex[i]->labeloff)
+ != 2 * sizeof_namespace_index(ndd)) {
+ dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->labeloff));
+ continue;
+ }
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd)
@@ -227,7 +235,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-int nd_label_validate(struct nvdimm_drvdata *ndd)
+static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
/*
* In order to probe for and validate namespace index blocks we
@@ -250,12 +258,12 @@ int nd_label_validate(struct nvdimm_drvdata *ndd)
return -1;
}
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src)
+static void nd_label_copy(struct nvdimm_drvdata *ndd,
+ struct nd_namespace_index *dst,
+ struct nd_namespace_index *src)
{
- if (dst && src)
- /* pass */;
- else
+ /* just exit if either destination or source is NULL */
+ if (!dst || !src)
return;
memcpy(dst, src, sizeof_namespace_index(ndd));
@@ -410,6 +418,128 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
return 0;
}
+int nd_label_data_init(struct nvdimm_drvdata *ndd)
+{
+ size_t config_size, read_size, max_xfer, offset;
+ struct nd_namespace_index *nsindex;
+ unsigned int i;
+ int rc = 0;
+ u32 nslot;
+
+ if (ndd->data)
+ return 0;
+
+ if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
+ dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
+ ndd->nsarea.max_xfer, ndd->nsarea.config_size);
+ return -ENXIO;
+ }
+
+ /*
+ * We need to determine the maximum index area as this is the section
+ * we must read and validate before we can start processing labels.
+ *
+ * If the area is too small to contain the two index blocks and two
+ * labels then we abort.
+ *
+ * Start at a label size of 128 as this should result in the largest
+ * possible namespace index size.
+ */
+ ndd->nslabel_size = 128;
+ read_size = sizeof_namespace_index(ndd) * 2;
+ if (!read_size)
+ return -ENXIO;
+
+ /* Allocate config data */
+ config_size = ndd->nsarea.config_size;
+ ndd->data = kvzalloc(config_size, GFP_KERNEL);
+ if (!ndd->data)
+ return -ENOMEM;
+
+ /*
+ * We want to guarantee as few reads as possible while conserving
+ * memory. To do that we figure out how much unused space will be left
+ * in the last read, divide that by the total number of reads it is
+ * going to take given our maximum transfer size, and then reduce our
+ * maximum transfer size based on that result.
+ */
+ max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
+ if (read_size < max_xfer) {
+ /* trim waste */
+ max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
+ DIV_ROUND_UP(config_size, max_xfer);
+ /* make certain we read indexes in exactly 1 read */
+ if (max_xfer < read_size)
+ max_xfer = read_size;
+ }
+
+ /* Make our initial read size a multiple of max_xfer size */
+ read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
+ config_size);
+
+ /* Read the index data */
+ rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
+ if (rc)
+ goto out_err;
+
+ /* Validate index data, if not valid assume all labels are invalid */
+ ndd->ns_current = nd_label_validate(ndd);
+ if (ndd->ns_current < 0)
+ return 0;
+
+ /* Record our index values */
+ ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
+
+ /* Copy "current" index on top of the "next" index */
+ nsindex = to_current_namespace_index(ndd);
+ nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);
+
+ /* Determine starting offset for label data */
+ offset = __le64_to_cpu(nsindex->labeloff);
+ nslot = __le32_to_cpu(nsindex->nslot);
+
+ /* Loop through the free list pulling in any active labels */
+ for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
+ size_t label_read_size;
+
+ /* zero out the unused labels */
+ if (test_bit_le(i, nsindex->free)) {
+ memset(ndd->data + offset, 0, ndd->nslabel_size);
+ continue;
+ }
+
+ /* if we already read past here then just continue */
+ if (offset + ndd->nslabel_size <= read_size)
+ continue;
+
+ /* if we haven't read up to this offset yet, skip ahead to it */
+ if (read_size < offset)
+ read_size = offset;
+
+ /* determine how much more will be read after this next call. */
+ label_read_size = offset + ndd->nslabel_size - read_size;
+ label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
+ max_xfer;
+
+ /* truncate last read if needed */
+ if (read_size + label_read_size > config_size)
+ label_read_size = config_size - read_size;
+
+ /* Read the label data */
+ rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
+ read_size, label_read_size);
+ if (rc)
+ goto out_err;
+
+ /* push read_size to next read offset */
+ read_size += label_read_size;
+ }
+
+ dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
+out_err:
+ return rc;
+}
+
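
The "trim waste" step above is the subtle part of nd_label_data_init(): it shrinks max_xfer so that the slack that would otherwise sit unused in the final read is spread across all reads, while the follow-up check keeps both index blocks within a single read. A worked numeric example of that expression, with assumed sizes:

    #include <stdio.h>
    #include <stddef.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	size_t config_size = 131072;	/* assumed 128 KiB label area */
    	size_t max_xfer = 4608;		/* assumed awkward transfer cap */
    	size_t reads = DIV_ROUND_UP(config_size, max_xfer);
    	size_t waste = (max_xfer - 1) - (config_size - 1) % max_xfer;

    	/* spread the final read's slack across all reads */
    	max_xfer -= waste / reads;

    	/* 29 reads, 2560 bytes of slack => max_xfer trimmed to 4520 */
    	printf("reads=%zu waste=%zu new max_xfer=%zu\n",
    	       reads, waste, max_xfer);
    	return 0;
    }
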
int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
struct nd_namespace_index *nsindex;
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 18bbe183b3a9..e9a2ad3c2150 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -138,9 +138,7 @@ static inline int nd_label_next_nsindex(int index)
}
struct nvdimm_drvdata;
-int nd_label_validate(struct nvdimm_drvdata *ndd);
-void nd_label_copy(struct nvdimm_drvdata *ndd, struct nd_namespace_index *dst,
- struct nd_namespace_index *src);
+int nd_label_data_init(struct nvdimm_drvdata *ndd);
size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd);
int nd_label_active_count(struct nvdimm_drvdata *ndd);
struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 4a4266250c28..681af3a8fd62 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2099,7 +2099,6 @@ static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
return NULL;
}
dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
- dev->parent = &nd_region->dev;
dev->groups = nd_namespace_attribute_groups;
nd_namespace_pmem_set_resource(nd_region, nspm, 0);
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index ac68072fb8cd..182258f64417 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -14,7 +14,6 @@
#define __ND_CORE_H__
#include <linux/libnvdimm.h>
#include <linux/device.h>
-#include <linux/libnvdimm.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/nd.h>
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 98317e7ce5b5..e79cc8e5c114 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -241,6 +241,8 @@ struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping);
int nvdimm_check_config_data(struct device *dev);
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd);
int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
+int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
+ size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3f7ad5bc443e..24c64090169e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -361,6 +361,65 @@ struct device *nd_pfn_create(struct nd_region *nd_region)
return dev;
}
+/*
+ * nd_pfn_clear_memmap_errors() clears any errors in the volatile memmap
+ * space associated with the namespace. If the memmap is set to DRAM, then
+ * this is a no-op. Since the memmap area is freshly initialized during
+ * probe, we have an opportunity to clear any badblocks in this area.
+ */
+static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
+{
+ struct nd_region *nd_region = to_nd_region(nd_pfn->dev.parent);
+ struct nd_namespace_common *ndns = nd_pfn->ndns;
+ void *zero_page = page_address(ZERO_PAGE(0));
+ struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+ int num_bad, meta_num, rc, bb_present;
+ sector_t first_bad, meta_start;
+ struct nd_namespace_io *nsio;
+
+ if (nd_pfn->mode != PFN_MODE_PMEM)
+ return 0;
+
+ nsio = to_nd_namespace_io(&ndns->dev);
+ meta_start = (SZ_4K + sizeof(*pfn_sb)) >> 9;
+ meta_num = (le64_to_cpu(pfn_sb->dataoff) >> 9) - meta_start;
+
+ do {
+ unsigned long zero_len;
+ u64 nsoff;
+
+ bb_present = badblocks_check(&nd_region->bb, meta_start,
+ meta_num, &first_bad, &num_bad);
+ if (bb_present) {
+ dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %lx\n",
+ num_bad, first_bad);
+ nsoff = ALIGN_DOWN((nd_region->ndr_start
+ + (first_bad << 9)) - nsio->res.start,
+ PAGE_SIZE);
+ zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
+ while (zero_len) {
+ unsigned long chunk = min(zero_len, PAGE_SIZE);
+
+ rc = nvdimm_write_bytes(ndns, nsoff, zero_page,
+ chunk, 0);
+ if (rc)
+ break;
+
+ zero_len -= chunk;
+ nsoff += chunk;
+ }
+ if (rc) {
+ dev_err(&nd_pfn->dev,
+ "error clearing %x badblocks at %lx\n",
+ num_bad, first_bad);
+ return rc;
+ }
+ }
+ } while (bb_present);
+
+ return 0;
+}
+
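
nd_pfn_clear_memmap_errors() converts the badblocks result from 512-byte sectors into a page-aligned byte range relative to the namespace before zeroing it. The offset arithmetic in isolation; the region and namespace addresses are made-up example values:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SZ		4096ull
    #define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
    #define ALIGN(x, a)		ALIGN_DOWN((x) + (a) - 1, a)

    int main(void)
    {
    	uint64_t ndr_start = 0x100000000ull;	/* assumed region start */
    	uint64_t res_start = 0x100200000ull;	/* assumed namespace start */
    	uint64_t first_bad = 0x1100;		/* sectors, from badblocks */
    	uint64_t num_bad = 3;			/* sectors */

    	uint64_t nsoff = ALIGN_DOWN(ndr_start + (first_bad << 9) - res_start,
    				    PAGE_SZ);
    	uint64_t zero_len = ALIGN(num_bad << 9, PAGE_SZ);

    	/* 3 bad sectors round up to one full page of zeroing */
    	printf("zero %llu bytes at namespace offset %#llx\n",
    	       (unsigned long long)zero_len, (unsigned long long)nsoff);
    	return 0;
    }
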
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
@@ -477,7 +536,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -ENXIO;
}
- return 0;
+ return nd_pfn_clear_memmap_errors(nd_pfn);
}
EXPORT_SYMBOL(nd_pfn_validate);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index a75d10c23d80..0e39e3d1846f 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -421,9 +421,11 @@ static int pmem_attach_disk(struct device *dev,
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
- } else
+ } else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
+ memcpy(&bb_res, &nsio->res, sizeof(bb_res));
+ }
/*
* At release time the queue must be frozen before
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index fa37afcd43ff..174a418cb171 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -560,10 +560,17 @@ static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
+ ssize_t rc;
- return badblocks_show(&nd_region->bb, buf, 0);
-}
+ device_lock(dev);
+ if (dev->driver)
+ rc = badblocks_show(&nd_region->bb, buf, 0);
+ else
+ rc = -ENXIO;
+ device_unlock(dev);
+ return rc;
+}
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
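
The region_badblocks_show() change above is an instance of a general sysfs rule: an attribute that dereferences driver-private state must hold device_lock() and confirm the device is still bound, otherwise it can race with unbind and read freed memory. A generic sketch of the pattern, where my_show_locked() is a hypothetical helper:

	static ssize_t my_show(struct device *dev, struct device_attribute *attr,
			       char *buf)
	{
		ssize_t rc;

		device_lock(dev);
		if (dev->driver)		/* still bound: state is valid */
			rc = my_show_locked(dev, buf);
		else				/* unbound: back off */
			rc = -ENXIO;
		device_unlock(dev);

		return rc;
	}
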
static ssize_t resource_show(struct device *dev,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 9e4a30b05bd2..2e65be8b1387 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3064,7 +3064,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
goto out_free_ns;
+
blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue);
+ if (ctrl->ops->flags & NVME_F_PCI_P2PDMA)
+ blk_queue_flag_set(QUEUE_FLAG_PCI_P2PDMA, ns->queue);
+
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9fefba039d1e..cee79cb388af 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -343,6 +343,7 @@ struct nvme_ctrl_ops {
unsigned int flags;
#define NVME_F_FABRICS (1 << 0)
#define NVME_F_METADATA_SUPPORTED (1 << 1)
+#define NVME_F_PCI_P2PDMA (1 << 2)
int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4e023cd007e1..f30031945ee4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sed-opal.h>
+#include <linux/pci-p2pdma.h>
#include "nvme.h"
@@ -99,9 +100,8 @@ struct nvme_dev {
struct work_struct remove_work;
struct mutex shutdown_lock;
bool subsystem;
- void __iomem *cmb;
- pci_bus_addr_t cmb_bus_addr;
u64 cmb_size;
+ bool cmb_use_sqes;
u32 cmbsz;
u32 cmbloc;
struct nvme_ctrl ctrl;
@@ -158,7 +158,7 @@ struct nvme_queue {
struct nvme_dev *dev;
spinlock_t sq_lock;
struct nvme_command *sq_cmds;
- struct nvme_command __iomem *sq_cmds_io;
+ bool sq_cmds_is_io;
spinlock_t cq_lock ____cacheline_aligned_in_smp;
volatile struct nvme_completion *cqes;
struct blk_mq_tags **tags;
@@ -447,11 +447,8 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
{
spin_lock(&nvmeq->sq_lock);
- if (nvmeq->sq_cmds_io)
- memcpy_toio(&nvmeq->sq_cmds_io[nvmeq->sq_tail], cmd,
- sizeof(*cmd));
- else
- memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
+
+ memcpy(&nvmeq->sq_cmds[nvmeq->sq_tail], cmd, sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
@@ -748,8 +745,13 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
goto out;
ret = BLK_STS_RESOURCE;
- nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
- DMA_ATTR_NO_WARN);
+
+ if (is_pci_p2pdma_page(sg_page(iod->sg)))
+ nr_mapped = pci_p2pdma_map_sg(dev->dev, iod->sg, iod->nents,
+ dma_dir);
+ else
+ nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
+ dma_dir, DMA_ATTR_NO_WARN);
if (!nr_mapped)
goto out;
@@ -791,7 +793,10 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
DMA_TO_DEVICE : DMA_FROM_DEVICE;
if (iod->nents) {
- dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+ /* P2PDMA requests do not need to be unmapped */
+ if (!is_pci_p2pdma_page(sg_page(iod->sg)))
+ dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
+
if (blk_integrity_rq(req))
dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir);
}
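
The asymmetry above is deliberate: pci_p2pdma_map_sg() translates P2P pages directly to PCI bus addresses without creating IOMMU mappings, so there is no mapping state to tear down, and handing those bus addresses to dma_unmap_sg() would be wrong. The gate is is_pci_p2pdma_page(), which keys off the ZONE_DEVICE pgmap type; roughly, as introduced by this series (consult the tree for the authoritative definition):

	static inline bool is_pci_p2pdma_page(const struct page *page)
	{
		return is_zone_device_page(page) &&
			page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
	}
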
@@ -1232,9 +1237,18 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
{
dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
(void *)nvmeq->cqes, nvmeq->cq_dma_addr);
- if (nvmeq->sq_cmds)
- dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
- nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+
+ if (nvmeq->sq_cmds) {
+ if (nvmeq->sq_cmds_is_io)
+ pci_free_p2pmem(to_pci_dev(nvmeq->q_dmadev),
+ nvmeq->sq_cmds,
+ SQ_SIZE(nvmeq->q_depth));
+ else
+ dma_free_coherent(nvmeq->q_dmadev,
+ SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds,
+ nvmeq->sq_dma_addr);
+ }
}
static void nvme_free_queues(struct nvme_dev *dev, int lowest)
@@ -1323,12 +1337,21 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- /* CMB SQEs will be mapped before creation */
- if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
- return 0;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (qid && dev->cmb_use_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ nvmeq->sq_cmds = pci_alloc_p2pmem(pdev, SQ_SIZE(depth));
+ nvmeq->sq_dma_addr = pci_p2pmem_virt_to_bus(pdev,
+ nvmeq->sq_cmds);
+ nvmeq->sq_cmds_is_io = true;
+ }
+
+ if (!nvmeq->sq_cmds) {
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ nvmeq->sq_cmds_is_io = false;
+ }
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
if (!nvmeq->sq_cmds)
return -ENOMEM;
return 0;
@@ -1405,13 +1428,6 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
int result;
s16 vector;
- if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
- dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
- nvmeq->sq_cmds_io = dev->cmb + offset;
- }
-
/*
* A queue's vector matches the queue identifier unless the controller
* has only one vector available.
@@ -1652,9 +1668,6 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
dev->cmbloc = readl(dev->bar + NVME_REG_CMBLOC);
- if (!use_cmb_sqes)
- return;
-
size = nvme_cmb_size_unit(dev) * nvme_cmb_size(dev);
offset = nvme_cmb_size_unit(dev) * NVME_CMB_OFST(dev->cmbloc);
bar = NVME_CMB_BIR(dev->cmbloc);
@@ -1671,11 +1684,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
if (size > bar_size - offset)
size = bar_size - offset;
- dev->cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
- if (!dev->cmb)
+ if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
+ dev_warn(dev->ctrl.device,
+ "failed to register the CMB\n");
return;
- dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
+ }
+
dev->cmb_size = size;
+ dev->cmb_use_sqes = use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS);
+
+ if ((dev->cmbsz & (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS)) ==
+ (NVME_CMBSZ_WDS | NVME_CMBSZ_RDS))
+ pci_p2pmem_publish(pdev, true);
if (sysfs_add_file_to_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL))
@@ -1685,12 +1705,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
static inline void nvme_release_cmb(struct nvme_dev *dev)
{
- if (dev->cmb) {
- iounmap(dev->cmb);
- dev->cmb = NULL;
+ if (dev->cmb_size) {
sysfs_remove_file_from_group(&dev->ctrl.device->kobj,
&dev_attr_cmb.attr, NULL);
- dev->cmbsz = 0;
+ dev->cmb_size = 0;
}
}
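
pci_p2pmem_publish() in the hunk above is what makes the CMB visible to other subsystems: pci_p2pmem_find() only returns providers that have published their memory, and the NVMe driver only publishes when the CMB supports both read and write data (WDS|RDS). A hypothetical consumer flow against the new API, with error handling trimmed; client_pdev stands in for whatever device will issue the DMA:

	struct pci_dev *provider;
	void *buf;

	provider = pci_p2pmem_find(&client_pdev->dev);	/* takes a reference */
	if (provider) {
		buf = pci_alloc_p2pmem(provider, SZ_4K);
		if (buf) {
			/* ... DMA between client and provider through buf ... */
			pci_free_p2pmem(provider, buf, SZ_4K);
		}
		pci_dev_put(provider);
	}
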
@@ -1889,13 +1907,13 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
if (nr_io_queues == 0)
return 0;
- if (dev->cmb && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ if (dev->cmb_use_sqes) {
result = nvme_cmb_qdepth(dev, nr_io_queues,
sizeof(struct nvme_command));
if (result > 0)
dev->q_depth = result;
else
- nvme_release_cmb(dev);
+ dev->cmb_use_sqes = false;
}
do {
@@ -2390,7 +2408,8 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
.name = "pcie",
.module = THIS_MODULE,
- .flags = NVME_F_METADATA_SUPPORTED,
+ .flags = NVME_F_METADATA_SUPPORTED |
+ NVME_F_PCI_P2PDMA,
.reg_read32 = nvme_pci_reg_read32,
.reg_write32 = nvme_pci_reg_write32,
.reg_read64 = nvme_pci_reg_read64,
@@ -2648,7 +2667,6 @@ static void nvme_error_resume(struct pci_dev *pdev)
struct nvme_dev *dev = pci_get_drvdata(pdev);
flush_work(&dev->ctrl.reset_work);
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
static const struct pci_error_handlers nvme_err_handler = {
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index b37a8e3e3f80..d895579b6c5d 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -17,6 +17,8 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -340,6 +342,48 @@ out_unlock:
CONFIGFS_ATTR(nvmet_ns_, device_path);
+#ifdef CONFIG_PCI_P2PDMA
+static ssize_t nvmet_ns_p2pmem_show(struct config_item *item, char *page)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+
+ return pci_p2pdma_enable_show(page, ns->p2p_dev, ns->use_p2pmem);
+}
+
+static ssize_t nvmet_ns_p2pmem_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_ns *ns = to_nvmet_ns(item);
+ struct pci_dev *p2p_dev = NULL;
+ bool use_p2pmem;
+ int ret = count;
+ int error;
+
+ mutex_lock(&ns->subsys->lock);
+ if (ns->enabled) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+
+ error = pci_p2pdma_enable_store(page, &p2p_dev, &use_p2pmem);
+ if (error) {
+ ret = error;
+ goto out_unlock;
+ }
+
+ ns->use_p2pmem = use_p2pmem;
+ pci_dev_put(ns->p2p_dev);
+ ns->p2p_dev = p2p_dev;
+
+out_unlock:
+ mutex_unlock(&ns->subsys->lock);
+
+ return ret;
+}
+
+CONFIGFS_ATTR(nvmet_ns_, p2pmem);
+#endif /* CONFIG_PCI_P2PDMA */
+
static ssize_t nvmet_ns_device_uuid_show(struct config_item *item, char *page)
{
return sprintf(page, "%pUb\n", &to_nvmet_ns(item)->uuid);
@@ -509,6 +553,9 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
&nvmet_ns_attr_ana_grpid,
&nvmet_ns_attr_enable,
&nvmet_ns_attr_buffered_io,
+#ifdef CONFIG_PCI_P2PDMA
+ &nvmet_ns_attr_p2pmem,
+#endif
NULL,
};
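
The new p2pmem attribute accepts either a boolean ("y" auto-selects a provider near the port, "n" disables) or an explicit PCI device name; pci_p2pdma_enable_store()/pci_p2pdma_enable_show() exist so that other subsystems can expose an identical knob. A simplified sketch of the store-side parsing follows; this is not the actual implementation, and newline trimming plus the has-p2pmem sanity check are omitted:

	static int parse_p2pmem(const char *page, struct pci_dev **p2p_dev,
				bool *use_p2pmem)
	{
		struct device *dev;

		/* try the string as a PCI device name first ... */
		dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
		if (dev) {
			*p2p_dev = to_pci_dev(dev);	/* explicit provider */
			*use_p2pmem = true;
			return 0;
		}

		/* ... otherwise treat it as a boolean */
		*p2p_dev = NULL;			/* auto-select later */
		return strtobool(page, use_p2pmem);
	}
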
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 0acdff9e6842..f4efe289dc7b 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>
+#include <linux/pci-p2pdma.h>
#include "nvmet.h"
@@ -365,9 +366,93 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
nvmet_file_ns_disable(ns);
}
+static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
+{
+ int ret;
+ struct pci_dev *p2p_dev;
+
+ if (!ns->use_p2pmem)
+ return 0;
+
+ if (!ns->bdev) {
+ pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
+ return -EINVAL;
+ }
+
+ if (!blk_queue_pci_p2pdma(ns->bdev->bd_queue)) {
+ pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
+ if (ret < 0)
+ return -EINVAL;
+ } else {
+ /*
+ * Right now we just check that there is p2pmem available so
+ * we can report an error to the user right away if there
+ * is not. We'll find the actual device to use once we
+ * set up the controller when the port's device is available.
+ */
+
+ p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available for %s\n",
+ ns->device_path);
+ return -EINVAL;
+ }
+
+ pci_dev_put(p2p_dev);
+ }
+
+ return 0;
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
+ struct nvmet_ns *ns)
+{
+ struct device *clients[2];
+ struct pci_dev *p2p_dev;
+ int ret;
+
+ if (!ctrl->p2p_client)
+ return;
+
+ if (ns->p2p_dev) {
+ ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
+ if (ret < 0)
+ return;
+
+ p2p_dev = pci_dev_get(ns->p2p_dev);
+ } else {
+ clients[0] = ctrl->p2p_client;
+ clients[1] = nvmet_ns_dev(ns);
+
+ p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
+ if (!p2p_dev) {
+ pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
+ dev_name(ctrl->p2p_client), ns->device_path);
+ return;
+ }
+ }
+
+ ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
+ if (ret < 0)
+ pci_dev_put(p2p_dev);
+
+ pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
+ ns->nsid);
+}
+
int nvmet_ns_enable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
int ret;
mutex_lock(&subsys->lock);
@@ -384,6 +469,13 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
if (ret)
goto out_unlock;
+ ret = nvmet_p2pmem_ns_enable(ns);
+ if (ret)
+ goto out_unlock;
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+
ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
0, GFP_KERNEL);
if (ret)
@@ -418,6 +510,9 @@ out_unlock:
mutex_unlock(&subsys->lock);
return ret;
out_dev_put:
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
nvmet_ns_dev_disable(ns);
goto out_unlock;
}
@@ -425,6 +520,7 @@ out_dev_put:
void nvmet_ns_disable(struct nvmet_ns *ns)
{
struct nvmet_subsys *subsys = ns->subsys;
+ struct nvmet_ctrl *ctrl;
mutex_lock(&subsys->lock);
if (!ns->enabled)
@@ -434,6 +530,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
list_del_rcu(&ns->dev_link);
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
+
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
+
mutex_unlock(&subsys->lock);
/*
@@ -450,6 +550,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
percpu_ref_exit(&ns->ref);
mutex_lock(&subsys->lock);
+
subsys->nr_namespaces--;
nvmet_ns_changed(subsys, ns->nsid);
nvmet_ns_dev_disable(ns);
@@ -725,6 +826,51 @@ void nvmet_req_execute(struct nvmet_req *req)
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);
+int nvmet_req_alloc_sgl(struct nvmet_req *req)
+{
+ struct pci_dev *p2p_dev = NULL;
+
+ if (IS_ENABLED(CONFIG_PCI_P2PDMA)) {
+ if (req->sq->ctrl && req->ns)
+ p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
+ req->ns->nsid);
+
+ req->p2p_dev = NULL;
+ if (req->sq->qid && p2p_dev) {
+ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
+ req->transfer_len);
+ if (req->sg) {
+ req->p2p_dev = p2p_dev;
+ return 0;
+ }
+ }
+
+ /*
+ * If no P2P memory was available we fall back to using
+ * regular memory.
+ */
+ }
+
+ req->sg = sgl_alloc(req->transfer_len, GFP_KERNEL, &req->sg_cnt);
+ if (!req->sg)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgl);
+
+void nvmet_req_free_sgl(struct nvmet_req *req)
+{
+ if (req->p2p_dev)
+ pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
+ else
+ sgl_free(req->sg);
+
+ req->sg = NULL;
+ req->sg_cnt = 0;
+}
+EXPORT_SYMBOL_GPL(nvmet_req_free_sgl);
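
Together these helpers hide the P2P-versus-DRAM decision from the transports: the allocator records the chosen provider in req->p2p_dev and the free path keys off that field, so converting a transport amounts to swapping sgl_alloc()/sgl_free() for this pair, as the rdma.c hunks below do. Transport-side usage in miniature, with request setup elided:

	req->transfer_len = len;	/* the allocator sizes the SG list from this */
	if (nvmet_req_alloc_sgl(req) < 0)
		return NVME_SC_INTERNAL;

	/* ... perform the I/O described by req->sg / req->sg_cnt ... */

	nvmet_req_free_sgl(req);	/* routes to p2pmem or sgl_free() as recorded */
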
+
static inline bool nvmet_cc_en(u32 cc)
{
return (cc >> NVME_CC_EN_SHIFT) & 0x1;
@@ -921,6 +1067,37 @@ bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
return __nvmet_host_allowed(subsys, hostnqn);
}
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
+ struct nvmet_req *req)
+{
+ struct nvmet_ns *ns;
+
+ if (!req->p2p_client)
+ return;
+
+ ctrl->p2p_client = get_device(req->p2p_client);
+
+ list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
+ nvmet_p2pmem_ns_add_p2p(ctrl, ns);
+}
+
+/*
+ * Note: ctrl->subsys->lock should be held when calling this function
+ */
+static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
+{
+ struct radix_tree_iter iter;
+ void __rcu **slot;
+
+ radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
+ pci_dev_put(radix_tree_deref_slot(slot));
+
+ put_device(ctrl->p2p_client);
+}
+
u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
@@ -962,6 +1139,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
+ INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1026,6 +1204,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_lock(&subsys->lock);
list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
+ nvmet_setup_p2p_ns_map(ctrl, req);
mutex_unlock(&subsys->lock);
*ctrlp = ctrl;
@@ -1053,6 +1232,7 @@ static void nvmet_ctrl_free(struct kref *ref)
struct nvmet_subsys *subsys = ctrl->subsys;
mutex_lock(&subsys->lock);
+ nvmet_release_p2p_ns_map(ctrl);
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index f93fb5711142..c1ec3475a140 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -78,6 +78,9 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
op = REQ_OP_READ;
}
+ if (is_pci_p2pdma_page(sg_page(req->sg)))
+ op_flags |= REQ_NOMERGE;
+
sector = le64_to_cpu(req->cmd->rw.slba);
sector <<= (req->ns->blksize_shift - 9);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 08f7b57a1203..c2b4d9ee6391 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -26,6 +26,7 @@
#include <linux/configfs.h>
#include <linux/rcupdate.h>
#include <linux/blkdev.h>
+#include <linux/radix-tree.h>
#define NVMET_ASYNC_EVENTS 4
#define NVMET_ERROR_LOG_SLOTS 128
@@ -77,6 +78,9 @@ struct nvmet_ns {
struct completion disable_done;
mempool_t *bvec_pool;
struct kmem_cache *bvec_cache;
+
+ int use_p2pmem;
+ struct pci_dev *p2p_dev;
};
static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
@@ -84,6 +88,11 @@ static inline struct nvmet_ns *to_nvmet_ns(struct config_item *item)
return container_of(to_config_group(item), struct nvmet_ns, group);
}
+static inline struct device *nvmet_ns_dev(struct nvmet_ns *ns)
+{
+ return ns->bdev ? disk_to_dev(ns->bdev->bd_disk) : NULL;
+}
+
struct nvmet_cq {
u16 qid;
u16 size;
@@ -184,6 +193,9 @@ struct nvmet_ctrl {
char subsysnqn[NVMF_NQN_FIELD_LEN];
char hostnqn[NVMF_NQN_FIELD_LEN];
+
+ struct device *p2p_client;
+ struct radix_tree_root p2p_ns_map;
};
struct nvmet_subsys {
@@ -295,6 +307,9 @@ struct nvmet_req {
void (*execute)(struct nvmet_req *req);
const struct nvmet_fabrics_ops *ops;
+
+ struct pci_dev *p2p_dev;
+ struct device *p2p_client;
};
extern struct workqueue_struct *buffered_io_wq;
@@ -337,6 +352,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
void nvmet_req_uninit(struct nvmet_req *req);
void nvmet_req_execute(struct nvmet_req *req);
void nvmet_req_complete(struct nvmet_req *req, u16 status);
+int nvmet_req_alloc_sgl(struct nvmet_req *req);
+void nvmet_req_free_sgl(struct nvmet_req *req);
void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, u16 qid,
u16 size);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index bd265aceb90c..ddce100be57a 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -504,7 +504,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
}
if (rsp->req.sg != rsp->cmd->inline_sg)
- sgl_free(rsp->req.sg);
+ nvmet_req_free_sgl(&rsp->req);
if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list)))
nvmet_rdma_process_wr_wait_list(queue);
@@ -653,24 +653,24 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
{
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
u64 addr = le64_to_cpu(sgl->addr);
- u32 len = get_unaligned_le24(sgl->length);
u32 key = get_unaligned_le32(sgl->key);
int ret;
+ rsp->req.transfer_len = get_unaligned_le24(sgl->length);
+
/* no data command? */
- if (!len)
+ if (!rsp->req.transfer_len)
return 0;
- rsp->req.sg = sgl_alloc(len, GFP_KERNEL, &rsp->req.sg_cnt);
- if (!rsp->req.sg)
- return NVME_SC_INTERNAL;
+ ret = nvmet_req_alloc_sgl(&rsp->req);
+ if (ret < 0)
+ goto error_out;
ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num,
rsp->req.sg, rsp->req.sg_cnt, 0, addr, key,
nvmet_data_dir(&rsp->req));
if (ret < 0)
- return NVME_SC_INTERNAL;
- rsp->req.transfer_len += len;
+ goto error_out;
rsp->n_rdma += ret;
if (invalidate) {
@@ -679,6 +679,10 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
}
return 0;
+
+error_out:
+ rsp->req.transfer_len = 0;
+ return NVME_SC_INTERNAL;
}
static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
@@ -746,6 +750,8 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
+ cmd->req.p2p_client = &queue->dev->device->dev;
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index aa1657831b70..9b18ce90f907 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -1,17 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* nvmem framework core.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/device.h>
@@ -19,6 +11,7 @@
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
+#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
@@ -26,18 +19,18 @@
#include <linux/slab.h>
struct nvmem_device {
- const char *name;
struct module *owner;
struct device dev;
int stride;
int word_size;
int id;
- int users;
+ struct kref refcnt;
size_t size;
bool read_only;
int flags;
struct bin_attribute eeprom;
struct device *base_dev;
+ struct list_head cells;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
void *priv;
@@ -58,8 +51,13 @@ struct nvmem_cell {
static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);
-static LIST_HEAD(nvmem_cells);
-static DEFINE_MUTEX(nvmem_cells_mutex);
+static DEFINE_MUTEX(nvmem_cell_mutex);
+static LIST_HEAD(nvmem_cell_tables);
+
+static DEFINE_MUTEX(nvmem_lookup_mutex);
+static LIST_HEAD(nvmem_lookup_list);
+
+static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
@@ -156,7 +154,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
static struct bin_attribute bin_attr_rw_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IWUSR | S_IRUGO,
+ .mode = 0644,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
@@ -180,7 +178,7 @@ static const struct attribute_group *nvmem_rw_dev_groups[] = {
static struct bin_attribute bin_attr_ro_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IRUGO,
+ .mode = 0444,
},
.read = bin_attr_nvmem_read,
};
@@ -203,7 +201,7 @@ static const struct attribute_group *nvmem_ro_dev_groups[] = {
static struct bin_attribute bin_attr_rw_root_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IWUSR | S_IRUSR,
+ .mode = 0600,
},
.read = bin_attr_nvmem_read,
.write = bin_attr_nvmem_write,
@@ -227,7 +225,7 @@ static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
static struct bin_attribute bin_attr_ro_root_nvmem = {
.attr = {
.name = "nvmem",
- .mode = S_IRUSR,
+ .mode = 0400,
},
.read = bin_attr_nvmem_read,
};
@@ -282,48 +280,42 @@ static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
return to_nvmem_device(d);
}
-static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
+static struct nvmem_device *nvmem_find(const char *name)
{
- struct nvmem_cell *p;
-
- mutex_lock(&nvmem_cells_mutex);
+ struct device *d;
- list_for_each_entry(p, &nvmem_cells, node)
- if (!strcmp(p->name, cell_id)) {
- mutex_unlock(&nvmem_cells_mutex);
- return p;
- }
+ d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
- mutex_unlock(&nvmem_cells_mutex);
+ if (!d)
+ return NULL;
- return NULL;
+ return to_nvmem_device(d);
}
static void nvmem_cell_drop(struct nvmem_cell *cell)
{
- mutex_lock(&nvmem_cells_mutex);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
+ mutex_lock(&nvmem_mutex);
list_del(&cell->node);
- mutex_unlock(&nvmem_cells_mutex);
+ mutex_unlock(&nvmem_mutex);
+ kfree(cell->name);
kfree(cell);
}
static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
- struct nvmem_cell *cell;
- struct list_head *p, *n;
+ struct nvmem_cell *cell, *p;
- list_for_each_safe(p, n, &nvmem_cells) {
- cell = list_entry(p, struct nvmem_cell, node);
- if (cell->nvmem == nvmem)
- nvmem_cell_drop(cell);
- }
+ list_for_each_entry_safe(cell, p, &nvmem->cells, node)
+ nvmem_cell_drop(cell);
}
static void nvmem_cell_add(struct nvmem_cell *cell)
{
- mutex_lock(&nvmem_cells_mutex);
- list_add_tail(&cell->node, &nvmem_cells);
- mutex_unlock(&nvmem_cells_mutex);
+ mutex_lock(&nvmem_mutex);
+ list_add_tail(&cell->node, &cell->nvmem->cells);
+ mutex_unlock(&nvmem_mutex);
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}
static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
@@ -361,7 +353,7 @@ static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
*
* Return: 0 or negative error code on failure.
*/
-int nvmem_add_cells(struct nvmem_device *nvmem,
+static int nvmem_add_cells(struct nvmem_device *nvmem,
const struct nvmem_cell_info *info,
int ncells)
{
@@ -400,7 +392,6 @@ err:
return rval;
}
-EXPORT_SYMBOL_GPL(nvmem_add_cells);
/*
* nvmem_setup_compat() - Create an additional binary entry in
@@ -440,6 +431,136 @@ static int nvmem_setup_compat(struct nvmem_device *nvmem,
}
/**
+ * nvmem_register_notifier() - Register a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be called on nvmem events.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_register_notifier);
+
+/**
+ * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
+ *
+ * @nb: notifier block to be unregistered.
+ *
+ * Return: 0 on success, negative error number on failure.
+ */
+int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
+
+static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
+{
+ const struct nvmem_cell_info *info;
+ struct nvmem_cell_table *table;
+ struct nvmem_cell *cell;
+ int rval = 0, i;
+
+ mutex_lock(&nvmem_cell_mutex);
+ list_for_each_entry(table, &nvmem_cell_tables, node) {
+ if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
+ for (i = 0; i < table->ncells; i++) {
+ info = &table->cells[i];
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell) {
+ rval = -ENOMEM;
+ goto out;
+ }
+
+ rval = nvmem_cell_info_to_nvmem_cell(nvmem,
+ info,
+ cell);
+ if (rval) {
+ kfree(cell);
+ goto out;
+ }
+
+ nvmem_cell_add(cell);
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&nvmem_cell_mutex);
+ return rval;
+}
+
+static struct nvmem_cell *
+nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
+{
+ struct nvmem_cell *cell = NULL;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(cell, &nvmem->cells, node) {
+ if (strcmp(cell_id, cell->name) == 0)
+ break;
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
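
One subtlety in nvmem_find_cell_by_name(): list_for_each_entry() does not leave the cursor NULL when the list is exhausted, it leaves it pointing at the list head's container, so this helper only returns a meaningful pointer when the name is known to be present (the index-based lookup added further down shares the pattern). A miss-safe formulation, shown for contrast rather than as the patch's code, would use a separate iterator:

	static struct nvmem_cell *
	nvmem_find_cell_by_name_safe(struct nvmem_device *nvmem, const char *cell_id)
	{
		struct nvmem_cell *iter, *cell = NULL;

		mutex_lock(&nvmem_mutex);
		list_for_each_entry(iter, &nvmem->cells, node) {
			if (strcmp(cell_id, iter->name) == 0) {
				cell = iter;	/* found: remember and stop */
				break;
			}
		}
		mutex_unlock(&nvmem_mutex);

		return cell;			/* NULL on a miss */
	}
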
+
+static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+{
+ struct device_node *parent, *child;
+ struct device *dev = &nvmem->dev;
+ struct nvmem_cell *cell;
+ const __be32 *addr;
+ int len;
+
+ parent = dev->of_node;
+
+ for_each_child_of_node(parent, child) {
+ addr = of_get_property(child, "reg", &len);
+ if (!addr || (len < 2 * sizeof(u32))) {
+ dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ return -EINVAL;
+ }
+
+ cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ if (!cell)
+ return -ENOMEM;
+
+ cell->nvmem = nvmem;
+ cell->offset = be32_to_cpup(addr++);
+ cell->bytes = be32_to_cpup(addr);
+ cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
+
+ addr = of_get_property(child, "bits", &len);
+ if (addr && len == (2 * sizeof(u32))) {
+ cell->bit_offset = be32_to_cpup(addr++);
+ cell->nbits = be32_to_cpup(addr);
+ }
+
+ if (cell->nbits)
+ cell->bytes = DIV_ROUND_UP(
+ cell->nbits + cell->bit_offset,
+ BITS_PER_BYTE);
+
+ if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
+ dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
+ cell->name, nvmem->stride);
+ /* Cells already added will be freed later. */
+ kfree(cell->name);
+ kfree(cell);
+ return -EINVAL;
+ }
+
+ nvmem_cell_add(cell);
+ }
+
+ return 0;
+}
+
+/**
* nvmem_register() - Register an nvmem device for the given nvmem_config.
* Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
*
@@ -467,6 +588,9 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
return ERR_PTR(rval);
}
+ kref_init(&nvmem->refcnt);
+ INIT_LIST_HEAD(&nvmem->cells);
+
nvmem->id = rval;
nvmem->owner = config->owner;
if (!nvmem->owner && config->dev->driver)
@@ -516,11 +640,31 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config)
goto err_device_del;
}
- if (config->cells)
- nvmem_add_cells(nvmem, config->cells, config->ncells);
+ if (config->cells) {
+ rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
+ if (rval)
+ goto err_teardown_compat;
+ }
+
+ rval = nvmem_add_cells_from_table(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = nvmem_add_cells_from_of(nvmem);
+ if (rval)
+ goto err_remove_cells;
+
+ rval = blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
+ if (rval)
+ goto err_remove_cells;
return nvmem;
+err_remove_cells:
+ nvmem_device_remove_all_cells(nvmem);
+err_teardown_compat:
+ if (config->compat)
+ device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
err_device_del:
device_del(&nvmem->dev);
err_put_device:
@@ -530,21 +674,13 @@ err_put_device:
}
EXPORT_SYMBOL_GPL(nvmem_register);
-/**
- * nvmem_unregister() - Unregister previously registered nvmem device
- *
- * @nvmem: Pointer to previously registered nvmem device.
- *
- * Return: Will be an negative on error or a zero on success.
- */
-int nvmem_unregister(struct nvmem_device *nvmem)
+static void nvmem_device_release(struct kref *kref)
{
- mutex_lock(&nvmem_mutex);
- if (nvmem->users) {
- mutex_unlock(&nvmem_mutex);
- return -EBUSY;
- }
- mutex_unlock(&nvmem_mutex);
+ struct nvmem_device *nvmem;
+
+ nvmem = container_of(kref, struct nvmem_device, refcnt);
+
+ blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
if (nvmem->flags & FLAG_COMPAT)
device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
@@ -552,14 +688,22 @@ int nvmem_unregister(struct nvmem_device *nvmem)
nvmem_device_remove_all_cells(nvmem);
device_del(&nvmem->dev);
put_device(&nvmem->dev);
+}
- return 0;
+/**
+ * nvmem_unregister() - Unregister previously registered nvmem device
+ *
+ * @nvmem: Pointer to previously registered nvmem device.
+ */
+void nvmem_unregister(struct nvmem_device *nvmem)
+{
+ kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);
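
Unregistration is now reference-counted: nvmem_register() initializes the kref, every successful __nvmem_device_get() takes an extra reference, and the real teardown in nvmem_device_release() runs only when the last reference drops, which is why nvmem_unregister() can no longer fail and returns void. The consumer-visible effect, as a hypothetical sequence with invented names:

	struct nvmem_device *nv = nvmem_register(&cfg);		/* refcnt = 1 */
	struct nvmem_cell *cell = nvmem_cell_get(dev, "calib");	/* refcnt = 2 */

	nvmem_unregister(nv);	/* refcnt = 1: no more -EBUSY, nothing freed yet */
	nvmem_cell_put(cell);	/* refcnt = 0: nvmem_device_release() tears down */
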
static void devm_nvmem_release(struct device *dev, void *res)
{
- WARN_ON(nvmem_unregister(*(struct nvmem_device **)res));
+ nvmem_unregister(*(struct nvmem_device **)res);
}
/**
@@ -617,71 +761,34 @@ int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
}
EXPORT_SYMBOL(devm_nvmem_unregister);
-
static struct nvmem_device *__nvmem_device_get(struct device_node *np,
- struct nvmem_cell **cellp,
- const char *cell_id)
+ const char *nvmem_name)
{
struct nvmem_device *nvmem = NULL;
mutex_lock(&nvmem_mutex);
-
- if (np) {
- nvmem = of_nvmem_find(np);
- if (!nvmem) {
- mutex_unlock(&nvmem_mutex);
- return ERR_PTR(-EPROBE_DEFER);
- }
- } else {
- struct nvmem_cell *cell = nvmem_find_cell(cell_id);
-
- if (cell) {
- nvmem = cell->nvmem;
- *cellp = cell;
- }
-
- if (!nvmem) {
- mutex_unlock(&nvmem_mutex);
- return ERR_PTR(-ENOENT);
- }
- }
-
- nvmem->users++;
+ nvmem = np ? of_nvmem_find(np) : nvmem_find(nvmem_name);
mutex_unlock(&nvmem_mutex);
+ if (!nvmem)
+ return ERR_PTR(-EPROBE_DEFER);
if (!try_module_get(nvmem->owner)) {
dev_err(&nvmem->dev,
"could not increase module refcount for cell %s\n",
- nvmem->name);
-
- mutex_lock(&nvmem_mutex);
- nvmem->users--;
- mutex_unlock(&nvmem_mutex);
+ nvmem_dev_name(nvmem));
return ERR_PTR(-EINVAL);
}
+ kref_get(&nvmem->refcnt);
+
return nvmem;
}
static void __nvmem_device_put(struct nvmem_device *nvmem)
{
module_put(nvmem->owner);
- mutex_lock(&nvmem_mutex);
- nvmem->users--;
- mutex_unlock(&nvmem_mutex);
-}
-
-static struct nvmem_device *nvmem_find(const char *name)
-{
- struct device *d;
-
- d = bus_find_device_by_name(&nvmem_bus_type, NULL, name);
-
- if (!d)
- return NULL;
-
- return to_nvmem_device(d);
+ kref_put(&nvmem->refcnt, nvmem_device_release);
}
#if IS_ENABLED(CONFIG_OF)
@@ -706,7 +813,7 @@ struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- return __nvmem_device_get(nvmem_np, NULL, NULL);
+ return __nvmem_device_get(nvmem_np, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
@@ -810,44 +917,86 @@ struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
-static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
+static struct nvmem_cell *
+nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
- struct nvmem_cell *cell = NULL;
+ struct nvmem_cell *cell = ERR_PTR(-ENOENT);
+ struct nvmem_cell_lookup *lookup;
struct nvmem_device *nvmem;
+ const char *dev_id;
- nvmem = __nvmem_device_get(NULL, &cell, cell_id);
- if (IS_ERR(nvmem))
- return ERR_CAST(nvmem);
+ if (!dev)
+ return ERR_PTR(-EINVAL);
+ dev_id = dev_name(dev);
+
+ mutex_lock(&nvmem_lookup_mutex);
+
+ list_for_each_entry(lookup, &nvmem_lookup_list, node) {
+ if ((strcmp(lookup->dev_id, dev_id) == 0) &&
+ (strcmp(lookup->con_id, con_id) == 0)) {
+ /* This is the right entry. */
+ nvmem = __nvmem_device_get(NULL, lookup->nvmem_name);
+ if (IS_ERR(nvmem)) {
+ /* Provider may not be registered yet. */
+ cell = ERR_CAST(nvmem);
+ goto out;
+ }
+
+ cell = nvmem_find_cell_by_name(nvmem,
+ lookup->cell_name);
+ if (!cell) {
+ __nvmem_device_put(nvmem);
+ cell = ERR_PTR(-ENOENT);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&nvmem_lookup_mutex);
return cell;
}
#if IS_ENABLED(CONFIG_OF)
+static struct nvmem_cell *
+nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index)
+{
+ struct nvmem_cell *cell = NULL;
+ int i = 0;
+
+ mutex_lock(&nvmem_mutex);
+ list_for_each_entry(cell, &nvmem->cells, node) {
+ if (index == i++)
+ break;
+ }
+ mutex_unlock(&nvmem_mutex);
+
+ return cell;
+}
+
/**
* of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
*
* @np: Device tree node that uses the nvmem cell.
- * @name: nvmem cell name from nvmem-cell-names property, or NULL
- * for the cell at index 0 (the lone cell with no accompanying
- * nvmem-cell-names property).
+ * @id: nvmem cell name from nvmem-cell-names property, or NULL
+ * for the cell at index 0 (the lone cell with no accompanying
+ * nvmem-cell-names property).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed by
* nvmem_cell_put().
*/
-struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name)
+struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
struct device_node *cell_np, *nvmem_np;
- struct nvmem_cell *cell;
struct nvmem_device *nvmem;
- const __be32 *addr;
- int rval, len;
+ struct nvmem_cell *cell;
int index = 0;
/* if cell name exists, find index to the name */
- if (name)
- index = of_property_match_string(np, "nvmem-cell-names", name);
+ if (id)
+ index = of_property_match_string(np, "nvmem-cell-names", id);
cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
@@ -857,59 +1006,18 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
if (!nvmem_np)
return ERR_PTR(-EINVAL);
- nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
+ nvmem = __nvmem_device_get(nvmem_np, NULL);
of_node_put(nvmem_np);
if (IS_ERR(nvmem))
return ERR_CAST(nvmem);
- addr = of_get_property(cell_np, "reg", &len);
- if (!addr || (len < 2 * sizeof(u32))) {
- dev_err(&nvmem->dev, "nvmem: invalid reg on %pOF\n",
- cell_np);
- rval = -EINVAL;
- goto err_mem;
- }
-
- cell = kzalloc(sizeof(*cell), GFP_KERNEL);
+ cell = nvmem_find_cell_by_index(nvmem, index);
if (!cell) {
- rval = -ENOMEM;
- goto err_mem;
- }
-
- cell->nvmem = nvmem;
- cell->offset = be32_to_cpup(addr++);
- cell->bytes = be32_to_cpup(addr);
- cell->name = cell_np->name;
-
- addr = of_get_property(cell_np, "bits", &len);
- if (addr && len == (2 * sizeof(u32))) {
- cell->bit_offset = be32_to_cpup(addr++);
- cell->nbits = be32_to_cpup(addr);
+ __nvmem_device_put(nvmem);
+ return ERR_PTR(-ENOENT);
}
- if (cell->nbits)
- cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
- BITS_PER_BYTE);
-
- if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
- dev_err(&nvmem->dev,
- "cell %s unaligned to nvmem stride %d\n",
- cell->name, nvmem->stride);
- rval = -EINVAL;
- goto err_sanity;
- }
-
- nvmem_cell_add(cell);
-
return cell;
-
-err_sanity:
- kfree(cell);
-
-err_mem:
- __nvmem_device_put(nvmem);
-
- return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
@@ -918,27 +1026,29 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
* nvmem_cell_get() - Get nvmem cell of device form a given cell name
*
* @dev: Device that requests the nvmem cell.
- * @cell_id: nvmem cell name to get.
+ * @id: nvmem cell name to get (this corresponds with the name from the
+ * nvmem-cell-names property for DT systems and with the con_id from
+ * the lookup entry for non-DT systems).
*
* Return: Will be an ERR_PTR() on error or a valid pointer
* to a struct nvmem_cell. The nvmem_cell will be freed by
* nvmem_cell_put().
*/
-struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
struct nvmem_cell *cell;
if (dev->of_node) { /* try dt first */
- cell = of_nvmem_cell_get(dev->of_node, cell_id);
+ cell = of_nvmem_cell_get(dev->of_node, id);
if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
return cell;
}
- /* NULL cell_id only allowed for device tree; invalid otherwise */
- if (!cell_id)
+ /* NULL cell id only allowed for device tree; invalid otherwise */
+ if (!id)
return ERR_PTR(-EINVAL);
- return nvmem_cell_get_from_list(cell_id);
+ return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
@@ -1015,7 +1125,6 @@ void nvmem_cell_put(struct nvmem_cell *cell)
struct nvmem_device *nvmem = cell->nvmem;
__nvmem_device_put(nvmem);
- nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);
@@ -1267,7 +1376,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
* @buf: buffer to be written to cell.
*
* Return: length of bytes written or negative error code on failure.
- * */
+ */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
@@ -1323,7 +1432,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_read);
* @buf: buffer to be written.
*
* Return: length of bytes written or negative error code on failure.
- * */
+ */
int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset,
size_t bytes, void *buf)
@@ -1343,6 +1452,80 @@ int nvmem_device_write(struct nvmem_device *nvmem,
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
+/**
+ * nvmem_add_cell_table() - register a table of cell info entries
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_add_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_add_tail(&table->node, &nvmem_cell_tables);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
+
+/**
+ * nvmem_del_cell_table() - remove a previously registered cell info table
+ *
+ * @table: table of cell info entries
+ */
+void nvmem_del_cell_table(struct nvmem_cell_table *table)
+{
+ mutex_lock(&nvmem_cell_mutex);
+ list_del(&table->node);
+ mutex_unlock(&nvmem_cell_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
+
+/**
+ * nvmem_add_cell_lookups() - register a list of cell lookup entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_add_tail(&entries[i].node, &nvmem_lookup_list);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
+
+/**
+ * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
+ * entries
+ *
+ * @entries: array of cell lookup entries
+ * @nentries: number of cell lookup entries in the array
+ */
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
+{
+ int i;
+
+ mutex_lock(&nvmem_lookup_mutex);
+ for (i = 0; i < nentries; i++)
+ list_del(&entries[i].node);
+ mutex_unlock(&nvmem_lookup_mutex);
+}
+EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
+
+/**
+ * nvmem_dev_name() - Get the name of a given nvmem device.
+ *
+ * @nvmem: nvmem device.
+ *
+ * Return: name of the nvmem device.
+ */
+const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return dev_name(&nvmem->dev);
+}
+EXPORT_SYMBOL_GPL(nvmem_dev_name);
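
These entry points are the non-DT replacement for the old global cell list: board files register cell definitions against a provider's device name, plus lookups keyed by consumer dev_name()/con_id, and nvmem_cell_get() resolves through them when there is no of_node. How board code would be expected to wire this up (device and cell names invented for illustration):

	static struct nvmem_cell_info board_cells[] = {
		{ .name = "mac-address", .offset = 0x40, .bytes = 6, },
	};

	static struct nvmem_cell_table board_cell_table = {
		.nvmem_name	= "at24-0",		/* provider device name */
		.cells		= board_cells,
		.ncells		= ARRAY_SIZE(board_cells),
	};

	static struct nvmem_cell_lookup board_lookups[] = {
		{
			.nvmem_name	= "at24-0",
			.cell_name	= "mac-address",
			.dev_id		= "fec.0",	 /* consumer dev_name() */
			.con_id		= "mac-address", /* id given to nvmem_cell_get() */
		},
	};

	static void __init board_nvmem_init(void)
	{
		nvmem_add_cell_table(&board_cell_table);
		nvmem_add_cell_lookups(board_lookups, ARRAY_SIZE(board_lookups));
	}
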
+
static int __init nvmem_init(void)
{
return bus_register(&nvmem_bus_type);
diff --git a/drivers/nvmem/lpc18xx_eeprom.c b/drivers/nvmem/lpc18xx_eeprom.c
index a9534a6e8636..66cff1e2147a 100644
--- a/drivers/nvmem/lpc18xx_eeprom.c
+++ b/drivers/nvmem/lpc18xx_eeprom.c
@@ -236,7 +236,7 @@ static int lpc18xx_eeprom_probe(struct platform_device *pdev)
lpc18xx_nvmem_config.dev = dev;
lpc18xx_nvmem_config.priv = eeprom;
- eeprom->nvmem = nvmem_register(&lpc18xx_nvmem_config);
+ eeprom->nvmem = devm_nvmem_register(dev, &lpc18xx_nvmem_config);
if (IS_ERR(eeprom->nvmem)) {
ret = PTR_ERR(eeprom->nvmem);
goto err_clk;
@@ -255,11 +255,6 @@ err_clk:
static int lpc18xx_eeprom_remove(struct platform_device *pdev)
{
struct lpc18xx_eeprom_dev *eeprom = platform_get_drvdata(pdev);
- int ret;
-
- ret = nvmem_unregister(eeprom->nvmem);
- if (ret < 0)
- return ret;
clk_disable_unprepare(eeprom->clk);
diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c
index 7018e2ef5714..53122f59c4b2 100644
--- a/drivers/nvmem/mxs-ocotp.c
+++ b/drivers/nvmem/mxs-ocotp.c
@@ -177,7 +177,7 @@ static int mxs_ocotp_probe(struct platform_device *pdev)
ocotp_config.size = data->size;
ocotp_config.priv = otp;
ocotp_config.dev = dev;
- otp->nvmem = nvmem_register(&ocotp_config);
+ otp->nvmem = devm_nvmem_register(dev, &ocotp_config);
if (IS_ERR(otp->nvmem)) {
ret = PTR_ERR(otp->nvmem);
goto err_clk;
@@ -199,7 +199,7 @@ static int mxs_ocotp_remove(struct platform_device *pdev)
clk_unprepare(otp->clk);
- return nvmem_unregister(otp->nvmem);
+ return 0;
}
static struct platform_driver mxs_ocotp_driver = {
diff --git a/drivers/nvmem/sunxi_sid.c b/drivers/nvmem/sunxi_sid.c
index d020f89248fd..570a2e354f30 100644
--- a/drivers/nvmem/sunxi_sid.c
+++ b/drivers/nvmem/sunxi_sid.c
@@ -154,7 +154,7 @@ static int sunxi_sid_probe(struct platform_device *pdev)
struct resource *res;
struct nvmem_device *nvmem;
struct sunxi_sid *sid;
- int ret, i, size;
+ int i, size;
char *randomness;
const struct sunxi_sid_cfg *cfg;
@@ -181,15 +181,13 @@ static int sunxi_sid_probe(struct platform_device *pdev)
else
econfig.reg_read = sunxi_sid_read;
econfig.priv = sid;
- nvmem = nvmem_register(&econfig);
+ nvmem = devm_nvmem_register(dev, &econfig);
if (IS_ERR(nvmem))
return PTR_ERR(nvmem);
randomness = kzalloc(size, GFP_KERNEL);
- if (!randomness) {
- ret = -EINVAL;
- goto err_unreg_nvmem;
- }
+ if (!randomness)
+ return -ENOMEM;
for (i = 0; i < size; i++)
econfig.reg_read(sid, i, &randomness[i], 1);
@@ -200,17 +198,6 @@ static int sunxi_sid_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nvmem);
return 0;
-
-err_unreg_nvmem:
- nvmem_unregister(nvmem);
- return ret;
-}
-
-static int sunxi_sid_remove(struct platform_device *pdev)
-{
- struct nvmem_device *nvmem = platform_get_drvdata(pdev);
-
- return nvmem_unregister(nvmem);
}
static const struct sunxi_sid_cfg sun4i_a10_cfg = {
@@ -243,7 +230,6 @@ MODULE_DEVICE_TABLE(of, sunxi_sid_of_match);
static struct platform_driver sunxi_sid_driver = {
.probe = sunxi_sid_probe,
- .remove = sunxi_sid_remove,
.driver = {
.name = "eeprom-sunxi-sid",
.of_match_table = sunxi_sid_of_match,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 74eaedd5b860..13ebb16be64e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -67,6 +67,7 @@ bool of_node_name_eq(const struct device_node *np, const char *name)
return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
}
+EXPORT_SYMBOL(of_node_name_eq);
bool of_node_name_prefix(const struct device_node *np, const char *prefix)
{
@@ -75,6 +76,7 @@ bool of_node_name_prefix(const struct device_node *np, const char *prefix)
return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
}
+EXPORT_SYMBOL(of_node_name_prefix);
int of_n_addr_cells(struct device_node *np)
{
@@ -330,6 +332,8 @@ static bool __of_find_n_match_cpu_property(struct device_node *cpun,
ac = of_n_addr_cells(cpun);
cell = of_get_property(cpun, prop_name, &prop_len);
+ if (!cell && !ac && arch_match_cpu_phys_id(cpu, 0))
+ return true;
if (!cell || !ac)
return false;
prop_len /= sizeof(*cell) * ac;
@@ -390,7 +394,7 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
{
struct device_node *cpun;
- for_each_node_by_type(cpun, "cpu") {
+ for_each_of_cpu_node(cpun) {
if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
return cpun;
}
@@ -745,6 +749,45 @@ struct device_node *of_get_next_available_child(const struct device_node *node,
EXPORT_SYMBOL(of_get_next_available_child);
/**
+ * of_get_next_cpu_node - Iterate on cpu nodes
+ * @prev: previous child of the /cpus node, or NULL to get first
+ *
+ * Returns a cpu node pointer with refcount incremented, use of_node_put()
+ * on it when done. Returns NULL when prev is the last child. Decrements
+ * the refcount of prev.
+ */
+struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+ struct device_node *next = NULL;
+ unsigned long flags;
+ struct device_node *node;
+
+ if (!prev)
+ node = of_find_node_by_path("/cpus");
+
+ raw_spin_lock_irqsave(&devtree_lock, flags);
+ if (prev)
+ next = prev->sibling;
+ else if (node) {
+ next = node->child;
+ of_node_put(node);
+ }
+ for (; next; next = next->sibling) {
+ if (!(of_node_name_eq(next, "cpu") ||
+ (next->type && !of_node_cmp(next->type, "cpu"))))
+ continue;
+ if (!__of_device_is_available(next))
+ continue;
+ if (of_node_get(next))
+ break;
+ }
+ of_node_put(prev);
+ raw_spin_unlock_irqrestore(&devtree_lock, flags);
+ return next;
+}
+EXPORT_SYMBOL(of_get_next_cpu_node);
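
of_get_next_cpu_node() exists to back a for_each_of_cpu_node() iterator, added to include/linux/of.h in the same series, which replaces open-coded for_each_node_by_type(..., "cpu") walks; the of_numa.c hunk below is one such conversion. The iterator's shape and a typical walk:

	#define for_each_of_cpu_node(cpu) \
		for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
		     cpu = of_get_next_cpu_node(cpu))

	static void count_cpu_nodes(void)
	{
		struct device_node *np;
		int n = 0;

		for_each_of_cpu_node(np)	/* holds a reference per step */
			n++;
		pr_info("%d cpu nodes\n", n);
	}
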
+
+/**
* of_get_compatible_child - Find compatible child node
* @parent: parent node
* @compatible: compatible string
@@ -2013,7 +2056,7 @@ struct device_node *of_find_next_cache_node(const struct device_node *np)
/* OF on pmac has nodes instead of properties named "l2-cache"
* beneath CPU nodes.
*/
- if (!strcmp(np->type, "cpu"))
+ if (IS_ENABLED(CONFIG_PPC_PMAC) && !strcmp(np->type, "cpu"))
for_each_child_of_node(np, child)
if (!strcmp(child->type, "cache"))
return child;
@@ -2045,3 +2088,105 @@ int of_find_last_cache_level(unsigned int cpu)
return cache_level;
}
+
+/**
+ * of_map_rid - Translate a requester ID through a downstream mapping.
+ * @np: root complex device node.
+ * @rid: device requester ID to map.
+ * @map_name: property name of the map to use.
+ * @map_mask_name: optional property name of the mask to use.
+ * @target: optional pointer to a target device node.
+ * @id_out: optional pointer to receive the translated ID.
+ *
+ * Given a device requester ID, look up the appropriate implementation-defined
+ * platform ID and/or the target device which receives transactions on that
+ * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
+ * @id_out may be NULL if only the other is required. If @target points to
+ * a non-NULL device node pointer, only entries targeting that node will be
+ * matched; if it points to a NULL value, it will receive the device node of
+ * the first matching target phandle, with a reference held.
+ *
+ * Return: 0 on success or a standard error code on failure.
+ */
+int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ u32 map_mask, masked_rid;
+ int map_len;
+ const __be32 *map = NULL;
+
+ if (!np || !map_name || (!target && !id_out))
+ return -EINVAL;
+
+ map = of_get_property(np, map_name, &map_len);
+ if (!map) {
+ if (target)
+ return -ENODEV;
+ /* Otherwise, no map implies no translation */
+ *id_out = rid;
+ return 0;
+ }
+
+ if (!map_len || map_len % (4 * sizeof(*map))) {
+ pr_err("%pOF: Error: Bad %s length: %d\n", np,
+ map_name, map_len);
+ return -EINVAL;
+ }
+
+ /* The default is to select all bits. */
+ map_mask = 0xffffffff;
+
+ /*
+ * Can be overridden by "{iommu,msi}-map-mask" property.
+ * If of_property_read_u32() fails, the default is used.
+ */
+ if (map_mask_name)
+ of_property_read_u32(np, map_mask_name, &map_mask);
+
+ masked_rid = map_mask & rid;
+ for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
+ struct device_node *phandle_node;
+ u32 rid_base = be32_to_cpup(map + 0);
+ u32 phandle = be32_to_cpup(map + 1);
+ u32 out_base = be32_to_cpup(map + 2);
+ u32 rid_len = be32_to_cpup(map + 3);
+
+ if (rid_base & ~map_mask) {
+ pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
+ np, map_name, map_name,
+ map_mask, rid_base);
+ return -EFAULT;
+ }
+
+ if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
+ continue;
+
+ phandle_node = of_find_node_by_phandle(phandle);
+ if (!phandle_node)
+ return -ENODEV;
+
+ if (target) {
+ if (*target)
+ of_node_put(phandle_node);
+ else
+ *target = phandle_node;
+
+ if (*target != phandle_node)
+ continue;
+ }
+
+ if (id_out)
+ *id_out = masked_rid - rid_base + out_base;
+
+ pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
+ np, map_name, map_mask, rid_base, out_base,
+ rid_len, rid, masked_rid - rid_base + out_base);
+ return 0;
+ }
+
+ pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
+ np, map_name, rid, target && *target ? *target : NULL);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(of_map_rid);
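
The per-entry translation is just range arithmetic: mask the requester ID, check it against [rid-base, rid-base + length), and rebase it onto out-base. A standalone demo with invented values, compilable with any C compiler:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t map_mask = 0xffff;	/* from e.g. msi-map-mask */
		uint32_t rid_base = 0x0000, out_base = 0x0100, rid_len = 0x40;
		uint32_t rid = 0x0010;		/* bus 0, devfn 0x10 */
		uint32_t masked = rid & map_mask;

		if (masked >= rid_base && masked < rid_base + rid_len)
			printf("rid 0x%x -> 0x%x\n", rid,
			       masked - rid_base + out_base);
		else
			printf("rid 0x%x: no match\n", rid);
		return 0;
	}
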
diff --git a/drivers/of/device.c b/drivers/of/device.c
index c7fa5a9697c9..0f27fad9fe94 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -207,7 +207,8 @@ static ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len
return -ENODEV;
/* Name & Type */
- csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name,
+ /* %p eats all alphanum characters, so %c must be used here */
+ csize = snprintf(str, len, "of:N%pOFn%c%s", dev->of_node, 'T',
dev->of_node->type);
tsize = csize;
len -= csize;
@@ -286,7 +287,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
if ((!dev) || (!dev->of_node))
return;
- add_uevent_var(env, "OF_NAME=%s", dev->of_node->name);
+ add_uevent_var(env, "OF_NAME=%pOFn", dev->of_node);
add_uevent_var(env, "OF_FULLNAME=%pOF", dev->of_node);
if (dev->of_node->type && strcmp("<NULL>", dev->of_node->type) != 0)
add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 02ad93a304a4..e1f6f392a4c0 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/of_pci.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -588,8 +587,8 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
* "msi-map" property.
*/
for (parent_dev = dev; parent_dev; parent_dev = parent_dev->parent)
- if (!of_pci_map_rid(parent_dev->of_node, rid_in, "msi-map",
- "msi-map-mask", np, &rid_out))
+ if (!of_map_rid(parent_dev->of_node, rid_in, "msi-map",
+ "msi-map-mask", np, &rid_out))
break;
return rid_out;
}
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index e92391d6d1bd..5ad1342f5682 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -97,8 +97,8 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered phy %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered phy %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -127,8 +127,8 @@ static int of_mdiobus_register_device(struct mii_bus *mdio,
return rc;
}
- dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n",
- child->name, addr);
+ dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n",
+ child, addr);
return 0;
}
@@ -263,8 +263,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
continue;
/* be noisy to encourage people to set reg property */
- dev_info(&mdio->dev, "scan phy %s at address %i\n",
- child->name, addr);
+ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n",
+ child, addr);
if (of_mdiobus_child_is_phy(child)) {
rc = of_mdiobus_register_phy(mdio, child, addr);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index 27d9b4bba535..35c64a4295e0 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -24,18 +24,9 @@ static void __init of_numa_parse_cpu_nodes(void)
{
u32 nid;
int r;
- struct device_node *cpus;
- struct device_node *np = NULL;
-
- cpus = of_find_node_by_path("/cpus");
- if (!cpus)
- return;
-
- for_each_child_of_node(cpus, np) {
- /* Skip things that are not CPUs */
- if (of_node_cmp(np->type, "cpu") != 0)
- continue;
+ struct device_node *np;
+ for_each_of_cpu_node(np) {
r = of_property_read_u32(np, "numa-node-id", &nid);
if (r)
continue;
@@ -46,8 +37,6 @@ static void __init of_numa_parse_cpu_nodes(void)
else
node_set(nid, numa_nodes_parsed);
}
-
- of_node_put(cpus);
}
static int __init of_numa_parse_memory_nodes(void)
@@ -163,8 +152,8 @@ int of_node_to_nid(struct device_node *device)
np = of_get_next_parent(np);
}
if (np && r)
- pr_warn("Invalid \"numa-node-id\" property in node %s\n",
- np->name);
+ pr_warn("Invalid \"numa-node-id\" property in node %pOFn\n",
+ np);
of_node_put(np);
/*
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 216175d11d3d..5d1567025358 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -27,6 +27,14 @@ struct alias_prop {
char stem[0];
};
+#if defined(CONFIG_SPARC)
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
+#else
+#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
+#endif
+
+#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
+
extern struct mutex of_mutex;
extern struct list_head aliases_lookup;
extern struct kset *of_kset;
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index eda57ef12fd0..42b1f73ac5f6 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -425,8 +425,8 @@ static int build_changeset_next_level(struct overlay_changeset *ovcs,
for_each_child_of_node(overlay_node, child) {
ret = add_changeset_node(ovcs, target_node, child);
if (ret) {
- pr_debug("Failed to apply node @%pOF/%s, err=%d\n",
- target_node, child->name, ret);
+ pr_debug("Failed to apply node @%pOF/%pOFn, err=%d\n",
+ target_node, child, ret);
of_node_put(child);
return ret;
}
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 6c59673933e9..04ad312fd85b 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -91,8 +91,8 @@ static void of_device_make_bus_id(struct device *dev)
*/
reg = of_get_property(node, "reg", NULL);
if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) {
- dev_set_name(dev, dev_name(dev) ? "%llx.%s:%s" : "%llx.%s",
- (unsigned long long)addr, node->name,
+ dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn",
+ (unsigned long long)addr, node,
dev_name(dev));
return;
}
@@ -142,8 +142,8 @@ struct platform_device *of_device_alloc(struct device_node *np,
WARN_ON(rc);
}
if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
- pr_debug("not all legacy IRQ resources mapped for %s\n",
- np->name);
+ pr_debug("not all legacy IRQ resources mapped for %pOFn\n",
+ np);
}
dev->dev.of_node = of_node_get(np);
diff --git a/drivers/of/unittest-data/overlay_15.dts b/drivers/of/unittest-data/overlay_15.dts
index b98f2514df4b..5728490474f6 100644
--- a/drivers/of/unittest-data/overlay_15.dts
+++ b/drivers/of/unittest-data/overlay_15.dts
@@ -20,8 +20,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 25cf397b8f6b..4ea024d908ee 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -103,8 +103,8 @@
#size-cells = <0>;
reg = <0>;
- test-mux-dev {
- reg = <32>;
+ test-mux-dev@20 {
+ reg = <0x20>;
compatible = "unittest-i2c-dev";
status = "okay";
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 41b49716ac75..a3a6866765f2 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -212,8 +212,8 @@ static int __init of_unittest_check_node_linkage(struct device_node *np)
for_each_child_of_node(np, child) {
if (child->parent != np) {
- pr_err("Child node %s links to wrong parent %s\n",
- child->name, np->name);
+ pr_err("Child node %pOFn links to wrong parent %pOFn\n",
+ child, np);
rc = -EINVAL;
goto put_child;
}
@@ -299,6 +299,10 @@ static void __init of_unittest_printf(void)
of_unittest_printf_one(np, "%pOF", full_name);
of_unittest_printf_one(np, "%pOFf", full_name);
+ of_unittest_printf_one(np, "%pOFn", "dev");
+ of_unittest_printf_one(np, "%2pOFn", "dev");
+ of_unittest_printf_one(np, "%5pOFn", " dev");
+ of_unittest_printf_one(np, "%pOFnc", "dev:test-sub-device");
of_unittest_printf_one(np, "%pOFp", phandle_str);
of_unittest_printf_one(np, "%pOFP", "dev@100");
of_unittest_printf_one(np, "ABC %pOFP ABC", "ABC dev@100 ABC");
@@ -1046,16 +1050,16 @@ static void __init of_unittest_platform_populate(void)
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(of_find_device_by_node(grandchild),
- "Could not create device for node '%s'\n",
- grandchild->name);
+ "Could not create device for node '%pOFn'\n",
+ grandchild);
}
of_platform_depopulate(&test_bus->dev);
for_each_child_of_node(np, child) {
for_each_child_of_node(child, grandchild)
unittest(!of_find_device_by_node(grandchild),
- "device didn't get destroyed '%s'\n",
- grandchild->name);
+ "device didn't get destroyed '%pOFn'\n",
+ grandchild);
}
platform_device_unregister(test_bus);
@@ -2357,11 +2361,14 @@ static __init void of_unittest_overlay_high_level(void)
}
}
- for (np = overlay_base_root->child; np; np = np->sibling) {
- if (of_get_child_by_name(of_root, np->name)) {
- unittest(0, "illegal node name in overlay_base %s",
- np->name);
- return;
+ for_each_child_of_node(overlay_base_root, np) {
+ struct device_node *base_child;
+ for_each_child_of_node(of_root, base_child) {
+ if (!strcmp(np->full_name, base_child->full_name)) {
+ unittest(0, "illegal node name in overlay_base %pOFn",
+ np);
+ return;
+ }
}
}
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 56ff8f6d31fc..2dcc30429e8b 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,6 +98,9 @@ config PCI_ECAM
config PCI_LOCKLESS_CONFIG
bool
+config PCI_BRIDGE_EMUL
+ bool
+
config PCI_IOV
bool "PCI IOV support"
depends on PCI
@@ -132,6 +135,23 @@ config PCI_PASID
If unsure, say N.
+config PCI_P2PDMA
+ bool "PCI peer-to-peer transfer support"
+ depends on PCI && ZONE_DEVICE
+ select GENERIC_ALLOCATOR
+ help
+ Enables drivers to do PCI peer-to-peer transactions to and from
+ BARs that are exposed in other devices that are part of
+ the hierarchy where peer-to-peer DMA is guaranteed by the PCI
+ specification to work (i.e. anything below a single PCI bridge).
+
+ Many PCIe root complexes do not support P2P transactions and
+ it's hard to tell which ones support it at all, so at this time,
+ P2P DMA transactions must be between devices behind the same root
+ port.
+
+ If unsure, say N.
+
config PCI_LABEL
def_bool y if (DMI || ACPI)
depends on PCI
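For context on the new PCI_P2PDMA option, a rough consumer-side sketch of the in-kernel API added with it; pci_alloc_p2pmem() and pci_free_p2pmem() come from the new p2pdma.c, while the surrounding function and the provider setup are assumptions:

	/*
	 * Borrow a 4K buffer from a peer device's exposed BAR memory;
	 * the provider must have registered it beforehand, e.g. via
	 * pci_p2pdma_add_resource().
	 */
	static int example_use_p2pmem(struct pci_dev *provider)
	{
		void *buf;

		buf = pci_alloc_p2pmem(provider, SZ_4K);
		if (!buf)
			return -ENOMEM;

		/* ... program the peer device to DMA to/from 'buf' ... */

		pci_free_p2pmem(provider, buf, SZ_4K);
		return 0;
	}

Per the help text above, both devices must sit behind the same root port for such a transfer to be valid.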
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1b2cfe51e8d7..f2bda77a2df1 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
obj-$(CONFIG_PCI_MSI) += msi.o
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
+obj-$(CONFIG_PCI_BRIDGE_EMUL) += pci-bridge-emul.o
obj-$(CONFIG_ACPI) += pci-acpi.o
obj-$(CONFIG_PCI_LABEL) += pci-label.o
obj-$(CONFIG_X86_INTEL_MID) += pci-mid.o
@@ -26,6 +27,7 @@ obj-$(CONFIG_PCI_SYSCALL) += syscall.o
obj-$(CONFIG_PCI_STUB) += pci-stub.o
obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
+obj-$(CONFIG_PCI_P2PDMA) += p2pdma.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
# Endpoint library must be initialized before its users
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index a3ad2fe185b9..544922f097c0 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -33,7 +33,7 @@ DEFINE_RAW_SPINLOCK(pci_lock);
#endif
#define PCI_OP_READ(size, type, len) \
-int pci_bus_read_config_##size \
+int noinline pci_bus_read_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type *value) \
{ \
int res; \
@@ -48,7 +48,7 @@ int pci_bus_read_config_##size \
}
#define PCI_OP_WRITE(size, type, len) \
-int pci_bus_write_config_##size \
+int noinline pci_bus_write_config_##size \
(struct pci_bus *bus, unsigned int devfn, int pos, type value) \
{ \
int res; \
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 028b287466fb..6671946dbf66 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -9,12 +9,14 @@ config PCI_MVEBU
depends on MVEBU_MBUS
depends on ARM
depends on OF
+ select PCI_BRIDGE_EMUL
config PCI_AARDVARK
bool "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ select PCI_BRIDGE_EMUL
help
Add support for Aardvark 64bit PCIe Host Controller. This
controller is part of the South Bridge of the Marvell Armada
@@ -231,7 +233,7 @@ config PCIE_ROCKCHIP_EP
available to support GEN2 with 4 slots.
config PCIE_MEDIATEK
- bool "MediaTek PCIe controller"
+ tristate "MediaTek PCIe controller"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
index 5d2ce72c7a52..fcf91eacfc63 100644
--- a/drivers/pci/controller/dwc/Makefile
+++ b/drivers/pci/controller/dwc/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
-obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone.o
obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index ce9224a36f62..a32d6dde7a57 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -542,7 +542,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
};
/*
- * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
+ * dra7xx_pcie_unaligned_memaccess: workaround for AM572x/AM571x Errata i870
* @dra7xx: the dra7xx device where the workaround should be applied
*
* Accesses to the PCIe slave port that are not 32-bit aligned will result
@@ -552,7 +552,7 @@ static const struct of_device_id of_dra7xx_pcie_match[] = {
*
* To avoid this issue, set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1.
*/
-static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev)
+static int dra7xx_pcie_unaligned_memaccess(struct device *dev)
{
int ret;
struct device_node *np = dev->of_node;
@@ -704,6 +704,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_RC);
+
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
+ if (ret)
+ dev_err(dev, "WA for Errata i870 not applied\n");
+
ret = dra7xx_add_pcie_port(dra7xx, pdev);
if (ret < 0)
goto err_gpio;
@@ -717,7 +722,7 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_EP);
- ret = dra7xx_pcie_ep_unaligned_memaccess(dev);
+ ret = dra7xx_pcie_unaligned_memaccess(dev);
if (ret)
goto err_gpio;
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 4a9a673b4777..2cbef2d7c207 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -50,6 +50,7 @@ struct imx6_pcie {
struct regmap *iomuxc_gpr;
struct reset_control *pciephy_reset;
struct reset_control *apps_reset;
+ struct reset_control *turnoff_reset;
enum imx6_pcie_variants variant;
u32 tx_deemph_gen1;
u32 tx_deemph_gen2_3p5db;
@@ -97,6 +98,16 @@ struct imx6_pcie {
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
/* PHY registers (not memory-mapped) */
+#define PCIE_PHY_ATEOVRD 0x10
+#define PCIE_PHY_ATEOVRD_EN (0x1 << 2)
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0
+#define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1
+
+#define PCIE_PHY_MPLL_OVRD_IN_LO 0x11
+#define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2
+#define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f
+#define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9)
+
#define PCIE_PHY_RX_ASIC_OUT 0x100D
#define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0)
@@ -508,6 +519,50 @@ static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12);
}
+static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie)
+{
+ unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy);
+ int mult, div;
+ u32 val;
+
+ switch (phy_rate) {
+ case 125000000:
+ /*
+ * The default settings of the MPLL are for a 125MHz input
+ * clock, so no need to reconfigure anything in that case.
+ */
+ return 0;
+ case 100000000:
+ mult = 25;
+ div = 0;
+ break;
+ case 200000000:
+ mult = 25;
+ div = 1;
+ break;
+ default:
+ dev_err(imx6_pcie->pci->dev,
+ "Unsupported PHY reference clock rate %lu\n", phy_rate);
+ return -EINVAL;
+ }
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, &val);
+ val &= ~(PCIE_PHY_MPLL_MULTIPLIER_MASK <<
+ PCIE_PHY_MPLL_MULTIPLIER_SHIFT);
+ val |= mult << PCIE_PHY_MPLL_MULTIPLIER_SHIFT;
+ val |= PCIE_PHY_MPLL_MULTIPLIER_OVRD;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_MPLL_OVRD_IN_LO, val);
+
+ pcie_phy_read(imx6_pcie, PCIE_PHY_ATEOVRD, &val);
+ val &= ~(PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK <<
+ PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT);
+ val |= div << PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT;
+ val |= PCIE_PHY_ATEOVRD_EN;
+ pcie_phy_write(imx6_pcie, PCIE_PHY_ATEOVRD, val);
+
+ return 0;
+}
+
static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -542,6 +597,24 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie)
return -EINVAL;
}
+static void imx6_pcie_ltssm_enable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6Q:
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2,
+ IMX6Q_GPR12_PCIE_CTL_2);
+ break;
+ case IMX7D:
+ reset_control_deassert(imx6_pcie->apps_reset);
+ break;
+ }
+}
+
static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
{
struct dw_pcie *pci = imx6_pcie->pci;
@@ -560,11 +633,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie)
dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp);
/* Start LTSSM. */
- if (imx6_pcie->variant == IMX7D)
- reset_control_deassert(imx6_pcie->apps_reset);
- else
- regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
- IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
+ imx6_pcie_ltssm_enable(dev);
ret = imx6_pcie_wait_for_link(imx6_pcie);
if (ret)
@@ -632,6 +701,7 @@ static int imx6_pcie_host_init(struct pcie_port *pp)
imx6_pcie_assert_core_reset(imx6_pcie);
imx6_pcie_init_phy(imx6_pcie);
imx6_pcie_deassert_core_reset(imx6_pcie);
+ imx6_setup_phy_mpll(imx6_pcie);
dw_pcie_setup_rc(pp);
imx6_pcie_establish_link(imx6_pcie);
@@ -682,6 +752,94 @@ static const struct dw_pcie_ops dw_pcie_ops = {
.link_up = imx6_pcie_link_up,
};
+#ifdef CONFIG_PM_SLEEP
+static void imx6_pcie_ltssm_disable(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ switch (imx6_pcie->variant) {
+ case IMX6SX:
+ case IMX6QP:
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX6Q_GPR12_PCIE_CTL_2, 0);
+ break;
+ case IMX7D:
+ reset_control_assert(imx6_pcie->apps_reset);
+ break;
+ default:
+ dev_err(dev, "ltssm_disable not supported\n");
+ }
+}
+
+static void imx6_pcie_pm_turnoff(struct imx6_pcie *imx6_pcie)
+{
+ reset_control_assert(imx6_pcie->turnoff_reset);
+ reset_control_deassert(imx6_pcie->turnoff_reset);
+
+ /*
+ * Components with an upstream port must respond to
+ * PME_Turn_Off with PME_TO_Ack but we can't check.
+ *
+ * The standard recommends a 1-10ms timeout, after which we
+ * proceed as if the acks had been received.
+ */
+ usleep_range(1000, 10000);
+}
+
+static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
+{
+ clk_disable_unprepare(imx6_pcie->pcie);
+ clk_disable_unprepare(imx6_pcie->pcie_phy);
+ clk_disable_unprepare(imx6_pcie->pcie_bus);
+
+ if (imx6_pcie->variant == IMX7D) {
+ regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL,
+ IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
+ }
+}
+
+static int imx6_pcie_suspend_noirq(struct device *dev)
+{
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_pm_turnoff(imx6_pcie);
+ imx6_pcie_clk_disable(imx6_pcie);
+ imx6_pcie_ltssm_disable(dev);
+
+ return 0;
+}
+
+static int imx6_pcie_resume_noirq(struct device *dev)
+{
+ int ret;
+ struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev);
+ struct pcie_port *pp = &imx6_pcie->pci->pp;
+
+ if (imx6_pcie->variant != IMX7D)
+ return 0;
+
+ imx6_pcie_assert_core_reset(imx6_pcie);
+ imx6_pcie_init_phy(imx6_pcie);
+ imx6_pcie_deassert_core_reset(imx6_pcie);
+ dw_pcie_setup_rc(pp);
+
+ ret = imx6_pcie_establish_link(imx6_pcie);
+ if (ret < 0)
+ dev_info(dev, "pcie link is down after resume.\n");
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx6_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx6_pcie_suspend_noirq,
+ imx6_pcie_resume_noirq)
+};
+
static int imx6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -776,6 +934,13 @@ static int imx6_pcie_probe(struct platform_device *pdev)
break;
}
+ /* Grab turnoff reset */
+ imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
+ if (IS_ERR(imx6_pcie->turnoff_reset)) {
+ dev_err(dev, "Failed to get TURNOFF reset control\n");
+ return PTR_ERR(imx6_pcie->turnoff_reset);
+ }
+
/* Grab GPR config register range */
imx6_pcie->iomuxc_gpr =
syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
@@ -848,6 +1013,7 @@ static struct platform_driver imx6_pcie_driver = {
.name = "imx6q-pcie",
.of_match_table = imx6_pcie_of_match,
.suppress_bind_attrs = true,
+ .pm = &imx6_pcie_pm_ops,
},
.probe = imx6_pcie_probe,
.shutdown = imx6_pcie_shutdown,
diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c
deleted file mode 100644
index 0682213328e9..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone-dw.c
+++ /dev/null
@@ -1,484 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * DesignWare application register space functions for Keystone PCI controller
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#include <linux/irq.h>
-#include <linux/irqdomain.h>
-#include <linux/irqreturn.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_pci.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-
-#include "pcie-designware.h"
-#include "pci-keystone.h"
-
-/* Application register defines */
-#define LTSSM_EN_VAL 1
-#define LTSSM_STATE_MASK 0x1f
-#define LTSSM_STATE_L0 0x11
-#define DBI_CS2_EN_VAL 0x20
-#define OB_XLAT_EN_VAL 2
-
-/* Application registers */
-#define CMD_STATUS 0x004
-#define CFG_SETUP 0x008
-#define OB_SIZE 0x030
-#define CFG_PCIM_WIN_SZ_IDX 3
-#define CFG_PCIM_WIN_CNT 32
-#define SPACE0_REMOTE_CFG_OFFSET 0x1000
-#define OB_OFFSET_INDEX(n) (0x200 + (8 * n))
-#define OB_OFFSET_HI(n) (0x204 + (8 * n))
-
-/* IRQ register defines */
-#define IRQ_EOI 0x050
-#define IRQ_STATUS 0x184
-#define IRQ_ENABLE_SET 0x188
-#define IRQ_ENABLE_CLR 0x18c
-
-#define MSI_IRQ 0x054
-#define MSI0_IRQ_STATUS 0x104
-#define MSI0_IRQ_ENABLE_SET 0x108
-#define MSI0_IRQ_ENABLE_CLR 0x10c
-#define IRQ_STATUS 0x184
-#define MSI_IRQ_OFFSET 4
-
-/* Error IRQ bits */
-#define ERR_AER BIT(5) /* ECRC error */
-#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
-#define ERR_CORR BIT(3) /* Correctable error */
-#define ERR_NONFATAL BIT(2) /* Non-fatal error */
-#define ERR_FATAL BIT(1) /* Fatal error */
-#define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */
-#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
- ERR_NONFATAL | ERR_FATAL | ERR_SYS)
-#define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI)
-#define ERR_IRQ_STATUS_RAW 0x1c0
-#define ERR_IRQ_STATUS 0x1c4
-#define ERR_IRQ_ENABLE_SET 0x1c8
-#define ERR_IRQ_ENABLE_CLR 0x1cc
-
-/* Config space registers */
-#define DEBUG0 0x728
-
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
-
-static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
- u32 *bit_pos)
-{
- *reg_offset = offset % 8;
- *bit_pos = offset >> 3;
-}
-
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- return ks_pcie->app.start + MSI_IRQ;
-}
-
-static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
-{
- return readl(ks_pcie->va_app_base + offset);
-}
-
-static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val)
-{
- writel(val, ks_pcie->va_app_base + offset);
-}
-
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- u32 pending, vector;
- int src, virq;
-
- pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
-
- /*
- * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit
- * shows 1, 9, 17, 25 and so forth
- */
- for (src = 0; src < 4; src++) {
- if (BIT(src) & pending) {
- vector = offset + (src << 3);
- virq = irq_linear_revmap(pp->irq_domain, vector);
- dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
- src, vector, virq);
- generic_handle_irq(virq);
- }
- }
-}
-
-void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
-{
- u32 reg_offset, bit_pos;
- struct keystone_pcie *ks_pcie;
- struct dw_pcie *pci;
-
- pci = to_dw_pcie_from_pp(pp);
- ks_pcie = to_keystone_pcie(pci);
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
-
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
- BIT(bit_pos));
- ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
-}
-
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
-{
- u32 reg_offset, bit_pos;
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
- ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
- BIT(bit_pos));
-}
-
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp)
-{
- return dw_pcie_allocate_domains(pp);
-}
-
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
-{
- int i;
-
- for (i = 0; i < PCI_NUM_INTX; i++)
- ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
-}
-
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct device *dev = pci->dev;
- u32 pending;
- int virq;
-
- pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
-
- if (BIT(0) & pending) {
- virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
- dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
- generic_handle_irq(virq);
- }
-
- /* EOI the INTx interrupt */
- ks_dw_app_writel(ks_pcie, IRQ_EOI, offset);
-}
-
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
-{
- ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
-}
-
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
-{
- u32 status;
-
- status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL;
- if (!status)
- return IRQ_NONE;
-
- if (status & ERR_FATAL_IRQ)
- dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n",
- status);
-
- /* Ack the IRQ; status bits are RW1C */
- ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status);
- return IRQ_HANDLED;
-}
-
-static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
-{
-}
-
-static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
-{
-}
-
-static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
- .name = "Keystone-PCI-Legacy-IRQ",
- .irq_ack = ks_dw_pcie_ack_legacy_irq,
- .irq_mask = ks_dw_pcie_mask_legacy_irq,
- .irq_unmask = ks_dw_pcie_unmask_legacy_irq,
-};
-
-static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
- unsigned int irq, irq_hw_number_t hw_irq)
-{
- irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
- handle_level_irq);
- irq_set_chip_data(irq, d->host_data);
-
- return 0;
-}
-
-static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
- .map = ks_dw_pcie_init_legacy_irq_map,
- .xlate = irq_domain_xlate_onetwocell,
-};
-
-/**
- * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
- * registers
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (!(val & DBI_CS2_EN_VAL));
-}
-
-/**
- * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
- *
- * Since modification of dbi_cs2 involves different clock domain, read the
- * status back to ensure the transition is complete.
- */
-static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val);
-
- do {
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- } while (val & DBI_CS2_EN_VAL);
-}
-
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 start = pp->mem->start, end = pp->mem->end;
- int i, tr_size;
- u32 val;
-
- /* Disable BARs for inbound access */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /* Set outbound translation size per window division */
- ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7);
-
- tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
-
- /* Using Direct 1:1 mapping of RC <-> PCI memory space */
- for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
- ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1);
- ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0);
- start += tr_size;
- }
-
- /* Enable OB translation */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val);
-}
-
-/**
- * ks_pcie_cfg_setup() - Set up configuration space address for a device
- *
- * @ks_pcie: ptr to keystone_pcie structure
- * @bus: Bus number the device is residing on
- * @devfn: device, function number info
- *
- * Forms and returns the address of configuration space mapped in PCIESS
- * address space 0. Also configures CFG_SETUP for remote configuration space
- * access.
- *
- * The address space has two regions to access configuration - local and remote.
- * We access local region for bus 0 (as RC is attached on bus 0) and remote
- * region for others with TYPE 1 access when bus > 1. As for device on bus = 1,
- * we will do TYPE 0 access as it will be on our secondary bus (logical).
- * CFG_SETUP is needed only for remote configuration access.
- */
-static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
- unsigned int devfn)
-{
- u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- u32 regval;
-
- if (bus == 0)
- return pci->dbi_base;
-
- regval = (bus << 16) | (device << 8) | function;
-
- /*
- * Since Bus#1 will be a virtual bus, we need to have TYPE0
- * access only.
- * TYPE 1
- */
- if (bus != 1)
- regval |= BIT(24);
-
- ks_dw_app_writel(ks_pcie, CFG_SETUP, regval);
- return pp->va_cfg0_base;
-}
-
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_read(addr + where, size, val);
-}
-
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u8 bus_num = bus->number;
- void __iomem *addr;
-
- addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
-
- return dw_pcie_write(addr + where, size, val);
-}
-
-/**
- * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
- *
- * This sets BAR0 to enable inbound access for MSI_IRQ register
- */
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
-
- /* Configure and set up BAR0 */
- ks_dw_pcie_set_dbi_mode(ks_pcie);
-
- /* Enable BAR0 */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
-
- ks_dw_pcie_clear_dbi_mode(ks_pcie);
-
- /*
- * For BAR0, just setting bus address for inbound writes (MSI) should
- * be sufficient. Use physical address to avoid any conflicts.
- */
- dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
-}
-
-/**
- * ks_dw_pcie_link_up() - Check if link up
- */
-int ks_dw_pcie_link_up(struct dw_pcie *pci)
-{
- u32 val;
-
- val = dw_pcie_readl_dbi(pci, DEBUG0);
- return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
-}
-
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
-{
- u32 val;
-
- /* Disable Link training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- val &= ~LTSSM_EN_VAL;
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-
- /* Initiate Link Training */
- val = ks_dw_app_readl(ks_pcie, CMD_STATUS);
- ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-}
-
-/**
- * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
- *
- * Ioremap the register resources, initialize legacy irq domain
- * and call dw_pcie_v3_65_host_init() API to initialize the Keystone
- * PCI host controller.
- */
-int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np)
-{
- struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
- struct device *dev = pci->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
-
- /* Index 0 is the config reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
-
- /*
- * We set these same and is used in pcie rd/wr_other_conf
- * functions
- */
- pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
- pp->va_cfg1_base = pp->va_cfg0_base;
-
- /* Index 1 is the application reg. space address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(ks_pcie->va_app_base))
- return PTR_ERR(ks_pcie->va_app_base);
-
- ks_pcie->app = *res;
-
- /* Create legacy IRQ domain */
- ks_pcie->legacy_irq_domain =
- irq_domain_add_linear(ks_pcie->legacy_intc_np,
- PCI_NUM_INTX,
- &ks_dw_pcie_legacy_irq_domain_ops,
- NULL);
- if (!ks_pcie->legacy_irq_domain) {
- dev_err(dev, "Failed to add irq domain for legacy irqs\n");
- return -EINVAL;
- }
-
- return dw_pcie_host_init(pp);
-}
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index e88bd221fffe..14f2b0b4ed5e 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -9,40 +9,510 @@
* Implementation based on pci-exynos.c and pcie-designware.c
*/
-#include <linux/irqchip/chained_irq.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
-#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/msi.h>
-#include <linux/of_irq.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
-#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/resource.h>
#include <linux/signal.h>
#include "pcie-designware.h"
-#include "pci-keystone.h"
-#define DRIVER_NAME "keystone-pcie"
+#define PCIE_VENDORID_MASK 0xffff
+#define PCIE_DEVICEID_SHIFT 16
+
+/* Application registers */
+#define CMD_STATUS 0x004
+#define LTSSM_EN_VAL BIT(0)
+#define OB_XLAT_EN_VAL BIT(1)
+#define DBI_CS2 BIT(5)
+
+#define CFG_SETUP 0x008
+#define CFG_BUS(x) (((x) & 0xff) << 16)
+#define CFG_DEVICE(x) (((x) & 0x1f) << 8)
+#define CFG_FUNC(x) ((x) & 0x7)
+#define CFG_TYPE1 BIT(24)
+
+#define OB_SIZE 0x030
+#define SPACE0_REMOTE_CFG_OFFSET 0x1000
+#define OB_OFFSET_INDEX(n) (0x200 + (8 * (n)))
+#define OB_OFFSET_HI(n) (0x204 + (8 * (n)))
+#define OB_ENABLEN BIT(0)
+#define OB_WIN_SIZE 8 /* 8MB */
+
+/* IRQ register defines */
+#define IRQ_EOI 0x050
+#define IRQ_STATUS 0x184
+#define IRQ_ENABLE_SET 0x188
+#define IRQ_ENABLE_CLR 0x18c
+
+#define MSI_IRQ 0x054
+#define MSI0_IRQ_STATUS 0x104
+#define MSI0_IRQ_ENABLE_SET 0x108
+#define MSI0_IRQ_ENABLE_CLR 0x10c
+#define IRQ_STATUS 0x184
+#define MSI_IRQ_OFFSET 4
+
+#define ERR_IRQ_STATUS 0x1c4
+#define ERR_IRQ_ENABLE_SET 0x1c8
+#define ERR_AER BIT(5) /* ECRC error */
+#define ERR_AXI BIT(4) /* AXI tag lookup fatal error */
+#define ERR_CORR BIT(3) /* Correctable error */
+#define ERR_NONFATAL BIT(2) /* Non-fatal error */
+#define ERR_FATAL BIT(1) /* Fatal error */
+#define ERR_SYS BIT(0) /* System error */
+#define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \
+ ERR_NONFATAL | ERR_FATAL | ERR_SYS)
+
+#define MAX_MSI_HOST_IRQS 8
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK 0xb008
+#define PCIE_RC_K2E 0xb009
+#define PCIE_RC_K2L 0xb00a
+#define PCIE_RC_K2G 0xb00b
+
+#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+
+struct keystone_pcie {
+ struct dw_pcie *pci;
+ /* PCI Device ID */
+ u32 device_id;
+ int num_legacy_host_irqs;
+ int legacy_host_irqs[PCI_NUM_INTX];
+ struct device_node *legacy_intc_np;
+
+ int num_msi_host_irqs;
+ int msi_host_irqs[MAX_MSI_HOST_IRQS];
+ int num_lanes;
+ u32 num_viewport;
+ struct phy **phy;
+ struct device_link **link;
+ struct device_node *msi_intc_np;
+ struct irq_domain *legacy_irq_domain;
+ struct device_node *np;
+
+ int error_irq;
+
+ /* Application register space */
+ void __iomem *va_app_base; /* DT 1st resource */
+ struct resource app;
+};
-/* DEV_STAT_CTRL */
-#define PCIE_CAP_BASE 0x70
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+ u32 *bit_pos)
+{
+ *reg_offset = offset % 8;
+ *bit_pos = offset >> 3;
+}
-/* PCIE controller device IDs */
-#define PCIE_RC_K2HK 0xb008
-#define PCIE_RC_K2E 0xb009
-#define PCIE_RC_K2L 0xb00a
+static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ return ks_pcie->app.start + MSI_IRQ;
+}
+
+static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset)
+{
+ return readl(ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset,
+ u32 val)
+{
+ writel(val, ks_pcie->va_app_base + offset);
+}
+
+static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ u32 pending, vector;
+ int src, virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4));
+
+ /*
+ * MSI0 status bits 0-3 show vectors 0, 8, 16 and 24; MSI1 status
+ * bits show vectors 1, 9, 17 and 25, and so forth
+ */
+ for (src = 0; src < 4; src++) {
+ if (BIT(src) & pending) {
+ vector = offset + (src << 3);
+ virq = irq_linear_revmap(pp->irq_domain, vector);
+ dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n",
+ src, vector, virq);
+ generic_handle_irq(virq);
+ }
+ }
+}
+
+static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp)
+{
+ u32 reg_offset, bit_pos;
+ struct keystone_pcie *ks_pcie;
+ struct dw_pcie *pci;
+
+ pci = to_dw_pcie_from_pp(pp);
+ ks_pcie = to_keystone_pcie(pci);
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4),
+ BIT(bit_pos));
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET);
+}
+
+static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+ u32 reg_offset, bit_pos;
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+ ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4),
+ BIT(bit_pos));
+}
+
+static int ks_pcie_msi_host_init(struct pcie_port *pp)
+{
+ return dw_pcie_allocate_domains(pp);
+}
+
+static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+ int i;
+
+ for (i = 0; i < PCI_NUM_INTX; i++)
+ ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1);
+}
+
+static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie,
+ int offset)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ u32 pending;
+ int virq;
+
+ pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4));
+
+ if (BIT(0) & pending) {
+ virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+ dev_dbg(dev, ": irq: irq_offset %d, virq %d\n", offset, virq);
+ generic_handle_irq(virq);
+ }
+
+ /* EOI the INTx interrupt */
+ ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset);
+}
+
+static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie)
+{
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL);
+}
+
+static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie)
+{
+ u32 reg;
+ struct device *dev = ks_pcie->pci->dev;
+
+ reg = ks_pcie_app_readl(ks_pcie, ERR_IRQ_STATUS);
+ if (!reg)
+ return IRQ_NONE;
+
+ if (reg & ERR_SYS)
+ dev_err(dev, "System Error\n");
+
+ if (reg & ERR_FATAL)
+ dev_err(dev, "Fatal Error\n");
+
+ if (reg & ERR_NONFATAL)
+ dev_dbg(dev, "Non Fatal Error\n");
+
+ if (reg & ERR_CORR)
+ dev_dbg(dev, "Correctable Error\n");
+
+ if (reg & ERR_AXI)
+ dev_err(dev, "AXI tag lookup fatal Error\n");
+
+ if (reg & ERR_AER)
+ dev_err(dev, "ECRC Error\n");
+
+ ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg);
+
+ return IRQ_HANDLED;
+}
+
+static void ks_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_pcie_legacy_irq_chip = {
+ .name = "Keystone-PCI-Legacy-IRQ",
+ .irq_ack = ks_pcie_ack_legacy_irq,
+ .irq_mask = ks_pcie_mask_legacy_irq,
+ .irq_unmask = ks_pcie_unmask_legacy_irq,
+};
+
+static int ks_pcie_init_legacy_irq_map(struct irq_domain *d,
+ unsigned int irq,
+ irq_hw_number_t hw_irq)
+{
+ irq_set_chip_and_handler(irq, &ks_pcie_legacy_irq_chip,
+ handle_level_irq);
+ irq_set_chip_data(irq, d->host_data);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ks_pcie_legacy_irq_domain_ops = {
+ .map = ks_pcie_init_legacy_irq_map,
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (!(val & DBI_CS2));
+}
+
+/**
+ * ks_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~DBI_CS2;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+
+ do {
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ } while (val & DBI_CS2);
+}
+
+static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
+ u32 num_viewport = ks_pcie->num_viewport;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ u64 start = pp->mem->start;
+ u64 end = pp->mem->end;
+ int i;
+
+ /* Disable BARs for inbound access */
+ ks_pcie_set_dbi_mode(ks_pcie);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0);
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ val = ilog2(OB_WIN_SIZE);
+ ks_pcie_app_writel(ks_pcie, OB_SIZE, val);
+
+ /* Using Direct 1:1 mapping of RC <-> PCI memory space */
+ for (i = 0; i < num_viewport && (start < end); i++) {
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_INDEX(i),
+ lower_32_bits(start) | OB_ENABLEN);
+ ks_pcie_app_writel(ks_pcie, OB_OFFSET_HI(i),
+ upper_32_bits(start));
+ start += OB_WIN_SIZE;
+ }
+
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val |= OB_XLAT_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, val);
+}
+
+static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 *val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_read(pp->va_cfg0_base + where, size, val);
+}
+
+static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ unsigned int devfn, int where, int size,
+ u32 val)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+ u32 reg;
+
+ reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) |
+ CFG_FUNC(PCI_FUNC(devfn));
+ if (bus->parent->number != pp->root_bus_nr)
+ reg |= CFG_TYPE1;
+ ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg);
+
+ return dw_pcie_write(pp->va_cfg0_base + where, size, val);
+}
+
+/**
+ * ks_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+ struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+ struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
+
+ /* Configure and set up BAR0 */
+ ks_pcie_set_dbi_mode(ks_pcie);
+
+ /* Enable BAR0 */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1);
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1);
+
+ ks_pcie_clear_dbi_mode(ks_pcie);
+
+ /*
+ * For BAR0, just setting bus address for inbound writes (MSI) should
+ * be sufficient. Use physical address to avoid any conflicts.
+ */
+ dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start);
+}
+
+/**
+ * ks_pcie_link_up() - Check if link up
+ */
+static int ks_pcie_link_up(struct dw_pcie *pci)
+{
+ u32 val;
+
+ val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
+ val &= PORT_LOGIC_LTSSM_STATE_MASK;
+ return (val == PORT_LOGIC_LTSSM_STATE_L0);
+}
+
+static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+ u32 val;
-#define to_keystone_pcie(x) dev_get_drvdata((x)->dev)
+ /* Disable Link training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ val &= ~LTSSM_EN_VAL;
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
-static void quirk_limit_mrrs(struct pci_dev *dev)
+ /* Initiate Link Training */
+ val = ks_pcie_app_readl(ks_pcie, CMD_STATUS);
+ ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val);
+}
+
+/**
+ * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize the legacy irq domain
+ * and call dw_pcie_host_init() to initialize the Keystone
+ * PCI host controller.
+ */
+static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie)
+{
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct pcie_port *pp = &pci->pp;
+ struct device *dev = pci->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct resource *res;
+
+ /* Index 0 is the config reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
+ if (IS_ERR(pci->dbi_base))
+ return PTR_ERR(pci->dbi_base);
+
+ /*
+ * We set these to the same address; both are used in the pcie
+ * rd/wr_other_conf functions
+ */
+ pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+ pp->va_cfg1_base = pp->va_cfg0_base;
+
+ /* Index 1 is the application reg. space address */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ ks_pcie->va_app_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ks_pcie->va_app_base))
+ return PTR_ERR(ks_pcie->va_app_base);
+
+ ks_pcie->app = *res;
+
+ /* Create legacy IRQ domain */
+ ks_pcie->legacy_irq_domain =
+ irq_domain_add_linear(ks_pcie->legacy_intc_np,
+ PCI_NUM_INTX,
+ &ks_pcie_legacy_irq_domain_ops,
+ NULL);
+ if (!ks_pcie->legacy_irq_domain) {
+ dev_err(dev, "Failed to add irq domain for legacy irqs\n");
+ return -EINVAL;
+ }
+
+ return dw_pcie_host_init(pp);
+}
+
+static void ks_pcie_quirk(struct pci_dev *dev)
{
struct pci_bus *bus = dev->bus;
- struct pci_dev *bridge = bus->self;
+ struct pci_dev *bridge;
static const struct pci_device_id rc_pci_devids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
@@ -50,11 +520,13 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
.class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+ { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2G),
+ .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
{ 0, },
};
if (pci_is_root_bus(bus))
- return;
+ bridge = dev;
/* look for the host bridge */
while (!pci_is_root_bus(bus)) {
@@ -62,43 +534,39 @@ static void quirk_limit_mrrs(struct pci_dev *dev)
bus = bus->parent;
}
- if (bridge) {
- /*
- * Keystone PCI controller has a h/w limitation of
- * 256 bytes maximum read request size. It can't handle
- * anything higher than this. So force this limit on
- * all downstream devices.
- */
- if (pci_match_id(rc_pci_devids, bridge)) {
- if (pcie_get_readrq(dev) > 256) {
- dev_info(&dev->dev, "limiting MRRS to 256\n");
- pcie_set_readrq(dev, 256);
- }
+ if (!bridge)
+ return;
+
+ /*
+ * The Keystone PCI controller has a h/w limitation of
+ * 256 bytes maximum read request size. It can't handle
+ * anything higher than this, so force this limit on
+ * all downstream devices.
+ */
+ if (pci_match_id(rc_pci_devids, bridge)) {
+ if (pcie_get_readrq(dev) > 256) {
+ dev_info(&dev->dev, "limiting MRRS to 256\n");
+ pcie_set_readrq(dev, 256);
}
}
}
-DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk);
static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
{
struct dw_pcie *pci = ks_pcie->pci;
- struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
- unsigned int retries;
-
- dw_pcie_setup_rc(pp);
if (dw_pcie_link_up(pci)) {
dev_info(dev, "Link already up\n");
return 0;
}
+ ks_pcie_initiate_link_train(ks_pcie);
+
/* check if the link is up or not */
- for (retries = 0; retries < 5; retries++) {
- ks_dw_pcie_initiate_link_train(ks_pcie);
- if (!dw_pcie_wait_for_link(pci))
- return 0;
- }
+ if (!dw_pcie_wait_for_link(pci))
+ return 0;
dev_err(dev, "phy link never came up\n");
return -ETIMEDOUT;
@@ -121,7 +589,7 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+ ks_pcie_handle_msi_irq(ks_pcie, offset);
chained_irq_exit(chip, desc);
}
@@ -150,7 +618,7 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
* ack operation.
*/
chained_irq_enter(chip, desc);
- ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+ ks_pcie_handle_legacy_irq(ks_pcie, irq_offset);
chained_irq_exit(chip, desc);
}
@@ -222,7 +690,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
ks_pcie_legacy_irq_handler,
ks_pcie);
}
- ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+ ks_pcie_enable_legacy_irqs(ks_pcie);
/* MSI IRQ */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
@@ -234,7 +702,7 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
}
if (ks_pcie->error_irq > 0)
- ks_dw_pcie_enable_error_irq(ks_pcie);
+ ks_pcie_enable_error_irq(ks_pcie);
}
/*
@@ -242,8 +710,8 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
* bus error instead of returning 0xffffffff. This handler always returns 0
* for this kind of fault.
*/
-static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
+static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
+ struct pt_regs *regs)
{
unsigned long instr = *(unsigned long *) instruction_pointer(regs);
@@ -257,59 +725,78 @@ static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
return 0;
}
+static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
+{
+ int ret;
+ unsigned int id;
+ struct regmap *devctrl_regs;
+ struct dw_pcie *pci = ks_pcie->pci;
+ struct device *dev = pci->dev;
+ struct device_node *np = dev->of_node;
+
+ devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
+ if (IS_ERR(devctrl_regs))
+ return PTR_ERR(devctrl_regs);
+
+ ret = regmap_read(devctrl_regs, 0, &id);
+ if (ret)
+ return ret;
+
+ dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK);
+ dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT);
+
+ return 0;
+}
+
static int __init ks_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct keystone_pcie *ks_pcie = to_keystone_pcie(pci);
- u32 val;
+ int ret;
+
+ dw_pcie_setup_rc(pp);
ks_pcie_establish_link(ks_pcie);
- ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+ ks_pcie_setup_rc_app_regs(ks_pcie);
ks_pcie_setup_interrupts(ks_pcie);
writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
pci->dbi_base + PCI_IO_BASE);
- /* update the Vendor ID */
- writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID);
-
- /* update the DEV_STAT_CTRL to publish right mrrs */
- val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
- val &= ~PCI_EXP_DEVCTL_READRQ;
- /* set the mrrs to 256 bytes */
- val |= BIT(12);
- writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+ ret = ks_pcie_init_id(ks_pcie);
+ if (ret < 0)
+ return ret;
/*
* PCIe access errors that result in OCP errors are caught by ARM as
* "External aborts"
*/
- hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+ hook_fault_code(17, ks_pcie_fault, SIGBUS, 0,
"Asynchronous external abort");
return 0;
}
-static const struct dw_pcie_host_ops keystone_pcie_host_ops = {
- .rd_other_conf = ks_dw_pcie_rd_other_conf,
- .wr_other_conf = ks_dw_pcie_wr_other_conf,
+static const struct dw_pcie_host_ops ks_pcie_host_ops = {
+ .rd_other_conf = ks_pcie_rd_other_conf,
+ .wr_other_conf = ks_pcie_wr_other_conf,
.host_init = ks_pcie_host_init,
- .msi_set_irq = ks_dw_pcie_msi_set_irq,
- .msi_clear_irq = ks_dw_pcie_msi_clear_irq,
- .get_msi_addr = ks_dw_pcie_get_msi_addr,
- .msi_host_init = ks_dw_pcie_msi_host_init,
- .msi_irq_ack = ks_dw_pcie_msi_irq_ack,
- .scan_bus = ks_dw_pcie_v3_65_scan_bus,
+ .msi_set_irq = ks_pcie_msi_set_irq,
+ .msi_clear_irq = ks_pcie_msi_clear_irq,
+ .get_msi_addr = ks_pcie_get_msi_addr,
+ .msi_host_init = ks_pcie_msi_host_init,
+ .msi_irq_ack = ks_pcie_msi_irq_ack,
+ .scan_bus = ks_pcie_v3_65_scan_bus,
};
-static irqreturn_t pcie_err_irq_handler(int irq, void *priv)
+static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv)
{
struct keystone_pcie *ks_pcie = priv;
- return ks_dw_pcie_handle_error_irq(ks_pcie);
+ return ks_pcie_handle_error_irq(ks_pcie);
}
-static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
- struct platform_device *pdev)
+static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie,
+ struct platform_device *pdev)
{
struct dw_pcie *pci = ks_pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -338,7 +825,7 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
if (ks_pcie->error_irq <= 0)
dev_info(dev, "no error IRQ defined\n");
else {
- ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler,
+ ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler,
IRQF_SHARED, "pcie-error-irq", ks_pcie);
if (ret < 0) {
dev_err(dev, "failed to request error IRQ %d\n",
@@ -347,8 +834,8 @@ static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
}
}
- pp->ops = &keystone_pcie_host_ops;
- ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+ pp->ops = &ks_pcie_host_ops;
+ ret = ks_pcie_dw_host_init(ks_pcie);
if (ret) {
dev_err(dev, "failed to initialize host\n");
return ret;
@@ -365,28 +852,62 @@ static const struct of_device_id ks_pcie_of_match[] = {
{ },
};
-static const struct dw_pcie_ops dw_pcie_ops = {
- .link_up = ks_dw_pcie_link_up,
+static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = {
+ .link_up = ks_pcie_link_up,
};
-static int __exit ks_pcie_remove(struct platform_device *pdev)
+static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie)
{
- struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ int num_lanes = ks_pcie->num_lanes;
- clk_disable_unprepare(ks_pcie->clk);
+ while (num_lanes--) {
+ phy_power_off(ks_pcie->phy[num_lanes]);
+ phy_exit(ks_pcie->phy[num_lanes]);
+ }
+}
+
+static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie)
+{
+ int i;
+ int ret;
+ int num_lanes = ks_pcie->num_lanes;
+
+ for (i = 0; i < num_lanes; i++) {
+ ret = phy_init(ks_pcie->phy[i]);
+ if (ret < 0)
+ goto err_phy;
+
+ ret = phy_power_on(ks_pcie->phy[i]);
+ if (ret < 0) {
+ phy_exit(ks_pcie->phy[i]);
+ goto err_phy;
+ }
+ }
return 0;
+
+err_phy:
+ while (--i >= 0) {
+ phy_power_off(ks_pcie->phy[i]);
+ phy_exit(ks_pcie->phy[i]);
+ }
+
+ return ret;
}
static int __init ks_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
- struct resource *res;
- void __iomem *reg_p;
- struct phy *phy;
+ struct device_link **link;
+ u32 num_viewport;
+ struct phy **phy;
+ u32 num_lanes;
+ char name[10];
int ret;
+ int i;
ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL);
if (!ks_pcie)
@@ -397,54 +918,99 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pci->dev = dev;
- pci->ops = &dw_pcie_ops;
+ pci->ops = &ks_pcie_dw_pcie_ops;
- ks_pcie->pci = pci;
+ ret = of_property_read_u32(np, "num-viewport", &num_viewport);
+ if (ret < 0) {
+ dev_err(dev, "unable to read *num-viewport* property\n");
+ return ret;
+ }
- /* initialize SerDes Phy if present */
- phy = devm_phy_get(dev, "pcie-phy");
- if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER)
- return PTR_ERR(phy);
+ ret = of_property_read_u32(np, "num-lanes", &num_lanes);
+ if (ret)
+ num_lanes = 1;
- if (!IS_ERR_OR_NULL(phy)) {
- ret = phy_init(phy);
- if (ret < 0)
- return ret;
+ phy = devm_kzalloc(dev, sizeof(*phy) * num_lanes, GFP_KERNEL);
+ if (!phy)
+ return -ENOMEM;
+
+ link = devm_kzalloc(dev, sizeof(*link) * num_lanes, GFP_KERNEL);
+ if (!link)
+ return -ENOMEM;
+
+ for (i = 0; i < num_lanes; i++) {
+ snprintf(name, sizeof(name), "pcie-phy%d", i);
+ phy[i] = devm_phy_optional_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_link;
+ }
+
+ if (!phy[i])
+ continue;
+
+ link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
+ if (!link[i]) {
+ ret = -EINVAL;
+ goto err_link;
+ }
}
- /* index 2 is to read PCI DEVICE_ID */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- reg_p = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg_p))
- return PTR_ERR(reg_p);
- ks_pcie->device_id = readl(reg_p) >> 16;
- devm_iounmap(dev, reg_p);
- devm_release_mem_region(dev, res->start, resource_size(res));
+ ks_pcie->np = np;
+ ks_pcie->pci = pci;
+ ks_pcie->link = link;
+ ks_pcie->num_lanes = num_lanes;
+ ks_pcie->num_viewport = num_viewport;
+ ks_pcie->phy = phy;
- ks_pcie->np = dev->of_node;
- platform_set_drvdata(pdev, ks_pcie);
- ks_pcie->clk = devm_clk_get(dev, "pcie");
- if (IS_ERR(ks_pcie->clk)) {
- dev_err(dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(ks_pcie->clk);
+ ret = ks_pcie_enable_phy(ks_pcie);
+ if (ret) {
+ dev_err(dev, "failed to enable phy\n");
+ goto err_link;
}
- ret = clk_prepare_enable(ks_pcie->clk);
- if (ret)
- return ret;
platform_set_drvdata(pdev, ks_pcie);
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_get_sync failed\n");
+ goto err_get_sync;
+ }
- ret = ks_add_pcie_port(ks_pcie, pdev);
+ ret = ks_pcie_add_pcie_port(ks_pcie, pdev);
if (ret < 0)
- goto fail_clk;
+ goto err_get_sync;
return 0;
-fail_clk:
- clk_disable_unprepare(ks_pcie->clk);
+
+err_get_sync:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+
+err_link:
+ while (--i >= 0 && link[i])
+ device_link_del(link[i]);
return ret;
}
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+ struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+ struct device_link **link = ks_pcie->link;
+ int num_lanes = ks_pcie->num_lanes;
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+ ks_pcie_disable_phy(ks_pcie);
+ while (num_lanes--)
+ device_link_del(link[num_lanes]);
+
+ return 0;
+}
+
static struct platform_driver ks_pcie_driver __refdata = {
.probe = ks_pcie_probe,
.remove = __exit_p(ks_pcie_remove),
diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h
deleted file mode 100644
index 8a13da391543..000000000000
--- a/drivers/pci/controller/dwc/pci-keystone.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Keystone PCI Controller's common includes
- *
- * Copyright (C) 2013-2014 Texas Instruments., Ltd.
- * http://www.ti.com
- *
- * Author: Murali Karicheri <m-karicheri2@ti.com>
- */
-
-#define MAX_MSI_HOST_IRQS 8
-
-struct keystone_pcie {
- struct dw_pcie *pci;
- struct clk *clk;
- /* PCI Device ID */
- u32 device_id;
- int num_legacy_host_irqs;
- int legacy_host_irqs[PCI_NUM_INTX];
- struct device_node *legacy_intc_np;
-
- int num_msi_host_irqs;
- int msi_host_irqs[MAX_MSI_HOST_IRQS];
- struct device_node *msi_intc_np;
- struct irq_domain *legacy_irq_domain;
- struct device_node *np;
-
- int error_irq;
-
- /* Application register space */
- void __iomem *va_app_base; /* DT 1st resource */
- struct resource app;
-};
-
-/* Keystone DW specific MSI controller APIs/definitions */
-void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
-phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
-
-/* Keystone specific PCI controller APIs */
-void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
-void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie);
-irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie);
-int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
- struct device_node *msi_intc_np);
-int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val);
-int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val);
-void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
-void ks_dw_pcie_msi_irq_ack(int i, struct pcie_port *pp);
-void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
-void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
-int ks_dw_pcie_msi_host_init(struct pcie_port *pp);
-int ks_dw_pcie_link_up(struct dw_pcie *pci);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 9f1a5e399b70..0989d880ac46 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -36,6 +36,10 @@
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PORT_LINK_MODE_8_LANES (0xf << 16)
+#define PCIE_PORT_DEBUG0 0x728
+#define PORT_LOGIC_LTSSM_STATE_MASK 0x1f
+#define PORT_LOGIC_LTSSM_STATE_L0 0x11
+
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8)
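[The new PCIE_PORT_DEBUG0 definitions expose the controller's LTSSM state through the DBI space; a DesignWare-based driver can treat the link as up once the state machine reaches L0, which is how the reworked Keystone driver in this series uses them. A sketch of that check, with the function name illustrative:

	/* Sketch: derive link-up from the LTSSM state in PCIE_PORT_DEBUG0. */
	static int my_dw_pcie_link_up(struct dw_pcie *pci)
	{
		u32 val;

		val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
		return (val & PORT_LOGIC_LTSSM_STATE_MASK) ==
		       PORT_LOGIC_LTSSM_STATE_L0;
	}
]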
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 5352e0c3be82..9b599296205d 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -467,8 +467,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci,
return 0;
}
-static int __init kirin_add_pcie_port(struct dw_pcie *pci,
- struct platform_device *pdev)
+static int kirin_add_pcie_port(struct dw_pcie *pci,
+ struct platform_device *pdev)
{
int ret;
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 4352c1cb926d..d185ea5fe996 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1089,7 +1089,6 @@ static int qcom_pcie_host_init(struct pcie_port *pp)
struct qcom_pcie *pcie = to_qcom_pcie(pci);
int ret;
- pm_runtime_get_sync(pci->dev);
qcom_ep_reset_assert(pcie);
ret = pcie->ops->init(pcie);
@@ -1126,7 +1125,6 @@ err_disable_phy:
phy_power_off(pcie->phy);
err_deinit:
pcie->ops->deinit(pcie);
- pm_runtime_put(pci->dev);
return ret;
}
@@ -1216,6 +1214,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
return -ENOMEM;
pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
@@ -1225,44 +1229,56 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->ops = of_device_get_match_data(dev);
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
- if (IS_ERR(pcie->reset))
- return PTR_ERR(pcie->reset);
+ if (IS_ERR(pcie->reset)) {
+ ret = PTR_ERR(pcie->reset);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf");
pcie->parf = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->parf))
- return PTR_ERR(pcie->parf);
+ if (IS_ERR(pcie->parf)) {
+ ret = PTR_ERR(pcie->parf);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pci->dbi_base = devm_pci_remap_cfg_resource(dev, res);
- if (IS_ERR(pci->dbi_base))
- return PTR_ERR(pci->dbi_base);
+ if (IS_ERR(pci->dbi_base)) {
+ ret = PTR_ERR(pci->dbi_base);
+ goto err_pm_runtime_put;
+ }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi");
pcie->elbi = devm_ioremap_resource(dev, res);
- if (IS_ERR(pcie->elbi))
- return PTR_ERR(pcie->elbi);
+ if (IS_ERR(pcie->elbi)) {
+ ret = PTR_ERR(pcie->elbi);
+ goto err_pm_runtime_put;
+ }
pcie->phy = devm_phy_optional_get(dev, "pciephy");
- if (IS_ERR(pcie->phy))
- return PTR_ERR(pcie->phy);
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
+ goto err_pm_runtime_put;
+ }
ret = pcie->ops->get_resources(pcie);
if (ret)
- return ret;
+ goto err_pm_runtime_put;
pp->ops = &qcom_pcie_dw_ops;
if (IS_ENABLED(CONFIG_PCI_MSI)) {
pp->msi_irq = platform_get_irq_byname(pdev, "msi");
- if (pp->msi_irq < 0)
- return pp->msi_irq;
+ if (pp->msi_irq < 0) {
+ ret = pp->msi_irq;
+ goto err_pm_runtime_put;
+ }
}
ret = phy_init(pcie->phy);
if (ret) {
pm_runtime_disable(&pdev->dev);
- return ret;
+ goto err_pm_runtime_put;
}
platform_set_drvdata(pdev, pcie);
@@ -1271,10 +1287,16 @@ static int qcom_pcie_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "cannot initialize host\n");
pm_runtime_disable(&pdev->dev);
- return ret;
+ goto err_pm_runtime_put;
}
return 0;
+
+err_pm_runtime_put:
+ pm_runtime_put(dev);
+ pm_runtime_disable(dev);
+
+ return ret;
}
static const struct of_device_id qcom_pcie_match[] = {
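[The qcom rework above moves the runtime-PM reference from host init into probe, which means every failure after pm_runtime_get_sync() must drop that reference before disabling runtime PM. Reduced to its skeleton, with my_probe() and my_setup() as hypothetical names:

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static int my_setup(struct platform_device *pdev)
	{
		return 0;	/* placeholder for the real resource setup */
	}

	/* Sketch: balanced runtime-PM handling across a probe function. */
	static int my_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		int ret;

		pm_runtime_enable(dev);
		ret = pm_runtime_get_sync(dev);
		if (ret < 0) {
			pm_runtime_disable(dev);
			return ret;
		}

		ret = my_setup(pdev);
		if (ret)
			goto err_pm_runtime_put; /* later failures unwind the get */

		return 0;

	err_pm_runtime_put:
		pm_runtime_put(dev);
		pm_runtime_disable(dev);
		return ret;
	}
]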
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 6b4555ff2548..750081c1cb48 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -20,12 +20,16 @@
#include <linux/of_pci.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/* PCIe core registers */
+#define PCIE_CORE_DEV_ID_REG 0x0
#define PCIE_CORE_CMD_STATUS_REG 0x4
#define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0)
#define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1)
#define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2)
+#define PCIE_CORE_DEV_REV_REG 0x8
+#define PCIE_CORE_PCIEXP_CAP 0xc0
#define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8
#define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4)
#define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
@@ -41,7 +45,10 @@
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7)
#define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8)
-
+#define PCIE_CORE_INT_A_ASSERT_ENABLE 1
+#define PCIE_CORE_INT_B_ASSERT_ENABLE 2
+#define PCIE_CORE_INT_C_ASSERT_ENABLE 3
+#define PCIE_CORE_INT_D_ASSERT_ENABLE 4
/* PIO registers base address and register offsets */
#define PIO_BASE_ADDR 0x4000
#define PIO_CTRL (PIO_BASE_ADDR + 0x0)
@@ -93,7 +100,9 @@
#define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5)
#define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6)
#define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10)
+#define PCIE_MSG_LOG_REG (CONTROL_BASE_ADDR + 0x30)
#define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40)
+#define PCIE_MSG_PM_PME_MASK BIT(7)
#define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44)
#define PCIE_ISR0_MSI_INT_PENDING BIT(24)
#define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val))
@@ -189,6 +198,7 @@ struct advk_pcie {
struct mutex msi_used_lock;
u16 msi_msg;
int root_bus_nr;
+ struct pci_bridge_emul bridge;
};
static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg)
@@ -390,6 +400,109 @@ static int advk_pcie_wait_pio(struct advk_pcie *pcie)
return -ETIMEDOUT;
}
+
+static pci_bridge_emul_read_status_t
+advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_SLTCTL:
+ *value = PCI_EXP_SLTSTA_PDS << 16;
+ return PCI_BRIDGE_EMUL_HANDLED;
+
+ case PCI_EXP_RTCTL: {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+ *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_EXP_RTSTA: {
+ u32 isr0 = advk_readl(pcie, PCIE_ISR0_REG);
+ u32 msglog = advk_readl(pcie, PCIE_MSG_LOG_REG);
+ *value = (isr0 & PCIE_MSG_PM_PME_MASK) << 16 | (msglog >> 16);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+ case PCI_CAP_LIST_ID:
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCAP:
+ case PCI_EXP_LNKCTL:
+ *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+}
+
+static void
+advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct advk_pcie *pcie = bridge->data;
+
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
+ case PCI_EXP_RTCTL:
+ new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
+ advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
+ break;
+
+ case PCI_EXP_RTSTA:
+ new = (new & PCI_EXP_RTSTA_PME) >> 9;
+ advk_writel(pcie, new, PCIE_ISR0_REG);
+ break;
+
+ default:
+ break;
+ }
+}
+
+struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
+ .read_pcie = advk_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = advk_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+{
+ struct pci_bridge_emul *bridge = &pcie->bridge;
+
+ bridge->conf.vendor = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) & 0xffff;
+ bridge->conf.device = advk_readl(pcie, PCIE_CORE_DEV_ID_REG) >> 16;
+ bridge->conf.class_revision =
+ advk_readl(pcie, PCIE_CORE_DEV_REV_REG) & 0xff;
+
+	/* Support 32-bit I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
+
+	/* Support 64-bit prefetchable memory */
+ bridge->conf.pref_mem_base = PCI_PREF_RANGE_TYPE_64;
+ bridge->conf.pref_mem_limit = PCI_PREF_RANGE_TYPE_64;
+
+ /* Support interrupt A for MSI feature */
+ bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+
+ bridge->has_pcie = true;
+ bridge->data = pcie;
+ bridge->ops = &advk_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
+}
+
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
int devfn)
{
@@ -411,6 +524,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
return PCIBIOS_DEVICE_NOT_FOUND;
}
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_read(&pcie->bridge, where,
+ size, val);
+
/* Start PIO */
advk_writel(pcie, 0, PIO_START);
advk_writel(pcie, 1, PIO_ISR);
@@ -418,7 +535,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_RD_TYPE0;
else
reg |= PCIE_CONFIG_RD_TYPE1;
@@ -463,6 +580,10 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
+ if (bus->number == pcie->root_bus_nr)
+ return pci_bridge_emul_conf_write(&pcie->bridge, where,
+ size, val);
+
if (where % size)
return PCIBIOS_SET_FAILED;
@@ -473,7 +594,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Program the control register */
reg = advk_readl(pcie, PIO_CTRL);
reg &= ~PIO_CTRL_TYPE_MASK;
- if (bus->number == pcie->root_bus_nr)
+ if (bus->primary == pcie->root_bus_nr)
reg |= PCIE_CONFIG_WR_TYPE0;
else
reg |= PCIE_CONFIG_WR_TYPE1;
@@ -875,6 +996,8 @@ static int advk_pcie_probe(struct platform_device *pdev)
advk_pcie_setup_hw(pcie);
+ advk_sw_pci_bridge_init(pcie);
+
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
dev_err(dev, "Failed to initialize irq\n");
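[Both Aardvark above and mvebu below now route root-bus config accesses through the shared pci-bridge-emul layer: the driver seeds struct pci_bridge_emul with its static config fields, points ->data and ->ops at itself, calls pci_bridge_emul_init(), and only implements ops for registers backed by real hardware; everything else is served from bridge->conf. A condensed sketch under the assumption that all my_*/MY_* names stand in for driver specifics:

	#include <linux/io.h>
	#include <linux/pci.h>
	#include "../pci-bridge-emul.h"	/* header added by this series */

	#define MY_VENDOR_ID	0x1234		/* hypothetical IDs/offsets */
	#define MY_DEV_ID_REG	0x0
	#define MY_PCIEXP_CAP	0xc0

	struct my_pcie {
		void __iomem *base;
		struct pci_bridge_emul bridge;
	};

	static u32 my_readl(struct my_pcie *pcie, u32 reg)
	{
		return readl(pcie->base + reg);
	}

	/* Sketch: only hardware-backed registers are handled in the op;
	 * the core emulates the rest from bridge->conf.
	 */
	static pci_bridge_emul_read_status_t
	my_bridge_pcie_read(struct pci_bridge_emul *bridge, int reg, u32 *value)
	{
		struct my_pcie *pcie = bridge->data;

		switch (reg) {
		case PCI_EXP_LNKCTL:	/* lives in controller registers */
			*value = my_readl(pcie, MY_PCIEXP_CAP + reg);
			return PCI_BRIDGE_EMUL_HANDLED;
		default:
			return PCI_BRIDGE_EMUL_NOT_HANDLED;
		}
	}

	static struct pci_bridge_emul_ops my_bridge_ops = {
		.read_pcie = my_bridge_pcie_read,
	};

	static void my_bridge_init(struct my_pcie *pcie)
	{
		struct pci_bridge_emul *bridge = &pcie->bridge;

		bridge->conf.vendor = MY_VENDOR_ID;
		bridge->conf.device = my_readl(pcie, MY_DEV_ID_REG) >> 16;
		bridge->has_pcie = true;
		bridge->data = pcie;
		bridge->ops = &my_bridge_ops;

		pci_bridge_emul_init(bridge);
	}

The config accessors then simply test for the root bus and delegate to pci_bridge_emul_conf_read()/pci_bridge_emul_conf_write(), as both hunks do.]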
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c
index d8f10451f273..c742881b5061 100644
--- a/drivers/pci/controller/pci-host-common.c
+++ b/drivers/pci/controller/pci-host-common.c
@@ -58,9 +58,7 @@ err_out:
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops)
{
- const char *type;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
struct pci_config_window *cfg;
struct list_head resources;
@@ -70,12 +68,6 @@ int pci_host_common_probe(struct platform_device *pdev,
if (!bridge)
return -ENOMEM;
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
of_pci_check_probe_only();
/* Parse and map our Configuration Space windows */
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index a41d79b8d46a..fa0fc46edb0c 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -22,6 +22,7 @@
#include <linux/of_platform.h>
#include "../pci.h"
+#include "../pci-bridge-emul.h"
/*
* PCIe unit register offsets.
@@ -63,61 +64,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-enum {
- PCISWCAP = PCI_BRIDGE_CONTROL + 2,
- PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
- PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
- PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
- PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
- PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
- PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
- PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
- PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
- PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
- PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
- PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
- PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
- PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
- PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
- PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
-};
-
-/* PCI configuration space of a PCI-to-PCI bridge */
-struct mvebu_sw_pci_bridge {
- u16 vendor;
- u16 device;
- u16 command;
- u16 status;
- u16 class;
- u8 interface;
- u8 revision;
- u8 bist;
- u8 header_type;
- u8 latency_timer;
- u8 cache_line_size;
- u32 bar[2];
- u8 primary_bus;
- u8 secondary_bus;
- u8 subordinate_bus;
- u8 secondary_latency_timer;
- u8 iobase;
- u8 iolimit;
- u16 secondary_status;
- u16 membase;
- u16 memlimit;
- u16 iobaseupper;
- u16 iolimitupper;
- u32 romaddr;
- u8 intline;
- u8 intpin;
- u16 bridgectrl;
-
- /* PCI express capability */
- u32 pcie_sltcap;
- u16 pcie_devctl;
- u16 pcie_rtctl;
-};
-
struct mvebu_pcie_port;
/* Structure representing all PCIe interfaces */
@@ -153,7 +99,7 @@ struct mvebu_pcie_port {
struct clk *clk;
struct gpio_desc *reset_gpio;
char *reset_name;
- struct mvebu_sw_pci_bridge bridge;
+ struct pci_bridge_emul bridge;
struct device_node *dn;
struct mvebu_pcie *pcie;
struct mvebu_pcie_window memwin;
@@ -415,11 +361,12 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new iobase/iolimit values invalid? */
- if (port->bridge.iolimit < port->bridge.iobase ||
- port->bridge.iolimitupper < port->bridge.iobaseupper ||
- !(port->bridge.command & PCI_COMMAND_IO)) {
+ if (conf->iolimit < conf->iobase ||
+ conf->iolimitupper < conf->iobaseupper ||
+ !(conf->command & PCI_COMMAND_IO)) {
mvebu_pcie_set_window(port, port->io_target, port->io_attr,
&desired, &port->iowin);
return;
@@ -438,11 +385,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
* specifications. iobase is the bus address, port->iowin_base
* is the CPU address.
*/
- desired.remap = ((port->bridge.iobase & 0xF0) << 8) |
- (port->bridge.iobaseupper << 16);
+ desired.remap = ((conf->iobase & 0xF0) << 8) |
+ (conf->iobaseupper << 16);
desired.base = port->pcie->io.start + desired.remap;
- desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
- (port->bridge.iolimitupper << 16)) -
+ desired.size = ((0xFFF | ((conf->iolimit & 0xF0) << 8) |
+ (conf->iolimitupper << 16)) -
desired.remap) +
1;
@@ -453,10 +400,11 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
+ struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (port->bridge.memlimit < port->bridge.membase ||
- !(port->bridge.command & PCI_COMMAND_MEMORY)) {
+ if (conf->memlimit < conf->membase ||
+ !(conf->command & PCI_COMMAND_MEMORY)) {
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
&desired, &port->memwin);
return;
@@ -468,130 +416,32 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
* window to setup, according to the PCI-to-PCI bridge
* specifications.
*/
- desired.base = ((port->bridge.membase & 0xFFF0) << 16);
- desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
+ desired.base = ((conf->membase & 0xFFF0) << 16);
+ desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
&port->memwin);
}
-/*
- * Initialize the configuration space of the PCI-to-PCI bridge
- * associated with the given PCIe interface.
- */
-static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
-{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));
-
- bridge->class = PCI_CLASS_BRIDGE_PCI;
- bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
- bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
- bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
- bridge->cache_line_size = 0x10;
-
- /* We support 32 bits I/O addressing */
- bridge->iobase = PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = PCI_IO_RANGE_TYPE_32;
-
- /* Add capabilities */
- bridge->status = PCI_STATUS_CAP_LIST;
-}
-
-/*
- * Read the configuration space of the PCI-to-PCI bridge associated to
- * the given PCIe interface.
- */
-static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 *value)
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
-
- switch (where & ~3) {
- case PCI_VENDOR_ID:
- *value = bridge->device << 16 | bridge->vendor;
- break;
-
- case PCI_COMMAND:
- *value = bridge->command | bridge->status << 16;
- break;
-
- case PCI_CLASS_REVISION:
- *value = bridge->class << 16 | bridge->interface << 8 |
- bridge->revision;
- break;
-
- case PCI_CACHE_LINE_SIZE:
- *value = bridge->bist << 24 | bridge->header_type << 16 |
- bridge->latency_timer << 8 | bridge->cache_line_size;
- break;
-
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
- break;
-
- case PCI_PRIMARY_BUS:
- *value = (bridge->secondary_latency_timer << 24 |
- bridge->subordinate_bus << 16 |
- bridge->secondary_bus << 8 |
- bridge->primary_bus);
- break;
-
- case PCI_IO_BASE:
- if (!mvebu_has_ioport(port))
- *value = bridge->secondary_status << 16;
- else
- *value = (bridge->secondary_status << 16 |
- bridge->iolimit << 8 |
- bridge->iobase);
- break;
-
- case PCI_MEMORY_BASE:
- *value = (bridge->memlimit << 16 | bridge->membase);
- break;
-
- case PCI_PREF_MEMORY_BASE:
- *value = 0;
- break;
-
- case PCI_IO_BASE_UPPER16:
- *value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
- break;
-
- case PCI_CAPABILITY_LIST:
- *value = PCISWCAP;
- break;
-
- case PCI_ROM_ADDRESS1:
- *value = 0;
- break;
+ struct mvebu_pcie_port *port = bridge->data;
- case PCI_INTERRUPT_LINE:
- /* LINE PIN MIN_GNT MAX_LAT */
- *value = 0;
- break;
-
- case PCISWCAP_EXP_LIST_ID:
- /* Set PCIe v2, root port, slot support */
- *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
- break;
-
- case PCISWCAP_EXP_DEVCAP:
+ switch (reg) {
+ case PCI_EXP_DEVCAP:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
break;
- case PCISWCAP_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- *value |= bridge->pcie_devctl;
break;
- case PCISWCAP_EXP_LNKCAP:
+ case PCI_EXP_LNKCAP:
/*
* PCIe requires the clock power management capability to be
* hard-wired to zero for downstream ports
@@ -600,176 +450,140 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
~PCI_EXP_LNKCAP_CLKPM;
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_SLTCAP:
- *value = bridge->pcie_sltcap;
- break;
-
- case PCISWCAP_EXP_SLTCTL:
+ case PCI_EXP_SLTCTL:
*value = PCI_EXP_SLTSTA_PDS << 16;
break;
- case PCISWCAP_EXP_RTCTL:
- *value = bridge->pcie_rtctl;
- break;
-
- case PCISWCAP_EXP_RTSTA:
+ case PCI_EXP_RTSTA:
*value = mvebu_readl(port, PCIE_RC_RTSTA);
break;
- /* PCIe requires the v2 fields to be hard-wired to zero */
- case PCISWCAP_EXP_DEVCAP2:
- case PCISWCAP_EXP_DEVCTL2:
- case PCISWCAP_EXP_LNKCAP2:
- case PCISWCAP_EXP_LNKCTL2:
- case PCISWCAP_EXP_SLTCAP2:
- case PCISWCAP_EXP_SLTCTL2:
default:
- /*
- * PCI defines configuration read accesses to reserved or
- * unimplemented registers to read as zero and complete
- * normally.
- */
- *value = 0;
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
- if (size == 2)
- *value = (*value >> (8 * (where & 3))) & 0xffff;
- else if (size == 1)
- *value = (*value >> (8 * (where & 3))) & 0xff;
-
- return PCIBIOS_SUCCESSFUL;
+ return PCI_BRIDGE_EMUL_HANDLED;
}
-/* Write to the PCI-to-PCI bridge configuration space */
-static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
- unsigned int where, int size, u32 value)
+static void
+mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
{
- struct mvebu_sw_pci_bridge *bridge = &port->bridge;
- u32 mask, reg;
- int err;
-
- if (size == 4)
- mask = 0x0;
- else if (size == 2)
- mask = ~(0xffff << ((where & 3) * 8));
- else if (size == 1)
- mask = ~(0xff << ((where & 3) * 8));
- else
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
- if (err)
- return err;
+ struct mvebu_pcie_port *port = bridge->data;
+ struct pci_bridge_emul_conf *conf = &bridge->conf;
- value = (reg & mask) | value << ((where & 3) * 8);
-
- switch (where & ~3) {
+ switch (reg) {
case PCI_COMMAND:
{
- u32 old = bridge->command;
-
if (!mvebu_has_ioport(port))
- value &= ~PCI_COMMAND_IO;
+ conf->command &= ~PCI_COMMAND_IO;
- bridge->command = value & 0xffff;
- if ((old ^ bridge->command) & PCI_COMMAND_IO)
+ if ((old ^ new) & PCI_COMMAND_IO)
mvebu_pcie_handle_iobase_change(port);
- if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
+ if ((old ^ new) & PCI_COMMAND_MEMORY)
mvebu_pcie_handle_membase_change(port);
- break;
- }
- case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
- bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
break;
+ }
case PCI_IO_BASE:
/*
- * We also keep bit 1 set, it is a read-only bit that
+	 * We keep bit 1 set; it is a read-only bit that
* indicates we support 32 bits addressing for the
* I/O
*/
- bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
- bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
+ conf->iobase |= PCI_IO_RANGE_TYPE_32;
+ conf->iolimit |= PCI_IO_RANGE_TYPE_32;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_MEMORY_BASE:
- bridge->membase = value & 0xffff;
- bridge->memlimit = value >> 16;
mvebu_pcie_handle_membase_change(port);
break;
case PCI_IO_BASE_UPPER16:
- bridge->iobaseupper = value & 0xffff;
- bridge->iolimitupper = value >> 16;
mvebu_pcie_handle_iobase_change(port);
break;
case PCI_PRIMARY_BUS:
- bridge->primary_bus = value & 0xff;
- bridge->secondary_bus = (value >> 8) & 0xff;
- bridge->subordinate_bus = (value >> 16) & 0xff;
- bridge->secondary_latency_timer = (value >> 24) & 0xff;
- mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ default:
break;
+ }
+}
+
+static void
+mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+ int reg, u32 old, u32 new, u32 mask)
+{
+ struct mvebu_pcie_port *port = bridge->data;
- case PCISWCAP_EXP_DEVCTL:
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
/*
* Armada370 data says these bits must always
* be zero when in root complex mode.
*/
- value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
-
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the device control register, rather than clearing the
- * RW1C bits in the device status register. Mask out the
- * status register bits.
- */
- if (mask == 0xffff0000)
- value &= 0xffff;
+ new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
- case PCISWCAP_EXP_LNKCTL:
+ case PCI_EXP_LNKCTL:
/*
* If we don't support CLKREQ, we must ensure that the
* CLKREQ enable bit always reads zero. Since we haven't
* had this capability, and it's dependent on board wiring,
* disable it for the time being.
*/
- value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
+ new &= ~PCI_EXP_LNKCTL_CLKREQ_EN;
- /*
- * If the mask is 0xffff0000, then we only want to write
- * the link control register, rather than clearing the
- * RW1C bits in the link status register. Mask out the
- * RW1C status register bits.
- */
- if (mask == 0xffff0000)
- value &= ~((PCI_EXP_LNKSTA_LABS |
- PCI_EXP_LNKSTA_LBMS) << 16);
-
- mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
break;
- case PCISWCAP_EXP_RTSTA:
- mvebu_writel(port, value, PCIE_RC_RTSTA);
+ case PCI_EXP_RTSTA:
+ mvebu_writel(port, new, PCIE_RC_RTSTA);
break;
+ }
+}
- default:
- break;
+struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .write_base = mvebu_pci_bridge_emul_base_conf_write,
+ .read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
+ .write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
+};
+
+/*
+ * Initialize the configuration space of the PCI-to-PCI bridge
+ * associated with the given PCIe interface.
+ */
+static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+{
+ struct pci_bridge_emul *bridge = &port->bridge;
+
+ bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
+ bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->conf.class_revision =
+ mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
+
+ if (mvebu_has_ioport(port)) {
+		/* We support 32-bit I/O addressing */
+ bridge->conf.iobase = PCI_IO_RANGE_TYPE_32;
+ bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
}
- return PCIBIOS_SUCCESSFUL;
+ bridge->has_pcie = true;
+ bridge->data = port;
+ bridge->ops = &mvebu_pci_bridge_emul_ops;
+
+ pci_bridge_emul_init(bridge);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -789,8 +603,8 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
if (bus->number == 0 && port->devfn == devfn)
return port;
if (bus->number != 0 &&
- bus->number >= port->bridge.secondary_bus &&
- bus->number <= port->bridge.subordinate_bus)
+ bus->number >= port->bridge.conf.secondary_bus &&
+ bus->number <= port->bridge.conf.subordinate_bus)
return port;
}
@@ -811,7 +625,8 @@ static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_write(port, where, size, val);
+ return pci_bridge_emul_conf_write(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
@@ -839,7 +654,8 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
- return mvebu_sw_pci_bridge_read(port, where, size, val);
+ return pci_bridge_emul_conf_read(&port->bridge, where,
+ size, val);
if (!mvebu_pcie_link_up(port)) {
*val = 0xffffffff;
@@ -1297,7 +1113,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
mvebu_pcie_setup_hw(port);
mvebu_pcie_set_local_dev_nr(port, 1);
- mvebu_sw_pci_bridge_init(port);
+ mvebu_pci_bridge_emul_init(port);
}
pcie->nports = i;
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c
index 9e87dd7f9ac3..c3a088910f48 100644
--- a/drivers/pci/controller/pcie-cadence-ep.c
+++ b/drivers/pci/controller/pcie-cadence-ep.c
@@ -258,7 +258,6 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
u8 intx, bool is_asserted)
{
struct cdns_pcie *pcie = &ep->pcie;
- u32 r = ep->max_regions - 1;
u32 offset;
u16 status;
u8 msg_code;
@@ -268,8 +267,8 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0,
ep->irq_phys_addr);
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY;
ep->irq_pci_fn = fn;
@@ -347,8 +346,8 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
/* Set the outbound region if needed. */
if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) ||
ep->irq_pci_fn != fn)) {
- /* Last region was reserved for IRQ writes. */
- cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1,
+ /* First region was reserved for IRQ writes. */
+ cdns_pcie_set_outbound_region(pcie, fn, 0,
false,
ep->irq_phys_addr,
pci_addr & ~pci_addr_mask,
@@ -356,7 +355,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn,
ep->irq_pci_addr = (pci_addr & ~pci_addr_mask);
ep->irq_pci_fn = fn;
}
- writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
+ writel(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask));
return 0;
}
@@ -517,6 +516,8 @@ static int cdns_pcie_ep_probe(struct platform_device *pdev)
goto free_epc_mem;
}
ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE;
+ /* Reserve region 0 for IRQs */
+ set_bit(0, &ep->ob_region_map);
return 0;
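[Reserving region 0 works because the endpoint core hands out outbound windows from ob_region_map with find_first_zero_bit(); a bit set at probe time is never allocated for data mappings. The mechanism in isolation, with illustrative names:

	#include <linux/bitops.h>
	#include <linux/errno.h>

	static unsigned long my_ob_region_map;	/* one bit per outbound window */

	/* Sketch: bitmap-based window allocator. */
	static int my_alloc_ob_region(int max_regions)
	{
		int r = find_first_zero_bit(&my_ob_region_map, max_regions);

		if (r >= max_regions)
			return -EBUSY;
		set_bit(r, &my_ob_region_map);
		return r;
	}

	static void my_ep_setup(void)
	{
		/* Keep region 0 for IRQ writes; data mappings start at 1. */
		set_bit(0, &my_ob_region_map);
	}
]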
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c
index ec394f6a19c8..97e251090b4f 100644
--- a/drivers/pci/controller/pcie-cadence-host.c
+++ b/drivers/pci/controller/pcie-cadence-host.c
@@ -235,7 +235,6 @@ static int cdns_pcie_host_init(struct device *dev,
static int cdns_pcie_host_probe(struct platform_device *pdev)
{
- const char *type;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct pci_host_bridge *bridge;
@@ -268,12 +267,6 @@ static int cdns_pcie_host_probe(struct platform_device *pdev)
rc->device_id = 0xffff;
of_property_read_u16(np, "device-id", &rc->device_id);
- type = of_get_property(np, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg");
pcie->reg_base = devm_ioremap_resource(dev, res);
if (IS_ERR(pcie->reg_base)) {
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c
index 975bcdd6b5c0..cd795f6fc1e2 100644
--- a/drivers/pci/controller/pcie-cadence.c
+++ b/drivers/pci/controller/pcie-cadence.c
@@ -190,14 +190,16 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
for (i = 0; i < phy_count; i++) {
of_property_read_string_index(np, "phy-names", i, &name);
- phy[i] = devm_phy_optional_get(dev, name);
- if (IS_ERR(phy))
- return PTR_ERR(phy);
-
+ phy[i] = devm_phy_get(dev, name);
+ if (IS_ERR(phy[i])) {
+ ret = PTR_ERR(phy[i]);
+ goto err_phy;
+ }
link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS);
if (!link[i]) {
+ devm_phy_put(dev, phy[i]);
ret = -EINVAL;
- goto err_link;
+ goto err_phy;
}
}
@@ -207,13 +209,15 @@ int cdns_pcie_init_phy(struct device *dev, struct cdns_pcie *pcie)
ret = cdns_pcie_enable_phy(pcie);
if (ret)
- goto err_link;
+ goto err_phy;
return 0;
-err_link:
- while (--i >= 0)
+err_phy:
+ while (--i >= 0) {
device_link_del(link[i]);
+ devm_phy_put(dev, phy[i]);
+ }
return ret;
}
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 3160e9342a2f..c20fd6bd68fd 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -630,14 +630,6 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
return (pcie->base + offset);
}
- /*
- * PAXC is connected to an internally emulated EP within the SoC. It
- * allows only one device.
- */
- if (pcie->ep_is_internal)
- if (slot > 0)
- return NULL;
-
return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
}
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 861dda69f366..d069a76cbb95 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
@@ -162,6 +163,7 @@ struct mtk_pcie_soc {
* @phy: pointer to PHY control block
* @lane: lane count
* @slot: port slot
+ * @irq: GIC irq
* @irq_domain: legacy INTx IRQ domain
* @inner_domain: inner IRQ domain
* @msi_domain: MSI IRQ domain
@@ -182,6 +184,7 @@ struct mtk_pcie_port {
struct phy *phy;
u32 lane;
u32 slot;
+ int irq;
struct irq_domain *irq_domain;
struct irq_domain *inner_domain;
struct irq_domain *msi_domain;
@@ -225,10 +228,8 @@ static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie)
clk_disable_unprepare(pcie->free_ck);
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
}
static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -337,6 +338,17 @@ static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus,
{
struct mtk_pcie *pcie = bus->sysdata;
struct mtk_pcie_port *port;
+ struct pci_dev *dev = NULL;
+
+ /*
+ * Walk the bus hierarchy to get the devfn value
+ * of the port in the root bus.
+ */
+ while (bus && bus->number) {
+ dev = bus->self;
+ bus = dev->bus;
+ devfn = dev->devfn;
+ }
list_for_each_entry(port, &pcie->ports, list)
if (port->slot == PCI_SLOT(devfn))
@@ -383,75 +395,6 @@ static struct pci_ops mtk_pcie_ops_v2 = {
.write = mtk_pcie_config_write,
};
-static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
-{
- struct mtk_pcie *pcie = port->pcie;
- struct resource *mem = &pcie->mem;
- const struct mtk_pcie_soc *soc = port->pcie->soc;
- u32 val;
- size_t size;
- int err;
-
- /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
- if (pcie->base) {
- val = readl(pcie->base + PCIE_SYS_CFG_V2);
- val |= PCIE_CSR_LTSSM_EN(port->slot) |
- PCIE_CSR_ASPM_L1_EN(port->slot);
- writel(val, pcie->base + PCIE_SYS_CFG_V2);
- }
-
- /* Assert all reset signals */
- writel(0, port->base + PCIE_RST_CTRL);
-
- /*
- * Enable PCIe link down reset, if link status changed from link up to
- * link down, this will reset MAC control registers and configuration
- * space.
- */
- writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
-
- /* De-assert PHY, PE, PIPE, MAC and configuration reset */
- val = readl(port->base + PCIE_RST_CTRL);
- val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
- PCIE_MAC_SRSTB | PCIE_CRSTB;
- writel(val, port->base + PCIE_RST_CTRL);
-
- /* Set up vendor ID and class code */
- if (soc->need_fix_class_id) {
- val = PCI_VENDOR_ID_MEDIATEK;
- writew(val, port->base + PCIE_CONF_VEND_ID);
-
- val = PCI_CLASS_BRIDGE_HOST;
- writew(val, port->base + PCIE_CONF_CLASS_ID);
- }
-
- /* 100ms timeout value should be enough for Gen1/2 training */
- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
- !!(val & PCIE_PORT_LINKUP_V2), 20,
- 100 * USEC_PER_MSEC);
- if (err)
- return -ETIMEDOUT;
-
- /* Set INTx mask */
- val = readl(port->base + PCIE_INT_MASK);
- val &= ~INTX_MASK;
- writel(val, port->base + PCIE_INT_MASK);
-
- /* Set AHB to PCIe translation windows */
- size = mem->end - mem->start;
- val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
-
- val = upper_32_bits(mem->start);
- writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
-
- /* Set PCIe to AXI translation memory space.*/
- val = fls(0xffffffff) | WIN_ENABLE;
- writel(val, port->base + PCIE_AXI_WINDOW0);
-
- return 0;
-}
-
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
@@ -590,6 +533,27 @@ static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
writel(val, port->base + PCIE_INT_MASK);
}
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+ struct mtk_pcie_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+ irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+ if (port->irq_domain)
+ irq_domain_remove(port->irq_domain);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI)) {
+ if (port->msi_domain)
+ irq_domain_remove(port->msi_domain);
+ if (port->inner_domain)
+ irq_domain_remove(port->inner_domain);
+ }
+
+ irq_dispose_mapping(port->irq);
+ }
+}
+
static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
irq_hw_number_t hwirq)
{
@@ -628,8 +592,6 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port,
ret = mtk_pcie_allocate_msi_domains(port);
if (ret)
return ret;
-
- mtk_pcie_enable_msi(port);
}
return 0;
@@ -682,7 +644,7 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
struct mtk_pcie *pcie = port->pcie;
struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
- int err, irq;
+ int err;
err = mtk_pcie_init_irq_domain(port, node);
if (err) {
@@ -690,8 +652,81 @@ static int mtk_pcie_setup_irq(struct mtk_pcie_port *port,
return err;
}
- irq = platform_get_irq(pdev, port->slot);
- irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+ port->irq = platform_get_irq(pdev, port->slot);
+ irq_set_chained_handler_and_data(port->irq,
+ mtk_pcie_intr_handler, port);
+
+ return 0;
+}
+
+static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
+{
+ struct mtk_pcie *pcie = port->pcie;
+ struct resource *mem = &pcie->mem;
+ const struct mtk_pcie_soc *soc = port->pcie->soc;
+ u32 val;
+ size_t size;
+ int err;
+
+ /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
+ if (pcie->base) {
+ val = readl(pcie->base + PCIE_SYS_CFG_V2);
+ val |= PCIE_CSR_LTSSM_EN(port->slot) |
+ PCIE_CSR_ASPM_L1_EN(port->slot);
+ writel(val, pcie->base + PCIE_SYS_CFG_V2);
+ }
+
+ /* Assert all reset signals */
+ writel(0, port->base + PCIE_RST_CTRL);
+
+ /*
+	 * Enable PCIe link down reset: if the link status changes from link up
+	 * to link down, this will reset MAC control registers and configuration
+ * space.
+ */
+ writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+
+ /* De-assert PHY, PE, PIPE, MAC and configuration reset */
+ val = readl(port->base + PCIE_RST_CTRL);
+ val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
+ PCIE_MAC_SRSTB | PCIE_CRSTB;
+ writel(val, port->base + PCIE_RST_CTRL);
+
+ /* Set up vendor ID and class code */
+ if (soc->need_fix_class_id) {
+ val = PCI_VENDOR_ID_MEDIATEK;
+ writew(val, port->base + PCIE_CONF_VEND_ID);
+
+ val = PCI_CLASS_BRIDGE_PCI;
+ writew(val, port->base + PCIE_CONF_CLASS_ID);
+ }
+
+ /* 100ms timeout value should be enough for Gen1/2 training */
+ err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val,
+ !!(val & PCIE_PORT_LINKUP_V2), 20,
+ 100 * USEC_PER_MSEC);
+ if (err)
+ return -ETIMEDOUT;
+
+ /* Set INTx mask */
+ val = readl(port->base + PCIE_INT_MASK);
+ val &= ~INTX_MASK;
+ writel(val, port->base + PCIE_INT_MASK);
+
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ mtk_pcie_enable_msi(port);
+
+ /* Set AHB to PCIe translation windows */
+ size = mem->end - mem->start;
+ val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
+
+ val = upper_32_bits(mem->start);
+ writel(val, port->base + PCIE_AHB_TRANS_BASE0_H);
+
+	/* Set PCIe to AXI translation memory space. */
+ val = fls(0xffffffff) | WIN_ENABLE;
+ writel(val, port->base + PCIE_AXI_WINDOW0);
return 0;
}
@@ -987,10 +1022,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
pcie->free_ck = NULL;
}
- if (dev->pm_domain) {
- pm_runtime_enable(dev);
- pm_runtime_get_sync(dev);
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/* enable top level clock */
err = clk_prepare_enable(pcie->free_ck);
@@ -1002,10 +1035,8 @@ static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie)
return 0;
err_free_ck:
- if (dev->pm_domain) {
- pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
- }
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return err;
}
@@ -1109,36 +1140,10 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
if (err < 0)
return err;
- devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
-
- return 0;
-}
-
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
- struct mtk_pcie *pcie = pci_host_bridge_priv(host);
- struct pci_bus *child;
- int err;
-
- host->busnr = pcie->busn.start;
- host->dev.parent = pcie->dev;
- host->ops = pcie->soc->ops;
- host->map_irq = of_irq_parse_and_map_pci;
- host->swizzle_irq = pci_common_swizzle;
- host->sysdata = pcie;
-
- err = pci_scan_root_bus_bridge(host);
- if (err < 0)
+ err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
+ if (err)
return err;
- pci_bus_size_bridges(host->bus);
- pci_bus_assign_resources(host->bus);
-
- list_for_each_entry(child, &host->bus->children, node)
- pcie_bus_configure_settings(child);
-
- pci_bus_add_devices(host->bus);
-
return 0;
}
@@ -1168,7 +1173,14 @@ static int mtk_pcie_probe(struct platform_device *pdev)
if (err)
goto put_resources;
- err = mtk_pcie_register_host(host);
+ host->busnr = pcie->busn.start;
+ host->dev.parent = pcie->dev;
+ host->ops = pcie->soc->ops;
+ host->map_irq = of_irq_parse_and_map_pci;
+ host->swizzle_irq = pci_common_swizzle;
+ host->sysdata = pcie;
+
+ err = pci_host_probe(host);
if (err)
goto put_resources;
@@ -1181,6 +1193,80 @@ put_resources:
return err;
}
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+ struct list_head *windows = &host->windows;
+
+ pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+ struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+ pci_stop_root_bus(host->bus);
+ pci_remove_root_bus(host->bus);
+ mtk_pcie_free_resources(pcie);
+
+ mtk_pcie_irq_teardown(pcie);
+
+ mtk_pcie_put_resources(pcie);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ list_for_each_entry(port, &pcie->ports, list) {
+ clk_disable_unprepare(port->pipe_ck);
+ clk_disable_unprepare(port->obff_ck);
+ clk_disable_unprepare(port->axi_ck);
+ clk_disable_unprepare(port->aux_ck);
+ clk_disable_unprepare(port->ahb_ck);
+ clk_disable_unprepare(port->sys_ck);
+ phy_power_off(port->phy);
+ phy_exit(port->phy);
+ }
+
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+ struct mtk_pcie *pcie = dev_get_drvdata(dev);
+ struct mtk_pcie_port *port, *tmp;
+
+ if (list_empty(&pcie->ports))
+ return 0;
+
+ clk_prepare_enable(pcie->free_ck);
+
+ list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+ mtk_pcie_enable_port(port);
+
+	/* In case the EP was removed during system suspend. */
+ if (list_empty(&pcie->ports))
+ clk_disable_unprepare(pcie->free_ck);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+ mtk_pcie_resume_noirq)
+};
+
static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
.ops = &mtk_pcie_ops,
.startup = mtk_pcie_startup_port,
@@ -1209,10 +1295,13 @@ static const struct of_device_id mtk_pcie_ids[] = {
static struct platform_driver mtk_pcie_driver = {
.probe = mtk_pcie_probe,
+ .remove = mtk_pcie_remove,
.driver = {
.name = "mtk-pcie",
.of_match_table = mtk_pcie_ids,
.suppress_bind_attrs = true,
+ .pm = &mtk_pcie_pm_ops,
},
};
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
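[mtk_pcie_register_host() can go away because pci_host_probe() performs the same sequence: scan the root bus, size and assign bridge resources, configure child buses, and add the devices. All a host driver has to do now is fill in the bridge and call it. A sketch, where my_pcie_ops stands in for the driver's real pci_ops:

	#include <linux/of_pci.h>
	#include <linux/pci.h>

	static struct pci_ops my_pcie_ops;	/* hypothetical config accessors */

	/* Sketch: host registration via pci_host_probe(). */
	static int my_register_host(struct pci_host_bridge *host,
				    struct device *parent, void *sysdata)
	{
		host->dev.parent = parent;
		host->ops = &my_pcie_ops;
		host->map_irq = of_irq_parse_and_map_pci;
		host->swizzle_irq = pci_common_swizzle;
		host->sysdata = sysdata;

		return pci_host_probe(host);	/* scan, assign, add devices */
	}
]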
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a939e8d31735..77052a0712d0 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -301,13 +301,6 @@ static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie)
struct platform_device *pdev = pcie->pdev;
struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
/* map config resource */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index fb32840ce8e6..81538d77f790 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -777,16 +777,7 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie,
struct platform_device *pdev)
{
struct device *dev = pcie->dev;
- struct device_node *node = dev->of_node;
struct resource *res;
- const char *type;
-
- /* Check for device type */
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg");
pcie->breg_base = devm_ioremap_resource(dev, res);
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index 7b1389d8e2a5..9bd1a35cd5d8 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -574,15 +574,8 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
struct resource regs;
- const char *type;
int err;
- type = of_get_property(node, "device_type", NULL);
- if (!type || strcmp(type, "pci")) {
- dev_err(dev, "invalid \"device_type\" %s\n", type);
- return -EINVAL;
- }
-
err = of_address_to_resource(node, 0, &regs);
if (err) {
dev_err(dev, "missing \"reg\" property\n");
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index f31ed62d518c..e50b0b5815ff 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -809,12 +809,12 @@ static void vmd_remove(struct pci_dev *dev)
{
struct vmd_dev *vmd = pci_get_drvdata(dev);
- vmd_detach_resources(vmd);
sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
pci_stop_root_bus(vmd->bus);
pci_remove_root_bus(vmd->bus);
vmd_cleanup_srcu(vmd);
vmd_teardown_dma_ops(vmd);
+ vmd_detach_resources(vmd);
irq_domain_remove(vmd->irq_domain);
}
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
new file mode 100644
index 000000000000..a32070be5adf
--- /dev/null
+++ b/drivers/pci/hotplug/TODO
@@ -0,0 +1,74 @@
+Contributions are solicited in particular to remedy the following issues:
+
+cpcihp:
+
+* There are no implementations of the ->hardware_test, ->get_power and
+ ->set_power callbacks in struct cpci_hp_controller_ops. Why were they
+ introduced? Can they be removed from the struct?
+
+cpqphp:
+
+* The driver spawns a kthread cpqhp_event_thread() which is woken by the
+ hardirq handler cpqhp_ctrl_intr(). Convert this to threaded IRQ handling.
+  The kthread is also woken from the timer pushbutton_helper_thread();
+ convert it to call irq_wake_thread(). Use pciehp as a template.
+
+* A large portion of cpqphp_ctrl.c and cpqphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+ibmphp:
+
+* Implementations of hotplug_slot_ops callbacks such as get_adapter_present()
+ in ibmphp_core.c create a copy of the struct slot on the stack, then perform
+  the actual operation on that copy. Determine whether this overhead is
+  necessary and delete it if not. The functions also perform a NULL pointer
+  check on the struct hotplug_slot; this seems superfluous.
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See get_max_bus_speed() for an
+ example. Either the pci_slot member should no longer be declared private
+ or ibmphp should store a pointer to its bus in struct slot. Probably the
+ former.
+
+* The functions get_max_adapter_speed() and get_bus_name() are commented out.
+ Can they be deleted? There are also forward declarations at the top of
+ ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise
+ commented out.
+
+* ibmphp_init_devno() takes a struct slot **; it could instead take a
+ struct slot *.
+
+* The return value of pci_hp_register() is not checked.
+
+* iounmap(io_mem) is called in the error path of ebda_rsrc_controller()
+ and once more in the error path of its caller ibmphp_access_ebda().
+
+* The various slot data structures are difficult to follow and need to be
+  simplified. A lot of functions are too large and too complex; they need
+ to be broken up into smaller, manageable pieces. Negative examples are
+ ebda_rsrc_controller() and configure_bridge().
+
+* A large portion of ibmphp_res.c and ibmphp_pci.c concerns resource
+ management. Doesn't this duplicate functionality in the core?
+
+sgi_hotplug:
+
+* Several functions access the pci_slot member in struct hotplug_slot even
+ though pci_hotplug.h declares it private. See sn_hp_destroy() for an
+ example. Either the pci_slot member should no longer be declared private
+ or sgi_hotplug should store a pointer to it in struct slot. Probably the
+ former.
+
+shpchp:
+
+* There is only a single implementation of struct hpc_ops. Can the struct be
+ removed and its functions invoked directly? This has already been done in
+ pciehp with commit 82a9e79ef132 ("PCI: pciehp: remove hpc_ops"). Clarify
+ if there was a specific reason not to apply the same change to shpchp.
+
+* The ->get_mode1_ECC_cap callback in shpchp_hpc_ops is never invoked.
+ Why was it introduced? Can it be removed?
+
+* The hardirq handler shpc_isr() queues events on a workqueue. It can be
+ simplified by converting it to threaded IRQ handling. Use pciehp as a
+ template.
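[For the cpqphp and shpchp items above, converting to threaded IRQ handling in the pciehp style means the hardirq handler only acknowledges the hardware and returns IRQ_WAKE_THREAD, while the sleeping slot work runs in the thread handler registered via request_threaded_irq(). A minimal sketch of the shape, with every my_* name hypothetical:

	#include <linux/interrupt.h>
	#include <linux/io.h>

	struct my_ctrl { void __iomem *regs; };	/* hypothetical state */

	static bool my_irq_pending(struct my_ctrl *ctrl) { return true; }  /* stub */
	static void my_ack_irq(struct my_ctrl *ctrl) { }		   /* stub */
	static void my_handle_slot_events(struct my_ctrl *ctrl) { }	   /* stub */

	static irqreturn_t my_hp_isr(int irq, void *data)
	{
		struct my_ctrl *ctrl = data;

		if (!my_irq_pending(ctrl))
			return IRQ_NONE;

		my_ack_irq(ctrl);	/* quiesce the hardware only */
		return IRQ_WAKE_THREAD;	/* defer the slow slot work */
	}

	static irqreturn_t my_hp_thread(int irq, void *data)
	{
		struct my_ctrl *ctrl = data;

		my_handle_slot_events(ctrl);	/* may sleep here */
		return IRQ_HANDLED;
	}

	/* Registration:
	 *	ret = request_threaded_irq(irq, my_hp_isr, my_hp_thread,
	 *				   IRQF_SHARED, "my-hotplug", ctrl);
	 */
]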
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index e438a2d734f2..cf3058404f41 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -33,15 +33,19 @@ struct acpiphp_slot;
* struct slot - slot information for each *physical* slot
*/
struct slot {
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct acpiphp_slot *acpi_slot;
- struct hotplug_slot_info info;
unsigned int sun; /* ACPI _SUN (Slot User Number) value */
};
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
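[The hotplug conversions in this series all follow the same shape: struct hotplug_slot is embedded in the driver's slot instead of allocated separately, the ->private pointer disappears, and the driver recovers its slot with container_of(). That removes one allocation, one kfree() and one error path per slot. The pattern on its own, with my_* names illustrative:

	#include <linux/kernel.h>
	#include <linux/pci_hotplug.h>

	struct my_slot {
		struct hotplug_slot hotplug_slot;	/* embedded, not a pointer */
		unsigned int sun;
	};

	static inline struct my_slot *to_my_slot(struct hotplug_slot *hs)
	{
		return container_of(hs, struct my_slot, hotplug_slot);
	}

	static int my_enable_slot(struct hotplug_slot *hs)
	{
		struct my_slot *slot = to_my_slot(hs);

		/* operate directly on slot; no ->private indirection needed */
		pr_debug("enabling slot %u\n", slot->sun);
		return 0;
	}
]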
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ad32ffbc4b91..c9e2bd40c038 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops acpi_hotplug_slot_ops = {
+static const struct hotplug_slot_ops acpi_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -118,7 +118,7 @@ EXPORT_SYMBOL_GPL(acpiphp_unregister_attention);
*/
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -135,7 +135,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
*/
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -179,7 +179,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
*/
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -225,7 +225,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -245,7 +245,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
pr_debug("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -266,39 +266,26 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
if (!slot)
goto error;
- slot->hotplug_slot = kzalloc(sizeof(*slot->hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
-
- slot->hotplug_slot->info = &slot->info;
-
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &acpi_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &acpi_hotplug_slot_ops;
slot->acpi_slot = acpiphp_slot;
- slot->hotplug_slot->info->power_status = acpiphp_get_power_status(slot->acpi_slot);
- slot->hotplug_slot->info->attention_status = 0;
- slot->hotplug_slot->info->latch_status = acpiphp_get_latch_status(slot->acpi_slot);
- slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
acpiphp_slot->slot = slot;
slot->sun = sun;
snprintf(name, SLOT_NAME_SIZE, "%u", sun);
- retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ retval = pci_hp_register(&slot->hotplug_slot, acpiphp_slot->bus,
acpiphp_slot->device, name);
if (retval == -EBUSY)
- goto error_hpslot;
+ goto error_slot;
if (retval) {
pr_err("pci_hp_register failed with error %d\n", retval);
- goto error_hpslot;
+ goto error_slot;
}
pr_info("Slot [%s] registered\n", slot_name(slot));
return 0;
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -312,8 +299,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
pr_info("Slot [%s] unregistered\n", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 41713f16ff97..df48b3b03ab4 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -41,7 +41,7 @@ MODULE_VERSION(DRIVER_VERSION);
#define IBM_HARDWARE_ID1 "IBM37D0"
#define IBM_HARDWARE_ID2 "IBM37D4"
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
+#define hpslot_to_sun(A) (to_slot(A)->sun)
/* union apci_descriptor - allows access to the
* various device descriptors that are embedded in the
diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
index 4658557be01a..f33ff2bca414 100644
--- a/drivers/pci/hotplug/cpci_hotplug.h
+++ b/drivers/pci/hotplug/cpci_hotplug.h
@@ -32,8 +32,10 @@ struct slot {
unsigned int devfn;
struct pci_bus *bus;
struct pci_dev *dev;
+ unsigned int latch_status:1;
+ unsigned int adapter_status:1;
unsigned int extracting;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
};
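
With hotplug_slot_info gone, cpci caches the two values it cannot re-read from hardware as single-bit fields. Condensed sketch of the write/read pairing used in the hunks below (the example_* names are illustrative):

/* event-handler side: record what was observed */
static void example_note_insertion(struct slot *slot)
{
        slot->latch_status = 1;         /* a 1-bit field stores exactly 0 or 1 */
        slot->adapter_status = 1;
}

/* sysfs getter side: report the cached bit on demand */
static int example_get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
        *value = to_slot(hotplug_slot)->latch_status;
        return 0;
}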
@@ -58,7 +60,12 @@ struct cpci_hp_controller {
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
int cpci_hp_register_controller(struct cpci_hp_controller *controller);
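
to_slot() is the idiom that replaces the old hotplug_slot->private back-pointer throughout this patch: container_of() recovers the enclosing structure by subtracting the member's offset. A freestanding sketch of the same arithmetic, assuming only <stddef.h>:

#include <stddef.h>

#define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * to_slot(hs) is my_container_of(hs, struct slot, hotplug_slot); this is
 * valid precisely because hotplug_slot is embedded in struct slot, so no
 * separate ->private pointer has to be kept in sync.
 */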
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index 52a339baf06c..603eadf3d965 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -57,7 +57,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpci_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
@@ -68,29 +68,9 @@ static struct hotplug_slot_ops cpci_hotplug_slot_ops = {
};
static int
-update_latch_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.latch_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
-update_adapter_status(struct hotplug_slot *hotplug_slot, u8 value)
-{
- struct hotplug_slot_info info;
-
- memcpy(&info, hotplug_slot->info, sizeof(struct hotplug_slot_info));
- info.adapter_status = value;
- return pci_hp_change_slot_info(hotplug_slot, &info);
-}
-
-static int
enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -103,7 +83,7 @@ enable_slot(struct hotplug_slot *hotplug_slot)
static int
disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int retval = 0;
dbg("%s - physical_slot = %s", __func__, slot_name(slot));
@@ -135,8 +115,7 @@ disable_slot(struct hotplug_slot *hotplug_slot)
goto disable_error;
}
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
if (slot->extracting) {
slot->extracting = 0;
@@ -160,7 +139,7 @@ cpci_get_power_status(struct slot *slot)
static int
get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_power_status(slot);
return 0;
@@ -169,7 +148,7 @@ get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
*value = cpci_get_attention_status(slot);
return 0;
@@ -178,27 +157,29 @@ get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int
set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- return cpci_set_attention_status(hotplug_slot->private, status);
+ return cpci_set_attention_status(to_slot(hotplug_slot), status);
}
static int
get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->adapter_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->adapter_status;
return 0;
}
static int
get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- *value = hotplug_slot->info->latch_status;
+ struct slot *slot = to_slot(hotplug_slot);
+
+ *value = slot->latch_status;
return 0;
}
static void release_slot(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
pci_dev_put(slot->dev);
kfree(slot);
}
@@ -209,8 +190,6 @@ int
cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int status;
int i;
@@ -229,43 +208,19 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
goto error;
}
- hotplug_slot =
- kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- status = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info) {
- status = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
-
slot->bus = bus;
slot->number = i;
slot->devfn = PCI_DEVFN(i, 0);
snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i);
- hotplug_slot->private = slot;
- hotplug_slot->ops = &cpci_hotplug_slot_ops;
-
- /*
- * Initialize the slot info structure with some known
- * good values.
- */
- dbg("initializing slot %s", name);
- info->power_status = cpci_get_power_status(slot);
- info->attention_status = cpci_get_attention_status(slot);
+ slot->hotplug_slot.ops = &cpci_hotplug_slot_ops;
dbg("registering slot %s", name);
- status = pci_hp_register(slot->hotplug_slot, bus, i, name);
+ status = pci_hp_register(&slot->hotplug_slot, bus, i, name);
if (status) {
err("pci_hp_register failed with error %d", status);
- goto error_info;
+ goto error_slot;
}
dbg("slot registered with name: %s", slot_name(slot));
@@ -276,10 +231,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last)
up_write(&list_rwsem);
}
return 0;
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -305,7 +256,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus)
slots--;
dbg("deregistering slot %s", slot_name(slot));
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
}
@@ -359,10 +310,8 @@ init_slots(int clear_ins)
__func__, slot_name(slot));
dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0));
if (dev) {
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
+ slot->adapter_status = 1;
+ slot->latch_status = 1;
slot->dev = dev;
}
}
@@ -424,11 +373,8 @@ check_slots(void)
dbg("%s - slot %s HS_CSR (2) = %04x",
__func__, slot_name(slot), hs_csr);
- if (update_latch_status(slot->hotplug_slot, 1))
- warn("failure to update latch file");
-
- if (update_adapter_status(slot->hotplug_slot, 1))
- warn("failure to update adapter file");
+ slot->latch_status = 1;
+ slot->adapter_status = 1;
cpci_led_off(slot);
@@ -449,9 +395,7 @@ check_slots(void)
__func__, slot_name(slot), hs_csr);
if (!slot->extracting) {
- if (update_latch_status(slot->hotplug_slot, 0))
- warn("failure to update latch file");
-
+ slot->latch_status = 0;
slot->extracting = 1;
atomic_inc(&extracting);
}
@@ -465,8 +409,7 @@ check_slots(void)
*/
err("card in slot %s was improperly removed",
slot_name(slot));
- if (update_adapter_status(slot->hotplug_slot, 0))
- warn("failure to update adapter file");
+ slot->adapter_status = 0;
slot->extracting = 0;
atomic_dec(&extracting);
}
@@ -615,7 +558,7 @@ cleanup_slots(void)
goto cleanup_null;
list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) {
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
release_slot(slot);
}
cleanup_null:
diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c
index 389b8fb50cd9..2c16adb7f4ec 100644
--- a/drivers/pci/hotplug/cpci_hotplug_pci.c
+++ b/drivers/pci/hotplug/cpci_hotplug_pci.c
@@ -194,8 +194,7 @@ int cpci_led_on(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not set LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not set LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
@@ -223,8 +222,7 @@ int cpci_led_off(struct slot *slot)
slot->devfn,
hs_cap + 2,
hs_csr)) {
- err("Could not clear LOO for slot %s",
- hotplug_slot_name(slot->hotplug_slot));
+ err("Could not clear LOO for slot %s", slot_name(slot));
return -ENODEV;
}
}
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index db78b394a075..77e4e0142fbc 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -260,7 +260,7 @@ struct slot {
u8 hp_slot;
struct controller *ctrl;
void __iomem *p_sm_slot;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
struct pci_resource {
@@ -445,7 +445,12 @@ extern u8 cpqhp_disk_irq;
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
+}
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
/*
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 5a06636e910a..16bbb183695a 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -121,7 +121,6 @@ static int init_SERR(struct controller *ctrl)
{
u32 tempdword;
u32 number_of_slots;
- u8 physical_slot;
if (!ctrl)
return 1;
@@ -131,7 +130,6 @@ static int init_SERR(struct controller *ctrl)
number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
/* Loop through slots */
while (number_of_slots) {
- physical_slot = tempdword;
writeb(0, ctrl->hpc_reg + SLOT_SERR);
tempdword++;
number_of_slots--;
@@ -275,9 +273,7 @@ static int ctrl_slot_cleanup(struct controller *ctrl)
while (old_slot) {
next_slot = old_slot->next;
- pci_hp_deregister(old_slot->hotplug_slot);
- kfree(old_slot->hotplug_slot->info);
- kfree(old_slot->hotplug_slot);
+ pci_hp_deregister(&old_slot->hotplug_slot);
kfree(old_slot);
old_slot = next_slot;
}
@@ -419,7 +415,7 @@ cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -446,7 +442,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
static int process_SI(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -478,7 +474,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot)
static int process_SS(struct hotplug_slot *hotplug_slot)
{
struct pci_func *slot_func;
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
u8 bus;
u8 devfn;
@@ -505,7 +501,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot)
static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -516,7 +512,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -527,7 +523,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -538,7 +534,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -550,7 +546,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct controller *ctrl = slot->ctrl;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
@@ -560,7 +556,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = process_SI,
.disable_slot = process_SS,
@@ -578,8 +574,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
void __iomem *smbios_table)
{
struct slot *slot;
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *hotplug_slot_info;
struct pci_bus *bus = ctrl->pci_bus;
u8 number_of_slots;
u8 slot_device;
@@ -605,22 +599,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
goto error;
}
- slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
- GFP_KERNEL);
- if (!slot->hotplug_slot) {
- result = -ENOMEM;
- goto error_slot;
- }
- hotplug_slot = slot->hotplug_slot;
-
- hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
- GFP_KERNEL);
- if (!hotplug_slot->info) {
- result = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot_info = hotplug_slot->info;
-
slot->ctrl = ctrl;
slot->bus = ctrl->bus;
slot->device = slot_device;
@@ -669,29 +647,20 @@ static int ctrl_slot_setup(struct controller *ctrl,
((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
- hotplug_slot->ops = &cpqphp_hotplug_slot_ops;
-
- hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
- hotplug_slot_info->attention_status =
- cpq_get_attention_status(ctrl, slot);
- hotplug_slot_info->latch_status =
- cpq_get_latch_status(ctrl, slot);
- hotplug_slot_info->adapter_status =
- get_presence_status(ctrl, slot);
+ slot->hotplug_slot.ops = &cpqphp_hotplug_slot_ops;
dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n",
slot->bus, slot->device,
slot->number, ctrl->slot_device_offset,
slot_number);
- result = pci_hp_register(hotplug_slot,
+ result = pci_hp_register(&slot->hotplug_slot,
ctrl->pci_dev->bus,
slot->device,
name);
if (result) {
err("pci_hp_register failed with error %d\n", result);
- goto error_info;
+ goto error_slot;
}
slot->next = ctrl->slot;
@@ -703,10 +672,6 @@ static int ctrl_slot_setup(struct controller *ctrl,
}
return 0;
-error_info:
- kfree(hotplug_slot_info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index 616df442520b..b7f4e1f099d9 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -1130,9 +1130,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_
for (slot = ctrl->slot; slot; slot = slot->next) {
if (slot->device == (hp_slot + ctrl->slot_device_offset))
continue;
- if (!slot->hotplug_slot || !slot->hotplug_slot->info)
- continue;
- if (slot->hotplug_slot->info->adapter_status == 0)
+ if (get_presence_status(ctrl, slot) == 0)
continue;
/* If another adapter is running on the same segment but at a
* lower speed/mode, we allow the new adapter to function at
@@ -1767,24 +1765,6 @@ void cpqhp_event_stop_thread(void)
}
-static int update_slot_info(struct controller *ctrl, struct slot *slot)
-{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = get_slot_enabled(ctrl, slot);
- info->attention_status = cpq_get_attention_status(ctrl, slot);
- info->latch_status = cpq_get_latch_status(ctrl, slot);
- info->adapter_status = get_presence_status(ctrl, slot);
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree(info);
- return result;
-}
-
static void interrupt_event_handler(struct controller *ctrl)
{
int loop = 0;
@@ -1884,9 +1864,6 @@ static void interrupt_event_handler(struct controller *ctrl)
/***********POWER FAULT */
else if (ctrl->event_queue[loop].event_type == INT_POWER_FAULT) {
dbg("power fault\n");
- } else {
- /* refresh notification */
- update_slot_info(ctrl, p_slot);
}
ctrl->event_queue[loop].event_type = 0;
@@ -2057,9 +2034,6 @@ int cpqhp_process_SI(struct controller *ctrl, struct pci_func *func)
if (rc)
dbg("%s: rc = %d\n", __func__, rc);
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
@@ -2125,9 +2099,6 @@ int cpqhp_process_SS(struct controller *ctrl, struct pci_func *func)
rc = 1;
}
- if (p_slot)
- update_slot_info(ctrl, p_slot);
-
return rc;
}
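
Deleting update_slot_info() switches cpqphp from pushing cached values into hotplug_slot_info after every event to answering queries from hardware when asked. The getters earlier in this file already have that shape; a condensed sketch (body abridged from the driver):

static int example_get_adapter_status(struct hotplug_slot *hotplug_slot,
                                      u8 *value)
{
        struct slot *slot = to_slot(hotplug_slot);

        /* read the controller now instead of refreshing a cache */
        *value = get_presence_status(slot->ctrl, slot);
        return 0;
}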
diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h
index fddb78606c74..b89f850c3a4e 100644
--- a/drivers/pci/hotplug/ibmphp.h
+++ b/drivers/pci/hotplug/ibmphp.h
@@ -698,7 +698,7 @@ struct slot {
u8 supported_bus_mode;
u8 flag; /* this is for disable slot and polling */
u8 ctlr_index;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct controller *ctrl;
struct pci_func *func;
u8 irq[4];
@@ -740,7 +740,12 @@ int ibmphp_do_disable_slot(struct slot *slot_cur);
int ibmphp_update_slot_info(struct slot *); /* This function is called from HPC, so we need it to not be static */
int ibmphp_configure_card(struct pci_func *, u8);
int ibmphp_unconfigure_card(struct slot **, int);
-extern struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops ibmphp_hotplug_slot_ops;
+
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
#endif //__IBMPHP_H
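
ibmphp_hotplug_slot_ops can become const, and be shared by every slot from this header, because the patch moves the only mutable members of hotplug_slot_ops (owner and mod_name) into hotplug_slot itself; see the pci_hotplug_core.c hunks below. Sketch of the resulting driver-side idiom (example_* handlers assumed defined elsewhere):

static const struct hotplug_slot_ops example_ops = {
        .enable_slot      = example_enable,
        .disable_slot     = example_disable,
        .get_power_status = example_get_power_status,
};

static void example_init(struct slot *slot)
{
        slot->hotplug_slot.ops = &example_ops;  /* pointer into rodata is fine */
}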
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 4ea57e9019f1..08a58e911fc2 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -247,11 +247,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
break;
}
if (rc == 0) {
- pslot = hotplug_slot->private;
- if (pslot)
- rc = ibmphp_hpc_writeslot(pslot, cmd);
- else
- rc = -ENODEV;
+ pslot = to_slot(hotplug_slot);
+ rc = ibmphp_hpc_writeslot(pslot, cmd);
}
} else
rc = -ENODEV;
@@ -273,19 +270,15 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- rc = ibmphp_hpc_readslot(pslot,
- READ_EXTSLOTSTATUS,
- &(myslot.ext_status));
- if (!rc)
- *value = SLOT_ATTN(myslot.status,
- myslot.ext_status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS,
+ &myslot.ext_status);
+ if (!rc)
+ *value = SLOT_ATTN(myslot.status, myslot.ext_status);
}
ibmphp_unlock_operations();
@@ -303,14 +296,12 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_LATCH(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_LATCH(myslot.status);
}
ibmphp_unlock_operations();
@@ -330,14 +321,12 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc)
- *value = SLOT_PWRGD(myslot.status);
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc)
+ *value = SLOT_PWRGD(myslot.status);
}
ibmphp_unlock_operations();
@@ -357,18 +346,16 @@ static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value)
(ulong) hotplug_slot, (ulong) value);
ibmphp_lock_operations();
if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
- if (!rc) {
- present = SLOT_PRESENT(myslot.status);
- if (present == HPC_SLOT_EMPTY)
- *value = 0;
- else
- *value = 1;
- }
+ pslot = to_slot(hotplug_slot);
+ memcpy(&myslot, pslot, sizeof(struct slot));
+ rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
+ &myslot.status);
+ if (!rc) {
+ present = SLOT_PRESENT(myslot.status);
+ if (present == HPC_SLOT_EMPTY)
+ *value = 0;
+ else
+ *value = 1;
}
}
@@ -382,7 +369,7 @@ static int get_max_bus_speed(struct slot *slot)
int rc = 0;
u8 mode = 0;
enum pci_bus_speed speed;
- struct pci_bus *bus = slot->hotplug_slot->pci_slot->bus;
+ struct pci_bus *bus = slot->hotplug_slot.pci_slot->bus;
debug("%s - Entry slot[%p]\n", __func__, slot);
@@ -582,29 +569,10 @@ static int validate(struct slot *slot_cur, int opn)
****************************************************************************/
int ibmphp_update_slot_info(struct slot *slot_cur)
{
- struct hotplug_slot_info *info;
- struct pci_bus *bus = slot_cur->hotplug_slot->pci_slot->bus;
- int rc;
+ struct pci_bus *bus = slot_cur->hotplug_slot.pci_slot->bus;
u8 bus_speed;
u8 mode;
- info = kmalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- info->power_status = SLOT_PWRGD(slot_cur->status);
- info->attention_status = SLOT_ATTN(slot_cur->status,
- slot_cur->ext_status);
- info->latch_status = SLOT_LATCH(slot_cur->status);
- if (!SLOT_PRESENT(slot_cur->status)) {
- info->adapter_status = 0;
-/* info->max_adapter_speed_status = MAX_ADAPTER_NONE; */
- } else {
- info->adapter_status = 1;
-/* get_max_adapter_speed_1(slot_cur->hotplug_slot,
- &info->max_adapter_speed_status, 0); */
- }
-
bus_speed = slot_cur->bus_on->current_speed;
mode = slot_cur->bus_on->current_bus_mode;
@@ -630,9 +598,7 @@ int ibmphp_update_slot_info(struct slot *slot_cur)
bus->cur_bus_speed = bus_speed;
// To do: bus_names
- rc = pci_hp_change_slot_info(slot_cur->hotplug_slot, info);
- kfree(info);
- return rc;
+ return 0;
}
@@ -673,7 +639,7 @@ static void free_slots(void)
list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head,
ibm_slot_list) {
- pci_hp_del(slot_cur->hotplug_slot);
+ pci_hp_del(&slot_cur->hotplug_slot);
slot_cur->ctrl = NULL;
slot_cur->bus_on = NULL;
@@ -683,9 +649,7 @@ static void free_slots(void)
*/
ibmphp_unconfigure_card(&slot_cur, -1);
- pci_hp_destroy(slot_cur->hotplug_slot);
- kfree(slot_cur->hotplug_slot->info);
- kfree(slot_cur->hotplug_slot);
+ pci_hp_destroy(&slot_cur->hotplug_slot);
kfree(slot_cur);
}
debug("%s -- exit\n", __func__);
@@ -1007,7 +971,7 @@ static int enable_slot(struct hotplug_slot *hs)
ibmphp_lock_operations();
debug("ENABLING SLOT........\n");
- slot_cur = hs->private;
+ slot_cur = to_slot(hs);
rc = validate(slot_cur, ENABLE);
if (rc) {
@@ -1095,8 +1059,7 @@ static int enable_slot(struct hotplug_slot *hs)
slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL);
if (!slot_cur->func) {
- /* We cannot do update_slot_info here, since no memory for
- * kmalloc n.e.ways, and update_slot_info allocates some */
+ /* do update_slot_info here? */
rc = -ENOMEM;
goto error_power;
}
@@ -1169,7 +1132,7 @@ error_power:
**************************************************************/
static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
ibmphp_lock_operations();
@@ -1259,7 +1222,7 @@ error:
goto exit;
}
-struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = ibmphp_disable_slot,
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 6f8e90e3ec08..11a2661dc062 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -666,36 +666,8 @@ static int fillslotinfo(struct hotplug_slot *hotplug_slot)
struct slot *slot;
int rc = 0;
- if (!hotplug_slot || !hotplug_slot->private)
- return -EINVAL;
-
- slot = hotplug_slot->private;
+ slot = to_slot(hotplug_slot);
rc = ibmphp_hpc_readslot(slot, READ_ALLSTAT, NULL);
- if (rc)
- return rc;
-
- // power - enabled:1 not:0
- hotplug_slot->info->power_status = SLOT_POWER(slot->status);
-
- // attention - off:0, on:1, blinking:2
- hotplug_slot->info->attention_status = SLOT_ATTN(slot->status, slot->ext_status);
-
- // latch - open:1 closed:0
- hotplug_slot->info->latch_status = SLOT_LATCH(slot->status);
-
- // pci board - present:1 not:0
- if (SLOT_PRESENT(slot->status))
- hotplug_slot->info->adapter_status = 1;
- else
- hotplug_slot->info->adapter_status = 0;
-/*
- if (slot->bus_on->supported_bus_mode
- && (slot->bus_on->supported_speed == BUS_SPEED_66))
- hotplug_slot->info->max_bus_speed_status = BUS_SPEED_66PCIX;
- else
- hotplug_slot->info->max_bus_speed_status = slot->bus_on->supported_speed;
-*/
-
return rc;
}
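
With the info writes removed, fillslotinfo() is reduced to a hardware readback; a behaviour-equivalent minimal form would be:

static int fillslotinfo(struct hotplug_slot *hotplug_slot)
{
        return ibmphp_hpc_readslot(to_slot(hotplug_slot), READ_ALLSTAT, NULL);
}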
@@ -712,7 +684,6 @@ static int __init ebda_rsrc_controller(void)
u8 ctlr_id, temp, bus_index;
u16 ctlr, slot, bus;
u16 slot_num, bus_num, index;
- struct hotplug_slot *hp_slot_ptr;
struct controller *hpc_ptr;
struct ebda_hpc_bus *bus_ptr;
struct ebda_hpc_slot *slot_ptr;
@@ -771,7 +742,7 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = kzalloc(sizeof(struct bus_info), GFP_KERNEL);
if (!bus_info_ptr1) {
rc = -ENOMEM;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
bus_info_ptr1->slot_min = slot_ptr->slot_num;
bus_info_ptr1->slot_max = slot_ptr->slot_num;
@@ -842,7 +813,7 @@ static int __init ebda_rsrc_controller(void)
(hpc_ptr->u.isa_ctlr.io_end - hpc_ptr->u.isa_ctlr.io_start + 1),
"ibmphp")) {
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
hpc_ptr->irq = readb(io_mem + addr + 4);
addr += 5;
@@ -857,7 +828,7 @@ static int __init ebda_rsrc_controller(void)
break;
default:
rc = -ENODEV;
- goto error_no_hp_slot;
+ goto error_no_slot;
}
//reorganize chassis' linked list
@@ -870,19 +841,6 @@ static int __init ebda_rsrc_controller(void)
// register slots with hpc core as well as create linked list of ibm slot
for (index = 0; index < hpc_ptr->slot_count; index++) {
-
- hp_slot_ptr = kzalloc(sizeof(*hp_slot_ptr), GFP_KERNEL);
- if (!hp_slot_ptr) {
- rc = -ENOMEM;
- goto error_no_hp_slot;
- }
-
- hp_slot_ptr->info = kzalloc(sizeof(struct hotplug_slot_info), GFP_KERNEL);
- if (!hp_slot_ptr->info) {
- rc = -ENOMEM;
- goto error_no_hp_info;
- }
-
tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL);
if (!tmp_slot) {
rc = -ENOMEM;
@@ -909,7 +867,6 @@ static int __init ebda_rsrc_controller(void)
bus_info_ptr1 = ibmphp_find_same_bus_num(hpc_ptr->slots[index].slot_bus_num);
if (!bus_info_ptr1) {
- kfree(tmp_slot);
rc = -ENODEV;
goto error;
}
@@ -919,22 +876,19 @@ static int __init ebda_rsrc_controller(void)
tmp_slot->ctlr_index = hpc_ptr->slots[index].ctl_index;
tmp_slot->number = hpc_ptr->slots[index].slot_num;
- tmp_slot->hotplug_slot = hp_slot_ptr;
-
- hp_slot_ptr->private = tmp_slot;
- rc = fillslotinfo(hp_slot_ptr);
+ rc = fillslotinfo(&tmp_slot->hotplug_slot);
if (rc)
goto error;
- rc = ibmphp_init_devno((struct slot **) &hp_slot_ptr->private);
+ rc = ibmphp_init_devno(&tmp_slot);
if (rc)
goto error;
- hp_slot_ptr->ops = &ibmphp_hotplug_slot_ops;
+ tmp_slot->hotplug_slot.ops = &ibmphp_hotplug_slot_ops;
// end of registering ibm slot with hotplug core
- list_add(&((struct slot *)(hp_slot_ptr->private))->ibm_slot_list, &ibmphp_slot_head);
+ list_add(&tmp_slot->ibm_slot_list, &ibmphp_slot_head);
}
print_bus_info();
@@ -944,7 +898,7 @@ static int __init ebda_rsrc_controller(void)
list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) {
snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot));
- pci_hp_register(tmp_slot->hotplug_slot,
+ pci_hp_register(&tmp_slot->hotplug_slot,
pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name);
}
@@ -953,12 +907,8 @@ static int __init ebda_rsrc_controller(void)
return 0;
error:
- kfree(hp_slot_ptr->private);
+ kfree(tmp_slot);
error_no_slot:
- kfree(hp_slot_ptr->info);
-error_no_hp_info:
- kfree(hp_slot_ptr);
-error_no_hp_slot:
free_ebda_hpc(hpc_ptr);
error_no_hpc:
iounmap(io_mem);
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 90fde5f106d8..5ac31f683b85 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -49,15 +49,13 @@ static DEFINE_MUTEX(pci_hp_mutex);
#define GET_STATUS(name, type) \
static int get_##name(struct hotplug_slot *slot, type *value) \
{ \
- struct hotplug_slot_ops *ops = slot->ops; \
+ const struct hotplug_slot_ops *ops = slot->ops; \
int retval = 0; \
- if (!try_module_get(ops->owner)) \
+ if (!try_module_get(slot->owner)) \
return -ENODEV; \
if (ops->get_##name) \
retval = ops->get_##name(slot, value); \
- else \
- *value = slot->info->name; \
- module_put(ops->owner); \
+ module_put(slot->owner); \
return retval; \
}
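
For name = power_status, the revised GET_STATUS macro expands to roughly the following. The module reference is now pinned through slot->owner rather than through the (now const) ops table, and the fallback to a cached slot->info value is gone because the info structure no longer exists:

static int get_power_status(struct hotplug_slot *slot, u8 *value)
{
        const struct hotplug_slot_ops *ops = slot->ops;
        int retval = 0;

        if (!try_module_get(slot->owner))       /* driver may be unloading */
                return -ENODEV;
        if (ops->get_power_status)
                retval = ops->get_power_status(slot, value);
        module_put(slot->owner);
        return retval;
}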
@@ -90,7 +88,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
power = (u8)(lpower & 0xff);
dbg("power = %d\n", power);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
@@ -109,7 +107,7 @@ static ssize_t power_write_file(struct pci_slot *pci_slot, const char *buf,
err("Illegal value specified for power\n");
retval = -EINVAL;
}
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -138,7 +136,8 @@ static ssize_t attention_read_file(struct pci_slot *pci_slot, char *buf)
static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
size_t count)
{
- struct hotplug_slot_ops *ops = pci_slot->hotplug->ops;
+ struct hotplug_slot *slot = pci_slot->hotplug;
+ const struct hotplug_slot_ops *ops = slot->ops;
unsigned long lattention;
u8 attention;
int retval = 0;
@@ -147,13 +146,13 @@ static ssize_t attention_write_file(struct pci_slot *pci_slot, const char *buf,
attention = (u8)(lattention & 0xff);
dbg(" - attention = %d\n", attention);
- if (!try_module_get(ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (ops->set_attention_status)
- retval = ops->set_attention_status(pci_slot->hotplug, attention);
- module_put(ops->owner);
+ retval = ops->set_attention_status(slot, attention);
+ module_put(slot->owner);
exit:
if (retval)
@@ -213,13 +212,13 @@ static ssize_t test_write_file(struct pci_slot *pci_slot, const char *buf,
test = (u32)(ltest & 0xffffffff);
dbg("test = %d\n", test);
- if (!try_module_get(slot->ops->owner)) {
+ if (!try_module_get(slot->owner)) {
retval = -ENODEV;
goto exit;
}
if (slot->ops->hardware_test)
retval = slot->ops->hardware_test(slot, test);
- module_put(slot->ops->owner);
+ module_put(slot->owner);
exit:
if (retval)
@@ -444,11 +443,11 @@ int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus,
if (slot == NULL)
return -ENODEV;
- if ((slot->info == NULL) || (slot->ops == NULL))
+ if (slot->ops == NULL)
return -EINVAL;
- slot->ops->owner = owner;
- slot->ops->mod_name = mod_name;
+ slot->owner = owner;
+ slot->mod_name = mod_name;
/*
* No problems if we call this interface from both ACPI_PCI_SLOT
@@ -559,28 +558,6 @@ void pci_hp_destroy(struct hotplug_slot *slot)
}
EXPORT_SYMBOL_GPL(pci_hp_destroy);
-/**
- * pci_hp_change_slot_info - changes the slot's information structure in the core
- * @slot: pointer to the slot whose info has changed
- * @info: pointer to the info copy into the slot's info structure
- *
- * @slot must have been registered with the pci
- * hotplug subsystem previously with a call to pci_hp_register().
- *
- * Returns 0 if successful, anything else for an error.
- */
-int pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info)
-{
- if (!slot || !info)
- return -ENODEV;
-
- memcpy(slot->info, info, sizeof(struct hotplug_slot_info));
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(pci_hp_change_slot_info);
-
static int __init pci_hotplug_init(void)
{
int result;
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 811cf83f956d..506e1d923a1f 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -19,7 +19,6 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/delay.h>
-#include <linux/sched/signal.h> /* signal_pending() */
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
@@ -60,71 +59,63 @@ do { \
#define SLOT_NAME_SIZE 10
/**
- * struct slot - PCIe hotplug slot
- * @state: current state machine position
- * @ctrl: pointer to the slot's controller structure
- * @hotplug_slot: pointer to the structure registered with the PCI hotplug core
- * @work: work item to turn the slot on or off after 5 seconds in response to
- * an Attention Button press
- * @lock: protects reads and writes of @state;
- * protects scheduling, execution and cancellation of @work
- */
-struct slot {
- u8 state;
- struct controller *ctrl;
- struct hotplug_slot *hotplug_slot;
- struct delayed_work work;
- struct mutex lock;
-};
-
-/**
* struct controller - PCIe hotplug controller
- * @ctrl_lock: serializes writes to the Slot Control register
* @pcie: pointer to the controller's PCIe port service device
- * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
- * Link Status register and to the Presence Detect State bit in the Slot
- * Status register during a slot reset which may cause them to flap
- * @slot: pointer to the controller's slot structure
- * @queue: wait queue to wake up on reception of a Command Completed event,
- * used for synchronous writes to the Slot Control register
* @slot_cap: cached copy of the Slot Capabilities register
* @slot_ctrl: cached copy of the Slot Control register
- * @poll_thread: thread to poll for slot events if no IRQ is available,
- * enabled with pciehp_poll_mode module parameter
+ * @ctrl_lock: serializes writes to the Slot Control register
* @cmd_started: jiffies when the Slot Control register was last written;
* the next write is allowed 1 second later, absent a Command Completed
* interrupt (PCIe r4.0, sec 6.7.3.2)
* @cmd_busy: flag set on Slot Control register write, cleared by IRQ handler
* on reception of a Command Completed event
- * @link_active_reporting: cached copy of Data Link Layer Link Active Reporting
- * Capable bit in Link Capabilities register; if this bit is zero, the
- * Data Link Layer Link Active bit in the Link Status register will never
- * be set and the driver is thus confined to wait 1 second before assuming
- * the link to a hotplugged device is up and accessing it
+ * @queue: wait queue to wake up on reception of a Command Completed event,
+ * used for synchronous writes to the Slot Control register
+ * @pending_events: used by the IRQ handler to save events retrieved from the
+ * Slot Status register for later consumption by the IRQ thread
* @notification_enabled: whether the IRQ was requested successfully
* @power_fault_detected: whether a power fault was detected by the hardware
* that has not yet been cleared by the user
- * @pending_events: used by the IRQ handler to save events retrieved from the
- * Slot Status register for later consumption by the IRQ thread
+ * @poll_thread: thread to poll for slot events if no IRQ is available,
+ * enabled with pciehp_poll_mode module parameter
+ * @state: current state machine position
+ * @state_lock: protects reads and writes of @state;
+ * protects scheduling, execution and cancellation of @button_work
+ * @button_work: work item to turn the slot on or off after 5 seconds
+ * in response to an Attention Button press
+ * @hotplug_slot: structure registered with the PCI hotplug core
+ * @reset_lock: prevents access to the Data Link Layer Link Active bit in the
+ * Link Status register and to the Presence Detect State bit in the Slot
+ * Status register during a slot reset which may cause them to flap
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
* used for synchronous slot enable/disable request via sysfs
+ *
+ * PCIe hotplug has a 1:1 relationship between controller and slot, hence
+ * unlike other drivers, the two aren't represented by separate structures.
*/
struct controller {
- struct mutex ctrl_lock;
struct pcie_device *pcie;
- struct rw_semaphore reset_lock;
- struct slot *slot;
- wait_queue_head_t queue;
- u32 slot_cap;
- u16 slot_ctrl;
- struct task_struct *poll_thread;
- unsigned long cmd_started; /* jiffies */
+
+ u32 slot_cap; /* capabilities and quirks */
+
+ u16 slot_ctrl; /* control register access */
+ struct mutex ctrl_lock;
+ unsigned long cmd_started;
unsigned int cmd_busy:1;
- unsigned int link_active_reporting:1;
+ wait_queue_head_t queue;
+
+ atomic_t pending_events; /* event handling */
unsigned int notification_enabled:1;
unsigned int power_fault_detected;
- atomic_t pending_events;
+ struct task_struct *poll_thread;
+
+ u8 state; /* state machine */
+ struct mutex state_lock;
+ struct delayed_work button_work;
+
+ struct hotplug_slot hotplug_slot; /* hotplug core interface */
+ struct rw_semaphore reset_lock;
int request_result;
wait_queue_head_t requester;
};
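
The regrouped struct documents three lock domains: ctrl_lock serializes Slot Control register writes, state_lock guards the state machine and button_work, and reset_lock brackets slot resets. A condensed sketch of the state_lock contract as the handlers later in this patch use it (hypothetical helper name):

static void example_cancel_pending_power_on(struct controller *ctrl)
{
        mutex_lock(&ctrl->state_lock);  /* guards state and button_work */
        if (ctrl->state == BLINKINGON_STATE) {
                cancel_delayed_work(&ctrl->button_work);
                ctrl->state = OFF_STATE;
        }
        mutex_unlock(&ctrl->state_lock);
}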
@@ -174,42 +165,50 @@ struct controller {
#define NO_CMD_CMPL(ctrl) ((ctrl)->slot_cap & PCI_EXP_SLTCAP_NCCS)
#define PSN(ctrl) (((ctrl)->slot_cap & PCI_EXP_SLTCAP_PSN) >> 19)
-int pciehp_sysfs_enable_slot(struct slot *slot);
-int pciehp_sysfs_disable_slot(struct slot *slot);
void pciehp_request(struct controller *ctrl, int action);
-void pciehp_handle_button_press(struct slot *slot);
-void pciehp_handle_disable_request(struct slot *slot);
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events);
-int pciehp_configure_device(struct slot *p_slot);
-void pciehp_unconfigure_device(struct slot *p_slot);
+void pciehp_handle_button_press(struct controller *ctrl);
+void pciehp_handle_disable_request(struct controller *ctrl);
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events);
+int pciehp_configure_device(struct controller *ctrl);
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence);
void pciehp_queue_pushbutton_work(struct work_struct *work);
struct controller *pcie_init(struct pcie_device *dev);
int pcie_init_notification(struct controller *ctrl);
void pcie_shutdown_notification(struct controller *ctrl);
void pcie_clear_hotplug_events(struct controller *ctrl);
-int pciehp_power_on_slot(struct slot *slot);
-void pciehp_power_off_slot(struct slot *slot);
-void pciehp_get_power_status(struct slot *slot, u8 *status);
-void pciehp_get_attention_status(struct slot *slot, u8 *status);
-
-void pciehp_set_attention_status(struct slot *slot, u8 status);
-void pciehp_get_latch_status(struct slot *slot, u8 *status);
-void pciehp_get_adapter_status(struct slot *slot, u8 *status);
-int pciehp_query_power_fault(struct slot *slot);
-void pciehp_green_led_on(struct slot *slot);
-void pciehp_green_led_off(struct slot *slot);
-void pciehp_green_led_blink(struct slot *slot);
+void pcie_enable_interrupt(struct controller *ctrl);
+void pcie_disable_interrupt(struct controller *ctrl);
+int pciehp_power_on_slot(struct controller *ctrl);
+void pciehp_power_off_slot(struct controller *ctrl);
+void pciehp_get_power_status(struct controller *ctrl, u8 *status);
+
+void pciehp_set_attention_status(struct controller *ctrl, u8 status);
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status);
+int pciehp_query_power_fault(struct controller *ctrl);
+void pciehp_green_led_on(struct controller *ctrl);
+void pciehp_green_led_off(struct controller *ctrl);
+void pciehp_green_led_blink(struct controller *ctrl);
+bool pciehp_card_present(struct controller *ctrl);
+bool pciehp_card_present_or_link_active(struct controller *ctrl);
int pciehp_check_link_status(struct controller *ctrl);
bool pciehp_check_link_active(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
-int pciehp_reset_slot(struct slot *slot, int probe);
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot);
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe);
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status);
int pciehp_set_raw_indicator_status(struct hotplug_slot *h_slot, u8 status);
int pciehp_get_raw_indicator_status(struct hotplug_slot *h_slot, u8 *status);
-static inline const char *slot_name(struct slot *slot)
+static inline const char *slot_name(struct controller *ctrl)
+{
+ return hotplug_slot_name(&ctrl->hotplug_slot);
+}
+
+static inline struct controller *to_ctrl(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return container_of(hotplug_slot, struct controller, hotplug_slot);
}
#endif /* _PCIEHP_H */
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index ec48c9433ae5..fc5366b50e95 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -23,8 +23,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include "pciehp.h"
-#include <linux/interrupt.h>
-#include <linux/time.h>
#include "../pci.h"
@@ -47,45 +45,30 @@ MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
#define PCIE_MODULE_NAME "pciehp"
static int set_attention_status(struct hotplug_slot *slot, u8 value);
-static int enable_slot(struct hotplug_slot *slot);
-static int disable_slot(struct hotplug_slot *slot);
static int get_power_status(struct hotplug_slot *slot, u8 *value);
-static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static int reset_slot(struct hotplug_slot *slot, int probe);
static int init_slot(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- struct hotplug_slot *hotplug = NULL;
- struct hotplug_slot_info *info = NULL;
- struct hotplug_slot_ops *ops = NULL;
+ struct hotplug_slot_ops *ops;
char name[SLOT_NAME_SIZE];
- int retval = -ENOMEM;
-
- hotplug = kzalloc(sizeof(*hotplug), GFP_KERNEL);
- if (!hotplug)
- goto out;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto out;
+ int retval;
/* Setup hotplug slot ops */
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (!ops)
- goto out;
+ return -ENOMEM;
- ops->enable_slot = enable_slot;
- ops->disable_slot = disable_slot;
+ ops->enable_slot = pciehp_sysfs_enable_slot;
+ ops->disable_slot = pciehp_sysfs_disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
- ops->reset_slot = reset_slot;
+ ops->reset_slot = pciehp_reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
- ops->get_attention_status = get_attention_status;
+ ops->get_attention_status = pciehp_get_attention_status;
ops->set_attention_status = set_attention_status;
} else if (ctrl->pcie->port->hotplug_user_indicators) {
ops->get_attention_status = pciehp_get_raw_indicator_status;
@@ -93,33 +76,24 @@ static int init_slot(struct controller *ctrl)
}
/* register this slot with the hotplug pci core */
- hotplug->info = info;
- hotplug->private = slot;
- hotplug->ops = ops;
- slot->hotplug_slot = hotplug;
+ ctrl->hotplug_slot.ops = ops;
snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl));
- retval = pci_hp_initialize(hotplug,
+ retval = pci_hp_initialize(&ctrl->hotplug_slot,
ctrl->pcie->port->subordinate, 0, name);
- if (retval)
- ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
-out:
if (retval) {
+ ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval);
kfree(ops);
- kfree(info);
- kfree(hotplug);
}
return retval;
}
static void cleanup_slot(struct controller *ctrl)
{
- struct hotplug_slot *hotplug_slot = ctrl->slot->hotplug_slot;
+ struct hotplug_slot *hotplug_slot = &ctrl->hotplug_slot;
pci_hp_destroy(hotplug_slot);
kfree(hotplug_slot->ops);
- kfree(hotplug_slot->info);
- kfree(hotplug_slot);
}
/*
@@ -127,79 +101,48 @@ static void cleanup_slot(struct controller *ctrl)
*/
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_set_attention_status(slot, status);
+ pciehp_set_attention_status(ctrl, status);
pci_config_pm_runtime_put(pdev);
return 0;
}
-
-static int enable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_enable_slot(slot);
-}
-
-
-static int disable_slot(struct hotplug_slot *hotplug_slot)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_sysfs_disable_slot(slot);
-}
-
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_power_status(slot, value);
+ pciehp_get_power_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
-{
- struct slot *slot = hotplug_slot->private;
-
- pciehp_get_attention_status(slot, value);
- return 0;
-}
-
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_latch_status(slot, value);
+ pciehp_get_latch_status(ctrl, value);
pci_config_pm_runtime_put(pdev);
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = slot->ctrl->pcie->port;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl->pcie->port;
pci_config_pm_runtime_get(pdev);
- pciehp_get_adapter_status(slot, value);
+ *value = pciehp_card_present_or_link_active(ctrl);
pci_config_pm_runtime_put(pdev);
return 0;
}
-static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
-{
- struct slot *slot = hotplug_slot->private;
-
- return pciehp_reset_slot(slot, probe);
-}
-
/**
* pciehp_check_presence() - synthesize event if presence has changed
*
@@ -212,20 +155,19 @@ static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
*/
static void pciehp_check_presence(struct controller *ctrl)
{
- struct slot *slot = ctrl->slot;
- u8 occupied;
+ bool occupied;
down_read(&ctrl->reset_lock);
- mutex_lock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
- pciehp_get_adapter_status(slot, &occupied);
- if ((occupied && (slot->state == OFF_STATE ||
- slot->state == BLINKINGON_STATE)) ||
- (!occupied && (slot->state == ON_STATE ||
- slot->state == BLINKINGOFF_STATE)))
+ occupied = pciehp_card_present_or_link_active(ctrl);
+ if ((occupied && (ctrl->state == OFF_STATE ||
+ ctrl->state == BLINKINGON_STATE)) ||
+ (!occupied && (ctrl->state == ON_STATE ||
+ ctrl->state == BLINKINGOFF_STATE)))
pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC);
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
up_read(&ctrl->reset_lock);
}
@@ -233,7 +175,6 @@ static int pciehp_probe(struct pcie_device *dev)
{
int rc;
struct controller *ctrl;
- struct slot *slot;
/* If this is not a "hotplug" service, we have no business here. */
if (dev->service != PCIE_PORT_SERVICE_HP)
@@ -271,8 +212,7 @@ static int pciehp_probe(struct pcie_device *dev)
}
/* Publish to user space */
- slot = ctrl->slot;
- rc = pci_hp_add(slot->hotplug_slot);
+ rc = pci_hp_add(&ctrl->hotplug_slot);
if (rc) {
ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc);
goto err_out_shutdown_notification;
@@ -295,29 +235,43 @@ static void pciehp_remove(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- pci_hp_del(ctrl->slot->hotplug_slot);
+ pci_hp_del(&ctrl->hotplug_slot);
pcie_shutdown_notification(ctrl);
cleanup_slot(ctrl);
pciehp_release_ctrl(ctrl);
}
#ifdef CONFIG_PM
+static bool pme_is_native(struct pcie_device *dev)
+{
+ const struct pci_host_bridge *host;
+
+ host = pci_find_host_bridge(dev->port->bus);
+ return pcie_ports_native || host->native_pme;
+}
+
static int pciehp_suspend(struct pcie_device *dev)
{
+ /*
+ * Disable hotplug interrupt so that it does not trigger
+ * immediately when the downstream link goes down.
+ */
+ if (pme_is_native(dev))
+ pcie_disable_interrupt(get_service_data(dev));
+
return 0;
}
static int pciehp_resume_noirq(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
- struct slot *slot = ctrl->slot;
/* pci_restore_state() just wrote to the Slot Control register */
ctrl->cmd_started = jiffies;
ctrl->cmd_busy = true;
/* clear spurious events from rediscovery of inserted card */
- if (slot->state == ON_STATE || slot->state == BLINKINGOFF_STATE)
+ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE)
pcie_clear_hotplug_events(ctrl);
return 0;
@@ -327,10 +281,29 @@ static int pciehp_resume(struct pcie_device *dev)
{
struct controller *ctrl = get_service_data(dev);
+ if (pme_is_native(dev))
+ pcie_enable_interrupt(ctrl);
+
pciehp_check_presence(ctrl);
return 0;
}
+
+static int pciehp_runtime_resume(struct pcie_device *dev)
+{
+ struct controller *ctrl = get_service_data(dev);
+
+ /* pci_restore_state() just wrote to the Slot Control register */
+ ctrl->cmd_started = jiffies;
+ ctrl->cmd_busy = true;
+
+ /* clear spurious events from rediscovery of inserted card */
+ if ((ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) &&
+ pme_is_native(dev))
+ pcie_clear_hotplug_events(ctrl);
+
+ return pciehp_resume(dev);
+}
#endif /* PM */
static struct pcie_port_service_driver hpdriver_portdrv = {
@@ -345,10 +318,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
.suspend = pciehp_suspend,
.resume_noirq = pciehp_resume_noirq,
.resume = pciehp_resume,
+ .runtime_suspend = pciehp_suspend,
+ .runtime_resume = pciehp_runtime_resume,
#endif /* PM */
};
-static int __init pcied_init(void)
+int __init pcie_hp_init(void)
{
int retval = 0;
@@ -359,4 +334,3 @@ static int __init pcied_init(void)
return retval;
}
-device_initcall(pcied_init);
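
Dropping the device_initcall() means pciehp no longer self-registers at boot; pcie_hp_init() is instead meant to be called explicitly by the PCIe port services driver during its own initialization. The call site is added elsewhere in this series, not in this hunk; a sketch of the assumed wiring:

/* hypothetical excerpt of the port driver's init path */
static int __init pcie_portdrv_init(void)
{
        int rc;

        rc = pcie_hp_init();    /* replaces device_initcall(pcied_init) */
        if (rc)
                return rc;
        /* ... register the port service driver itself ... */
        return 0;
}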
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index da7c72372ffc..3f3df4c29f6e 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -13,24 +13,24 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
-#include "../pci.h"
#include "pciehp.h"
/* The following routines constitute the bulk of the
hotplug controller logic
*/
-static void set_slot_off(struct controller *ctrl, struct slot *pslot)
+#define SAFE_REMOVAL true
+#define SURPRISE_REMOVAL false
+
+static void set_slot_off(struct controller *ctrl)
{
/* turn off slot, turn on Amber LED, turn off Green LED if supported*/
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(pslot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -40,31 +40,30 @@ static void set_slot_off(struct controller *ctrl, struct slot *pslot)
msleep(1000);
}
- pciehp_green_led_off(pslot);
- pciehp_set_attention_status(pslot, 1);
+ pciehp_green_led_off(ctrl);
+ pciehp_set_attention_status(ctrl, 1);
}
/**
* board_added - Called after a board has been added to the system.
- * @p_slot: &slot where board is added
+ * @ctrl: PCIe hotplug controller where board is added
*
* Turns power on for the board.
* Configures board.
*/
-static int board_added(struct slot *p_slot)
+static int board_added(struct controller *ctrl)
{
int retval = 0;
- struct controller *ctrl = p_slot->ctrl;
struct pci_bus *parent = ctrl->pcie->port->subordinate;
if (POWER_CTRL(ctrl)) {
/* Power on slot */
- retval = pciehp_power_on_slot(p_slot);
+ retval = pciehp_power_on_slot(ctrl);
if (retval)
return retval;
}
- pciehp_green_led_blink(p_slot);
+ pciehp_green_led_blink(ctrl);
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
@@ -74,13 +73,13 @@ static int board_added(struct slot *p_slot)
}
/* Check for a power fault */
- if (ctrl->power_fault_detected || pciehp_query_power_fault(p_slot)) {
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(p_slot));
+ if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) {
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
retval = -EIO;
goto err_exit;
}
- retval = pciehp_configure_device(p_slot);
+ retval = pciehp_configure_device(ctrl);
if (retval) {
if (retval != -EEXIST) {
ctrl_err(ctrl, "Cannot add device at %04x:%02x:00\n",
@@ -89,27 +88,26 @@ static int board_added(struct slot *p_slot)
}
}
- pciehp_green_led_on(p_slot);
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_green_led_on(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
return 0;
err_exit:
- set_slot_off(ctrl, p_slot);
+ set_slot_off(ctrl);
return retval;
}
/**
* remove_board - Turns off slot and LEDs
- * @p_slot: slot where board is being removed
+ * @ctrl: PCIe hotplug controller where board is being removed
+ * @safe_removal: whether the board is safely removed (versus surprise removed)
*/
-static void remove_board(struct slot *p_slot)
+static void remove_board(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = p_slot->ctrl;
-
- pciehp_unconfigure_device(p_slot);
+ pciehp_unconfigure_device(ctrl, safe_removal);
if (POWER_CTRL(ctrl)) {
- pciehp_power_off_slot(p_slot);
+ pciehp_power_off_slot(ctrl);
/*
* After turning power off, we must wait for at least 1 second
@@ -120,11 +118,11 @@ static void remove_board(struct slot *p_slot)
}
/* turn off Green LED */
- pciehp_green_led_off(p_slot);
+ pciehp_green_led_off(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot);
-static int pciehp_disable_slot(struct slot *slot);
+static int pciehp_enable_slot(struct controller *ctrl);
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal);
void pciehp_request(struct controller *ctrl, int action)
{
@@ -135,11 +133,11 @@ void pciehp_request(struct controller *ctrl, int action)
void pciehp_queue_pushbutton_work(struct work_struct *work)
{
- struct slot *p_slot = container_of(work, struct slot, work.work);
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = container_of(work, struct controller,
+ button_work.work);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
pciehp_request(ctrl, DISABLE_SLOT);
break;
@@ -149,30 +147,28 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
default:
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_button_press(struct slot *p_slot)
+void pciehp_handle_button_press(struct controller *ctrl)
{
- struct controller *ctrl = p_slot->ctrl;
-
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case OFF_STATE:
case ON_STATE:
- if (p_slot->state == ON_STATE) {
- p_slot->state = BLINKINGOFF_STATE;
+ if (ctrl->state == ON_STATE) {
+ ctrl->state = BLINKINGOFF_STATE;
ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
} else {
- p_slot->state = BLINKINGON_STATE;
+ ctrl->state = BLINKINGON_STATE;
ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
}
/* blink green LED and turn off amber */
- pciehp_green_led_blink(p_slot);
- pciehp_set_attention_status(p_slot, 0);
- schedule_delayed_work(&p_slot->work, 5 * HZ);
+ pciehp_green_led_blink(ctrl);
+ pciehp_set_attention_status(ctrl, 0);
+ schedule_delayed_work(&ctrl->button_work, 5 * HZ);
break;
case BLINKINGOFF_STATE:
case BLINKINGON_STATE:
@@ -181,197 +177,184 @@ void pciehp_handle_button_press(struct slot *p_slot)
* press the attention button again before the 5 second
* limit expires to cancel hot-add or hot-remove
*/
- ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(p_slot));
- cancel_delayed_work(&p_slot->work);
- if (p_slot->state == BLINKINGOFF_STATE) {
- p_slot->state = ON_STATE;
- pciehp_green_led_on(p_slot);
+ ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(ctrl));
+ cancel_delayed_work(&ctrl->button_work);
+ if (ctrl->state == BLINKINGOFF_STATE) {
+ ctrl->state = ON_STATE;
+ pciehp_green_led_on(ctrl);
} else {
- p_slot->state = OFF_STATE;
- pciehp_green_led_off(p_slot);
+ ctrl->state = OFF_STATE;
+ pciehp_green_led_off(ctrl);
}
- pciehp_set_attention_status(p_slot, 0);
+ pciehp_set_attention_status(ctrl, 0);
ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
}
-void pciehp_handle_disable_request(struct slot *slot)
+void pciehp_handle_disable_request(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
break;
}
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
- ctrl->request_result = pciehp_disable_slot(slot);
+ ctrl->request_result = pciehp_disable_slot(ctrl, SAFE_REMOVAL);
}
-void pciehp_handle_presence_or_link_change(struct slot *slot, u32 events)
+void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events)
{
- struct controller *ctrl = slot->ctrl;
- bool link_active;
- u8 present;
+ bool present, link_active;
/*
* If the slot is on and presence or link has changed, turn it off.
* Even if it's occupied again, we cannot assume the card is the same.
*/
- mutex_lock(&slot->lock);
- switch (slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case ON_STATE:
- slot->state = POWEROFF_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWEROFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (events & PCI_EXP_SLTSTA_DLLSC)
ctrl_info(ctrl, "Slot(%s): Link Down\n",
- slot_name(slot));
+ slot_name(ctrl));
if (events & PCI_EXP_SLTSTA_PDC)
ctrl_info(ctrl, "Slot(%s): Card not present\n",
- slot_name(slot));
- pciehp_disable_slot(slot);
+ slot_name(ctrl));
+ pciehp_disable_slot(ctrl, SURPRISE_REMOVAL);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
/* Turn the slot on if it's occupied or link is up */
- mutex_lock(&slot->lock);
- pciehp_get_adapter_status(slot, &present);
+ mutex_lock(&ctrl->state_lock);
+ present = pciehp_card_present(ctrl);
link_active = pciehp_check_link_active(ctrl);
if (!present && !link_active) {
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return;
}
- switch (slot->state) {
+ switch (ctrl->state) {
case BLINKINGON_STATE:
- cancel_delayed_work(&slot->work);
+ cancel_delayed_work(&ctrl->button_work);
/* fall through */
case OFF_STATE:
- slot->state = POWERON_STATE;
- mutex_unlock(&slot->lock);
+ ctrl->state = POWERON_STATE;
+ mutex_unlock(&ctrl->state_lock);
if (present)
ctrl_info(ctrl, "Slot(%s): Card present\n",
- slot_name(slot));
+ slot_name(ctrl));
if (link_active)
ctrl_info(ctrl, "Slot(%s): Link Up\n",
- slot_name(slot));
- ctrl->request_result = pciehp_enable_slot(slot);
+ slot_name(ctrl));
+ ctrl->request_result = pciehp_enable_slot(ctrl);
break;
default:
- mutex_unlock(&slot->lock);
+ mutex_unlock(&ctrl->state_lock);
break;
}
}
-static int __pciehp_enable_slot(struct slot *p_slot)
+static int __pciehp_enable_slot(struct controller *ctrl)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- pciehp_get_adapter_status(p_slot, &getstatus);
- if (!getstatus) {
- ctrl_info(ctrl, "Slot(%s): No adapter\n", slot_name(p_slot));
- return -ENODEV;
- }
- if (MRL_SENS(p_slot->ctrl)) {
- pciehp_get_latch_status(p_slot, &getstatus);
+ if (MRL_SENS(ctrl)) {
+ pciehp_get_latch_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Latch open\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -ENODEV;
}
}
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (getstatus) {
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return 0;
}
}
- return board_added(p_slot);
+ return board_added(ctrl);
}
-static int pciehp_enable_slot(struct slot *slot)
+static int pciehp_enable_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_enable_slot(slot);
+ ret = __pciehp_enable_slot(ctrl);
if (ret && ATTN_BUTTN(ctrl))
- pciehp_green_led_off(slot); /* may be blinking */
+ pciehp_green_led_off(ctrl); /* may be blinking */
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = ret ? OFF_STATE : ON_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = ret ? OFF_STATE : ON_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-static int __pciehp_disable_slot(struct slot *p_slot)
+static int __pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
u8 getstatus = 0;
- struct controller *ctrl = p_slot->ctrl;
- if (POWER_CTRL(p_slot->ctrl)) {
- pciehp_get_power_status(p_slot, &getstatus);
+ if (POWER_CTRL(ctrl)) {
+ pciehp_get_power_status(ctrl, &getstatus);
if (!getstatus) {
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
return -EINVAL;
}
}
- remove_board(p_slot);
+ remove_board(ctrl, safe_removal);
return 0;
}
-static int pciehp_disable_slot(struct slot *slot)
+static int pciehp_disable_slot(struct controller *ctrl, bool safe_removal)
{
- struct controller *ctrl = slot->ctrl;
int ret;
pm_runtime_get_sync(&ctrl->pcie->port->dev);
- ret = __pciehp_disable_slot(slot);
+ ret = __pciehp_disable_slot(ctrl, safe_removal);
pm_runtime_put(&ctrl->pcie->port->dev);
- mutex_lock(&slot->lock);
- slot->state = OFF_STATE;
- mutex_unlock(&slot->lock);
+ mutex_lock(&ctrl->state_lock);
+ ctrl->state = OFF_STATE;
+ mutex_unlock(&ctrl->state_lock);
return ret;
}
-int pciehp_sysfs_enable_slot(struct slot *p_slot)
+int pciehp_sysfs_enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGON_STATE:
case OFF_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
/*
* The IRQ thread becomes a no-op if the user pulls out the
* card before the thread wakes up, so initialize to -ENODEV.
@@ -383,53 +366,53 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot)
return ctrl->request_result;
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering on state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGOFF_STATE:
case ON_STATE:
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already enabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
-int pciehp_sysfs_disable_slot(struct slot *p_slot)
+int pciehp_sysfs_disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct controller *ctrl = p_slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
- mutex_lock(&p_slot->lock);
- switch (p_slot->state) {
+ mutex_lock(&ctrl->state_lock);
+ switch (ctrl->state) {
case BLINKINGOFF_STATE:
case ON_STATE:
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
pciehp_request(ctrl, DISABLE_SLOT);
wait_event(ctrl->requester,
!atomic_read(&ctrl->pending_events));
return ctrl->request_result;
case POWEROFF_STATE:
ctrl_info(ctrl, "Slot(%s): Already in powering off state\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
case BLINKINGON_STATE:
case OFF_STATE:
case POWERON_STATE:
ctrl_info(ctrl, "Slot(%s): Already disabled\n",
- slot_name(p_slot));
+ slot_name(ctrl));
break;
default:
ctrl_err(ctrl, "Slot(%s): Invalid state %#x\n",
- slot_name(p_slot), p_slot->state);
+ slot_name(ctrl), ctrl->state);
break;
}
- mutex_unlock(&p_slot->lock);
+ mutex_unlock(&ctrl->state_lock);
return -ENODEV;
}
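
For context, both sysfs paths above follow the same hand-off pattern: the requester publishes an action bit and sleeps until the IRQ thread has drained all pending events. A minimal sketch of that pattern, using only names visible in this patch (example_disable_via_request itself is hypothetical, not part of the series):

/* Sketch only -- not part of the patch. */
static int example_disable_via_request(struct controller *ctrl)
{
	/* The IRQ thread is a no-op if the card is gone, so preset -ENODEV. */
	ctrl->request_result = -ENODEV;

	pciehp_request(ctrl, DISABLE_SLOT);	/* publish event, kick IRQ thread */
	wait_event(ctrl->requester,
		   !atomic_read(&ctrl->pending_events));

	return ctrl->request_result;
}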
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index a938abdb41ce..7dd443aea5a5 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -13,15 +13,12 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/types.h>
-#include <linux/signal.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/slab.h>
#include "../pci.h"
@@ -43,7 +40,7 @@ static inline int pciehp_request_irq(struct controller *ctrl)
if (pciehp_poll_mode) {
ctrl->poll_thread = kthread_run(&pciehp_poll, ctrl,
"pciehp_poll-%s",
- slot_name(ctrl->slot));
+ slot_name(ctrl));
return PTR_ERR_OR_ZERO(ctrl->poll_thread);
}
@@ -217,13 +214,6 @@ bool pciehp_check_link_active(struct controller *ctrl)
return ret;
}
-static void pcie_wait_link_active(struct controller *ctrl)
-{
- struct pci_dev *pdev = ctrl_dev(ctrl);
-
- pcie_wait_for_link(pdev, true);
-}
-
static bool pci_bus_check_dev(struct pci_bus *bus, int devfn)
{
u32 l;
@@ -256,18 +246,9 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- /*
- * Data Link Layer Link Active Reporting must be capable for
- * hot-plug capable downstream port. But old controller might
- * not implement it. In this case, we wait for 1000 ms.
- */
- if (ctrl->link_active_reporting)
- pcie_wait_link_active(ctrl);
- else
- msleep(1000);
+ if (!pcie_wait_for_link(pdev, true))
+ return -1;
- /* wait 100ms before read pci conf, and try in 1s */
- msleep(100);
found = pci_bus_check_dev(ctrl->pcie->port->subordinate,
PCI_DEVFN(0, 0));
@@ -318,8 +299,8 @@ static int pciehp_link_enable(struct controller *ctrl)
int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 *status)
{
- struct slot *slot = hotplug_slot->private;
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct controller *ctrl = to_ctrl(hotplug_slot);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
pci_config_pm_runtime_get(pdev);
@@ -329,9 +310,9 @@ int pciehp_get_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_get_attention_status(struct slot *slot, u8 *status)
+int pciehp_get_attention_status(struct hotplug_slot *hotplug_slot, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -355,11 +336,12 @@ void pciehp_get_attention_status(struct slot *slot, u8 *status)
*status = 0xFF;
break;
}
+
+ return 0;
}
-void pciehp_get_power_status(struct slot *slot, u8 *status)
+void pciehp_get_power_status(struct controller *ctrl, u8 *status)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_ctrl;
@@ -380,27 +362,41 @@ void pciehp_get_power_status(struct slot *slot, u8 *status)
}
}
-void pciehp_get_latch_status(struct slot *slot, u8 *status)
+void pciehp_get_latch_status(struct controller *ctrl, u8 *status)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
*status = !!(slot_status & PCI_EXP_SLTSTA_MRLSS);
}
-void pciehp_get_adapter_status(struct slot *slot, u8 *status)
+bool pciehp_card_present(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- *status = !!(slot_status & PCI_EXP_SLTSTA_PDS);
+ return slot_status & PCI_EXP_SLTSTA_PDS;
}
-int pciehp_query_power_fault(struct slot *slot)
+/**
+ * pciehp_card_present_or_link_active() - whether given slot is occupied
+ * @ctrl: PCIe hotplug controller
+ *
+ * Unlike pciehp_card_present(), which determines presence solely from the
+ * Presence Detect State bit, this helper also returns true if the Link Active
+ * bit is set. This is a concession to broken hotplug ports which hardwire
+ * Presence Detect State to zero, such as Wilocity's [1ae9:0200].
+ */
+bool pciehp_card_present_or_link_active(struct controller *ctrl)
+{
+ return pciehp_card_present(ctrl) || pciehp_check_link_active(ctrl);
+}
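
A hedged usage sketch for the new helper (example_sync_empty_slot is hypothetical; the call pattern mirrors what pcie_init() does later in this patch): power off a slot whose controller reports power on although neither a card nor a link is detected.

/* Illustration only. */
static void example_sync_empty_slot(struct controller *ctrl)
{
	u8 poweron;

	if (!POWER_CTRL(ctrl))
		return;

	pciehp_get_power_status(ctrl, &poweron);
	if (!pciehp_card_present_or_link_active(ctrl) && poweron)
		pciehp_power_off_slot(ctrl);	/* firmware left power on */
}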
+
+int pciehp_query_power_fault(struct controller *ctrl)
{
- struct pci_dev *pdev = ctrl_dev(slot->ctrl);
+ struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
@@ -410,8 +406,7 @@ int pciehp_query_power_fault(struct slot *slot)
int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
u8 status)
{
- struct slot *slot = hotplug_slot->private;
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
pci_config_pm_runtime_get(pdev);
@@ -421,9 +416,8 @@ int pciehp_set_raw_indicator_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-void pciehp_set_attention_status(struct slot *slot, u8 value)
+void pciehp_set_attention_status(struct controller *ctrl, u8 value)
{
- struct controller *ctrl = slot->ctrl;
u16 slot_cmd;
if (!ATTN_LED(ctrl))
@@ -447,10 +441,8 @@ void pciehp_set_attention_status(struct slot *slot, u8 value)
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
}
-void pciehp_green_led_on(struct slot *slot)
+void pciehp_green_led_on(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -461,10 +453,8 @@ void pciehp_green_led_on(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_ON);
}
-void pciehp_green_led_off(struct slot *slot)
+void pciehp_green_led_off(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -475,10 +465,8 @@ void pciehp_green_led_off(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_OFF);
}
-void pciehp_green_led_blink(struct slot *slot)
+void pciehp_green_led_blink(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
if (!PWR_LED(ctrl))
return;
@@ -489,9 +477,8 @@ void pciehp_green_led_blink(struct slot *slot)
PCI_EXP_SLTCTL_PWR_IND_BLINK);
}
-int pciehp_power_on_slot(struct slot *slot)
+int pciehp_power_on_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 slot_status;
int retval;
@@ -515,10 +502,8 @@ int pciehp_power_on_slot(struct slot *slot)
return retval;
}
-void pciehp_power_off_slot(struct slot *slot)
+void pciehp_power_off_slot(struct controller *ctrl)
{
- struct controller *ctrl = slot->ctrl;
-
pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PWR_OFF, PCI_EXP_SLTCTL_PCC);
ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL,
@@ -533,9 +518,11 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
u16 status, events;
/*
- * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+ * Interrupts only occur in D3hot or shallower and only if enabled
+ * in the Slot Control register (PCIe r4.0, sec 6.7.3.4).
*/
- if (pdev->current_state == PCI_D3cold)
+ if (pdev->current_state == PCI_D3cold ||
+ (!(ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE) && !pciehp_poll_mode))
return IRQ_NONE;
/*
@@ -616,7 +603,6 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
{
struct controller *ctrl = (struct controller *)dev_id;
struct pci_dev *pdev = ctrl_dev(ctrl);
- struct slot *slot = ctrl->slot;
irqreturn_t ret;
u32 events;
@@ -642,16 +628,16 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
/* Check Attention Button Pressed */
if (events & PCI_EXP_SLTSTA_ABP) {
ctrl_info(ctrl, "Slot(%s): Attention button pressed\n",
- slot_name(slot));
- pciehp_handle_button_press(slot);
+ slot_name(ctrl));
+ pciehp_handle_button_press(ctrl);
}
/* Check Power Fault Detected */
if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
ctrl->power_fault_detected = 1;
- ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(slot));
- pciehp_set_attention_status(slot, 1);
- pciehp_green_led_off(slot);
+ ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
+ pciehp_set_attention_status(ctrl, 1);
+ pciehp_green_led_off(ctrl);
}
/*
@@ -660,9 +646,9 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
*/
down_read(&ctrl->reset_lock);
if (events & DISABLE_SLOT)
- pciehp_handle_disable_request(slot);
+ pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
- pciehp_handle_presence_or_link_change(slot, events);
+ pciehp_handle_presence_or_link_change(ctrl, events);
up_read(&ctrl->reset_lock);
pci_config_pm_runtime_put(pdev);
@@ -748,6 +734,16 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
}
+void pcie_enable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_HPIE, PCI_EXP_SLTCTL_HPIE);
+}
+
+void pcie_disable_interrupt(struct controller *ctrl)
+{
+ pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_HPIE);
+}
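
The two helpers above toggle PCI_EXP_SLTCTL_HPIE through the driver's command interface. One plausible consumer, sketched under the assumption that suspend/resume hooks are the intended users (such hooks are not part of this hunk):

/* Hypothetical suspend/resume pairing -- an assumption, not this patch. */
static void example_hp_suspend(struct controller *ctrl)
{
	pcie_disable_interrupt(ctrl);		/* quiesce before D-state change */
}

static void example_hp_resume(struct controller *ctrl)
{
	pcie_clear_hotplug_events(ctrl);	/* drop events missed while asleep */
	pcie_enable_interrupt(ctrl);
}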
+
/*
* pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
* bus reset of the bridge, but at the same time we want to ensure that it is
@@ -756,9 +752,9 @@ void pcie_clear_hotplug_events(struct controller *ctrl)
* momentarily, if we see that they could interfere. Also, clear any spurious
* events after.
*/
-int pciehp_reset_slot(struct slot *slot, int probe)
+int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, int probe)
{
- struct controller *ctrl = slot->ctrl;
+ struct controller *ctrl = to_ctrl(hotplug_slot);
struct pci_dev *pdev = ctrl_dev(ctrl);
u16 stat_mask = 0, ctrl_mask = 0;
int rc;
@@ -808,34 +804,6 @@ void pcie_shutdown_notification(struct controller *ctrl)
}
}
-static int pcie_init_slot(struct controller *ctrl)
-{
- struct pci_bus *subordinate = ctrl_dev(ctrl)->subordinate;
- struct slot *slot;
-
- slot = kzalloc(sizeof(*slot), GFP_KERNEL);
- if (!slot)
- return -ENOMEM;
-
- down_read(&pci_bus_sem);
- slot->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
- up_read(&pci_bus_sem);
-
- slot->ctrl = ctrl;
- mutex_init(&slot->lock);
- INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work);
- ctrl->slot = slot;
- return 0;
-}
-
-static void pcie_cleanup_slot(struct controller *ctrl)
-{
- struct slot *slot = ctrl->slot;
-
- cancel_delayed_work_sync(&slot->work);
- kfree(slot);
-}
-
static inline void dbg_ctrl(struct controller *ctrl)
{
struct pci_dev *pdev = ctrl->pcie->port;
@@ -857,12 +825,13 @@ struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
u32 slot_cap, link_cap;
- u8 occupied, poweron;
+ u8 poweron;
struct pci_dev *pdev = dev->port;
+ struct pci_bus *subordinate = pdev->subordinate;
ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl)
- goto abort;
+ return NULL;
ctrl->pcie = dev;
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
@@ -879,15 +848,19 @@ struct controller *pcie_init(struct pcie_device *dev)
ctrl->slot_cap = slot_cap;
mutex_init(&ctrl->ctrl_lock);
+ mutex_init(&ctrl->state_lock);
init_rwsem(&ctrl->reset_lock);
init_waitqueue_head(&ctrl->requester);
init_waitqueue_head(&ctrl->queue);
+ INIT_DELAYED_WORK(&ctrl->button_work, pciehp_queue_pushbutton_work);
dbg_ctrl(ctrl);
+ down_read(&pci_bus_sem);
+ ctrl->state = list_empty(&subordinate->devices) ? OFF_STATE : ON_STATE;
+ up_read(&pci_bus_sem);
+
/* Check if Data Link Layer Link Active Reporting is implemented */
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
- if (link_cap & PCI_EXP_LNKCAP_DLLLARC)
- ctrl->link_active_reporting = 1;
/* Clear all remaining event bits in Slot Status register. */
pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
@@ -909,33 +882,24 @@ struct controller *pcie_init(struct pcie_device *dev)
FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC),
pdev->broken_cmd_compl ? " (with Cmd Compl erratum)" : "");
- if (pcie_init_slot(ctrl))
- goto abort_ctrl;
-
/*
* If empty slot's power status is on, turn power off. The IRQ isn't
* requested yet, so avoid triggering a notification with this command.
*/
if (POWER_CTRL(ctrl)) {
- pciehp_get_adapter_status(ctrl->slot, &occupied);
- pciehp_get_power_status(ctrl->slot, &poweron);
- if (!occupied && poweron) {
+ pciehp_get_power_status(ctrl, &poweron);
+ if (!pciehp_card_present_or_link_active(ctrl) && poweron) {
pcie_disable_notification(ctrl);
- pciehp_power_off_slot(ctrl->slot);
+ pciehp_power_off_slot(ctrl);
}
}
return ctrl;
-
-abort_ctrl:
- kfree(ctrl);
-abort:
- return NULL;
}
void pciehp_release_ctrl(struct controller *ctrl)
{
- pcie_cleanup_slot(ctrl);
+ cancel_delayed_work_sync(&ctrl->button_work);
kfree(ctrl);
}
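
The simplified release path above illustrates a general teardown rule: delayed work embedded in an object must be cancelled synchronously before the object is freed, otherwise the handler may run on freed memory. A generic sketch (struct example_obj is hypothetical):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_obj {
	struct delayed_work work;	/* handler dereferences example_obj */
};

static void example_obj_free(struct example_obj *obj)
{
	cancel_delayed_work_sync(&obj->work);	/* waits for a running handler */
	kfree(obj);				/* only now is this safe */
}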
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5c58c22e0c08..b9c1396db6fe 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -13,20 +13,26 @@
*
*/
-#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-int pciehp_configure_device(struct slot *p_slot)
+/**
+ * pciehp_configure_device() - enumerate PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ *
+ * Enumerate PCI devices below a hotplug bridge and add them to the system.
+ * Return 0 on success, %-EEXIST if the devices are already enumerated or
+ * %-ENODEV if enumeration failed.
+ */
+int pciehp_configure_device(struct controller *ctrl)
{
struct pci_dev *dev;
- struct pci_dev *bridge = p_slot->ctrl->pcie->port;
+ struct pci_dev *bridge = ctrl->pcie->port;
struct pci_bus *parent = bridge->subordinate;
int num, ret = 0;
- struct controller *ctrl = p_slot->ctrl;
pci_lock_rescan_remove();
@@ -62,17 +68,28 @@ int pciehp_configure_device(struct slot *p_slot)
return ret;
}
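
Per the kernel-doc above, -EEXIST from pciehp_configure_device() is benign. A caller sketch (example_bring_up is hypothetical; the error handling mirrors board_added() earlier in this patch):

/* Illustration only. */
static int example_bring_up(struct controller *ctrl)
{
	int ret = pciehp_configure_device(ctrl);

	if (ret && ret != -EEXIST)	/* already enumerated is not an error */
		return ret;
	return 0;
}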
-void pciehp_unconfigure_device(struct slot *p_slot)
+/**
+ * pciehp_unconfigure_device() - remove PCI devices below a hotplug bridge
+ * @ctrl: PCIe hotplug controller
+ * @presence: whether the card is still present in the slot;
+ * true for safe removal via sysfs or an Attention Button press,
+ * false for surprise removal
+ *
+ * Unbind PCI devices below a hotplug bridge from their drivers and remove
+ * them from the system. Safely removed devices are quiesced. Surprise
+ * removed devices are marked as such to prevent further accesses.
+ */
+void pciehp_unconfigure_device(struct controller *ctrl, bool presence)
{
- u8 presence = 0;
struct pci_dev *dev, *temp;
- struct pci_bus *parent = p_slot->ctrl->pcie->port->subordinate;
+ struct pci_bus *parent = ctrl->pcie->port->subordinate;
u16 command;
- struct controller *ctrl = p_slot->ctrl;
ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n",
__func__, pci_domain_nr(parent), parent->number);
- pciehp_get_adapter_status(p_slot, &presence);
+
+ if (!presence)
+ pci_walk_bus(parent, pci_dev_set_disconnected, NULL);
pci_lock_rescan_remove();
@@ -85,12 +102,6 @@ void pciehp_unconfigure_device(struct slot *p_slot)
list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
bus_list) {
pci_dev_get(dev);
- if (!presence) {
- pci_dev_set_disconnected(dev, NULL);
- if (pci_has_subordinate(dev))
- pci_walk_bus(dev->subordinate,
- pci_dev_set_disconnected, NULL);
- }
pci_stop_and_remove_bus_device(dev);
/*
* Ensure that no new Requests will be generated from
diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c
index 3276a5e4c430..6758fd7c382e 100644
--- a/drivers/pci/hotplug/pnv_php.c
+++ b/drivers/pci/hotplug/pnv_php.c
@@ -275,14 +275,13 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot)
goto free_fdt1;
}
- fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL);
+ fdt = kmemdup(fdt1, fdt_totalsize(fdt1), GFP_KERNEL);
if (!fdt) {
ret = -ENOMEM;
goto free_fdt1;
}
/* Unflatten device tree blob */
- memcpy(fdt, fdt1, fdt_totalsize(fdt1));
dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL);
if (!dt) {
ret = -EINVAL;
@@ -328,10 +327,15 @@ out:
return ret;
}
+static inline struct pnv_php_slot *to_pnv_php_slot(struct hotplug_slot *slot)
+{
+ return container_of(slot, struct pnv_php_slot, slot);
+}
+
int pnv_php_set_slot_power_state(struct hotplug_slot *slot,
uint8_t state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
struct opal_msg msg;
int ret;
@@ -363,7 +367,7 @@ EXPORT_SYMBOL_GPL(pnv_php_set_slot_power_state);
static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t power_state = OPAL_PCI_SLOT_POWER_ON;
int ret;
@@ -378,7 +382,6 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
ret);
} else {
*state = power_state;
- slot->info->power_status = power_state;
}
return 0;
@@ -386,7 +389,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state)
static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
uint8_t presence = OPAL_PCI_SLOT_EMPTY;
int ret;
@@ -397,7 +400,6 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
ret = pnv_pci_get_presence_state(php_slot->id, &presence);
if (ret >= 0) {
*state = presence;
- slot->info->adapter_status = presence;
ret = 0;
} else {
pci_warn(php_slot->pdev, "Error %d getting presence\n", ret);
@@ -406,10 +408,20 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state)
return ret;
}
+static int pnv_php_get_attention_state(struct hotplug_slot *slot, u8 *state)
+{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
+ *state = php_slot->attention_state;
+ return 0;
+}
+
static int pnv_php_set_attention_state(struct hotplug_slot *slot, u8 state)
{
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
+
/* FIXME: Make it real once firmware supports it */
- slot->info->attention_status = state;
+ php_slot->attention_state = state;
return 0;
}
@@ -501,15 +513,14 @@ scan:
static int pnv_php_enable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = container_of(slot,
- struct pnv_php_slot, slot);
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
return pnv_php_enable(php_slot, true);
}
static int pnv_php_disable_slot(struct hotplug_slot *slot)
{
- struct pnv_php_slot *php_slot = slot->private;
+ struct pnv_php_slot *php_slot = to_pnv_php_slot(slot);
int ret;
if (php_slot->state != PNV_PHP_STATE_POPULATED)
@@ -530,9 +541,10 @@ static int pnv_php_disable_slot(struct hotplug_slot *slot)
return ret;
}
-static struct hotplug_slot_ops php_slot_ops = {
+static const struct hotplug_slot_ops php_slot_ops = {
.get_power_status = pnv_php_get_power_state,
.get_adapter_status = pnv_php_get_adapter_state,
+ .get_attention_status = pnv_php_get_attention_state,
.set_attention_status = pnv_php_set_attention_state,
.enable_slot = pnv_php_enable_slot,
.disable_slot = pnv_php_disable_slot,
@@ -594,8 +606,6 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn)
php_slot->id = id;
php_slot->power_state_check = false;
php_slot->slot.ops = &php_slot_ops;
- php_slot->slot.info = &php_slot->slot_info;
- php_slot->slot.private = php_slot;
INIT_LIST_HEAD(&php_slot->children);
INIT_LIST_HEAD(&php_slot->link);
@@ -736,7 +746,7 @@ static irqreturn_t pnv_php_interrupt(int irq, void *data)
pe = edev ? edev->pe : NULL;
if (pe) {
eeh_serialize_lock(&flags);
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE);
}
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index c8311724bd76..bdc954d70869 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -63,16 +63,22 @@ struct slot {
u32 index;
u32 type;
u32 power_domain;
+ u8 attention_status;
char *name;
struct device_node *dn;
struct pci_bus *bus;
struct list_head *pci_devs;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
};
-extern struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
+extern const struct hotplug_slot_ops rpaphp_hotplug_slot_ops;
extern struct list_head rpaphp_slot_head;
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
/* function prototypes */
/* rpaphp_pci.c */
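
The to_slot() helper above is the pattern this whole series converges on: embed struct hotplug_slot in the driver's slot structure and recover the container with container_of(), eliminating the ->private pointer and one allocation per slot. A minimal generic sketch (struct my_slot and its getter are hypothetical):

#include <linux/pci_hotplug.h>

struct my_slot {
	u8 power_state;
	struct hotplug_slot hotplug_slot;	/* embedded, not kzalloc'd */
};

static inline struct my_slot *to_my_slot(struct hotplug_slot *hp)
{
	return container_of(hp, struct my_slot, hotplug_slot);
}

static int my_get_power_status(struct hotplug_slot *hp, u8 *value)
{
	*value = to_my_slot(hp)->power_state;
	return 0;
}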
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 857c358b727b..bcd5d357ca23 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -52,7 +52,7 @@ module_param_named(debug, rpaphp_debug, bool, 0644);
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
{
int rc;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (value) {
case 0:
@@ -66,7 +66,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
rc = rtas_set_indicator(DR_INDICATOR, slot->index, value);
if (!rc)
- hotplug_slot->info->attention_status = value;
+ slot->attention_status = value;
return rc;
}
@@ -79,7 +79,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
int retval, level;
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
retval = rtas_get_power_level(slot->power_domain, &level);
if (!retval)
@@ -94,14 +94,14 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
*/
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
- *value = slot->hotplug_slot->info->attention_status;
+ struct slot *slot = to_slot(hotplug_slot);
+ *value = slot->attention_status;
return 0;
}
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc, state;
rc = rpaphp_get_sensor_state(slot, &state);
@@ -409,7 +409,7 @@ static void __exit cleanup_slots(void)
list_for_each_entry_safe(slot, next, &rpaphp_slot_head,
rpaphp_slot_list) {
list_del(&slot->rpaphp_slot_list);
- pci_hp_deregister(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
dealloc_slot_struct(slot);
}
return;
@@ -434,7 +434,7 @@ static void __exit rpaphp_exit(void)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int state;
int retval;
@@ -464,7 +464,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = (struct slot *)hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
if (slot->state == NOT_CONFIGURED)
return -EINVAL;
@@ -477,7 +477,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
return 0;
}
-struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
+const struct hotplug_slot_ops rpaphp_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.set_attention_status = set_attention_status,
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index 0aac33e15dab..beca61badeea 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -54,25 +54,21 @@ int rpaphp_get_sensor_state(struct slot *slot, int *state)
* rpaphp_enable_slot - record slot state, config pci device
* @slot: target &slot
*
- * Initialize values in the slot, and the hotplug_slot info
- * structures to indicate if there is a pci card plugged into
- * the slot. If the slot is not empty, run the pcibios routine
+ * Initialize values in the slot structure to indicate whether there is a PCI
+ * card plugged into the slot. If the slot is not empty, run the pcibios routine
* to get pcibios stuff correctly set up.
*/
int rpaphp_enable_slot(struct slot *slot)
{
int rc, level, state;
struct pci_bus *bus;
- struct hotplug_slot_info *info = slot->hotplug_slot->info;
- info->adapter_status = NOT_VALID;
slot->state = EMPTY;
/* Find out if the power is turned on for the slot */
rc = rtas_get_power_level(slot->power_domain, &level);
if (rc)
return rc;
- info->power_status = level;
/* Figure out if there is an adapter in the slot */
rc = rpaphp_get_sensor_state(slot, &state);
@@ -85,13 +81,11 @@ int rpaphp_enable_slot(struct slot *slot)
return -EINVAL;
}
- info->adapter_status = EMPTY;
slot->bus = bus;
slot->pci_devs = &bus->devices;
/* if there's an adapter in the slot, go add the pci devices */
if (state == PRESENT) {
- info->adapter_status = NOT_CONFIGURED;
slot->state = NOT_CONFIGURED;
/* non-empty slot has to have child */
@@ -105,7 +99,6 @@ int rpaphp_enable_slot(struct slot *slot)
pci_hp_add_devices(bus);
if (!list_empty(&bus->devices)) {
- info->adapter_status = CONFIGURED;
slot->state = CONFIGURED;
}
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c
index b916c8e4372d..5282aa3e33c5 100644
--- a/drivers/pci/hotplug/rpaphp_slot.c
+++ b/drivers/pci/hotplug/rpaphp_slot.c
@@ -21,9 +21,7 @@
/* free up the memory used by a slot */
void dealloc_slot_struct(struct slot *slot)
{
- kfree(slot->hotplug_slot->info);
kfree(slot->name);
- kfree(slot->hotplug_slot);
kfree(slot);
}
@@ -35,28 +33,16 @@ struct slot *alloc_slot_struct(struct device_node *dn,
slot = kzalloc(sizeof(struct slot), GFP_KERNEL);
if (!slot)
goto error_nomem;
- slot->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!slot->hotplug_slot)
- goto error_slot;
- slot->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!slot->hotplug_slot->info)
- goto error_hpslot;
slot->name = kstrdup(drc_name, GFP_KERNEL);
if (!slot->name)
- goto error_info;
+ goto error_slot;
slot->dn = dn;
slot->index = drc_index;
slot->power_domain = power_domain;
- slot->hotplug_slot->private = slot;
- slot->hotplug_slot->ops = &rpaphp_hotplug_slot_ops;
+ slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops;
return (slot);
-error_info:
- kfree(slot->hotplug_slot->info);
-error_hpslot:
- kfree(slot->hotplug_slot);
error_slot:
kfree(slot);
error_nomem:
@@ -77,7 +63,7 @@ static int is_registered(struct slot *slot)
int rpaphp_deregister_slot(struct slot *slot)
{
int retval = 0;
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
dbg("%s - Entry: deregistering slot=%s\n",
__func__, slot->name);
@@ -93,7 +79,7 @@ EXPORT_SYMBOL_GPL(rpaphp_deregister_slot);
int rpaphp_register_slot(struct slot *slot)
{
- struct hotplug_slot *php_slot = slot->hotplug_slot;
+ struct hotplug_slot *php_slot = &slot->hotplug_slot;
struct device_node *child;
u32 my_index;
int retval;
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index 93b5341d282c..30ee72268790 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -32,10 +32,15 @@ static int zpci_fn_configured(enum zpci_state state)
*/
struct slot {
struct list_head slot_list;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct zpci_dev *zdev;
};
+static inline struct slot *to_slot(struct hotplug_slot *hotplug_slot)
+{
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
+}
+
static inline int slot_configure(struct slot *slot)
{
int ret = sclp_pci_configure(slot->zdev->fid);
@@ -60,7 +65,7 @@ static inline int slot_deconfigure(struct slot *slot)
static int enable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
int rc;
if (slot->zdev->state != ZPCI_FN_STATE_STANDBY)
@@ -88,7 +93,7 @@ out_deconfigure:
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
struct pci_dev *pdev;
int rc;
@@ -110,7 +115,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot)
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
- struct slot *slot = hotplug_slot->private;
+ struct slot *slot = to_slot(hotplug_slot);
switch (slot->zdev->state) {
case ZPCI_FN_STATE_STANDBY:
@@ -130,7 +135,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return 0;
}
-static struct hotplug_slot_ops s390_hotplug_slot_ops = {
+static const struct hotplug_slot_ops s390_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -139,8 +144,6 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = {
int zpci_init_slot(struct zpci_dev *zdev)
{
- struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
struct slot *slot;
int rc;
@@ -152,26 +155,11 @@ int zpci_init_slot(struct zpci_dev *zdev)
if (!slot)
goto error;
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot)
- goto error_hp;
- hotplug_slot->private = slot;
-
- slot->hotplug_slot = hotplug_slot;
slot->zdev = zdev;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- goto error_info;
- hotplug_slot->info = info;
-
- hotplug_slot->ops = &s390_hotplug_slot_ops;
-
- get_power_status(hotplug_slot, &info->power_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ slot->hotplug_slot.ops = &s390_hotplug_slot_ops;
snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid);
- rc = pci_hp_register(slot->hotplug_slot, zdev->bus,
+ rc = pci_hp_register(&slot->hotplug_slot, zdev->bus,
ZPCI_DEVFN, name);
if (rc)
goto error_reg;
@@ -180,10 +168,6 @@ int zpci_init_slot(struct zpci_dev *zdev)
return 0;
error_reg:
- kfree(info);
-error_info:
- kfree(hotplug_slot);
-error_hp:
kfree(slot);
error:
return -ENOMEM;
@@ -198,9 +182,7 @@ void zpci_exit_slot(struct zpci_dev *zdev)
if (slot->zdev != zdev)
continue;
list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c
index babd23409f61..231f5bdd3d2d 100644
--- a/drivers/pci/hotplug/sgi_hotplug.c
+++ b/drivers/pci/hotplug/sgi_hotplug.c
@@ -56,7 +56,7 @@ struct slot {
int device_num;
struct pci_bus *pci_bus;
/* this struct for glue internal only */
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head hp_list;
char physical_path[SN_SLOT_NAME_SIZE];
};
@@ -80,7 +80,7 @@ static int enable_slot(struct hotplug_slot *slot);
static int disable_slot(struct hotplug_slot *slot);
static inline int get_power_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops sn_hotplug_slot_ops = {
+static const struct hotplug_slot_ops sn_hotplug_slot_ops = {
.enable_slot = enable_slot,
.disable_slot = disable_slot,
.get_power_status = get_power_status,
@@ -88,10 +88,15 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = {
static DEFINE_MUTEX(sn_hotplug_mutex);
+static struct slot *to_slot(struct hotplug_slot *bss_hotplug_slot)
+{
+ return container_of(bss_hotplug_slot, struct slot, hotplug_slot);
+}
+
static ssize_t path_show(struct pci_slot *pci_slot, char *buf)
{
int retval = -ENOENT;
- struct slot *slot = pci_slot->hotplug->private;
+ struct slot *slot = to_slot(pci_slot->hotplug);
if (!slot)
return retval;
@@ -156,7 +161,7 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus)
return -EIO;
}
-static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
+static int sn_hp_slot_private_alloc(struct hotplug_slot **bss_hotplug_slot,
struct pci_bus *pci_bus, int device,
char *name)
{
@@ -168,7 +173,6 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
- bss_hotplug_slot->private = slot;
slot->device_num = device;
slot->pci_bus = pci_bus;
@@ -179,8 +183,8 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot,
sn_generate_path(pci_bus, slot->physical_path);
- slot->hotplug_slot = bss_hotplug_slot;
list_add(&slot->hp_list, &sn_hp_list);
+ *bss_hotplug_slot = &slot->hotplug_slot;
return 0;
}
@@ -192,10 +196,9 @@ static struct hotplug_slot *sn_hp_destroy(void)
struct hotplug_slot *bss_hotplug_slot = NULL;
list_for_each_entry(slot, &sn_hp_list, hp_list) {
- bss_hotplug_slot = slot->hotplug_slot;
+ bss_hotplug_slot = &slot->hotplug_slot;
pci_slot = bss_hotplug_slot->pci_slot;
- list_del(&((struct slot *)bss_hotplug_slot->private)->
- hp_list);
+ list_del(&slot->hp_list);
sysfs_remove_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
break;
@@ -227,7 +230,7 @@ static void sn_bus_free_data(struct pci_dev *dev)
static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
int device_num, char **ssdt)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_enable_resp resp;
int rc;
@@ -267,7 +270,7 @@ static int sn_slot_enable(struct hotplug_slot *bss_hotplug_slot,
static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
int device_num, int action)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
struct pcibr_slot_disable_resp resp;
int rc;
@@ -323,7 +326,7 @@ static int sn_slot_disable(struct hotplug_slot *bss_hotplug_slot,
*/
static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_bus *new_bus = NULL;
struct pci_dev *dev;
int num_funcs;
@@ -469,7 +472,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot)
static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pci_dev *dev, *temp;
int rc;
acpi_handle ssdt_hdl = NULL;
@@ -571,7 +574,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot)
static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
u8 *value)
{
- struct slot *slot = bss_hotplug_slot->private;
+ struct slot *slot = to_slot(bss_hotplug_slot);
struct pcibus_info *pcibus_info;
u32 power;
@@ -585,9 +588,7 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot,
static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot)
{
- kfree(bss_hotplug_slot->info);
- kfree(bss_hotplug_slot->private);
- kfree(bss_hotplug_slot);
+ kfree(to_slot(bss_hotplug_slot));
}
static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
@@ -607,22 +608,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
if (sn_pci_slot_valid(pci_bus, device) != 1)
continue;
- bss_hotplug_slot = kzalloc(sizeof(*bss_hotplug_slot),
- GFP_KERNEL);
- if (!bss_hotplug_slot) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- bss_hotplug_slot->info =
- kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!bss_hotplug_slot->info) {
- rc = -ENOMEM;
- goto alloc_err;
- }
-
- if (sn_hp_slot_private_alloc(bss_hotplug_slot,
+ if (sn_hp_slot_private_alloc(&bss_hotplug_slot,
pci_bus, device, name)) {
rc = -ENOMEM;
goto alloc_err;
@@ -637,7 +623,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus)
rc = sysfs_create_file(&pci_slot->kobj,
&sn_slot_path_attr.attr);
if (rc)
- goto register_err;
+ goto alloc_err;
}
pci_dbg(pci_bus->self, "Registered bus with hotplug\n");
return rc;
@@ -646,14 +632,11 @@ register_err:
pci_dbg(pci_bus->self, "bus failed to register with err = %d\n",
rc);
-alloc_err:
- if (rc == -ENOMEM)
- pci_dbg(pci_bus->self, "Memory allocation error\n");
-
/* destroy THIS element */
- if (bss_hotplug_slot)
- sn_release_slot(bss_hotplug_slot);
+ sn_hp_destroy();
+ sn_release_slot(bss_hotplug_slot);
+alloc_err:
/* destroy anything else on the list */
while ((bss_hotplug_slot = sn_hp_destroy())) {
pci_hp_deregister(bss_hotplug_slot);
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index 516e4835019c..f7f13ee5d06e 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -67,11 +67,13 @@ struct slot {
u32 number;
u8 is_a_board;
u8 state;
+ u8 attention_save;
u8 presence_save;
+ u8 latch_save;
u8 pwr_save;
struct controller *ctrl;
const struct hpc_ops *hpc_ops;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct list_head slot_list;
struct delayed_work work; /* work for button event */
struct mutex lock;
@@ -169,7 +171,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev);
static inline const char *slot_name(struct slot *slot)
{
- return hotplug_slot_name(slot->hotplug_slot);
+ return hotplug_slot_name(&slot->hotplug_slot);
}
struct ctrl_reg {
@@ -207,7 +209,7 @@ enum ctrl_offsets {
static inline struct slot *get_slot(struct hotplug_slot *hotplug_slot)
{
- return hotplug_slot->private;
+ return container_of(hotplug_slot, struct slot, hotplug_slot);
}
static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device)
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index 97cee23f3d51..81a918d47895 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -51,7 +51,7 @@ static int get_attention_status(struct hotplug_slot *slot, u8 *value);
static int get_latch_status(struct hotplug_slot *slot, u8 *value);
static int get_adapter_status(struct hotplug_slot *slot, u8 *value);
-static struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
+static const struct hotplug_slot_ops shpchp_hotplug_slot_ops = {
.set_attention_status = set_attention_status,
.enable_slot = enable_slot,
.disable_slot = disable_slot,
@@ -65,7 +65,6 @@ static int init_slots(struct controller *ctrl)
{
struct slot *slot;
struct hotplug_slot *hotplug_slot;
- struct hotplug_slot_info *info;
char name[SLOT_NAME_SIZE];
int retval;
int i;
@@ -77,19 +76,7 @@ static int init_slots(struct controller *ctrl)
goto error;
}
- hotplug_slot = kzalloc(sizeof(*hotplug_slot), GFP_KERNEL);
- if (!hotplug_slot) {
- retval = -ENOMEM;
- goto error_slot;
- }
- slot->hotplug_slot = hotplug_slot;
-
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- retval = -ENOMEM;
- goto error_hpslot;
- }
- hotplug_slot->info = info;
+ hotplug_slot = &slot->hotplug_slot;
slot->hp_slot = i;
slot->ctrl = ctrl;
@@ -101,14 +88,13 @@ static int init_slots(struct controller *ctrl)
slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number);
if (!slot->wq) {
retval = -ENOMEM;
- goto error_info;
+ goto error_slot;
}
mutex_init(&slot->lock);
INIT_DELAYED_WORK(&slot->work, shpchp_queue_pushbutton_work);
/* register this slot with the hotplug pci core */
- hotplug_slot->private = slot;
snprintf(name, SLOT_NAME_SIZE, "%d", slot->number);
hotplug_slot->ops = &shpchp_hotplug_slot_ops;
@@ -116,7 +102,7 @@ static int init_slots(struct controller *ctrl)
pci_domain_nr(ctrl->pci_dev->subordinate),
slot->bus, slot->device, slot->hp_slot, slot->number,
ctrl->slot_device_offset);
- retval = pci_hp_register(slot->hotplug_slot,
+ retval = pci_hp_register(hotplug_slot,
ctrl->pci_dev->subordinate, slot->device, name);
if (retval) {
ctrl_err(ctrl, "pci_hp_register failed with error %d\n",
@@ -124,10 +110,10 @@ static int init_slots(struct controller *ctrl)
goto error_slotwq;
}
- get_power_status(hotplug_slot, &info->power_status);
- get_attention_status(hotplug_slot, &info->attention_status);
- get_latch_status(hotplug_slot, &info->latch_status);
- get_adapter_status(hotplug_slot, &info->adapter_status);
+ get_power_status(hotplug_slot, &slot->pwr_save);
+ get_attention_status(hotplug_slot, &slot->attention_save);
+ get_latch_status(hotplug_slot, &slot->latch_save);
+ get_adapter_status(hotplug_slot, &slot->presence_save);
list_add(&slot->slot_list, &ctrl->slot_list);
}
@@ -135,10 +121,6 @@ static int init_slots(struct controller *ctrl)
return 0;
error_slotwq:
destroy_workqueue(slot->wq);
-error_info:
- kfree(info);
-error_hpslot:
- kfree(hotplug_slot);
error_slot:
kfree(slot);
error:
@@ -153,9 +135,7 @@ void cleanup_slots(struct controller *ctrl)
list_del(&slot->slot_list);
cancel_delayed_work(&slot->work);
destroy_workqueue(slot->wq);
- pci_hp_deregister(slot->hotplug_slot);
- kfree(slot->hotplug_slot->info);
- kfree(slot->hotplug_slot);
+ pci_hp_deregister(&slot->hotplug_slot);
kfree(slot);
}
}
@@ -170,7 +150,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
__func__, slot_name(slot));
- hotplug_slot->info->attention_status = status;
+ slot->attention_save = status;
slot->hpc_ops->set_attention_status(slot, status);
return 0;
@@ -206,7 +186,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_power_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->power_status;
+ *value = slot->pwr_save;
return 0;
}
@@ -221,7 +201,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_attention_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->attention_status;
+ *value = slot->attention_save;
return 0;
}
@@ -236,7 +216,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_latch_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->latch_status;
+ *value = slot->latch_save;
return 0;
}
@@ -251,7 +231,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
retval = slot->hpc_ops->get_adapter_status(slot, value);
if (retval < 0)
- *value = hotplug_slot->info->adapter_status;
+ *value = slot->presence_save;
return 0;
}
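
With hotplug_slot_info gone, shpchp caches the last-known values in struct slot and each getter falls back to its cache when the hardware read fails. The pattern, reduced to a single getter (sketch only):

/* Illustration of the fallback used by all four getters above. */
static int example_get_latch(struct slot *slot, u8 *value)
{
	if (slot->hpc_ops->get_latch_status(slot, value) < 0)
		*value = slot->latch_save;	/* cached at init_slots() time */
	return 0;
}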
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index 1267dcc5a531..078003dcde5b 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -446,23 +446,12 @@ void shpchp_queue_pushbutton_work(struct work_struct *work)
mutex_unlock(&p_slot->lock);
}
-static int update_slot_info (struct slot *slot)
+static void update_slot_info(struct slot *slot)
{
- struct hotplug_slot_info *info;
- int result;
-
- info = kmalloc(sizeof(*info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- slot->hpc_ops->get_power_status(slot, &(info->power_status));
- slot->hpc_ops->get_attention_status(slot, &(info->attention_status));
- slot->hpc_ops->get_latch_status(slot, &(info->latch_status));
- slot->hpc_ops->get_adapter_status(slot, &(info->adapter_status));
-
- result = pci_hp_change_slot_info(slot->hotplug_slot, info);
- kfree (info);
- return result;
+ slot->hpc_ops->get_power_status(slot, &slot->pwr_save);
+ slot->hpc_ops->get_attention_status(slot, &slot->attention_save);
+ slot->hpc_ops->get_latch_status(slot, &slot->latch_save);
+ slot->hpc_ops->get_adapter_status(slot, &slot->presence_save);
}
/*
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index c5f3cd4ed766..9616eca3182f 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -13,7 +13,6 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/delay.h>
-#include <linux/pci-ats.h>
#include "pci.h"
#define VIRTFN_ID_LEN 16
@@ -133,6 +132,8 @@ static void pci_read_vf_config_common(struct pci_dev *virtfn)
&physfn->sriov->subsystem_vendor);
pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID,
&physfn->sriov->subsystem_device);
+
+ physfn->sriov->cfg_size = pci_cfg_space_size(virtfn);
}
int pci_iov_add_virtfn(struct pci_dev *dev, int id)
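
The new cfg_size line caches the config space size probed on the first VF so that sibling VFs can skip the probe. A hedged sketch of how a later lookup might use that cache (example_vf_cfg_size is hypothetical and assumes the VF's physfn backpointer):

/* Illustration only. */
static int example_vf_cfg_size(struct pci_dev *virtfn)
{
	struct pci_dev *physfn = virtfn->physfn;	/* PF backpointer */

	if (physfn->sriov->cfg_size)
		return physfn->sriov->cfg_size;	/* cached from first VF */
	return pci_cfg_space_size(virtfn);	/* slow path: probe */
}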
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index f2ef896464b3..af24ed50a245 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -958,7 +958,6 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
}
}
}
- WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
if (dev->msi_enabled) {
@@ -1028,8 +1027,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (!pci_msi_supported(dev, minvec))
return -EINVAL;
- WARN_ON(!!dev->msi_enabled);
-
/* Check whether driver already requested MSI-X irqs */
if (dev->msix_enabled) {
pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
@@ -1039,6 +1036,9 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msi_enabled))
+ return -EINVAL;
+
nvec = pci_msi_vec_count(dev);
if (nvec < 0)
return nvec;
@@ -1087,6 +1087,9 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (maxvec < minvec)
return -ERANGE;
+ if (WARN_ON_ONCE(dev->msix_enabled))
+ return -EINVAL;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
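
The WARN_ON_ONCE guards move the double-enable check after the -ERANGE check and turn it into a hard -EINVAL. From a driver's perspective (the fragment below is hypothetical):

/* Hypothetical driver fragment. */
static int example_alloc_twice(struct pci_dev *pdev)
{
	int ret = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);

	if (ret < 0)
		return ret;

	/*
	 * Bug: no pci_free_irq_vectors() first; this now fails with -EINVAL
	 * and a one-time warning instead of corrupting MSI state.
	 */
	return pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSI);
}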
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 1836b8ddf292..4c4217d0c3f1 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -355,107 +355,6 @@ failed:
EXPORT_SYMBOL_GPL(devm_of_pci_get_host_bridge_resources);
#endif /* CONFIG_OF_ADDRESS */
-/**
- * of_pci_map_rid - Translate a requester ID through a downstream mapping.
- * @np: root complex device node.
- * @rid: PCI requester ID to map.
- * @map_name: property name of the map to use.
- * @map_mask_name: optional property name of the mask to use.
- * @target: optional pointer to a target device node.
- * @id_out: optional pointer to receive the translated ID.
- *
- * Given a PCI requester ID, look up the appropriate implementation-defined
- * platform ID and/or the target device which receives transactions on that
- * ID, as per the "iommu-map" and "msi-map" bindings. Either of @target or
- * @id_out may be NULL if only the other is required. If @target points to
- * a non-NULL device node pointer, only entries targeting that node will be
- * matched; if it points to a NULL value, it will receive the device node of
- * the first matching target phandle, with a reference held.
- *
- * Return: 0 on success or a standard error code on failure.
- */
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- u32 map_mask, masked_rid;
- int map_len;
- const __be32 *map = NULL;
-
- if (!np || !map_name || (!target && !id_out))
- return -EINVAL;
-
- map = of_get_property(np, map_name, &map_len);
- if (!map) {
- if (target)
- return -ENODEV;
- /* Otherwise, no map implies no translation */
- *id_out = rid;
- return 0;
- }
-
- if (!map_len || map_len % (4 * sizeof(*map))) {
- pr_err("%pOF: Error: Bad %s length: %d\n", np,
- map_name, map_len);
- return -EINVAL;
- }
-
- /* The default is to select all bits. */
- map_mask = 0xffffffff;
-
- /*
- * Can be overridden by "{iommu,msi}-map-mask" property.
- * If of_property_read_u32() fails, the default is used.
- */
- if (map_mask_name)
- of_property_read_u32(np, map_mask_name, &map_mask);
-
- masked_rid = map_mask & rid;
- for ( ; map_len > 0; map_len -= 4 * sizeof(*map), map += 4) {
- struct device_node *phandle_node;
- u32 rid_base = be32_to_cpup(map + 0);
- u32 phandle = be32_to_cpup(map + 1);
- u32 out_base = be32_to_cpup(map + 2);
- u32 rid_len = be32_to_cpup(map + 3);
-
- if (rid_base & ~map_mask) {
- pr_err("%pOF: Invalid %s translation - %s-mask (0x%x) ignores rid-base (0x%x)\n",
- np, map_name, map_name,
- map_mask, rid_base);
- return -EFAULT;
- }
-
- if (masked_rid < rid_base || masked_rid >= rid_base + rid_len)
- continue;
-
- phandle_node = of_find_node_by_phandle(phandle);
- if (!phandle_node)
- return -ENODEV;
-
- if (target) {
- if (*target)
- of_node_put(phandle_node);
- else
- *target = phandle_node;
-
- if (*target != phandle_node)
- continue;
- }
-
- if (id_out)
- *id_out = masked_rid - rid_base + out_base;
-
- pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n",
- np, map_name, map_mask, rid_base, out_base,
- rid_len, rid, masked_rid - rid_base + out_base);
- return 0;
- }
-
- pr_err("%pOF: Invalid %s translation - no match for rid 0x%x on %pOF\n",
- np, map_name, rid, target && *target ? *target : NULL);
- return -EFAULT;
-}
-
#if IS_ENABLED(CONFIG_OF_IRQ)
/**
* of_irq_parse_pci - Resolve the interrupt for a PCI device
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
new file mode 100644
index 000000000000..ae3c5b25dcc7
--- /dev/null
+++ b/drivers/pci/p2pdma.c
@@ -0,0 +1,805 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#define pr_fmt(fmt) "pci-p2pdma: " fmt
+#include <linux/ctype.h>
+#include <linux/pci-p2pdma.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+#include <linux/memremap.h>
+#include <linux/percpu-refcount.h>
+#include <linux/random.h>
+#include <linux/seq_buf.h>
+
+struct pci_p2pdma {
+ struct percpu_ref devmap_ref;
+ struct completion devmap_ref_done;
+ struct gen_pool *pool;
+ bool p2pmem_published;
+};
+
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t size = 0;
+
+ if (pdev->p2pdma->pool)
+ size = gen_pool_size(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", size);
+}
+static DEVICE_ATTR_RO(size);
+
+static ssize_t available_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ size_t avail = 0;
+
+ if (pdev->p2pdma->pool)
+ avail = gen_pool_avail(pdev->p2pdma->pool);
+
+ return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
+}
+static DEVICE_ATTR_RO(available);
+
+static ssize_t published_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ pdev->p2pdma->p2pmem_published);
+}
+static DEVICE_ATTR_RO(published);
+
+static struct attribute *p2pmem_attrs[] = {
+ &dev_attr_size.attr,
+ &dev_attr_available.attr,
+ &dev_attr_published.attr,
+ NULL,
+};
+
+static const struct attribute_group p2pmem_group = {
+ .attrs = p2pmem_attrs,
+ .name = "p2pmem",
+};
+
+static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
+{
+ struct pci_p2pdma *p2p =
+ container_of(ref, struct pci_p2pdma, devmap_ref);
+
+ complete_all(&p2p->devmap_ref_done);
+}
+
+static void pci_p2pdma_percpu_kill(void *data)
+{
+ struct percpu_ref *ref = data;
+
+ /*
+ * pci_p2pdma_add_resource() may be called multiple times
+ * by a driver and may register the percpu_kill devm action multiple
+ * times. We only want the first action to actually kill the
+ * percpu_ref.
+ */
+ if (percpu_ref_is_dying(ref))
+ return;
+
+ percpu_ref_kill(ref);
+}
+
+static void pci_p2pdma_release(void *data)
+{
+ struct pci_dev *pdev = data;
+
+ if (!pdev->p2pdma)
+ return;
+
+ wait_for_completion(&pdev->p2pdma->devmap_ref_done);
+ percpu_ref_exit(&pdev->p2pdma->devmap_ref);
+
+ gen_pool_destroy(pdev->p2pdma->pool);
+ sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
+ pdev->p2pdma = NULL;
+}
+
+static int pci_p2pdma_setup(struct pci_dev *pdev)
+{
+ int error = -ENOMEM;
+ struct pci_p2pdma *p2p;
+
+ p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
+ if (!p2p)
+ return -ENOMEM;
+
+ p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
+ if (!p2p->pool)
+ goto out;
+
+ init_completion(&p2p->devmap_ref_done);
+ error = percpu_ref_init(&p2p->devmap_ref,
+ pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+ if (error)
+ goto out_pool_destroy;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
+ if (error)
+ goto out_pool_destroy;
+
+ pdev->p2pdma = p2p;
+
+ error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
+ if (error)
+ goto out_pool_destroy;
+
+ return 0;
+
+out_pool_destroy:
+ pdev->p2pdma = NULL;
+ gen_pool_destroy(p2p->pool);
+out:
+ devm_kfree(&pdev->dev, p2p);
+ return error;
+}
+
+/**
+ * pci_p2pdma_add_resource - add memory for use as p2p memory
+ * @pdev: the device to add the memory to
+ * @bar: PCI BAR to add
+ * @size: size of the memory to add, may be zero to use the whole BAR
+ * @offset: offset into the PCI BAR
+ *
+ * The memory will be given ZONE_DEVICE struct pages so that it may
+ * be used with any DMA request.
+ */
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset)
+{
+ struct dev_pagemap *pgmap;
+ void *addr;
+ int error;
+
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ return -EINVAL;
+
+ if (offset >= pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!size)
+ size = pci_resource_len(pdev, bar) - offset;
+
+ if (size + offset > pci_resource_len(pdev, bar))
+ return -EINVAL;
+
+ if (!pdev->p2pdma) {
+ error = pci_p2pdma_setup(pdev);
+ if (error)
+ return error;
+ }
+
+ pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
+ if (!pgmap)
+ return -ENOMEM;
+
+ pgmap->res.start = pci_resource_start(pdev, bar) + offset;
+ pgmap->res.end = pgmap->res.start + size - 1;
+ pgmap->res.flags = pci_resource_flags(pdev, bar);
+ pgmap->ref = &pdev->p2pdma->devmap_ref;
+ pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+ pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
+ pci_resource_start(pdev, bar);
+
+ addr = devm_memremap_pages(&pdev->dev, pgmap);
+ if (IS_ERR(addr)) {
+ error = PTR_ERR(addr);
+ goto pgmap_free;
+ }
+
+ error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
+ pci_bus_address(pdev, bar) + offset,
+ resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+ if (error)
+ goto pgmap_free;
+
+ error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_percpu_kill,
+ &pdev->p2pdma->devmap_ref);
+ if (error)
+ goto pgmap_free;
+
+ pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
+ &pgmap->res);
+
+ return 0;
+
+pgmap_free:
+ devm_kfree(&pdev->dev, pgmap);
+ return error;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
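As a sketch of the provider side, a hypothetical driver whose device exposes usable memory in BAR 4 could register and publish it as follows; the demo_* names and the BAR number are assumptions, not part of this patch:

	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>

	/* Hypothetical provider probe: expose all of BAR 4 as p2p memory. */
	static int demo_provider_probe(struct pci_dev *pdev,
				       const struct pci_device_id *id)
	{
		int rc;

		/* size 0 means "use the whole BAR", offset 0 from its start */
		rc = pci_p2pdma_add_resource(pdev, 4, 0, 0);
		if (rc)
			return rc;

		/* let other drivers discover this memory via pci_p2pmem_find() */
		pci_p2pmem_publish(pdev, true);
		return 0;
	}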
+
+/*
+ * Note this function returns the parent PCI device with a
+ * reference taken. It is the caller's responsibility to drop
+ * the reference.
+ */
+static struct pci_dev *find_parent_pci_dev(struct device *dev)
+{
+ struct device *parent;
+
+ dev = get_device(dev);
+
+ while (dev) {
+ if (dev_is_pci(dev))
+ return to_pci_dev(dev);
+
+ parent = get_device(dev->parent);
+ put_device(dev);
+ dev = parent;
+ }
+
+ return NULL;
+}
+
+/*
+ * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
+ * TLPs upstream via ACS. Returns 1 if the packets will be redirected
+ * upstream, 0 otherwise.
+ */
+static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
+{
+ int pos;
+ u16 ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return 0;
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+
+ if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
+ return 1;
+
+ return 0;
+}
+
+static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
+{
+ if (!buf)
+ return;
+
+ seq_buf_printf(buf, "%s;", pci_name(pdev));
+}
+
+/*
+ * Find the distance through the nearest common upstream bridge between
+ * two PCI devices.
+ *
+ * If the two devices are the same device then 0 will be returned.
+ *
+ * If there are two virtual functions of the same device behind the same
+ * bridge port then 2 will be returned (one step down to the PCIe switch,
+ * then one step back to the same device).
+ *
+ * In the case where two devices are connected to the same PCIe switch, the
+ * value 4 will be returned. This corresponds to the following PCI tree:
+ *
+ * -+ Root Port
+ * \+ Switch Upstream Port
+ * +-+ Switch Downstream Port
+ * + \- Device A
+ * \-+ Switch Downstream Port
+ * \- Device B
+ *
+ * The distance is 4 because we traverse from Device A through the downstream
+ * port of the switch, to the common upstream port, back up to the second
+ * downstream port and then to Device B.
+ *
+ * Any two devices that don't have a common upstream bridge will return -1.
+ * In this way devices on separate PCIe root ports will be rejected, which
+ * is what we want for peer-to-peer, since each PCIe root port defines a
+ * separate hierarchy domain and there's no way to determine whether the root
+ * complex supports forwarding between them.
+ *
+ * In the case where two devices are connected to different PCIe switches,
+ * this function will still return a positive distance as long as both
+ * switches eventually have a common upstream bridge. Note this covers
+ * the case of using multiple PCIe switches to achieve a desired level of
+ * fan-out from a root port. The exact distance will be a function of the
+ * number of switches between Device A and Device B.
+ *
+ * If a bridge which has any ACS redirection bits set is in the path
+ * then this function will return -2. This is so we reject any
+ * cases where the TLPs are forwarded up into the root complex.
+ * In this case, a list of all infringing bridge addresses will be
+ * populated in acs_list (assuming it's non-null) for printk purposes.
+ */
+static int upstream_bridge_distance(struct pci_dev *a,
+ struct pci_dev *b,
+ struct seq_buf *acs_list)
+{
+ int dist_a = 0;
+ int dist_b = 0;
+ struct pci_dev *bb = NULL;
+ int acs_cnt = 0;
+
+ /*
+ * Note, we don't need to take references to devices returned by
+ * pci_upstream_bridge() because we hold a reference to a child
+ * device which will already hold a reference to the upstream bridge.
+ */
+
+ while (a) {
+ dist_b = 0;
+
+ if (pci_bridge_has_acs_redir(a)) {
+ seq_buf_print_bus_devfn(acs_list, a);
+ acs_cnt++;
+ }
+
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ goto check_b_path_acs;
+
+ bb = pci_upstream_bridge(bb);
+ dist_b++;
+ }
+
+ a = pci_upstream_bridge(a);
+ dist_a++;
+ }
+
+ return -1;
+
+check_b_path_acs:
+ bb = b;
+
+ while (bb) {
+ if (a == bb)
+ break;
+
+ if (pci_bridge_has_acs_redir(bb)) {
+ seq_buf_print_bus_devfn(acs_list, bb);
+ acs_cnt++;
+ }
+
+ bb = pci_upstream_bridge(bb);
+ }
+
+ if (acs_cnt)
+ return -2;
+
+ return dist_a + dist_b;
+}
+
+static int upstream_bridge_distance_warn(struct pci_dev *provider,
+ struct pci_dev *client)
+{
+ struct seq_buf acs_list;
+ int ret;
+
+ seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
+ if (!acs_list.buffer)
+ return -ENOMEM;
+
+ ret = upstream_bridge_distance(provider, client, &acs_list);
+ if (ret == -2) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as ACS redirect is set between the client and provider (%s)\n",
+ pci_name(provider));
+ /* Drop final semicolon */
+ acs_list.buffer[acs_list.len-1] = 0;
+ pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
+ acs_list.buffer);
+
+ } else if (ret < 0) {
+ pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge\n",
+ pci_name(provider));
+ }
+
+ kfree(acs_list.buffer);
+
+ return ret;
+}
+
+/**
+ * pci_p2pdma_distance_many - Determine the cumulative distance between
+ * a p2pdma provider and the clients in use.
+ * @provider: p2pdma provider to check against the client list
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of clients in the array
+ * @verbose: if true, print warnings for devices when we return -1
+ *
+ * Returns -1 if any of the clients are not compatible (behind the same
+ * root port as the provider), otherwise returns a positive number where
+ * a lower number is the preferable choice. (If there's one client
+ * that's the same as the provider, it will return 0, which is the best choice).
+ *
+ * For now, "compatible" means the provider and the clients are all behind
+ * the same PCI root port. This cuts out cases that may work, but it is the
+ * safest choice for the user. Future work can expand this to whitelist
+ * root complexes that can safely forward between their ports.
+ */
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose)
+{
+ bool not_supported = false;
+ struct pci_dev *pci_client;
+ int distance = 0;
+ int i, ret;
+
+ if (num_clients == 0)
+ return -1;
+
+ for (i = 0; i < num_clients; i++) {
+ pci_client = find_parent_pci_dev(clients[i]);
+ if (!pci_client) {
+ if (verbose)
+ dev_warn(clients[i],
+ "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
+ return -1;
+ }
+
+ if (verbose)
+ ret = upstream_bridge_distance_warn(provider,
+ pci_client);
+ else
+ ret = upstream_bridge_distance(provider, pci_client,
+ NULL);
+
+ pci_dev_put(pci_client);
+
+ if (ret < 0)
+ not_supported = true;
+
+ if (not_supported && !verbose)
+ break;
+
+ distance += ret;
+ }
+
+ if (not_supported)
+ return -1;
+
+ return distance;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);
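A sketch of how a consumer might run this check before committing to peer-to-peer transfers; the demo_* wrapper and the client devices are hypothetical:

	#include <linux/kernel.h>
	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>

	/* Returns true if 'provider' can reach both clients through a switch. */
	static bool demo_p2p_usable(struct pci_dev *provider,
				    struct device *client_a,
				    struct device *client_b)
	{
		struct device *clients[] = { client_a, client_b };

		/* verbose=true logs the offending ACS bridges when this fails */
		return pci_p2pdma_distance_many(provider, clients,
						ARRAY_SIZE(clients), true) >= 0;
	}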
+
+/**
+ * pci_has_p2pmem - check if a given PCI device has published any p2pmem
+ * @pdev: PCI device to check
+ */
+bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
+}
+EXPORT_SYMBOL_GPL(pci_has_p2pmem);
+
+/**
+ * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with
+ * the specified list of clients and shortest distance (as determined
+ * by pci_p2pdma_distance_many())
+ * @clients: array of devices to check (NULL-terminated)
+ * @num_clients: number of client devices in the list
+ *
+ * If multiple devices are behind the same switch, the one "closest" to the
+ * client devices in use will be chosen first. (So if one of the providers is
+ * the same as one of the clients, that provider will be used ahead of any
+ * other providers that are unrelated). If multiple providers are an equal
+ * distance away, one will be chosen at random.
+ *
+ * Returns a pointer to the PCI device with a reference taken (use pci_dev_put
+ * to return the reference) or NULL if no compatible device is found. The
+ * found provider will also be assigned to the client list.
+ */
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
+{
+ struct pci_dev *pdev = NULL;
+ int distance;
+ int closest_distance = INT_MAX;
+ struct pci_dev **closest_pdevs;
+ int dev_cnt = 0;
+ const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
+ int i;
+
+ closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!closest_pdevs)
+ return NULL;
+
+ while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+ if (!pci_has_p2pmem(pdev))
+ continue;
+
+ distance = pci_p2pdma_distance_many(pdev, clients,
+ num_clients, false);
+ if (distance < 0 || distance > closest_distance)
+ continue;
+
+ if (distance == closest_distance && dev_cnt >= max_devs)
+ continue;
+
+ if (distance < closest_distance) {
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ dev_cnt = 0;
+ closest_distance = distance;
+ }
+
+ closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
+ }
+
+ if (dev_cnt)
+ pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);
+
+ for (i = 0; i < dev_cnt; i++)
+ pci_dev_put(closest_pdevs[i]);
+
+ kfree(closest_pdevs);
+ return pdev;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
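Conversely, a consumer with no particular provider in mind can let the core choose, as in this hypothetical sketch:

	#include <linux/kernel.h>
	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>

	/* Pick the closest published provider for two client devices. */
	static struct pci_dev *demo_pick_provider(struct device *client_a,
						  struct device *client_b)
	{
		struct device *clients[] = { client_a, client_b };

		/* reference counted: caller must pci_dev_put() when finished */
		return pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
	}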
+
+/**
+ * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
+ * @pdev: the device to allocate memory from
+ * @size: number of bytes to allocate
+ *
+ * Returns the allocated memory or NULL on error.
+ */
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ void *ret;
+
+ if (unlikely(!pdev->p2pdma))
+ return NULL;
+
+ if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
+ return NULL;
+
+ ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
+
+ if (unlikely(!ret))
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
+
+/**
+ * pci_free_p2pmem - free peer-to-peer DMA memory
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ * @size: number of bytes that was allocated
+ */
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
+{
+ gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
+ percpu_ref_put(&pdev->p2pdma->devmap_ref);
+}
+EXPORT_SYMBOL_GPL(pci_free_p2pmem);
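The allocator pairs naturally with pci_p2pmem_virt_to_bus() below; a minimal allocate/use/free cycle might look like this hypothetical sketch:

	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>
	#include <linux/sizes.h>

	/* Hypothetical allocate/use/free cycle against a provider's pool. */
	static void demo_alloc_cycle(struct pci_dev *provider)
	{
		pci_bus_addr_t bus_addr;
		void *buf;

		buf = pci_alloc_p2pmem(provider, SZ_4K);
		if (!buf)
			return;

		/* the address a peer device would be programmed to DMA to */
		bus_addr = pci_p2pmem_virt_to_bus(provider, buf);

		/* ... hand bus_addr to the peer, wait for completion ... */

		pci_free_p2pmem(provider, buf, SZ_4K);
	}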
+
+/**
+ * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
+ * address obtained with pci_alloc_p2pmem()
+ * @pdev: the device the memory was allocated from
+ * @addr: address of the memory that was allocated
+ */
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
+{
+ if (!addr)
+ return 0;
+ if (!pdev->p2pdma)
+ return 0;
+
+ /*
+ * Note: when we added the memory to the pool we used the PCI
+ * bus address as the physical address. So gen_pool_virt_to_phys()
+ * actually returns the bus address despite the misleading name.
+ */
+ return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);
+
+/**
+ * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
+ * @pdev: the device to allocate memory from
+ * @nents: the number of SG entries in the list
+ * @length: number of bytes to allocate
+ *
+ * Returns the allocated scatterlist or NULL on error.
+ */
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ struct scatterlist *sg;
+ void *addr;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return NULL;
+
+ sg_init_table(sg, 1);
+
+ addr = pci_alloc_p2pmem(pdev, length);
+ if (!addr)
+ goto out_free_sg;
+
+ sg_set_buf(sg, addr, length);
+ *nents = 1;
+ return sg;
+
+out_free_sg:
+ kfree(sg);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);
+
+/**
+ * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
+ * @pdev: the device the memory was allocated from
+ * @sgl: the allocated scatterlist
+ */
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
+{
+ struct scatterlist *sg;
+ int count;
+
+ for_each_sg(sgl, sg, INT_MAX, count) {
+ if (!sg)
+ break;
+
+ pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
+ }
+ kfree(sgl);
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);
+
+/**
+ * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
+ * other devices with pci_p2pmem_find()
+ * @pdev: the device with peer-to-peer DMA memory to publish
+ * @publish: set to true to publish the memory, false to unpublish it
+ *
+ * Published memory can be used by other PCI device drivers for
+ * peer-to-peer DMA operations. Non-published memory is reserved for
+ * exclusive use of the device driver that registers the peer-to-peer
+ * memory.
+ */
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+ if (pdev->p2pdma)
+ pdev->p2pdma->p2pmem_published = publish;
+}
+EXPORT_SYMBOL_GPL(pci_p2pmem_publish);
+
+/**
+ * pci_p2pdma_map_sg - map a PCI peer-to-peer scatterlist for DMA
+ * @dev: device doing the DMA request
+ * @sg: scatter list to map
+ * @nents: elements in the scatterlist
+ * @dir: DMA direction
+ *
+ * Scatterlists mapped with this function should not be unmapped in any way.
+ *
+ * Returns the number of SG entries mapped or 0 on error.
+ */
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{
+ struct dev_pagemap *pgmap;
+ struct scatterlist *s;
+ phys_addr_t paddr;
+ int i;
+
+ /*
+ * p2pdma mappings are not compatible with devices that use
+ * dma_virt_ops. If the upper layers do the right thing
+ * this should never happen because it will be prevented
+ * by the check in pci_p2pdma_distance_many()
+ */
+ if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
+ dev->dma_ops == &dma_virt_ops))
+ return 0;
+
+ for_each_sg(sg, s, nents, i) {
+ pgmap = sg_page(s)->pgmap;
+ paddr = sg_phys(s);
+
+ s->dma_address = paddr - pgmap->pci_p2pdma_bus_offset;
+ sg_dma_len(s) = s->length;
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg);
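Putting the scatterlist helpers and the mapping together, a hypothetical consumer path could look like the following; all demo_* names are assumptions:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>
	#include <linux/sizes.h>

	/* Build a one-entry p2p scatterlist and map it for a DMA master. */
	static int demo_map_for_dma(struct pci_dev *provider,
				    struct device *dma_dev)
	{
		struct scatterlist *sgl;
		unsigned int nents;

		sgl = pci_p2pmem_alloc_sgl(provider, &nents, SZ_64K);
		if (!sgl)
			return -ENOMEM;

		/* only rewrites addresses to bus offsets; there is no unmap step */
		if (!pci_p2pdma_map_sg(dma_dev, sgl, nents, DMA_TO_DEVICE)) {
			pci_p2pmem_free_sgl(provider, sgl);
			return -EIO;
		}

		/* ... submit sgl to the DMA engine ... */
		return 0;
	}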
+
+/**
+ * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
+ * to enable p2pdma
+ * @page: contents of the value to be stored
+ * @p2p_dev: returns the PCI device that was selected to be used
+ * (if one was specified in the stored value)
+ * @use_p2pdma: returns whether to enable p2pdma or not
+ *
+ * Parses an attribute value to decide whether to enable p2pdma.
+ * The value can select a PCI device (using its full BDF device
+ * name) or a boolean (in any format strtobool() accepts). A false
+ * value disables p2pdma; a true value tells the caller to
+ * automatically find a compatible device; naming a specific PCI
+ * device tells the caller to use that provider.
+ *
+ * pci_p2pdma_enable_show() should be used as the show operation for
+ * the attribute.
+ *
+ * Returns 0 on success
+ */
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
+ if (dev) {
+ *use_p2pdma = true;
+ *p2p_dev = to_pci_dev(dev);
+
+ if (!pci_has_p2pmem(*p2p_dev)) {
+ pci_err(*p2p_dev,
+ "PCI device has no peer-to-peer memory: %s\n",
+ page);
+ pci_dev_put(*p2p_dev);
+ return -ENODEV;
+ }
+
+ return 0;
+ } else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
+ /*
+ * If the user enters a PCI device that doesn't exist
+ * like "0000:01:00.1", we don't want strtobool to think
+ * it's a '0' when it's clearly not what the user wanted.
+ * So we require 0's and 1's to be exactly one character.
+ */
+ } else if (!strtobool(page, use_p2pdma)) {
+ return 0;
+ }
+
+ pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
+ return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);
+
+/**
+ * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
+ * whether p2pdma is enabled
+ * @page: contents of the stored value
+ * @p2p_dev: the selected p2p device (NULL if no device is selected)
+ * @use_p2pdma: whether p2pdma has been enabled
+ *
+ * Attributes that use pci_p2pdma_enable_store() should use this function
+ * to show the value of the attribute.
+ *
+ * Returns the number of bytes printed.
+ */
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma)
+{
+ if (!use_p2pdma)
+ return sprintf(page, "0\n");
+
+ if (!p2p_dev)
+ return sprintf(page, "1\n");
+
+ return sprintf(page, "%s\n", pci_name(p2p_dev));
+}
+EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
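These two helpers are designed to drop into a configfs attribute pair; a hypothetical binding, modeled loosely on the NVMe target's p2pmem attribute (the demo_* names are assumptions):

	#include <linux/configfs.h>
	#include <linux/pci.h>
	#include <linux/pci-p2pdma.h>

	static struct pci_dev *demo_p2p_dev;
	static bool demo_use_p2pdma;

	static ssize_t demo_p2pmem_show(struct config_item *item, char *page)
	{
		return pci_p2pdma_enable_show(page, demo_p2p_dev,
					      demo_use_p2pdma);
	}

	static ssize_t demo_p2pmem_store(struct config_item *item,
					 const char *page, size_t count)
	{
		int rc = pci_p2pdma_enable_store(page, &demo_p2p_dev,
						 &demo_use_p2pdma);

		return rc ? rc : count;
	}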
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index c2ab57705043..2a4aa6468579 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -519,6 +519,46 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
return PCI_POWER_ERROR;
}
+static struct acpi_device *acpi_pci_find_companion(struct device *dev);
+
+static bool acpi_pci_bridge_d3(struct pci_dev *dev)
+{
+ const struct fwnode_handle *fwnode;
+ struct acpi_device *adev;
+ struct pci_dev *root;
+ u8 val;
+
+ if (!dev->is_hotplug_bridge)
+ return false;
+
+ /*
+ * Look for a special _DSD property for the root port and if it
+ * is set we know the hierarchy behind it supports D3 just fine.
+ */
+ root = pci_find_pcie_root_port(dev);
+ if (!root)
+ return false;
+
+ adev = ACPI_COMPANION(&root->dev);
+ if (root == dev) {
+ /*
+ * It is possible that the ACPI companion is not yet bound
+ * for the root port so look it up manually here.
+ */
+ if (!adev && !pci_dev_is_added(root))
+ adev = acpi_pci_find_companion(&root->dev);
+ }
+
+ if (!adev)
+ return false;
+
+ fwnode = acpi_fwnode_handle(adev);
+ if (fwnode_property_read_u8(fwnode, "HotPlugSupportInD3", &val))
+ return false;
+
+ return val == 1;
+}
+
static bool acpi_pci_power_manageable(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -548,6 +588,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
error = -EBUSY;
break;
}
+ /* Fall through */
case PCI_D0:
case PCI_D1:
case PCI_D2:
@@ -635,6 +676,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
}
static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
+ .bridge_d3 = acpi_pci_bridge_d3,
.is_manageable = acpi_pci_power_manageable,
.set_state = acpi_pci_set_power_state,
.get_state = acpi_pci_get_power_state,
@@ -751,10 +793,15 @@ static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
struct acpi_device *adev = ACPI_COMPANION(dev);
+ int node;
if (!adev)
return;
+ node = acpi_get_node(adev->handle);
+ if (node != NUMA_NO_NODE)
+ set_dev_node(dev, node);
+
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_add_pm_notifier(adev, pci_dev);
@@ -762,19 +809,33 @@ static void pci_acpi_setup(struct device *dev)
return;
device_set_wakeup_capable(dev, true);
+ /*
+ * For bridges that can do D3 we enable wake automatically (as
+ * we do for the power management itself in that case). The
+ * reason is that the bridge may have additional methods such as
+ * _DSW that need to be called.
+ */
+ if (pci_dev->bridge_d3)
+ device_wakeup_enable(dev);
+
acpi_pci_wakeup(pci_dev, false);
}
static void pci_acpi_cleanup(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct pci_dev *pci_dev = to_pci_dev(dev);
if (!adev)
return;
pci_acpi_remove_pm_notifier(adev);
- if (adev->wakeup.flags.valid)
+ if (adev->wakeup.flags.valid) {
+ if (pci_dev->bridge_d3)
+ device_wakeup_disable(dev);
+
device_set_wakeup_capable(dev, false);
+ }
}
static bool pci_acpi_bus_match(struct device *dev)
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
new file mode 100644
index 000000000000..129738362d90
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Marvell
+ *
+ * Author: Thomas Petazzoni <thomas.petazzoni@bootlin.com>
+ *
+ * This file helps PCI controller drivers implement a fake root port
+ * PCI bridge when the HW doesn't provide one.
+ *
+ * It emulates a PCI bridge by providing a fake PCI configuration
+ * space (and optionally a PCIe capability configuration space) in
+ * memory. By default the read/write operations simply read and update
+ * this fake configuration space in memory. However, PCI controller
+ * drivers can provide through the 'struct pci_bridge_emul_ops'
+ * structure a set of operations to override or complement this
+ * default behavior.
+ */
+
+#include <linux/pci.h>
+#include "pci-bridge-emul.h"
+
+#define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF
+#define PCI_CAP_PCIE_START PCI_BRIDGE_CONF_END
+#define PCI_CAP_PCIE_END (PCI_CAP_PCIE_START + PCI_EXP_SLTSTA2 + 2)
+
+/*
+ * Initialize a pci_bridge_emul structure to represent a fake PCI
+ * bridge configuration space. The caller needs to have initialized
+ * the PCI configuration space with whatever values make sense
+ * (typically at least vendor, device, revision), the ->ops pointer,
+ * and optionally ->data and ->has_pcie.
+ */
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge)
+{
+ bridge->conf.class_revision |= PCI_CLASS_BRIDGE_PCI << 16;
+ bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
+ bridge->conf.cache_line_size = 0x10;
+ bridge->conf.status = PCI_STATUS_CAP_LIST;
+
+ if (bridge->has_pcie) {
+ bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
+ /* Set PCIe v2, root port, slot support */
+ bridge->pcie_conf.cap = PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
+ PCI_EXP_FLAGS_SLOT;
+ }
+}
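A controller driver embeds a pci_bridge_emul, seeds the identifying fields and calls the init helper. A hypothetical sketch; the vendor/device IDs and the empty ops structure are assumptions:

	#include "pci-bridge-emul.h"

	static struct pci_bridge_emul_ops demo_bridge_ops; /* all defaults */

	static struct pci_bridge_emul demo_bridge = {
		.has_pcie = true,
		.ops = &demo_bridge_ops,
	};

	static void demo_setup_bridge(void)
	{
		demo_bridge.conf.vendor = 0x1234;	/* hypothetical IDs */
		demo_bridge.conf.device = 0x5678;
		pci_bridge_emul_init(&demo_bridge);
	}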
+
+struct pci_bridge_reg_behavior {
+ /* Read-only bits */
+ u32 ro;
+
+ /* Read-write bits */
+ u32 rw;
+
+ /* Write-1-to-clear bits */
+ u32 w1c;
+
+ /* Reserved bits (hardwired to 0) */
+ u32 rsvd;
+};
+
+static const struct pci_bridge_reg_behavior pci_regs_behavior[] = {
+ [PCI_VENDOR_ID / 4] = { .ro = ~0 },
+ [PCI_COMMAND / 4] = {
+ .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_SERR),
+ .ro = ((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT |
+ PCI_COMMAND_FAST_BACK) |
+ (PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ |
+ PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16),
+ .rsvd = GENMASK(15, 10) | ((BIT(6) | GENMASK(3, 0)) << 16),
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+ },
+ [PCI_CLASS_REVISION / 4] = { .ro = ~0 },
+
+ /*
+ * Cache Line Size register: implemented as read-only, as we do
+ * not pretend to implement "Memory Write and Invalidate"
+ * transactions.
+ *
+ * Latency Timer Register: implemented as read-only, as "A
+ * bridge that is not capable of a burst transfer of more than
+ * two data phases on its primary interface is permitted to
+ * hardwire the Latency Timer to a value of 16 or less"
+ *
+ * Header Type: always read-only
+ *
+ * BIST register: implemented as read-only, as "A bridge that
+ * does not support BIST must implement this register as a
+ * read-only register that returns 0 when read"
+ */
+ [PCI_CACHE_LINE_SIZE / 4] = { .ro = ~0 },
+
+ /*
+ * Base Address registers not used must be implemented as
+ * read-only registers that return 0 when read.
+ */
+ [PCI_BASE_ADDRESS_0 / 4] = { .ro = ~0 },
+ [PCI_BASE_ADDRESS_1 / 4] = { .ro = ~0 },
+
+ [PCI_PRIMARY_BUS / 4] = {
+ /* Primary, secondary and subordinate bus are RW */
+ .rw = GENMASK(23, 0),
+ /* Secondary latency is read-only */
+ .ro = GENMASK(31, 24),
+ },
+
+ [PCI_IO_BASE / 4] = {
+ /* The high four bits of I/O base/limit are RW */
+ .rw = (GENMASK(15, 12) | GENMASK(7, 4)),
+
+ /* The low four bits of I/O base/limit are RO */
+ .ro = (((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16) |
+ GENMASK(11, 8) | GENMASK(3, 0)),
+
+ .w1c = (PCI_STATUS_PARITY |
+ PCI_STATUS_SIG_TARGET_ABORT |
+ PCI_STATUS_REC_TARGET_ABORT |
+ PCI_STATUS_REC_MASTER_ABORT |
+ PCI_STATUS_SIG_SYSTEM_ERROR |
+ PCI_STATUS_DETECTED_PARITY) << 16,
+
+ .rsvd = ((BIT(6) | GENMASK(4, 0)) << 16),
+ },
+
+ [PCI_MEMORY_BASE / 4] = {
+ /* The high 12-bits of mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_MEMORY_BASE / 4] = {
+ /* The high 12-bits of pref mem base/limit are RW */
+ .rw = GENMASK(31, 20) | GENMASK(15, 4),
+
+ /* The low four bits of pref mem base/limit are RO */
+ .ro = GENMASK(19, 16) | GENMASK(3, 0),
+ },
+
+ [PCI_PREF_BASE_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_PREF_LIMIT_UPPER32 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_IO_BASE_UPPER16 / 4] = {
+ .rw = ~0,
+ },
+
+ [PCI_CAPABILITY_LIST / 4] = {
+ .ro = GENMASK(7, 0),
+ .rsvd = GENMASK(31, 8),
+ },
+
+ [PCI_ROM_ADDRESS1 / 4] = {
+ .rw = GENMASK(31, 11) | BIT(0),
+ .rsvd = GENMASK(10, 1),
+ },
+
+ /*
+ * Interrupt line (bits 7:0) are RW, interrupt pin (bits 15:8)
+ * are RO, and bridge control (31:16) are a mix of RW, RO,
+ * reserved and W1C bits
+ */
+ [PCI_INTERRUPT_LINE / 4] = {
+ /* Interrupt line is RW */
+ .rw = (GENMASK(7, 0) |
+ ((PCI_BRIDGE_CTL_PARITY |
+ PCI_BRIDGE_CTL_SERR |
+ PCI_BRIDGE_CTL_ISA |
+ PCI_BRIDGE_CTL_VGA |
+ PCI_BRIDGE_CTL_MASTER_ABORT |
+ PCI_BRIDGE_CTL_BUS_RESET |
+ BIT(8) | BIT(9) | BIT(11)) << 16)),
+
+ /* Interrupt pin is RO */
+ .ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)),
+
+ .w1c = BIT(10) << 16,
+
+ .rsvd = (GENMASK(15, 12) | BIT(4)) << 16,
+ },
+};
+
+static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[] = {
+ [PCI_CAP_LIST_ID / 4] = {
+ /*
+ * Capability ID, Next Capability Pointer and
+ * Capabilities register are all read-only.
+ */
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_DEVCTL / 4] = {
+ /* Device control register is RW */
+ .rw = GENMASK(15, 0),
+
+ /*
+ * Device status register has 4 bits W1C, then 2 bits
+ * RO, the rest is reserved
+ */
+ .w1c = GENMASK(19, 16),
+ .ro = GENMASK(21, 20),
+ .rsvd = GENMASK(31, 22),
+ },
+
+ [PCI_EXP_LNKCAP / 4] = {
+ /* All bits are RO, except bit 23 which is reserved */
+ .ro = lower_32_bits(~BIT(23)),
+ .rsvd = BIT(23),
+ },
+
+ [PCI_EXP_LNKCTL / 4] = {
+ /*
+ * Link control has bits [1:0] and [11:3] RW, the
+ * other bits are reserved.
+ * Link status has bits [13:0] RO, and bits [15:14]
+ * W1C.
+ */
+ .rw = GENMASK(11, 3) | GENMASK(1, 0),
+ .ro = GENMASK(13, 0) << 16,
+ .w1c = GENMASK(15, 14) << 16,
+ .rsvd = GENMASK(15, 12) | BIT(2),
+ },
+
+ [PCI_EXP_SLTCAP / 4] = {
+ .ro = ~0,
+ },
+
+ [PCI_EXP_SLTCTL / 4] = {
+ /*
+ * Slot control has bits [12:0] RW, the rest is
+ * reserved.
+ *
+ * Slot status has a mix of W1C and RO bits, as well
+ * as reserved bits.
+ */
+ .rw = GENMASK(12, 0),
+ .w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
+ PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16,
+ .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS |
+ PCI_EXP_SLTSTA_EIS) << 16,
+ .rsvd = GENMASK(15, 13) | (GENMASK(15, 9) << 16),
+ },
+
+ [PCI_EXP_RTCTL / 4] = {
+ /*
+ * Root control has bits [4:0] RW, the rest is
+ * reserved.
+ *
+ * Root status has bit 0 RO, the rest is reserved.
+ */
+ .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
+ PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE |
+ PCI_EXP_RTCTL_CRSSVE),
+ .ro = PCI_EXP_RTCAP_CRSVIS << 16,
+ .rsvd = GENMASK(15, 5) | (GENMASK(15, 1) << 16),
+ },
+
+ [PCI_EXP_RTSTA / 4] = {
+ .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
+ .w1c = PCI_EXP_RTSTA_PME,
+ .rsvd = GENMASK(31, 18),
+ },
+};
+
+/*
+ * Should be called by the PCI controller driver when reading the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->read_base or ->ops->read_pcie operations.
+ */
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value)
+{
+ int ret;
+ int reg = where & ~3;
+ pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END) {
+ *value = 0;
+ return PCIBIOS_SUCCESSFUL;
+ }
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ read_op = bridge->ops->read_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ read_op = bridge->ops->read_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ if (read_op)
+ ret = read_op(bridge, reg, value);
+ else
+ ret = PCI_BRIDGE_EMUL_NOT_HANDLED;
+
+ if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED)
+ *value = cfgspace[reg / 4];
+
+ /*
+ * Make sure we never return any reserved bit with a value
+ * different from 0.
+ */
+ *value &= ~behavior[reg / 4].rsvd;
+
+ if (size == 1)
+ *value = (*value >> (8 * (where & 3))) & 0xff;
+ else if (size == 2)
+ *value = (*value >> (8 * (where & 3))) & 0xffff;
+ else if (size != 4)
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+/*
+ * Should be called by the PCI controller driver when writing the PCI
+ * configuration space of the fake bridge. It will call back the
+ * ->ops->write_base or ->ops->write_pcie operations.
+ */
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value)
+{
+ int reg = where & ~3;
+ int ret, shift;
+ u32 mask, old, new;
+ void (*write_op)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+ u32 *cfgspace;
+ const struct pci_bridge_reg_behavior *behavior;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ if (!bridge->has_pcie && reg >= PCI_BRIDGE_CONF_END)
+ return PCIBIOS_SUCCESSFUL;
+
+ shift = (where & 0x3) * 8;
+
+ if (size == 4)
+ mask = 0xffffffff;
+ else if (size == 2)
+ mask = 0xffff << shift;
+ else if (size == 1)
+ mask = 0xff << shift;
+ else
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+
+ ret = pci_bridge_emul_conf_read(bridge, reg, 4, &old);
+ if (ret != PCIBIOS_SUCCESSFUL)
+ return ret;
+
+ if (bridge->has_pcie && reg >= PCI_CAP_PCIE_START) {
+ reg -= PCI_CAP_PCIE_START;
+ write_op = bridge->ops->write_pcie;
+ cfgspace = (u32 *) &bridge->pcie_conf;
+ behavior = pcie_cap_regs_behavior;
+ } else {
+ write_op = bridge->ops->write_base;
+ cfgspace = (u32 *) &bridge->conf;
+ behavior = pci_regs_behavior;
+ }
+
+ /* Keep all bits, except the RW bits */
+ new = old & (~mask | ~behavior[reg / 4].rw);
+
+ /* Update the value of the RW bits */
+ new |= (value << shift) & (behavior[reg / 4].rw & mask);
+
+ /* Clear the W1C bits */
+ new &= ~((value << shift) & (behavior[reg / 4].w1c & mask));
+
+ cfgspace[reg / 4] = new;
+
+ if (write_op)
+ write_op(bridge, reg, old, new, mask);
+
+ return PCIBIOS_SUCCESSFUL;
+}
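From the host controller's side, the two entry points slot directly into its pci_ops for accesses that target the emulated root bus. A hypothetical wiring, reusing demo_bridge from the sketch above:

	#include <linux/pci.h>
	#include "pci-bridge-emul.h"

	static int demo_rd_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
	{
		if (bus->number == 0)	/* the emulated root port lives on bus 0 */
			return pci_bridge_emul_conf_read(&demo_bridge, where,
							 size, val);
		/* ... otherwise issue a real config read to the hardware ... */
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	static int demo_wr_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
	{
		if (bus->number == 0)
			return pci_bridge_emul_conf_write(&demo_bridge, where,
							  size, val);
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	static struct pci_ops demo_pci_ops = {
		.read = demo_rd_conf,
		.write = demo_wr_conf,
	};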
diff --git a/drivers/pci/pci-bridge-emul.h b/drivers/pci/pci-bridge-emul.h
new file mode 100644
index 000000000000..9d510ccf738b
--- /dev/null
+++ b/drivers/pci/pci-bridge-emul.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PCI_BRIDGE_EMUL_H__
+#define __PCI_BRIDGE_EMUL_H__
+
+#include <linux/kernel.h>
+
+/* PCI configuration space of a PCI-to-PCI bridge. */
+struct pci_bridge_emul_conf {
+ u16 vendor;
+ u16 device;
+ u16 command;
+ u16 status;
+ u32 class_revision;
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 header_type;
+ u8 bist;
+ u32 bar[2];
+ u8 primary_bus;
+ u8 secondary_bus;
+ u8 subordinate_bus;
+ u8 secondary_latency_timer;
+ u8 iobase;
+ u8 iolimit;
+ u16 secondary_status;
+ u16 membase;
+ u16 memlimit;
+ u16 pref_mem_base;
+ u16 pref_mem_limit;
+ u32 prefbaseupper;
+ u32 preflimitupper;
+ u16 iobaseupper;
+ u16 iolimitupper;
+ u8 capabilities_pointer;
+ u8 reserve[3];
+ u32 romaddr;
+ u8 intline;
+ u8 intpin;
+ u16 bridgectrl;
+};
+
+/* PCI configuration space of the PCIe capabilities */
+struct pci_bridge_emul_pcie_conf {
+ u8 cap_id;
+ u8 next;
+ u16 cap;
+ u32 devcap;
+ u16 devctl;
+ u16 devsta;
+ u32 lnkcap;
+ u16 lnkctl;
+ u16 lnksta;
+ u32 slotcap;
+ u16 slotctl;
+ u16 slotsta;
+ u16 rootctl;
+ u16 rsvd;
+ u32 rootsta;
+ u32 devcap2;
+ u16 devctl2;
+ u16 devsta2;
+ u32 lnkcap2;
+ u16 lnkctl2;
+ u16 lnksta2;
+ u32 slotcap2;
+ u16 slotctl2;
+ u16 slotsta2;
+};
+
+struct pci_bridge_emul;
+
+typedef enum { PCI_BRIDGE_EMUL_HANDLED,
+ PCI_BRIDGE_EMUL_NOT_HANDLED } pci_bridge_emul_read_status_t;
+
+struct pci_bridge_emul_ops {
+ /*
+ * Called when reading from the regular PCI bridge
+ * configuration space. Return PCI_BRIDGE_EMUL_HANDLED when the
+ * operation has handled the read operation and filled in the
+ * *value, or PCI_BRIDGE_EMUL_NOT_HANDLED when the read should
+ * be emulated by the common code by reading from the
+ * in-memory copy of the configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_base)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+
+ /*
+ * Same as ->read_base(), except it is for reading from the
+ * PCIe capability configuration space.
+ */
+ pci_bridge_emul_read_status_t (*read_pcie)(struct pci_bridge_emul *bridge,
+ int reg, u32 *value);
+ /*
+ * Called when writing to the regular PCI bridge configuration
+ * space. old is the current value, new is the new value being
+ * written, and mask indicates which parts of the value are
+ * being changed.
+ */
+ void (*write_base)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+
+ /*
+ * Same as ->write_base(), except it is for writing from the
+ * PCIe capability configuration space.
+ */
+ void (*write_pcie)(struct pci_bridge_emul *bridge, int reg,
+ u32 old, u32 new, u32 mask);
+};
+
+struct pci_bridge_emul {
+ struct pci_bridge_emul_conf conf;
+ struct pci_bridge_emul_pcie_conf pcie_conf;
+ struct pci_bridge_emul_ops *ops;
+ void *data;
+ bool has_pcie;
+};
+
+void pci_bridge_emul_init(struct pci_bridge_emul *bridge);
+int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
+ int size, u32 *value);
+int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
+ int size, u32 value);
+
+#endif /* __PCI_BRIDGE_EMUL_H__ */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 51b6c81671c1..d068f11d08a7 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -35,6 +35,8 @@
#include <linux/aer.h>
#include "pci.h"
+DEFINE_MUTEX(pci_slot_mutex);
+
const char *pci_power_names[] = {
"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
@@ -196,7 +198,7 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
/**
* pci_dev_str_match_path - test if a path string matches a device
* @dev: the PCI device to test
- * @p: string to match the device against
+ * @path: string to match the device against
* @endptr: pointer to the string after the match
*
* Test if a string (typically from a kernel parameter) formatted as a
@@ -791,6 +793,11 @@ static inline bool platform_pci_need_resume(struct pci_dev *dev)
return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}
+static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -999,7 +1006,7 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
* because have already delayed for the bridge.
*/
if (dev->runtime_d3cold) {
- if (dev->d3cold_delay)
+ if (dev->d3cold_delay && !dev->imm_ready)
msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
@@ -1284,6 +1291,7 @@ int pci_save_state(struct pci_dev *dev)
if (i != 0)
return i;
+ pci_save_dpc_state(dev);
return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);
@@ -1389,6 +1397,7 @@ void pci_restore_state(struct pci_dev *dev)
pci_restore_ats_state(dev);
pci_restore_vc_state(dev);
pci_restore_rebar_state(dev);
+ pci_restore_dpc_state(dev);
pci_cleanup_aer_error_status_regs(dev);
@@ -2144,10 +2153,13 @@ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable
int ret = 0;
/*
- * Bridges can only signal wakeup on behalf of subordinate devices,
- * but that is set up elsewhere, so skip them.
+ * Bridges that are not power-manageable directly only signal
+ * wakeup on behalf of subordinate devices which is set up
+ * elsewhere, so skip them. However, bridges that are
+ * power-manageable may signal wakeup for themselves (for example,
+ * on a hotplug event) and they need to be covered here.
*/
- if (pci_has_subordinate(dev))
+ if (!pci_power_manageable(dev))
return 0;
/* Don't do the same thing twice in a row for one device. */
@@ -2522,6 +2534,10 @@ bool pci_bridge_d3_possible(struct pci_dev *bridge)
if (bridge->is_thunderbolt)
return true;
+ /* Platform might know better if the bridge supports D3 */
+ if (platform_pci_bridge_d3(bridge))
+ return true;
+
/*
* Hotplug ports handled natively by the OS were not validated
* by vendors for runtime D3 at least until 2018 because there
@@ -2655,6 +2671,7 @@ EXPORT_SYMBOL_GPL(pci_d3cold_disable);
void pci_pm_init(struct pci_dev *dev)
{
int pm;
+ u16 status;
u16 pmc;
pm_runtime_forbid(&dev->dev);
@@ -2717,6 +2734,10 @@ void pci_pm_init(struct pci_dev *dev)
/* Disable the PME# generation functionality */
pci_pme_active(dev, false);
}
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ if (status & PCI_STATUS_IMM_READY)
+ dev->imm_ready = 1;
}
static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
@@ -4387,6 +4408,9 @@ int pcie_flr(struct pci_dev *dev)
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
* 100ms, but may silently discard requests while the FLR is in
@@ -4428,6 +4452,9 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ if (dev->imm_ready)
+ return 0;
+
/*
* Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
* updated 27 July 2006; a device must complete an FLR within
@@ -4496,21 +4523,42 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
bool ret;
u16 lnk_status;
+ /*
+ * Some controllers might not implement link active reporting. In this
+ * case, we wait for 1000 + 100 ms.
+ */
+ if (!pdev->link_active_reporting) {
+ msleep(1100);
+ return true;
+ }
+
+ /*
+ * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
+ * 20ms, after which we should expect the link to become active if the
+ * reset was successful. If so, software must wait a minimum of 100ms
+ * before sending configuration requests to devices downstream of this
+ * port.
+ *
+ * If the link fails to activate, either the device was physically
+ * removed or the link is permanently failed.
+ */
+ if (active)
+ msleep(20);
for (;;) {
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
if (ret == active)
- return true;
+ break;
if (timeout <= 0)
break;
msleep(10);
timeout -= 10;
}
-
- pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
-
- return false;
+ if (active && ret)
+ msleep(100);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
}
void pci_reset_secondary_bus(struct pci_dev *dev)
@@ -4582,13 +4630,13 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
{
int rc = -ENOTTY;
- if (!hotplug || !try_module_get(hotplug->ops->owner))
+ if (!hotplug || !try_module_get(hotplug->owner))
return rc;
if (hotplug->ops->reset_slot)
rc = hotplug->ops->reset_slot(hotplug, probe);
- module_put(hotplug->ops->owner);
+ module_put(hotplug->owner);
return rc;
}
@@ -5165,6 +5213,41 @@ static int pci_bus_reset(struct pci_bus *bus, int probe)
}
/**
+ * pci_bus_error_reset - reset the bridge's subordinate bus
+ * @bridge: The parent device that connects to the bus to reset
+ *
+ * This function will first try to reset the slots on this bus if the method is
+ * available. If slot reset fails or is not available, this will fall back to a
+ * secondary bus reset.
+ */
+int pci_bus_error_reset(struct pci_dev *bridge)
+{
+ struct pci_bus *bus = bridge->subordinate;
+ struct pci_slot *slot;
+
+ if (!bus)
+ return -ENOTTY;
+
+ mutex_lock(&pci_slot_mutex);
+ if (list_empty(&bus->slots))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_probe_reset_slot(slot))
+ goto bus_reset;
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (pci_slot_reset(slot, 0))
+ goto bus_reset;
+
+ mutex_unlock(&pci_slot_mutex);
+ return 0;
+bus_reset:
+ mutex_unlock(&pci_slot_mutex);
+ return pci_bus_reset(bridge->subordinate, 0);
+}
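A hedged sketch of how the error-recovery path might drive this helper for a fatal error; this is simplified from the pcie_do_recovery() rework elsewhere in this series, and the demo_* name is hypothetical:

	/* Simplified caller within drivers/pci: mark the subtree gone, reset. */
	static void demo_recover_fatal(struct pci_dev *bridge)
	{
		pci_walk_bus(bridge->subordinate, pci_dev_set_disconnected, NULL);

		if (pci_bus_error_reset(bridge))
			pci_warn(bridge, "subordinate bus reset failed\n");
	}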
+
+/**
* pci_probe_reset_bus - probe whether a PCI bus can be reset
* @bus: PCI bus to probe
*
@@ -5701,8 +5784,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
{
if (!dev->dma_alias_mask)
- dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
- sizeof(long), GFP_KERNEL);
+ dev->dma_alias_mask = bitmap_zalloc(U8_MAX, GFP_KERNEL);
if (!dev->dma_alias_mask) {
pci_warn(dev, "Unable to allocate DMA alias mask\n");
return;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 6e0d1528d471..662b7457db23 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -35,10 +35,13 @@ int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vmai,
int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
+int pci_bus_error_reset(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
*
+ * @bridge_d3: Does the bridge allow entering D3
+ *
* @is_manageable: returns 'true' if given device is power manageable by the
* platform firmware
*
@@ -60,6 +63,7 @@ int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
* these callbacks are mandatory.
*/
struct pci_platform_pm_ops {
+ bool (*bridge_d3)(struct pci_dev *dev);
bool (*is_manageable)(struct pci_dev *dev);
int (*set_state)(struct pci_dev *dev, pci_power_t state);
pci_power_t (*get_state)(struct pci_dev *dev);
@@ -136,6 +140,7 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern struct mutex pci_slot_mutex;
extern raw_spinlock_t pci_lock;
@@ -285,6 +290,7 @@ struct pci_sriov {
u16 driver_max_VFs; /* Max num VFs driver supports */
struct pci_dev *dev; /* Lowest numbered PF */
struct pci_dev *self; /* This PF */
+ u32 cfg_size; /* VF config space size */
u32 class; /* VF device */
u8 hdr_type; /* VF header type */
u16 subsystem_vendor; /* VF subsystem vendor */
@@ -293,21 +299,71 @@ struct pci_sriov {
bool drivers_autoprobe; /* Auto probing of VFs by driver */
};
-/* pci_dev priv_flags */
-#define PCI_DEV_DISCONNECTED 0
-#define PCI_DEV_ADDED 1
+/**
+ * pci_dev_set_io_state - Set the new error state if possible.
+ *
+ * @dev: PCI device whose error_state is being set
+ * @new: the state we want dev to be in
+ *
+ * Must be called with device_lock held.
+ *
+ * Returns true if state has been changed to the requested state.
+ */
+static inline bool pci_dev_set_io_state(struct pci_dev *dev,
+ pci_channel_state_t new)
+{
+ bool changed = false;
+
+ device_lock_assert(&dev->dev);
+ switch (new) {
+ case pci_channel_io_perm_failure:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ case pci_channel_io_perm_failure:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_frozen:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ case pci_channel_io_normal:
+ switch (dev->error_state) {
+ case pci_channel_io_frozen:
+ case pci_channel_io_normal:
+ changed = true;
+ break;
+ }
+ break;
+ }
+ if (changed)
+ dev->error_state = new;
+ return changed;
+}
static inline int pci_dev_set_disconnected(struct pci_dev *dev, void *unused)
{
- set_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ device_lock(&dev->dev);
+ pci_dev_set_io_state(dev, pci_channel_io_perm_failure);
+ device_unlock(&dev->dev);
+
return 0;
}
static inline bool pci_dev_is_disconnected(const struct pci_dev *dev)
{
- return test_bit(PCI_DEV_DISCONNECTED, &dev->priv_flags);
+ return dev->error_state == pci_channel_io_perm_failure;
}
+/* pci_dev priv_flags */
+#define PCI_DEV_ADDED 0
+
static inline void pci_dev_assign_added(struct pci_dev *dev, bool added)
{
assign_bit(PCI_DEV_ADDED, &dev->priv_flags, added);
@@ -346,6 +402,14 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
#endif /* CONFIG_PCIEAER */
+#ifdef CONFIG_PCIE_DPC
+void pci_save_dpc_state(struct pci_dev *dev);
+void pci_restore_dpc_state(struct pci_dev *dev);
+#else
+static inline void pci_save_dpc_state(struct pci_dev *dev) {}
+static inline void pci_restore_dpc_state(struct pci_dev *dev) {}
+#endif
+
#ifdef CONFIG_PCI_ATS
void pci_restore_ats_state(struct pci_dev *dev);
#else
@@ -423,8 +487,8 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
#endif
/* PCI error reporting and recovery */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service);
-void pcie_do_nonfatal_recovery(struct pci_dev *dev);
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 0a1e9d379bc5..44742b2e1126 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -36,7 +36,6 @@ config PCIEAER
config PCIEAER_INJECT
tristate "PCI Express error injection support"
depends on PCIEAER
- default n
help
This enables PCI Express Root Port Advanced Error Reporting
(AER) software error injector.
@@ -84,7 +83,6 @@ config PCIEASPM
config PCIEASPM_DEBUG
bool "Debug PCI Express ASPM"
depends on PCIEASPM
- default n
help
This enables PCI Express ASPM debug support. It will add per-device
interface to control ASPM.
@@ -129,7 +127,6 @@ config PCIE_PME
config PCIE_DPC
bool "PCI Express Downstream Port Containment support"
depends on PCIEPORTBUS && PCIEAER
- default n
help
This enables PCI Express Downstream Port Containment (DPC)
driver support. DPC events from Root and Downstream ports
@@ -139,7 +136,6 @@ config PCIE_DPC
config PCIE_PTM
bool "PCI Express Precision Time Measurement support"
- default n
depends on PCIEPORTBUS
help
This enables PCI Express Precision Time Measurement (PTM)
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c
index 83180edd6ed4..a90a9194ac4a 100644
--- a/drivers/pci/pcie/aer.c
+++ b/drivers/pci/pcie/aer.c
@@ -30,7 +30,7 @@
#include "../pci.h"
#include "portdrv.h"
-#define AER_ERROR_SOURCES_MAX 100
+#define AER_ERROR_SOURCES_MAX 128
#define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
#define AER_MAX_TYPEOF_UNCOR_ERRS 26 /* as per PCI_ERR_UNCOR_STATUS*/
@@ -42,21 +42,7 @@ struct aer_err_source {
struct aer_rpc {
struct pci_dev *rpd; /* Root Port device */
- struct work_struct dpc_handler;
- struct aer_err_source e_sources[AER_ERROR_SOURCES_MAX];
- struct aer_err_info e_info;
- unsigned short prod_idx; /* Error Producer Index */
- unsigned short cons_idx; /* Error Consumer Index */
- int isr;
- spinlock_t e_lock; /*
- * Lock access to Error Status/ID Regs
- * and error producer/consumer index
- */
- struct mutex rpc_mutex; /*
- * only one thread could do
- * recovery on the same
- * root port hierarchy
- */
+ DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
};
/* AER stats for the device */
@@ -866,7 +852,7 @@ void cper_print_aer(struct pci_dev *dev, int aer_severity,
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = dev;
+ e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
e_info->error_dev_num++;
return 0;
}
@@ -1010,9 +996,12 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info)
info->status);
pci_aer_clear_device_status(dev);
} else if (info->severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(dev);
+ pcie_do_recovery(dev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (info->severity == AER_FATAL)
- pcie_do_fatal_recovery(dev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(dev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
+ pci_dev_put(dev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -1047,9 +1036,11 @@ static void aer_recover_work_func(struct work_struct *work)
}
cper_print_aer(pdev, entry.severity, entry.regs);
if (entry.severity == AER_NONFATAL)
- pcie_do_nonfatal_recovery(pdev);
+ pcie_do_recovery(pdev, pci_channel_io_normal,
+ PCIE_PORT_SERVICE_AER);
else if (entry.severity == AER_FATAL)
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_AER);
+ pcie_do_recovery(pdev, pci_channel_io_frozen,
+ PCIE_PORT_SERVICE_AER);
pci_dev_put(pdev);
}
}
@@ -1065,7 +1056,6 @@ static DECLARE_WORK(aer_recover_work, aer_recover_work_func);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
int severity, struct aer_capability_regs *aer_regs)
{
- unsigned long flags;
struct aer_recover_entry entry = {
.bus = bus,
.devfn = devfn,
@@ -1074,13 +1064,12 @@ void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
.regs = aer_regs,
};
- spin_lock_irqsave(&aer_recover_ring_lock, flags);
- if (kfifo_put(&aer_recover_ring, entry))
+ if (kfifo_in_spinlocked(&aer_recover_ring, &entry, 1,
+ &aer_recover_ring_lock))
schedule_work(&aer_recover_work);
else
pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
- spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
#endif
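The new queueing relies on kfifo's locked insert. The pattern in isolation, as a standalone hypothetical sketch:

	#include <linux/kfifo.h>
	#include <linux/spinlock.h>

	struct demo_evt {
		int id;
	};

	/* kfifo sizes must be a power of two, hence 128 above rather than 100 */
	static DEFINE_KFIFO(demo_fifo, struct demo_evt, 128);
	static DEFINE_SPINLOCK(demo_lock);

	/* Producer: insert one element under the spinlock; 0 means fifo full. */
	static bool demo_queue(const struct demo_evt *evt)
	{
		return kfifo_in_spinlocked(&demo_fifo, evt, 1, &demo_lock) == 1;
	}

	/* Consumer: drain without the lock (a single consumer is safe). */
	static void demo_drain(void)
	{
		struct demo_evt evt;

		while (kfifo_get(&demo_fifo, &evt))
			; /* handle evt */
	}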
@@ -1115,8 +1104,9 @@ int aer_get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
&info->mask);
if (!(info->status & ~info->mask))
return 0;
- } else if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
- info->severity == AER_NONFATAL) {
+ } else if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM ||
+ info->severity == AER_NONFATAL) {
/* Link is still healthy for IO reads */
pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
@@ -1170,7 +1160,7 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
struct aer_err_source *e_src)
{
struct pci_dev *pdev = rpc->rpd;
- struct aer_err_info *e_info = &rpc->e_info;
+ struct aer_err_info e_info;
pci_rootport_aer_stats_incr(pdev, e_src);
@@ -1179,83 +1169,57 @@ static void aer_isr_one_error(struct aer_rpc *rpc,
* uncorrectable error being logged. Report correctable error first.
*/
if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
- e_info->id = ERR_COR_ID(e_src->id);
- e_info->severity = AER_CORRECTABLE;
+ e_info.id = ERR_COR_ID(e_src->id);
+ e_info.severity = AER_CORRECTABLE;
if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ e_info.multi_error_valid = 0;
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- e_info->id = ERR_UNCOR_ID(e_src->id);
+ e_info.id = ERR_UNCOR_ID(e_src->id);
if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- e_info->severity = AER_FATAL;
+ e_info.severity = AER_FATAL;
else
- e_info->severity = AER_NONFATAL;
+ e_info.severity = AER_NONFATAL;
if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
- e_info->multi_error_valid = 1;
+ e_info.multi_error_valid = 1;
else
- e_info->multi_error_valid = 0;
+ e_info.multi_error_valid = 0;
- aer_print_port_info(pdev, e_info);
+ aer_print_port_info(pdev, &e_info);
- if (find_source_device(pdev, e_info))
- aer_process_err_devices(e_info);
+ if (find_source_device(pdev, &e_info))
+ aer_process_err_devices(&e_info);
}
}
/**
- * get_e_source - retrieve an error source
- * @rpc: pointer to the root port which holds an error
- * @e_src: pointer to store retrieved error source
- *
- * Return 1 if an error source is retrieved, otherwise 0.
- *
- * Invoked by DPC handler to consume an error.
- */
-static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
-{
- unsigned long flags;
-
- /* Lock access to Root error producer/consumer index */
- spin_lock_irqsave(&rpc->e_lock, flags);
- if (rpc->prod_idx == rpc->cons_idx) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
- return 0;
- }
-
- *e_src = rpc->e_sources[rpc->cons_idx];
- rpc->cons_idx++;
- if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
- rpc->cons_idx = 0;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- return 1;
-}
-
-/**
* aer_isr - consume errors detected by root port
 * @irq: IRQ assigned to the Root Port
 * @context: pointer to the Root Port's pcie_device
 *
 * Invoked from the IRQ thread when the Root Port records a newly detected error
*/
-static void aer_isr(struct work_struct *work)
+static irqreturn_t aer_isr(int irq, void *context)
{
- struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
+ struct pcie_device *dev = (struct pcie_device *)context;
+ struct aer_rpc *rpc = get_service_data(dev);
struct aer_err_source uninitialized_var(e_src);
- mutex_lock(&rpc->rpc_mutex);
- while (get_e_source(rpc, &e_src))
+ if (kfifo_is_empty(&rpc->aer_fifo))
+ return IRQ_NONE;
+
+ while (kfifo_get(&rpc->aer_fifo, &e_src))
aer_isr_one_error(rpc, &e_src);
- mutex_unlock(&rpc->rpc_mutex);
+ return IRQ_HANDLED;
}
/**
@@ -1265,56 +1229,26 @@ static void aer_isr(struct work_struct *work)
*
* Invoked when Root Port detects AER messages.
*/
-irqreturn_t aer_irq(int irq, void *context)
+static irqreturn_t aer_irq(int irq, void *context)
{
- unsigned int status, id;
struct pcie_device *pdev = (struct pcie_device *)context;
struct aer_rpc *rpc = get_service_data(pdev);
- int next_prod_idx;
- unsigned long flags;
- int pos;
-
- pos = pdev->port->aer_cap;
- /*
- * Must lock access to Root Error Status Reg, Root Error ID Reg,
- * and Root error producer/consumer index
- */
- spin_lock_irqsave(&rpc->e_lock, flags);
+ struct pci_dev *rp = rpc->rpd;
+ struct aer_err_source e_src = {};
+ int pos = rp->aer_cap;
- /* Read error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, &status);
- if (!(status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV))) {
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, &e_src.status);
+ if (!(e_src.status & (PCI_ERR_ROOT_UNCOR_RCV|PCI_ERR_ROOT_COR_RCV)))
return IRQ_NONE;
- }
- /* Read error source and clear error status */
- pci_read_config_dword(pdev->port, pos + PCI_ERR_ROOT_ERR_SRC, &id);
- pci_write_config_dword(pdev->port, pos + PCI_ERR_ROOT_STATUS, status);
+ pci_read_config_dword(rp, pos + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
+ pci_write_config_dword(rp, pos + PCI_ERR_ROOT_STATUS, e_src.status);
- /* Store error source for later DPC handler */
- next_prod_idx = rpc->prod_idx + 1;
- if (next_prod_idx == AER_ERROR_SOURCES_MAX)
- next_prod_idx = 0;
- if (next_prod_idx == rpc->cons_idx) {
- /*
- * Error Storm Condition - possibly the same error occurred.
- * Drop the error.
- */
- spin_unlock_irqrestore(&rpc->e_lock, flags);
+ if (!kfifo_put(&rpc->aer_fifo, e_src))
return IRQ_HANDLED;
- }
- rpc->e_sources[rpc->prod_idx].status = status;
- rpc->e_sources[rpc->prod_idx].id = id;
- rpc->prod_idx = next_prod_idx;
- spin_unlock_irqrestore(&rpc->e_lock, flags);
-
- /* Invoke DPC handler */
- schedule_work(&rpc->dpc_handler);
- return IRQ_HANDLED;
+ return IRQ_WAKE_THREAD;
}
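aer_irq() is now the hard half of a threaded interrupt: it only touches the Root Error Status/Source registers and parks the event in the kfifo, returning IRQ_WAKE_THREAD so that aer_isr() runs in sleepable process context. The generic shape of that split, as a sketch with hypothetical helpers:

	#include <linux/interrupt.h>

	struct demo_ctx {
		/* driver state, e.g. a kfifo of pending events */
		bool pending;
	};

	static irqreturn_t demo_hardirq(int irq, void *data)
	{
		struct demo_ctx *ctx = data;

		if (!ctx->pending)		/* not ours: line is IRQF_SHARED */
			return IRQ_NONE;
		return IRQ_WAKE_THREAD;		/* run demo_thread() next */
	}

	static irqreturn_t demo_thread(int irq, void *data)
	{
		struct demo_ctx *ctx = data;

		ctx->pending = false;		/* heavy lifting may sleep here */
		return IRQ_HANDLED;
	}

	static int demo_attach(struct device *dev, int irq, struct demo_ctx *ctx)
	{
		/* the IRQ is released automatically when 'dev' is unbound */
		return devm_request_threaded_irq(dev, irq, demo_hardirq,
						 demo_thread, IRQF_SHARED,
						 "demo", ctx);
	}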
-EXPORT_SYMBOL_GPL(aer_irq);
static int set_device_error_reporting(struct pci_dev *dev, void *data)
{
@@ -1423,33 +1357,6 @@ static void aer_disable_rootport(struct aer_rpc *rpc)
}
/**
- * aer_alloc_rpc - allocate Root Port data structure
- * @dev: pointer to the pcie_dev data structure
- *
- * Invoked when Root Port's AER service is loaded.
- */
-static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
-{
- struct aer_rpc *rpc;
-
- rpc = kzalloc(sizeof(struct aer_rpc), GFP_KERNEL);
- if (!rpc)
- return NULL;
-
- /* Initialize Root lock access, e_lock, to Root Error Status Reg */
- spin_lock_init(&rpc->e_lock);
-
- rpc->rpd = dev->port;
- INIT_WORK(&rpc->dpc_handler, aer_isr);
- mutex_init(&rpc->rpc_mutex);
-
- /* Use PCIe bus function to store rpc into PCIe device */
- set_service_data(dev, rpc);
-
- return rpc;
-}
-
-/**
* aer_remove - clean up resources
* @dev: pointer to the pcie_dev data structure
*
@@ -1459,16 +1366,7 @@ static void aer_remove(struct pcie_device *dev)
{
struct aer_rpc *rpc = get_service_data(dev);
- if (rpc) {
- /* If register interrupt service, it must be free. */
- if (rpc->isr)
- free_irq(dev->irq, dev);
-
- flush_work(&rpc->dpc_handler);
- aer_disable_rootport(rpc);
- kfree(rpc);
- set_service_data(dev, NULL);
- }
+ aer_disable_rootport(rpc);
}
/**
@@ -1481,27 +1379,24 @@ static int aer_probe(struct pcie_device *dev)
{
int status;
struct aer_rpc *rpc;
- struct device *device = &dev->port->dev;
+ struct device *device = &dev->device;
- /* Alloc rpc data structure */
- rpc = aer_alloc_rpc(dev);
+ rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL);
if (!rpc) {
dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n");
- aer_remove(dev);
return -ENOMEM;
}
+ rpc->rpd = dev->port;
+ set_service_data(dev, rpc);
- /* Request IRQ ISR */
- status = request_irq(dev->irq, aer_irq, IRQF_SHARED, "aerdrv", dev);
+ status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr,
+ IRQF_SHARED, "aerdrv", dev);
if (status) {
dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n",
dev->irq);
- aer_remove(dev);
return status;
}
- rpc->isr = 1;
-
aer_enable_rootport(rpc);
dev_info(device, "AER enabled with IRQ %d\n", dev->irq);
return 0;
@@ -1526,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
@@ -1541,18 +1436,6 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
-/**
- * aer_error_resume - clean up corresponding error status bits
- * @dev: pointer to Root Port's pci_dev data structure
- *
- * Invoked by Port Bus driver during nonfatal recovery.
- */
-static void aer_error_resume(struct pci_dev *dev)
-{
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
-}
-
static struct pcie_port_service_driver aerdriver = {
.name = "aer",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -1560,7 +1443,6 @@ static struct pcie_port_service_driver aerdriver = {
.probe = aer_probe,
.remove = aer_remove,
- .error_resume = aer_error_resume,
.reset_link = aer_root_reset,
};
@@ -1569,10 +1451,9 @@ static struct pcie_port_service_driver aerdriver = {
*
* Invoked when AER root service driver is loaded.
*/
-static int __init aer_service_init(void)
+int __init pcie_aer_init(void)
{
if (!pci_aer_available() || aer_acpi_firmware_first())
return -ENXIO;
return pcie_port_service_register(&aerdriver);
}
-device_initcall(aer_service_init);
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c
index 0eb24346cad3..95d4759664b3 100644
--- a/drivers/pci/pcie/aer_inject.c
+++ b/drivers/pci/pcie/aer_inject.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
@@ -175,14 +176,48 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
return target;
}
+static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->read(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
+static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct pci_ops *ops, *my_ops;
+ int rv;
+
+ ops = __find_pci_bus_ops(bus);
+ if (!ops)
+ return -1;
+
+ my_ops = bus->ops;
+ bus->ops = ops;
+ rv = ops->write(bus, devfn, where, size, val);
+ bus->ops = my_ops;
+
+ return rv;
+}
+
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
u32 *sim;
struct aer_error *err;
unsigned long flags;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -203,18 +238,7 @@ static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_read() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->read(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_read(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -226,8 +250,6 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
struct aer_error *err;
unsigned long flags;
int rw1cs;
- struct pci_ops *ops;
- struct pci_ops *my_ops;
int domain;
int rv;
@@ -251,18 +273,7 @@ static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
return 0;
}
out:
- ops = __find_pci_bus_ops(bus);
- /*
- * pci_lock must already be held, so we can directly
- * manipulate bus->ops. Many config access functions,
- * including pci_generic_config_write() require the original
- * bus->ops be installed to function, so temporarily put them
- * back.
- */
- my_ops = bus->ops;
- bus->ops = ops;
- rv = ops->write(bus, devfn, where, size, val);
- bus->ops = my_ops;
+ rv = aer_inj_write(bus, devfn, where, size, val);
spin_unlock_irqrestore(&inject_lock, flags);
return rv;
}
@@ -303,32 +314,13 @@ out:
return 0;
}
-static int find_aer_device_iter(struct device *device, void *data)
-{
- struct pcie_device **result = data;
- struct pcie_device *pcie_dev;
-
- if (device->bus == &pcie_port_bus_type) {
- pcie_dev = to_pcie_device(device);
- if (pcie_dev->service & PCIE_PORT_SERVICE_AER) {
- *result = pcie_dev;
- return 1;
- }
- }
- return 0;
-}
-
-static int find_aer_device(struct pci_dev *dev, struct pcie_device **result)
-{
- return device_for_each_child(&dev->dev, result, find_aer_device_iter);
-}
-
static int aer_inject(struct aer_error_inj *einj)
{
struct aer_error *err, *rperr;
struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
struct pci_dev *dev, *rpdev;
struct pcie_device *edev;
+ struct device *device;
unsigned long flags;
unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
int pos_cap_err, rp_pos_cap_err;
@@ -464,7 +456,9 @@ static int aer_inject(struct aer_error_inj *einj)
if (ret)
goto out_put;
- if (find_aer_device(rpdev, &edev)) {
+ device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
+ if (device) {
+ edev = to_pcie_device(device);
if (!get_service_data(edev)) {
dev_warn(&edev->device,
"aer_inject: AER service is not initialized\n");
@@ -474,7 +468,9 @@ static int aer_inject(struct aer_error_inj *einj)
dev_info(&edev->device,
"aer_inject: Injecting errors %08x/%08x into device %s\n",
einj->cor_status, einj->uncor_status, pci_name(dev));
- aer_irq(-1, edev);
+ local_irq_disable();
+ generic_handle_irq(edev->irq);
+ local_irq_enable();
} else {
pci_err(rpdev, "aer_inject: AER device not found\n");
ret = -ENODEV;
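With the ISR no longer exported, error injection raises a real interrupt through the IRQ core instead of calling aer_irq() by hand; generic_handle_irq() must run with interrupts disabled, which the local_irq_disable()/local_irq_enable() pair provides. A minimal sketch:

	#include <linux/irq.h>
	#include <linux/irqdesc.h>

	/* Simulate a hardware interrupt on a Linux IRQ number (sketch) */
	static void demo_kick_irq(unsigned int irq)
	{
		local_irq_disable();	/* generic_handle_irq() expects hardirq-like context */
		generic_handle_irq(irq);
		local_irq_enable();
	}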
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 5326916715d2..dcb29cb76dc6 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -895,7 +895,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
struct pcie_link_state *link;
int blacklist = !!pcie_aspm_sanity_check(pdev);
- if (!aspm_support_enabled)
+ if (!aspm_support_enabled || aspm_disabled)
return;
if (pdev->link_state)
@@ -991,7 +991,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 * All PCIe functions are in one slot; removing one function removes
 * the whole slot, so just wait until we are the last function left.
*/
- if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
+ if (!list_empty(&parent->subordinate->devices))
goto out;
link = parent->link_state;
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index f03279fc87cd..e435d12e61a0 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -44,6 +44,58 @@ static const char * const rp_pio_error_string[] = {
"Memory Request Completion Timeout", /* Bit Position 18 */
};
+static struct dpc_dev *to_dpc_dev(struct pci_dev *dev)
+{
+ struct device *device;
+
+ device = pcie_port_find_device(dev, PCIE_PORT_SERVICE_DPC);
+ if (!device)
+ return NULL;
+ return get_service_data(to_pcie_device(device));
+}
+
+void pci_save_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_read_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, cap);
+}
+
+void pci_restore_dpc_state(struct pci_dev *dev)
+{
+ struct dpc_dev *dpc;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ dpc = to_dpc_dev(dev);
+ if (!dpc)
+ return;
+
+ save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_DPC);
+ if (!save_state)
+ return;
+
+ cap = (u16 *)&save_state->cap.data[0];
+ pci_write_config_word(dev, dpc->cap_pos + PCI_EXP_DPC_CTL, *cap);
+}
+
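Together with the pci_add_ext_cap_save_buffer() call added to dpc_probe() below, these helpers follow the standard saved-capability lifecycle; roughly (a sketch of the intended call flow, not verbatim kernel code):

	/*
	 *  probe:    pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
	 *  suspend:  pci_save_state(pdev)    -> pci_save_dpc_state(pdev)
	 *  resume:   pci_restore_state(pdev) -> pci_restore_dpc_state(pdev)
	 */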
static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
{
unsigned long timeout = jiffies + HZ;
@@ -67,18 +119,13 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc)
static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
{
struct dpc_dev *dpc;
- struct pcie_device *pciedev;
- struct device *devdpc;
-
u16 cap;
/*
* DPC disables the Link automatically in hardware, so it has
* already been reset by the time we get here.
*/
- devdpc = pcie_port_find_device(pdev, PCIE_PORT_SERVICE_DPC);
- pciedev = to_pcie_device(devdpc);
- dpc = get_service_data(pciedev);
+ dpc = to_dpc_dev(pdev);
cap = dpc->cap_pos;
/*
@@ -93,10 +140,12 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
+ if (!pcie_wait_for_link(pdev, true))
+ return PCI_ERS_RESULT_DISCONNECT;
+
return PCI_ERS_RESULT_RECOVERED;
}
-
static void dpc_process_rp_pio_error(struct dpc_dev *dpc)
{
struct device *dev = &dpc->dev->device;
@@ -169,7 +218,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1;
ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5;
- dev_warn(dev, "DPC %s detected, remove downstream devices\n",
+ dev_warn(dev, "DPC %s detected\n",
(reason == 0) ? "unmasked uncorrectable error" :
(reason == 1) ? "ERR_NONFATAL" :
(reason == 2) ? "ERR_FATAL" :
@@ -186,7 +235,7 @@ static irqreturn_t dpc_handler(int irq, void *context)
}
/* We configure DPC so it only triggers on ERR_FATAL */
- pcie_do_fatal_recovery(pdev, PCIE_PORT_SERVICE_DPC);
+ pcie_do_recovery(pdev, pci_channel_io_frozen, PCIE_PORT_SERVICE_DPC);
return IRQ_HANDLED;
}
@@ -259,6 +308,8 @@ static int dpc_probe(struct pcie_device *dev)
FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP),
FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size,
FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE));
+
+ pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16));
return status;
}
@@ -282,8 +333,7 @@ static struct pcie_port_service_driver dpcdriver = {
.reset_link = dpc_reset_link,
};
-static int __init dpc_service_init(void)
+int __init pcie_dpc_init(void)
{
return pcie_port_service_register(&dpcdriver);
}
-device_initcall(dpc_service_init);
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index 708fd3a0d646..773197a12568 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -12,18 +12,12 @@
#include <linux/pci.h>
#include <linux/module.h>
-#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/aer.h>
#include "portdrv.h"
#include "../pci.h"
-struct aer_broadcast_data {
- enum pci_channel_state state;
- enum pci_ers_result result;
-};
-
static pci_ers_result_t merge_result(enum pci_ers_result orig,
enum pci_ers_result new)
{
@@ -49,66 +43,52 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig,
return orig;
}
-static int report_error_detected(struct pci_dev *dev, void *data)
+static int report_error_detected(struct pci_dev *dev,
+ enum pci_channel_state state,
+ enum pci_ers_result *result)
{
pci_ers_result_t vote;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
- dev->error_state = result_data->state;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, state) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->error_detected) {
- if (result_data->state == pci_channel_io_frozen &&
- dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
- /*
- * In case of fatal recovery, if one of down-
- * stream device has no driver. We might be
- * unable to recover because a later insmod
- * of a driver for this device is unaware of
- * its hw state.
- */
- pci_printk(KERN_DEBUG, dev, "device has %s\n",
- dev->driver ?
- "no AER-aware driver" : "no driver");
- }
-
/*
- * If there's any device in the subtree that does not
- * have an error_detected callback, returning
- * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
- * the subsequent mmio_enabled/slot_reset/resume
- * callbacks of "any" device in the subtree. All the
- * devices in the subtree are left in the error state
- * without recovery.
+ * If any device in the subtree does not have an error_detected
+ * callback, PCI_ERS_RESULT_NO_AER_DRIVER prevents subsequent
+ * error callbacks for every device in the subtree, and recovery
+ * exits with the devices left in the disconnected error state.
*/
-
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
vote = PCI_ERS_RESULT_NO_AER_DRIVER;
else
vote = PCI_ERS_RESULT_NONE;
} else {
err_handler = dev->driver->err_handler;
- vote = err_handler->error_detected(dev, result_data->state);
- pci_uevent_ers(dev, PCI_ERS_RESULT_NONE);
+ vote = err_handler->error_detected(dev, state);
}
-
- result_data->result = merge_result(result_data->result, vote);
+ pci_uevent_ers(dev, vote);
+ *result = merge_result(*result, vote);
device_unlock(&dev->dev);
return 0;
}
+static int report_frozen_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_frozen, data);
+}
+
+static int report_normal_detected(struct pci_dev *dev, void *data)
+{
+ return report_error_detected(dev, pci_channel_io_normal, data);
+}
+
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -118,7 +98,7 @@ static int report_mmio_enabled(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->mmio_enabled(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -126,11 +106,8 @@ out:
static int report_slot_reset(struct pci_dev *dev, void *data)
{
- pci_ers_result_t vote;
+ pci_ers_result_t vote, *result = data;
const struct pci_error_handlers *err_handler;
- struct aer_broadcast_data *result_data;
-
- result_data = (struct aer_broadcast_data *) data;
device_lock(&dev->dev);
if (!dev->driver ||
@@ -140,7 +117,7 @@ static int report_slot_reset(struct pci_dev *dev, void *data)
err_handler = dev->driver->err_handler;
vote = err_handler->slot_reset(dev);
- result_data->result = merge_result(result_data->result, vote);
+ *result = merge_result(*result, vote);
out:
device_unlock(&dev->dev);
return 0;
@@ -151,17 +128,16 @@ static int report_resume(struct pci_dev *dev, void *data)
const struct pci_error_handlers *err_handler;
device_lock(&dev->dev);
- dev->error_state = pci_channel_io_normal;
-
- if (!dev->driver ||
+ if (!pci_dev_set_io_state(dev, pci_channel_io_normal) ||
+ !dev->driver ||
!dev->driver->err_handler ||
!dev->driver->err_handler->resume)
goto out;
err_handler = dev->driver->err_handler;
err_handler->resume(dev);
- pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
out:
+ pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
device_unlock(&dev->dev);
return 0;
}
@@ -177,207 +153,86 @@ static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
int rc;
- rc = pci_bridge_secondary_bus_reset(dev);
+ rc = pci_bus_error_reset(dev);
pci_printk(KERN_DEBUG, dev, "downstream link has been reset\n");
return rc ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}
static pci_ers_result_t reset_link(struct pci_dev *dev, u32 service)
{
- struct pci_dev *udev;
pci_ers_result_t status;
struct pcie_port_service_driver *driver = NULL;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /* Reset this port for all subordinates */
- udev = dev;
- } else {
- /* Reset the upstream component (likely downstream port) */
- udev = dev->bus->self;
- }
-
- /* Use the aer driver of the component firstly */
- driver = pcie_port_find_service(udev, service);
-
+ driver = pcie_port_find_service(dev, service);
if (driver && driver->reset_link) {
- status = driver->reset_link(udev);
- } else if (udev->has_secondary_link) {
- status = default_reset_link(udev);
+ status = driver->reset_link(dev);
+ } else if (dev->has_secondary_link) {
+ status = default_reset_link(dev);
} else {
pci_printk(KERN_DEBUG, dev, "no link-reset support at upstream device %s\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
if (status != PCI_ERS_RESULT_RECOVERED) {
pci_printk(KERN_DEBUG, dev, "link reset at upstream device %s failed\n",
- pci_name(udev));
+ pci_name(dev));
return PCI_ERS_RESULT_DISCONNECT;
}
return status;
}
-/**
- * broadcast_error_message - handle message broadcast to downstream drivers
- * @dev: pointer to from where in a hierarchy message is broadcasted down
- * @state: error state
- * @error_mesg: message to print
- * @cb: callback to be broadcasted
- *
- * Invoked during error recovery process. Once being invoked, the content
- * of error severity will be broadcasted to all downstream drivers in a
- * hierarchy in question.
- */
-static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
- enum pci_channel_state state,
- char *error_mesg,
- int (*cb)(struct pci_dev *, void *))
-{
- struct aer_broadcast_data result_data;
-
- pci_printk(KERN_DEBUG, dev, "broadcast %s message\n", error_mesg);
- result_data.state = state;
- if (cb == report_error_detected)
- result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
- else
- result_data.result = PCI_ERS_RESULT_RECOVERED;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- if (cb == report_error_detected)
- dev->error_state = state;
- pci_walk_bus(dev->subordinate, cb, &result_data);
- if (cb == report_resume) {
- pci_aer_clear_device_status(dev);
- pci_cleanup_aer_uncorrect_error_status(dev);
- dev->error_state = pci_channel_io_normal;
- }
- } else {
- /*
- * If the error is reported by an end point, we think this
- * error is related to the upstream link of the end point.
- * The error is non fatal so the bus is ok; just invoke
- * the callback for the function that logged the error.
- */
- cb(dev, &result_data);
- }
-
- return result_data.result;
-}
-
-/**
- * pcie_do_fatal_recovery - handle fatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is fatal. Once being invoked, removes the devices
- * beneath this AER agent, followed by reset link e.g. secondary bus reset
- * followed by re-enumeration of devices.
- */
-void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
+void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service)
{
- struct pci_dev *udev;
- struct pci_bus *parent;
- struct pci_dev *pdev, *temp;
- pci_ers_result_t result;
-
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- udev = dev;
+ pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;
+ struct pci_bus *bus;
+
+ /*
+ * Error recovery runs on all subordinates of the first downstream port.
+ * If the downstream port detected the error, it is cleared at the end.
+ */
+ if (!(pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM))
+ dev = dev->bus->self;
+ bus = dev->subordinate;
+
+ pci_dbg(dev, "broadcast error_detected message\n");
+ if (state == pci_channel_io_frozen)
+ pci_walk_bus(bus, report_frozen_detected, &status);
else
- udev = dev->bus->self;
-
- parent = udev->subordinate;
- pci_lock_rescan_remove();
- pci_dev_get(dev);
- list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
- bus_list) {
- pci_dev_get(pdev);
- pci_dev_set_disconnected(pdev, NULL);
- if (pci_has_subordinate(pdev))
- pci_walk_bus(pdev->subordinate,
- pci_dev_set_disconnected, NULL);
- pci_stop_and_remove_bus_device(pdev);
- pci_dev_put(pdev);
- }
-
- result = reset_link(udev, service);
+ pci_walk_bus(bus, report_normal_detected, &status);
- if ((service == PCIE_PORT_SERVICE_AER) &&
- (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) {
- /*
- * If the error is reported by a bridge, we think this error
- * is related to the downstream link of the bridge, so we
- * do error recovery on all subordinates of the bridge instead
- * of the bridge and clear the error status of the bridge.
- */
- pci_aer_clear_fatal_status(dev);
- pci_aer_clear_device_status(dev);
- }
+ if (state == pci_channel_io_frozen &&
+ reset_link(dev, service) != PCI_ERS_RESULT_RECOVERED)
+ goto failed;
- if (result == PCI_ERS_RESULT_RECOVERED) {
- if (pcie_wait_for_link(udev, true))
- pci_rescan_bus(udev->bus);
- pci_info(dev, "Device recovery from fatal error successful\n");
- } else {
- pci_uevent_ers(dev, PCI_ERS_RESULT_DISCONNECT);
- pci_info(dev, "Device recovery from fatal error failed\n");
+ if (status == PCI_ERS_RESULT_CAN_RECOVER) {
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast mmio_enabled message\n");
+ pci_walk_bus(bus, report_mmio_enabled, &status);
}
- pci_dev_put(dev);
- pci_unlock_rescan_remove();
-}
-
-/**
- * pcie_do_nonfatal_recovery - handle nonfatal error recovery process
- * @dev: pointer to a pci_dev data structure of agent detecting an error
- *
- * Invoked when an error is nonfatal/fatal. Once being invoked, broadcast
- * error detected message to all downstream drivers within a hierarchy in
- * question and return the returned code.
- */
-void pcie_do_nonfatal_recovery(struct pci_dev *dev)
-{
- pci_ers_result_t status;
- enum pci_channel_state state;
-
- state = pci_channel_io_normal;
-
- status = broadcast_error_message(dev,
- state,
- "error_detected",
- report_error_detected);
-
- if (status == PCI_ERS_RESULT_CAN_RECOVER)
- status = broadcast_error_message(dev,
- state,
- "mmio_enabled",
- report_mmio_enabled);
-
if (status == PCI_ERS_RESULT_NEED_RESET) {
/*
* TODO: Should call platform-specific
* functions to reset slot before calling
* drivers' slot_reset callbacks?
*/
- status = broadcast_error_message(dev,
- state,
- "slot_reset",
- report_slot_reset);
+ status = PCI_ERS_RESULT_RECOVERED;
+ pci_dbg(dev, "broadcast slot_reset message\n");
+ pci_walk_bus(bus, report_slot_reset, &status);
}
if (status != PCI_ERS_RESULT_RECOVERED)
goto failed;
- broadcast_error_message(dev,
- state,
- "resume",
- report_resume);
+ pci_dbg(dev, "broadcast resume message\n");
+ pci_walk_bus(bus, report_resume, &status);
+ pci_aer_clear_device_status(dev);
+ pci_cleanup_aer_uncorrect_error_status(dev);
pci_info(dev, "AER: Device recovery successful\n");
return;
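pcie_do_recovery() drives every phase with the same walk-and-merge idiom: pci_walk_bus() visits each device below the bridge, and each callback folds its pci_ers_result_t vote into a single status via merge_result(). A hedged sketch of that idiom (merge_result() is the static helper defined earlier in this file):

	#include <linux/pci.h>

	static int demo_vote(struct pci_dev *dev, void *data)
	{
		pci_ers_result_t *result = data;

		/* a real callback would query dev's driver here */
		*result = merge_result(*result, PCI_ERS_RESULT_RECOVERED);
		return 0;	/* non-zero would terminate the walk early */
	}

	static pci_ers_result_t demo_poll_subtree(struct pci_bus *bus)
	{
		pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER;

		pci_walk_bus(bus, demo_vote, &status);
		return status;
	}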
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 3ed67676ea2a..0dbcf429089f 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -432,6 +432,31 @@ static void pcie_pme_remove(struct pcie_device *srv)
kfree(get_service_data(srv));
}
+static int pcie_pme_runtime_suspend(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, false);
+ pcie_clear_root_pme_status(srv->port);
+ data->noirq = true;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
+static int pcie_pme_runtime_resume(struct pcie_device *srv)
+{
+ struct pcie_pme_service_data *data = get_service_data(srv);
+
+ spin_lock_irq(&data->lock);
+ pcie_pme_interrupt_enable(srv->port, true);
+ data->noirq = false;
+ spin_unlock_irq(&data->lock);
+
+ return 0;
+}
+
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@@ -439,6 +464,8 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
+ .runtime_suspend = pcie_pme_runtime_suspend,
+ .runtime_resume = pcie_pme_runtime_resume,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
@@ -446,8 +473,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
/**
* pcie_pme_service_init - Register the PCIe PME service driver.
*/
-static int __init pcie_pme_service_init(void)
+int __init pcie_pme_init(void)
{
return pcie_port_service_register(&pcie_pme_driver);
}
-device_initcall(pcie_pme_service_init);
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index d59afa42fc14..e495f04394d0 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -23,6 +23,30 @@
#define PCIE_PORT_DEVICE_MAXSERVICES 4
+#ifdef CONFIG_PCIEAER
+int pcie_aer_init(void);
+#else
+static inline int pcie_aer_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_HOTPLUG_PCI_PCIE
+int pcie_hp_init(void);
+#else
+static inline int pcie_hp_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_PME
+int pcie_pme_init(void);
+#else
+static inline int pcie_pme_init(void) { return 0; }
+#endif
+
+#ifdef CONFIG_PCIE_DPC
+int pcie_dpc_init(void);
+#else
+static inline int pcie_dpc_init(void) { return 0; }
+#endif
+
/* Port Type */
#define PCIE_ANY_PORT (~0)
@@ -52,6 +76,8 @@ struct pcie_port_service_driver {
int (*suspend) (struct pcie_device *dev);
int (*resume_noirq) (struct pcie_device *dev);
int (*resume) (struct pcie_device *dev);
+ int (*runtime_suspend) (struct pcie_device *dev);
+ int (*runtime_resume) (struct pcie_device *dev);
/* Device driver may resume normal operations */
void (*error_resume)(struct pci_dev *dev);
@@ -85,6 +111,8 @@ int pcie_port_device_register(struct pci_dev *dev);
int pcie_port_device_suspend(struct device *dev);
int pcie_port_device_resume_noirq(struct device *dev);
int pcie_port_device_resume(struct device *dev);
+int pcie_port_device_runtime_suspend(struct device *dev);
+int pcie_port_device_runtime_resume(struct device *dev);
#endif
void pcie_port_device_remove(struct pci_dev *dev);
int __must_check pcie_port_bus_register(void);
@@ -123,10 +151,6 @@ static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
}
#endif
-#ifdef CONFIG_PCIEAER
-irqreturn_t aer_irq(int irq, void *context);
-#endif
-
struct pcie_port_service_driver *pcie_port_find_service(struct pci_dev *dev,
u32 service);
struct device *pcie_port_find_device(struct pci_dev *dev, u32 service);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index 7c37d815229e..f458ac9cb70c 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -395,6 +395,26 @@ int pcie_port_device_resume(struct device *dev)
size_t off = offsetof(struct pcie_port_service_driver, resume);
return device_for_each_child(dev, &off, pm_iter);
}
+
+/**
+ * pcie_port_device_runtime_suspend - runtime suspend port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_suspend(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_suspend);
+ return device_for_each_child(dev, &off, pm_iter);
+}
+
+/**
+ * pcie_port_device_runtime_resume - runtime resume port services
+ * @dev: PCI Express port to handle
+ */
+int pcie_port_device_runtime_resume(struct device *dev)
+{
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+ return device_for_each_child(dev, &off, pm_iter);
+}
#endif /* PM */
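Both new entry points reuse the pm_iter() trampoline already used for suspend/resume: the caller passes the offsetof() of the desired callback inside struct pcie_port_service_driver, and pm_iter() fetches the function pointer at that offset for every child service device. A simplified sketch of that dispatch (close to, but not verbatim, the pm_iter() in this file):

	typedef int (*pcie_pm_callback_t)(struct pcie_device *);

	static int pm_iter_sketch(struct device *dev, void *data)
	{
		struct pcie_port_service_driver *drv;
		size_t offset = *(size_t *)data;
		pcie_pm_callback_t cb;

		if (!dev->driver)
			return 0;	/* unbound service: nothing to call */

		drv = to_service_driver(dev->driver);
		cb = *(pcie_pm_callback_t *)((void *)drv + offset);
		return cb ? cb(to_pcie_device(dev)) : 0;
	}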
static int remove_iter(struct device *dev, void *data)
@@ -466,6 +486,7 @@ struct device *pcie_port_find_device(struct pci_dev *dev,
device = pdrvs.dev;
return device;
}
+EXPORT_SYMBOL_GPL(pcie_port_find_device);
/**
* pcie_port_device_remove - unregister PCI Express port service devices
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index eef22dc29140..0acca3596807 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -45,12 +45,10 @@ __setup("pcie_ports=", pcie_port_setup);
#ifdef CONFIG_PM
static int pcie_port_runtime_suspend(struct device *dev)
{
- return to_pci_dev(dev)->bridge_d3 ? 0 : -EBUSY;
-}
+ if (!to_pci_dev(dev)->bridge_d3)
+ return -EBUSY;
-static int pcie_port_runtime_resume(struct device *dev)
-{
- return 0;
+ return pcie_port_device_runtime_suspend(dev);
}
static int pcie_port_runtime_idle(struct device *dev)
@@ -73,7 +71,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
.restore_noirq = pcie_port_device_resume_noirq,
.restore = pcie_port_device_resume,
.runtime_suspend = pcie_port_runtime_suspend,
- .runtime_resume = pcie_port_runtime_resume,
+ .runtime_resume = pcie_port_device_runtime_resume,
.runtime_idle = pcie_port_runtime_idle,
};
@@ -109,8 +107,8 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
pci_save_state(dev);
- dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND |
- DPM_FLAG_LEAVE_SUSPENDED);
+ dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NEVER_SKIP |
+ DPM_FLAG_SMART_SUSPEND);
if (pci_bridge_d3_possible(dev)) {
/*
@@ -146,6 +144,13 @@ static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev,
return PCI_ERS_RESULT_CAN_RECOVER;
}
+static pci_ers_result_t pcie_portdrv_slot_reset(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+ pci_save_state(dev);
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
static pci_ers_result_t pcie_portdrv_mmio_enabled(struct pci_dev *dev)
{
return PCI_ERS_RESULT_RECOVERED;
@@ -185,6 +190,7 @@ static const struct pci_device_id port_pci_ids[] = { {
static const struct pci_error_handlers pcie_portdrv_err_handler = {
.error_detected = pcie_portdrv_error_detected,
+ .slot_reset = pcie_portdrv_slot_reset,
.mmio_enabled = pcie_portdrv_mmio_enabled,
.resume = pcie_portdrv_err_resume,
};
@@ -226,11 +232,20 @@ static const struct dmi_system_id pcie_portdrv_dmi_table[] __initconst = {
{}
};
+static void __init pcie_init_services(void)
+{
+ pcie_aer_init();
+ pcie_pme_init();
+ pcie_dpc_init();
+ pcie_hp_init();
+}
+
static int __init pcie_portdrv_init(void)
{
if (pcie_ports_disabled)
return -EACCES;
+ pcie_init_services();
dmi_check_system(pcie_portdrv_dmi_table);
return pci_register_driver(&pcie_portdriver);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 201f9e5ff55c..b1c05b5054a0 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -713,6 +713,7 @@ static void pci_set_bus_speed(struct pci_bus *bus)
pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
+ bridge->link_active_reporting = !!(linkcap & PCI_EXP_LNKCAP_DLLLARC);
pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
pcie_update_link_speed(bus, linksta);
@@ -1438,12 +1439,29 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
return PCI_CFG_SPACE_EXP_SIZE;
}
+#ifdef CONFIG_PCI_IOV
+static bool is_vf0(struct pci_dev *dev)
+{
+ if (pci_iov_virtfn_devfn(dev->physfn, 0) == dev->devfn &&
+ pci_iov_virtfn_bus(dev->physfn, 0) == dev->bus->number)
+ return true;
+
+ return false;
+}
+#endif
+
int pci_cfg_space_size(struct pci_dev *dev)
{
int pos;
u32 status;
u16 class;
+#ifdef CONFIG_PCI_IOV
+ /* Read cached value for all VFs except for VF0 */
+ if (dev->is_virtfn && !is_vf0(dev))
+ return dev->physfn->sriov->cfg_size;
+#endif
+
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG)
return PCI_CFG_SPACE_SIZE;
@@ -2143,7 +2161,7 @@ static void pci_release_dev(struct device *dev)
pcibios_release_device(pci_dev);
pci_bus_put(pci_dev->bus);
kfree(pci_dev->driver_override);
- kfree(pci_dev->dma_alias_mask);
+ bitmap_free(pci_dev->dma_alias_mask);
kfree(pci_dev);
}
@@ -2397,8 +2415,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
dev->dev.dma_parms = &dev->dma_parms;
dev->dev.coherent_dma_mask = 0xffffffffull;
- pci_set_dma_max_seg_size(dev, 65536);
- pci_set_dma_seg_boundary(dev, 0xffffffff);
+ dma_set_max_seg_size(&dev->dev, 65536);
+ dma_set_seg_boundary(&dev->dev, 0xffffffff);
/* Fix up broken headers */
pci_fixup_device(pci_fixup_header, dev);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6bc27b7fd452..4700d24e5d55 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3190,7 +3190,11 @@ static void disable_igfx_irq(struct pci_dev *dev)
pci_iounmap(dev, regs);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
@@ -4987,7 +4991,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
void __iomem *mmio;
struct ntb_info_regs __iomem *mmio_ntb;
struct ntb_ctrl_regs __iomem *mmio_ctrl;
- struct sys_info_regs __iomem *mmio_sys_info;
u64 partition_map;
u8 partition;
int pp;
@@ -5008,7 +5011,6 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;
- mmio_sys_info = mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
partition = ioread8(&mmio_ntb->partition_id);
@@ -5057,59 +5059,37 @@ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
pci_iounmap(pdev, mmio);
pci_disable_device(pdev);
}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8531,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8532,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8533,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8534,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8535,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8536,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8543,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8544,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8545,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8546,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8551,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8552,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8553,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8554,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8555,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8556,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8561,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8562,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8563,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8564,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8565,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8566,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8571,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8572,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8573,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8574,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8575,
- quirk_switchtec_ntb_dma_alias);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MICROSEMI, 0x8576,
- quirk_switchtec_ntb_dma_alias);
+#define SWITCHTEC_QUIRK(vid) \
+ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
+ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)
+
+SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */
+SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */
+SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */
+SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */
+SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */
+SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */
+SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */
+SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */
+SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */
+SWITCHTEC_QUIRK(0x8544); /* PSX 64xG3 */
+SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */
+SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */
+SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */
+SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */
+SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */
+SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */
+SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */
+SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */
+SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */
+SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */
+SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */
+SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */
+SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */
+SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */
+SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */
+SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */
+SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
+SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
+SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
+SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 461e7fd2756f..e9c6b120cf45 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -25,9 +25,6 @@ static void pci_stop_dev(struct pci_dev *dev)
pci_dev_assign_added(dev, false);
}
-
- if (dev->bus->self)
- pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
@@ -41,6 +38,7 @@ static void pci_destroy_dev(struct pci_dev *dev)
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
+ pcie_aspm_exit_link_state(dev);
pci_bridge_d3_update(dev);
pci_free_resources(dev);
put_device(&dev->dev);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 79b1824e83b4..ed960436df5e 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -811,6 +811,8 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus,
static resource_size_t calculate_iosize(resource_size_t size,
resource_size_t min_size,
resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -823,15 +825,18 @@ static resource_size_t calculate_iosize(resource_size_t size,
#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
size = (size & 0xff) + ((size & ~0xffUL) << 2);
#endif
- size = ALIGN(size + size1, align);
+ size = size + size1;
if (size < old_size)
size = old_size;
+
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
static resource_size_t calculate_memsize(resource_size_t size,
resource_size_t min_size,
- resource_size_t size1,
+ resource_size_t add_size,
+ resource_size_t children_add_size,
resource_size_t old_size,
resource_size_t align)
{
@@ -841,7 +846,8 @@ static resource_size_t calculate_memsize(resource_size_t size,
old_size = 0;
if (size < old_size)
size = old_size;
- size = ALIGN(size + size1, align);
+
+ size = ALIGN(max(size, add_size) + children_add_size, align);
return size;
}
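Folding add_size and children_add_size into the helpers changes the order of operations: the optional space is now added before the final alignment instead of being aligned separately by the caller. A worked example with hypothetical numbers:

	/*
	 * size = 0x300 (required), add_size = 0x200 (optional),
	 * children_add_size = 0x400, old_size = 0, align = 0x1000:
	 *
	 *   ALIGN(max(0x300, 0x200) + 0x400, 0x1000)
	 * = ALIGN(0x700, 0x1000)
	 * = 0x1000
	 */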
@@ -930,12 +936,10 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
}
}
- size0 = calculate_iosize(size, min_size, size1,
+ size0 = calculate_iosize(size, min_size, size1, 0, 0,
resource_size(b_res), min_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_iosize(size, min_size, add_size + size1,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_iosize(size, min_size, size1, add_size, children_add_size,
resource_size(b_res), min_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
@@ -1079,12 +1083,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
min_align = calculate_mem_align(aligns, max_order);
min_align = max(min_align, window_alignment(bus, b_res->flags));
- size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
+ size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
add_align = max(min_align, add_align);
- if (children_add_size > add_size)
- add_size = children_add_size;
- size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 :
- calculate_memsize(size, min_size, add_size,
+ size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
+ calculate_memsize(size, min_size, add_size, children_add_size,
resource_size(b_res), add_align);
if (!size0 && !size1) {
if (b_res->start || b_res->end)
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index e634229ece89..c46d5e1ff536 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -14,7 +14,6 @@
struct kset *pci_slots_kset;
EXPORT_SYMBOL_GPL(pci_slots_kset);
-static DEFINE_MUTEX(pci_slot_mutex);
static ssize_t pci_slot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -371,7 +370,7 @@ void pci_hp_create_module_link(struct pci_slot *pci_slot)
if (!slot || !slot->ops)
return;
- kobj = kset_find_obj(module_kset, slot->ops->mod_name);
+ kobj = kset_find_obj(module_kset, slot->mod_name);
if (!kobj)
return;
ret = sysfs_create_link(&pci_slot->kobj, kobj, "module");
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 9671ded549f0..b31abe35ed2c 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -230,7 +230,7 @@ static int electra_cf_probe(struct platform_device *ofdev)
if (!cf->mem_base || !cf->io_virt || !cf->gpio_base ||
(__ioremap_at(io.start, cf->io_virt, cf->io_size,
- pgprot_val(pgprot_noncached(__pgprot(0)))) == NULL)) {
+ pgprot_noncached(PAGE_KERNEL)) == NULL)) {
dev_err(device, "can't ioremap ranges\n");
status = -ENOMEM;
goto fail1;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c89d3effd99d..60f949e2a684 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -43,6 +43,7 @@ config PHY_XGENE
source "drivers/phy/allwinner/Kconfig"
source "drivers/phy/amlogic/Kconfig"
source "drivers/phy/broadcom/Kconfig"
+source "drivers/phy/cadence/Kconfig"
source "drivers/phy/hisilicon/Kconfig"
source "drivers/phy/lantiq/Kconfig"
source "drivers/phy/marvell/Kconfig"
@@ -54,6 +55,7 @@ source "drivers/phy/ralink/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
+source "drivers/phy/socionext/Kconfig"
source "drivers/phy/st/Kconfig"
source "drivers/phy/tegra/Kconfig"
source "drivers/phy/ti/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index ce8339ff0022..0301e25d07c1 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_ARCH_RENESAS) += renesas/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += broadcom/ \
+ cadence/ \
hisilicon/ \
marvell/ \
motorola/ \
@@ -22,5 +23,6 @@ obj-y += broadcom/ \
qualcomm/ \
ralink/ \
samsung/ \
+ socionext/ \
st/ \
ti/
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index 8786a9674471..aa917a61071d 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -60,7 +60,8 @@ config PHY_NS2_USB_DRD
config PHY_BRCM_SATA
tristate "Broadcom SATA PHY driver"
- depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM_IPROC || BMIPS_GENERIC || \
+ ARCH_BCM_63XX || COMPILE_TEST
depends on OF
select GENERIC_PHY
default ARCH_BCM_IPROC
diff --git a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
index 0f4ac5d63cff..b074682d9dd8 100644
--- a/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-cygnus-pcie.c
@@ -153,8 +153,8 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
struct cygnus_pcie_phy *p;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property for %s\n",
- child->name);
+ dev_err(dev, "missing reg property for %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 8708ea3b4d6d..0f4a06ff7fd3 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -47,6 +47,7 @@ enum brcm_sata_phy_version {
BRCM_SATA_PHY_IPROC_NS2,
BRCM_SATA_PHY_IPROC_NSP,
BRCM_SATA_PHY_IPROC_SR,
+ BRCM_SATA_PHY_DSL_28NM,
};
enum brcm_sata_phy_rxaeq_mode {
@@ -96,7 +97,10 @@ enum sata_phy_regs {
PLLCONTROL_0_FREQ_DET_RESTART = BIT(13),
PLLCONTROL_0_FREQ_MONITOR = BIT(12),
PLLCONTROL_0_SEQ_START = BIT(15),
+ PLL_CAP_CHARGE_TIME = 0x83,
+ PLL_VCO_CAL_THRESH = 0x84,
PLL_CAP_CONTROL = 0x85,
+ PLL_FREQ_DET_TIME = 0x86,
PLL_ACTRL2 = 0x8b,
PLL_ACTRL2_SELDIV_MASK = 0x1f,
PLL_ACTRL2_SELDIV_SHIFT = 9,
@@ -106,6 +110,9 @@ enum sata_phy_regs {
PLL1_ACTRL2 = 0x82,
PLL1_ACTRL3 = 0x83,
PLL1_ACTRL4 = 0x84,
+ PLL1_ACTRL5 = 0x85,
+ PLL1_ACTRL6 = 0x86,
+ PLL1_ACTRL7 = 0x87,
TX_REG_BANK = 0x070,
TX_ACTRL0 = 0x80,
@@ -119,6 +126,8 @@ enum sata_phy_regs {
AEQ_FRC_EQ_FORCE = BIT(0),
AEQ_FRC_EQ_FORCE_VAL = BIT(1),
AEQRX_REG_BANK_1 = 0xe0,
+ AEQRX_SLCAL0_CTRL0 = 0x82,
+ AEQRX_SLCAL1_CTRL0 = 0x86,
OOB_REG_BANK = 0x150,
OOB1_REG_BANK = 0x160,
@@ -168,6 +177,7 @@ static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
switch (priv->version) {
case BRCM_SATA_PHY_STB_28NM:
case BRCM_SATA_PHY_IPROC_NS2:
+ case BRCM_SATA_PHY_DSL_28NM:
size = SATA_PCB_REG_28NM_SPACE_SIZE;
break;
case BRCM_SATA_PHY_STB_40NM:
@@ -482,6 +492,61 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
return 0;
}
+static int brcm_dsl_sata_init(struct brcm_sata_port *port)
+{
+ void __iomem *base = brcm_sata_pcb_base(port);
+ struct device *dev = port->phy_priv->dev;
+ unsigned int try;
+ u32 tmp;
+
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
+
+ brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3089);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ 0, 0x3088);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
+ 0, 0x3000);
+
+ brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
+ 0, 0x3000);
+ usleep_range(1000, 2000);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
+
+ brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
+ usleep_range(1000, 2000);
+
+ /* Acquire PLL lock */
+ try = 50;
+ while (try) {
+ tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ BLOCK0_XGXSSTATUS);
+ if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK)
+ break;
+ msleep(20);
+ try--;
+ }
+
+ if (!try) {
+ /* PLL did not lock; give up */
+ dev_err(dev, "port%d PLL did not lock\n", port->portnum);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "port%d initialized\n", port->portnum);
+
+ return 0;
+}
+
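The lock wait above is an open-coded poll loop (50 tries, 20 ms apart, roughly 1 s total). Where the status register is directly readable, <linux/iopoll.h> offers the same thing in one line; a hedged sketch, assuming a hypothetical lock bit at BIT(12) of a readable status register:

	#include <linux/iopoll.h>

	#define DEMO_PLL_LOCK	BIT(12)	/* hypothetical lock bit */

	static int demo_wait_pll_lock(void __iomem *status_reg)
	{
		u32 val;

		/* poll every 20 ms, time out after 1 s */
		return readl_poll_timeout(status_reg, val, val & DEMO_PLL_LOCK,
					  20 * USEC_PER_MSEC, USEC_PER_SEC);
	}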
static int brcm_sata_phy_init(struct phy *phy)
{
int rc;
@@ -501,6 +566,9 @@ static int brcm_sata_phy_init(struct phy *phy)
case BRCM_SATA_PHY_IPROC_SR:
rc = brcm_sr_sata_init(port);
break;
+ case BRCM_SATA_PHY_DSL_28NM:
+ rc = brcm_dsl_sata_init(port);
+ break;
default:
rc = -ENODEV;
}
@@ -552,6 +620,8 @@ static const struct of_device_id brcm_sata_phy_of_match[] = {
.data = (void *)BRCM_SATA_PHY_IPROC_NSP },
{ .compatible = "brcm,iproc-sr-sata-phy",
.data = (void *)BRCM_SATA_PHY_IPROC_SR },
+ { .compatible = "brcm,bcm63138-sata-phy",
+ .data = (void *)BRCM_SATA_PHY_DSL_28NM },
{},
};
MODULE_DEVICE_TABLE(of, brcm_sata_phy_of_match);
@@ -600,8 +670,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
struct brcm_sata_port *port;
if (of_property_read_u32(child, "reg", &id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index d1dab36fa5b7..f59b1dc30399 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -372,10 +372,8 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
clk_disable(priv->usb_30_clk);
phy_provider = devm_of_phy_provider_register(dev, brcm_usb_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
- return 0;
+ return PTR_ERR_OR_ZERO(phy_provider);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/phy/cadence/Kconfig b/drivers/phy/cadence/Kconfig
new file mode 100644
index 000000000000..57fff7de4031
--- /dev/null
+++ b/drivers/phy/cadence/Kconfig
@@ -0,0 +1,10 @@
+#
+# Phy driver for Cadence MHDP DisplayPort controller
+#
+config PHY_CADENCE_DP
+ tristate "Cadence MHDP DisplayPort PHY driver"
+ depends on OF
+ depends on HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Support for Cadence MHDP DisplayPort PHY.
diff --git a/drivers/phy/cadence/Makefile b/drivers/phy/cadence/Makefile
new file mode 100644
index 000000000000..e5b0a11cf28a
--- /dev/null
+++ b/drivers/phy/cadence/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_PHY_CADENCE_DP) += phy-cadence-dp.o
diff --git a/drivers/phy/cadence/phy-cadence-dp.c b/drivers/phy/cadence/phy-cadence-dp.c
new file mode 100644
index 000000000000..bc10cb264b7a
--- /dev/null
+++ b/drivers/phy/cadence/phy-cadence-dp.c
@@ -0,0 +1,541 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Cadence MHDP DisplayPort SD0801 PHY driver.
+ *
+ * Copyright 2018 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#define DEFAULT_NUM_LANES 2
+#define MAX_NUM_LANES 4
+#define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */
+
+#define POLL_TIMEOUT_US 2000
+#define LANE_MASK 0x7
+
+/*
+ * register offsets from DPTX PHY register block base (i.e. MHDP
+ * register base + 0x30a00)
+ */
+#define PHY_AUX_CONFIG 0x00
+#define PHY_AUX_CTRL 0x04
+#define PHY_RESET 0x20
+#define PHY_PMA_XCVR_PLLCLK_EN 0x24
+#define PHY_PMA_XCVR_PLLCLK_EN_ACK 0x28
+#define PHY_PMA_XCVR_POWER_STATE_REQ 0x2c
+#define PHY_POWER_STATE_LN_0 0x0000
+#define PHY_POWER_STATE_LN_1 0x0008
+#define PHY_POWER_STATE_LN_2 0x0010
+#define PHY_POWER_STATE_LN_3 0x0018
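+/*
+ * The PHY_POWER_STATE_LN_* values are the bit offsets of the per-lane
+ * 6-bit power state fields within PHY_PMA_XCVR_POWER_STATE_REQ.
+ */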
+#define PHY_PMA_XCVR_POWER_STATE_ACK 0x30
+#define PHY_PMA_CMN_READY 0x34
+#define PHY_PMA_XCVR_TX_VMARGIN 0x38
+#define PHY_PMA_XCVR_TX_DEEMPH 0x3c
+
+/*
+ * register offsets from SD0801 PHY register block base (i.e. MHDP
+ * register base + 0x500000)
+ */
+#define CMN_SSM_BANDGAP_TMR 0x00084
+#define CMN_SSM_BIAS_TMR 0x00088
+#define CMN_PLLSM0_PLLPRE_TMR 0x000a8
+#define CMN_PLLSM0_PLLLOCK_TMR 0x000b0
+#define CMN_PLLSM1_PLLPRE_TMR 0x000c8
+#define CMN_PLLSM1_PLLLOCK_TMR 0x000d0
+#define CMN_BGCAL_INIT_TMR 0x00190
+#define CMN_BGCAL_ITER_TMR 0x00194
+#define CMN_IBCAL_INIT_TMR 0x001d0
+#define CMN_PLL0_VCOCAL_INIT_TMR 0x00210
+#define CMN_PLL0_VCOCAL_ITER_TMR 0x00214
+#define CMN_PLL0_VCOCAL_REFTIM_START 0x00218
+#define CMN_PLL0_VCOCAL_PLLCNT_START 0x00220
+#define CMN_PLL0_INTDIV_M0 0x00240
+#define CMN_PLL0_FRACDIVL_M0 0x00244
+#define CMN_PLL0_FRACDIVH_M0 0x00248
+#define CMN_PLL0_HIGH_THR_M0 0x0024c
+#define CMN_PLL0_DSM_DIAG_M0 0x00250
+#define CMN_PLL0_LOCK_PLLCNT_START 0x00278
+#define CMN_PLL1_VCOCAL_INIT_TMR 0x00310
+#define CMN_PLL1_VCOCAL_ITER_TMR 0x00314
+#define CMN_PLL1_DSM_DIAG_M0 0x00350
+#define CMN_TXPUCAL_INIT_TMR 0x00410
+#define CMN_TXPUCAL_ITER_TMR 0x00414
+#define CMN_TXPDCAL_INIT_TMR 0x00430
+#define CMN_TXPDCAL_ITER_TMR 0x00434
+#define CMN_RXCAL_INIT_TMR 0x00450
+#define CMN_RXCAL_ITER_TMR 0x00454
+#define CMN_SD_CAL_INIT_TMR 0x00490
+#define CMN_SD_CAL_ITER_TMR 0x00494
+#define CMN_SD_CAL_REFTIM_START 0x00498
+#define CMN_SD_CAL_PLLCNT_START 0x004a0
+#define CMN_PDIAG_PLL0_CTRL_M0 0x00680
+#define CMN_PDIAG_PLL0_CLK_SEL_M0 0x00684
+#define CMN_PDIAG_PLL0_CP_PADJ_M0 0x00690
+#define CMN_PDIAG_PLL0_CP_IADJ_M0 0x00694
+#define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x00698
+#define CMN_PDIAG_PLL0_CP_PADJ_M1 0x006d0
+#define CMN_PDIAG_PLL0_CP_IADJ_M1 0x006d4
+#define CMN_PDIAG_PLL1_CLK_SEL_M0 0x00704
+#define XCVR_DIAG_PLLDRC_CTRL 0x10394
+#define XCVR_DIAG_HSCLK_SEL 0x10398
+#define XCVR_DIAG_HSCLK_DIV 0x1039c
+#define TX_PSC_A0 0x10400
+#define TX_PSC_A1 0x10404
+#define TX_PSC_A2 0x10408
+#define TX_PSC_A3 0x1040c
+#define RX_PSC_A0 0x20000
+#define RX_PSC_A1 0x20004
+#define RX_PSC_A2 0x20008
+#define RX_PSC_A3 0x2000c
+#define PHY_PLL_CFG 0x30038
+
+struct cdns_dp_phy {
+ void __iomem *base; /* DPTX registers base */
+ void __iomem *sd_base; /* SD0801 registers base */
+ u32 num_lanes; /* Number of lanes to use */
+ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */
+ struct device *dev;
+};
+
+static int cdns_dp_phy_init(struct phy *phy);
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane);
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy);
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val);
+
+static const struct phy_ops cdns_dp_phy_ops = {
+ .init = cdns_dp_phy_init,
+ .owner = THIS_MODULE,
+};
+
+static int cdns_dp_phy_init(struct phy *phy)
+{
+ unsigned char lane_bits;
+
+ struct cdns_dp_phy *cdns_phy = phy_get_drvdata(phy);
+
+ writel(0x0003, cdns_phy->base + PHY_AUX_CTRL); /* enable AUX */
+
+ /* PHY PMA registers configuration function */
+ cdns_dp_phy_pma_cfg(cdns_phy);
+
+ /*
+ * Set the lanes' power state to A0 and
+ * set the lanes' PLL clock enable to 0
+ */
+
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_0, 6, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_1, 6, 0x0000);
+
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_2, 6, 0);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_POWER_STATE_REQ,
+ PHY_POWER_STATE_LN_3, 6, 0);
+ }
+ }
+
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 0, 1, 0x0000);
+
+ if (cdns_phy->num_lanes >= 2) {
+ cdns_dp_phy_write_field(cdns_phy, PHY_PMA_XCVR_PLLCLK_EN,
+ 1, 1, 0x0000);
+ if (cdns_phy->num_lanes == 4) {
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 2, 1, 0x0000);
+ cdns_dp_phy_write_field(cdns_phy,
+ PHY_PMA_XCVR_PLLCLK_EN,
+ 3, 1, 0x0000);
+ }
+ }
+
+ /*
+ * release phy_l0*_reset_n and pma_tx_elec_idle_ln_* based on
+ * used lanes
+ */
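+ /*
+ * Illustrative example: with num_lanes = 2, lane_bits = 0x3, so the
+ * value written below is ((0xF & ~0x3) << 4) | 0x3 = 0xc3 - the used
+ * lanes come out of reset and the unused lanes stay electrically idle.
+ */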
+ lane_bits = (1 << cdns_phy->num_lanes) - 1;
+ writel(((0xF & ~lane_bits) << 4) | (0xF & lane_bits),
+ cdns_phy->base + PHY_RESET);
+
+ /* release pma_xcvr_pllclk_en_ln_*, only for the master lane */
+ writel(0x0001, cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN);
+
+ /* PHY PMA registers configuration functions */
+ cdns_dp_phy_pma_cmn_vco_cfg_25mhz(cdns_phy);
+ cdns_dp_phy_pma_cmn_rate(cdns_phy);
+
+ /* take out of reset */
+ cdns_dp_phy_write_field(cdns_phy, PHY_RESET, 8, 1, 1);
+ cdns_dp_phy_wait_pma_cmn_ready(cdns_phy);
+ cdns_dp_phy_run(cdns_phy);
+
+ return 0;
+}
+
+static void cdns_dp_phy_wait_pma_cmn_ready(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int reg;
+ int ret;
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_CMN_READY, reg,
+ reg & 1, 0, 500);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for PMA common ready\n");
+}
+
+static void cdns_dp_phy_pma_cfg(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int i;
+
+ /* PMA common configuration */
+ cdns_dp_phy_pma_cmn_cfg_25mhz(cdns_phy);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < cdns_phy->num_lanes; i++)
+ cdns_dp_phy_pma_lane_cfg(cdns_phy, i);
+}
+
+static void cdns_dp_phy_pma_cmn_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* refclock registers - assumes 25 MHz refclock */
+ writel(0x0019, cdns_phy->sd_base + CMN_SSM_BIAS_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM0_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM0_PLLLOCK_TMR);
+ writel(0x0032, cdns_phy->sd_base + CMN_PLLSM1_PLLPRE_TMR);
+ writel(0x00D1, cdns_phy->sd_base + CMN_PLLSM1_PLLLOCK_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_INIT_TMR);
+ writel(0x007D, cdns_phy->sd_base + CMN_BGCAL_ITER_TMR);
+ writel(0x0019, cdns_phy->sd_base + CMN_IBCAL_INIT_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPUCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPUCAL_ITER_TMR);
+ writel(0x001E, cdns_phy->sd_base + CMN_TXPDCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_TXPDCAL_ITER_TMR);
+ writel(0x02EE, cdns_phy->sd_base + CMN_RXCAL_INIT_TMR);
+ writel(0x0006, cdns_phy->sd_base + CMN_RXCAL_ITER_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_INIT_TMR);
+ writel(0x0002, cdns_phy->sd_base + CMN_SD_CAL_ITER_TMR);
+ writel(0x000E, cdns_phy->sd_base + CMN_SD_CAL_REFTIM_START);
+ writel(0x012B, cdns_phy->sd_base + CMN_SD_CAL_PLLCNT_START);
+ /* PLL registers */
+ writel(0x0409, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_PADJ_M0);
+ writel(0x1001, cdns_phy->sd_base + CMN_PDIAG_PLL0_CP_IADJ_M0);
+ writel(0x0F08, cdns_phy->sd_base + CMN_PDIAG_PLL0_FILT_PADJ_M0);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_DSM_DIAG_M0);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL0_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL0_VCOCAL_ITER_TMR);
+ writel(0x00FA, cdns_phy->sd_base + CMN_PLL1_VCOCAL_INIT_TMR);
+ writel(0x0004, cdns_phy->sd_base + CMN_PLL1_VCOCAL_ITER_TMR);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_REFTIM_START);
+}
+
+static void cdns_dp_phy_pma_cmn_vco_cfg_25mhz(struct cdns_dp_phy *cdns_phy)
+{
+ /* Assumes 25 MHz refclock */
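+ /*
+ * The divider values below are consistent with
+ * VCO = refclk * (INTDIV + FRACDIVL / 65536), e.g.
+ * 25 MHz * 432 (0x1B0) = 10.8 GHz for the 2.7/5.4 Gbps rates.
+ */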
+ switch (cdns_phy->max_bit_rate) {
+ /* Setting VCO for 10.8GHz */
+ case 2700:
+ case 5400:
+ writel(0x01B0, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0120, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 9.72GHz */
+ case 2430:
+ case 3240:
+ writel(0x0184, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0xCCCD, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x0104, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.64GHz */
+ case 2160:
+ case 4320:
+ writel(0x0159, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x999A, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00E7, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ /* Setting VCO for 8.1GHz */
+ case 8100:
+ writel(0x0144, cdns_phy->sd_base + CMN_PLL0_INTDIV_M0);
+ writel(0x0000, cdns_phy->sd_base + CMN_PLL0_FRACDIVL_M0);
+ writel(0x0002, cdns_phy->sd_base + CMN_PLL0_FRACDIVH_M0);
+ writel(0x00D8, cdns_phy->sd_base + CMN_PLL0_HIGH_THR_M0);
+ break;
+ }
+
+ writel(0x0002, cdns_phy->sd_base + CMN_PDIAG_PLL0_CTRL_M0);
+ writel(0x0318, cdns_phy->sd_base + CMN_PLL0_VCOCAL_PLLCNT_START);
+}
+
+static void cdns_dp_phy_pma_cmn_rate(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int clk_sel_val = 0;
+ unsigned int hsclk_div_val = 0;
+ unsigned int i;
+
+ /* 16'h0000 for single DP link configuration */
+ writel(0x0000, cdns_phy->sd_base + PHY_PLL_CFG);
+
+ switch (cdns_phy->max_bit_rate) {
+ case 1620:
+ clk_sel_val = 0x0f01;
+ hsclk_div_val = 2;
+ break;
+ case 2160:
+ case 2430:
+ case 2700:
+ clk_sel_val = 0x0701;
+ hsclk_div_val = 1;
+ break;
+ case 3240:
+ clk_sel_val = 0x0b00;
+ hsclk_div_val = 2;
+ break;
+ case 4320:
+ case 5400:
+ clk_sel_val = 0x0301;
+ hsclk_div_val = 0;
+ break;
+ case 8100:
+ clk_sel_val = 0x0200;
+ hsclk_div_val = 0;
+ break;
+ }
+
+ writel(clk_sel_val, cdns_phy->sd_base + CMN_PDIAG_PLL0_CLK_SEL_M0);
+
+ /* PMA lane configuration to deal with multi-link operation */
+ for (i = 0; i < cdns_phy->num_lanes; i++) {
+ writel(hsclk_div_val,
+ cdns_phy->sd_base + (XCVR_DIAG_HSCLK_DIV | (i<<11)));
+ }
+}
+
+static void cdns_dp_phy_pma_lane_cfg(struct cdns_dp_phy *cdns_phy,
+ unsigned int lane)
+{
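+ /* per-lane register blocks are spaced 0x800 (1 << 11) bytes apart */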
+ unsigned int lane_bits = (lane & LANE_MASK) << 11;
+
+ /* Writing Tx/Rx Power State Controllers registers */
+ writel(0x00FB, cdns_phy->sd_base + (TX_PSC_A0 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A2 | lane_bits));
+ writel(0x04AA, cdns_phy->sd_base + (TX_PSC_A3 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A0 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A2 | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (RX_PSC_A3 | lane_bits));
+
+ writel(0x0001, cdns_phy->sd_base + (XCVR_DIAG_PLLDRC_CTRL | lane_bits));
+ writel(0x0000, cdns_phy->sd_base + (XCVR_DIAG_HSCLK_SEL | lane_bits));
+}
+
+static void cdns_dp_phy_run(struct cdns_dp_phy *cdns_phy)
+{
+ unsigned int read_val;
+ u32 write_val1 = 0;
+ u32 write_val2 = 0;
+ u32 mask = 0;
+ int ret;
+
+ /*
+ * waiting for ACK of pma_xcvr_pllclk_en_ln_*, only for the
+ * master lane
+ */
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_PLLCLK_EN_ACK,
+ read_val, read_val & 1, 0, POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link PLL clock enable ack\n");
+
+ ndelay(100);
+
+ switch (cdns_phy->num_lanes) {
+ case 1: /* lane 0 */
+ write_val1 = 0x00000004;
+ write_val2 = 0x00000001;
+ mask = 0x0000003f;
+ break;
+ case 2: /* lane 0-1 */
+ write_val1 = 0x00000404;
+ write_val2 = 0x00000101;
+ mask = 0x00003f3f;
+ break;
+ case 4: /* lane 0-3 */
+ write_val1 = 0x04040404;
+ write_val2 = 0x01010101;
+ mask = 0x3f3f3f3f;
+ break;
+ }
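+ /*
+ * Each active lane has a 6-bit power state field, one byte apart
+ * (hence the 0x3f-per-byte masks); write_val1 and write_val2
+ * replicate the requested state across all active lanes.
+ */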
+
+ writel(write_val1, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val1, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+
+ writel(write_val2, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+
+ ret = readl_poll_timeout(cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_ACK,
+ read_val, (read_val & mask) == write_val2, 0,
+ POLL_TIMEOUT_US);
+ if (ret == -ETIMEDOUT)
+ dev_err(cdns_phy->dev,
+ "timeout waiting for link power state ack\n");
+
+ writel(0, cdns_phy->base + PHY_PMA_XCVR_POWER_STATE_REQ);
+ ndelay(100);
+}
+
+static void cdns_dp_phy_write_field(struct cdns_dp_phy *cdns_phy,
+ unsigned int offset,
+ unsigned char start_bit,
+ unsigned char num_bits,
+ unsigned int val)
+{
+ unsigned int field_mask = ((1 << num_bits) - 1) << start_bit;
+ unsigned int read_val;
+
+ read_val = readl(cdns_phy->base + offset);
+ writel((read_val & ~field_mask) | (val << start_bit),
+ cdns_phy->base + offset);
+}
+
+static int cdns_dp_phy_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct cdns_dp_phy *cdns_phy;
+ struct device *dev = &pdev->dev;
+ struct phy_provider *phy_provider;
+ struct phy *phy;
+ int err;
+
+ cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL);
+ if (!cdns_phy)
+ return -ENOMEM;
+
+ cdns_phy->dev = &pdev->dev;
+
+ phy = devm_phy_create(dev, NULL, &cdns_dp_phy_ops);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "failed to create DisplayPort PHY\n");
+ return PTR_ERR(phy);
+ }
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ cdns_phy->base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->base))
+ return PTR_ERR(cdns_phy->base);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(cdns_phy->sd_base))
+ return PTR_ERR(cdns_phy->sd_base);
+
+ err = device_property_read_u32(dev, "num_lanes",
+ &cdns_phy->num_lanes);
+ if (err)
+ cdns_phy->num_lanes = DEFAULT_NUM_LANES;
+
+ switch (cdns_phy->num_lanes) {
+ case 1:
+ case 2:
+ case 4:
+ /* valid number of lanes */
+ break;
+ default:
+ dev_err(dev, "unsupported number of lanes: %d\n",
+ cdns_phy->num_lanes);
+ return -EINVAL;
+ }
+
+ err = device_property_read_u32(dev, "max_bit_rate",
+ &cdns_phy->max_bit_rate);
+ if (err)
+ cdns_phy->max_bit_rate = DEFAULT_MAX_BIT_RATE;
+
+ switch (cdns_phy->max_bit_rate) {
+ case 2160:
+ case 2430:
+ case 2700:
+ case 3240:
+ case 4320:
+ case 5400:
+ case 8100:
+ /* valid bit rate */
+ break;
+ default:
+ dev_err(dev, "unsupported max bit rate: %dMbps\n",
+ cdns_phy->max_bit_rate);
+ return -EINVAL;
+ }
+
+ phy_set_drvdata(phy, cdns_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n",
+ cdns_phy->num_lanes,
+ cdns_phy->max_bit_rate / 1000,
+ cdns_phy->max_bit_rate % 1000);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id cdns_dp_phy_of_match[] = {
+ {
+ .compatible = "cdns,dp-phy"
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cdns_dp_phy_of_match);
+
+static struct platform_driver cdns_dp_phy_driver = {
+ .probe = cdns_dp_phy_probe,
+ .driver = {
+ .name = "cdns-dp-phy",
+ .of_match_table = cdns_dp_phy_of_match,
+ }
+};
+module_platform_driver(cdns_dp_phy_driver);
+
+MODULE_AUTHOR("Cadence Design Systems, Inc.");
+MODULE_DESCRIPTION("Cadence MHDP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
index 986224fca9e9..f9e0dd19ff26 100644
--- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
+++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c
@@ -156,7 +156,6 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
{
struct device *dev = priv->dev;
const __be32 *offset;
- int ret;
priv->reg_bits = of_device_get_match_data(dev);
@@ -196,10 +195,8 @@ static int ltq_rcu_usb2_of_parse(struct ltq_rcu_usb2_priv *priv,
}
priv->phy_reset = devm_reset_control_get_optional(dev, "phy");
- if (IS_ERR(priv->phy_reset))
- return PTR_ERR(priv->phy_reset);
- return 0;
+ return PTR_ERR_OR_ZERO(priv->phy_reset);
}
static int ltq_rcu_usb2_phy_probe(struct platform_device *pdev)
diff --git a/drivers/phy/marvell/Kconfig b/drivers/phy/marvell/Kconfig
index 68e321225400..6fb4b56e4c14 100644
--- a/drivers/phy/marvell/Kconfig
+++ b/drivers/phy/marvell/Kconfig
@@ -59,3 +59,14 @@ config PHY_PXA_28NM_USB2
The PHY driver will be used by Marvell udc/ehci/otg driver.
To compile this driver as a module, choose M here.
+
+config PHY_PXA_USB
+ tristate "Marvell PXA USB PHY Driver"
+ depends on ARCH_PXA || ARCH_MMP
+ select GENERIC_PHY
+ help
+ Enable this to support the Marvell PXA USB PHY driver for
+ Marvell SoCs. This driver handles PHY initialization and shutdown.
+ The PHY driver will be used by Marvell udc/ehci/otg driver.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/phy/marvell/Makefile b/drivers/phy/marvell/Makefile
index 5c3ec5d10e0d..3975b144f8ec 100644
--- a/drivers/phy/marvell/Makefile
+++ b/drivers/phy/marvell/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_PHY_MVEBU_CP110_COMPHY) += phy-mvebu-cp110-comphy.o
obj-$(CONFIG_PHY_MVEBU_SATA) += phy-mvebu-sata.o
obj-$(CONFIG_PHY_PXA_28NM_HSIC) += phy-pxa-28nm-hsic.o
obj-$(CONFIG_PHY_PXA_28NM_USB2) += phy-pxa-28nm-usb2.o
+obj-$(CONFIG_PHY_PXA_USB) += phy-pxa-usb.o
diff --git a/drivers/phy/marvell/phy-berlin-sata.c b/drivers/phy/marvell/phy-berlin-sata.c
index c1bb6725e48f..a91fc67fc4e0 100644
--- a/drivers/phy/marvell/phy-berlin-sata.c
+++ b/drivers/phy/marvell/phy-berlin-sata.c
@@ -231,14 +231,14 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
struct phy_berlin_desc *phy_desc;
if (of_property_read_u32(child, "reg", &phy_id)) {
- dev_err(dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ child);
ret = -EINVAL;
goto put_child;
}
if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
- dev_err(dev, "invalid reg in node %s\n", child->name);
+ dev_err(dev, "invalid reg in node %pOFn\n", child);
ret = -EINVAL;
goto put_child;
}
diff --git a/drivers/phy/marvell/phy-pxa-usb.c b/drivers/phy/marvell/phy-pxa-usb.c
new file mode 100644
index 000000000000..87ff7550b912
--- /dev/null
+++ b/drivers/phy/marvell/phy-pxa-usb.c
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Copyright (C) 2018 Lubomir Rintel <lkundrak@v3.sk>
+ */
+
+#include <dt-bindings/phy/phy.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* phy regs */
+#define UTMI_REVISION 0x0
+#define UTMI_CTRL 0x4
+#define UTMI_PLL 0x8
+#define UTMI_TX 0xc
+#define UTMI_RX 0x10
+#define UTMI_IVREF 0x14
+#define UTMI_T0 0x18
+#define UTMI_T1 0x1c
+#define UTMI_T2 0x20
+#define UTMI_T3 0x24
+#define UTMI_T4 0x28
+#define UTMI_T5 0x2c
+#define UTMI_RESERVE 0x30
+#define UTMI_USB_INT 0x34
+#define UTMI_DBG_CTL 0x38
+#define UTMI_OTG_ADDON 0x3c
+
+/* For UTMICTRL Register */
+#define UTMI_CTRL_USB_CLK_EN (1 << 31)
+/* pxa168 */
+#define UTMI_CTRL_SUSPEND_SET1 (1 << 30)
+#define UTMI_CTRL_SUSPEND_SET2 (1 << 29)
+#define UTMI_CTRL_RXBUF_PDWN (1 << 24)
+#define UTMI_CTRL_TXBUF_PDWN (1 << 11)
+
+#define UTMI_CTRL_INPKT_DELAY_SHIFT 30
+#define UTMI_CTRL_INPKT_DELAY_SOF_SHIFT 28
+#define UTMI_CTRL_PU_REF_SHIFT 20
+#define UTMI_CTRL_ARC_PULLDN_SHIFT 12
+#define UTMI_CTRL_PLL_PWR_UP_SHIFT 1
+#define UTMI_CTRL_PWR_UP_SHIFT 0
+
+/* For UTMI_PLL Register */
+#define UTMI_PLL_PLLCALI12_SHIFT 29
+#define UTMI_PLL_PLLCALI12_MASK (0x3 << 29)
+
+#define UTMI_PLL_PLLVDD18_SHIFT 27
+#define UTMI_PLL_PLLVDD18_MASK (0x3 << 27)
+
+#define UTMI_PLL_PLLVDD12_SHIFT 25
+#define UTMI_PLL_PLLVDD12_MASK (0x3 << 25)
+
+#define UTMI_PLL_CLK_BLK_EN_SHIFT 24
+#define CLK_BLK_EN (0x1 << 24)
+#define PLL_READY (0x1 << 23)
+#define KVCO_EXT (0x1 << 22)
+#define VCOCAL_START (0x1 << 21)
+
+#define UTMI_PLL_KVCO_SHIFT 15
+#define UTMI_PLL_KVCO_MASK (0x7 << 15)
+
+#define UTMI_PLL_ICP_SHIFT 12
+#define UTMI_PLL_ICP_MASK (0x7 << 12)
+
+#define UTMI_PLL_FBDIV_SHIFT 4
+#define UTMI_PLL_FBDIV_MASK (0xFF << 4)
+
+#define UTMI_PLL_REFDIV_SHIFT 0
+#define UTMI_PLL_REFDIV_MASK (0xF << 0)
+
+/* For UTMI_TX Register */
+#define UTMI_TX_REG_EXT_FS_RCAL_SHIFT 27
+#define UTMI_TX_REG_EXT_FS_RCAL_MASK (0xf << 27)
+
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_SHIFT 26
+#define UTMI_TX_REG_EXT_FS_RCAL_EN_MASK (0x1 << 26)
+
+#define UTMI_TX_TXVDD12_SHIFT 22
+#define UTMI_TX_TXVDD12_MASK (0x3 << 22)
+
+#define UTMI_TX_CK60_PHSEL_SHIFT 17
+#define UTMI_TX_CK60_PHSEL_MASK (0xf << 17)
+
+#define UTMI_TX_IMPCAL_VTH_SHIFT 14
+#define UTMI_TX_IMPCAL_VTH_MASK (0x7 << 14)
+
+#define REG_RCAL_START (0x1 << 12)
+
+#define UTMI_TX_LOW_VDD_EN_SHIFT 11
+
+#define UTMI_TX_AMP_SHIFT 0
+#define UTMI_TX_AMP_MASK (0x7 << 0)
+
+/* For UTMI_RX Register */
+#define UTMI_REG_SQ_LENGTH_SHIFT 15
+#define UTMI_REG_SQ_LENGTH_MASK (0x3 << 15)
+
+#define UTMI_RX_SQ_THRESH_SHIFT 4
+#define UTMI_RX_SQ_THRESH_MASK (0xf << 4)
+
+#define UTMI_OTG_ADDON_OTG_ON (1 << 0)
+
+enum pxa_usb_phy_version {
+ PXA_USB_PHY_MMP2,
+ PXA_USB_PHY_PXA910,
+ PXA_USB_PHY_PXA168,
+};
+
+struct pxa_usb_phy {
+ struct phy *phy;
+ void __iomem *base;
+ enum pxa_usb_phy_version version;
+};
+
+/*****************************************************************************
+ * Register read/write helper routines
+ *****************************************************************************/
+
+static unsigned int u2o_get(void __iomem *base, unsigned int offset)
+{
+ return readl_relaxed(base + offset);
+}
+
+static void u2o_set(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg |= value;
+ writel_relaxed(reg, base + offset);
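+ /* read back to flush the posted write */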
+ readl_relaxed(base + offset);
+}
+
+static void u2o_clear(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ u32 reg;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~value;
+ writel_relaxed(reg, base + offset);
+ readl_relaxed(base + offset);
+}
+
+static void u2o_write(void __iomem *base, unsigned int offset,
+ unsigned int value)
+{
+ writel_relaxed(value, base + offset);
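+ /* dummy read to ensure the write has reached the device */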
+ readl_relaxed(base + offset);
+}
+
+static int pxa_usb_phy_init(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+ int loops;
+
+ dev_info(&phy->dev, "initializing Marvell PXA USB PHY\n");
+
+ /* Initialize the USB PHY power */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA910) {
+ u2o_set(base, UTMI_CTRL, (1<<UTMI_CTRL_INPKT_DELAY_SOF_SHIFT)
+ | (1<<UTMI_CTRL_PU_REF_SHIFT));
+ }
+
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+ u2o_set(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+
+ /* UTMI_PLL settings */
+ u2o_clear(base, UTMI_PLL, UTMI_PLL_PLLVDD18_MASK
+ | UTMI_PLL_PLLVDD12_MASK | UTMI_PLL_PLLCALI12_MASK
+ | UTMI_PLL_FBDIV_MASK | UTMI_PLL_REFDIV_MASK
+ | UTMI_PLL_ICP_MASK | UTMI_PLL_KVCO_MASK);
+
+ u2o_set(base, UTMI_PLL, 0xee<<UTMI_PLL_FBDIV_SHIFT
+ | 0xb<<UTMI_PLL_REFDIV_SHIFT | 3<<UTMI_PLL_PLLVDD18_SHIFT
+ | 3<<UTMI_PLL_PLLVDD12_SHIFT | 3<<UTMI_PLL_PLLCALI12_SHIFT
+ | 1<<UTMI_PLL_ICP_SHIFT | 3<<UTMI_PLL_KVCO_SHIFT);
+
+ /* UTMI_TX */
+ u2o_clear(base, UTMI_TX, UTMI_TX_REG_EXT_FS_RCAL_EN_MASK
+ | UTMI_TX_TXVDD12_MASK | UTMI_TX_CK60_PHSEL_MASK
+ | UTMI_TX_IMPCAL_VTH_MASK | UTMI_TX_REG_EXT_FS_RCAL_MASK
+ | UTMI_TX_AMP_MASK);
+ u2o_set(base, UTMI_TX, 3<<UTMI_TX_TXVDD12_SHIFT
+ | 4<<UTMI_TX_CK60_PHSEL_SHIFT | 4<<UTMI_TX_IMPCAL_VTH_SHIFT
+ | 8<<UTMI_TX_REG_EXT_FS_RCAL_SHIFT | 3<<UTMI_TX_AMP_SHIFT);
+
+ /* UTMI_RX */
+ u2o_clear(base, UTMI_RX, UTMI_RX_SQ_THRESH_MASK
+ | UTMI_REG_SQ_LENGTH_MASK);
+ u2o_set(base, UTMI_RX, 7<<UTMI_RX_SQ_THRESH_SHIFT
+ | 2<<UTMI_REG_SQ_LENGTH_SHIFT);
+
+ /* UTMI_IVREF */
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ /*
+ * fix an interface issue seen between the Microsoft Altair board
+ * and a NEC hub: set UTMI_IVREF from 0x4a3 to 0x4bf
+ */
+ u2o_write(base, UTMI_IVREF, 0x4bf);
+ }
+
+ /* toggle VCOCAL_START bit of UTMI_PLL */
+ udelay(200);
+ u2o_set(base, UTMI_PLL, VCOCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_PLL, VCOCAL_START);
+
+ /* toggle REG_RCAL_START bit of UTMI_TX */
+ udelay(400);
+ u2o_set(base, UTMI_TX, REG_RCAL_START);
+ udelay(40);
+ u2o_clear(base, UTMI_TX, REG_RCAL_START);
+ udelay(400);
+
+ /* Make sure PHY PLL is ready */
+ loops = 0;
+ while ((u2o_get(base, UTMI_PLL) & PLL_READY) == 0) {
+ mdelay(1);
+ loops++;
+ if (loops > 100) {
+ dev_warn(&phy->dev, "calibrate timeout, UTMI_PLL %x\n",
+ u2o_get(base, UTMI_PLL));
+ break;
+ }
+ }
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168) {
+ u2o_set(base, UTMI_RESERVE, 1 << 5);
+ /* Turn on UTMI PHY OTG extension */
+ u2o_write(base, UTMI_OTG_ADDON, 1);
+ }
+
+ return 0;
+}
+
+static int pxa_usb_phy_exit(struct phy *phy)
+{
+ struct pxa_usb_phy *pxa_usb_phy = phy_get_drvdata(phy);
+ void __iomem *base = pxa_usb_phy->base;
+
+ dev_info(&phy->dev, "deinitializing Marvell PXA USB PHY\n");
+
+ if (pxa_usb_phy->version == PXA_USB_PHY_PXA168)
+ u2o_clear(base, UTMI_OTG_ADDON, UTMI_OTG_ADDON_OTG_ON);
+
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_RXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_TXBUF_PDWN);
+ u2o_clear(base, UTMI_CTRL, UTMI_CTRL_USB_CLK_EN);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PWR_UP_SHIFT);
+ u2o_clear(base, UTMI_CTRL, 1<<UTMI_CTRL_PLL_PWR_UP_SHIFT);
+
+ return 0;
+}
+
+static const struct phy_ops pxa_usb_phy_ops = {
+ .init = pxa_usb_phy_init,
+ .exit = pxa_usb_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id pxa_usb_phy_of_match[] = {
+ {
+ .compatible = "marvell,mmp2-usb-phy",
+ .data = (void *)PXA_USB_PHY_MMP2,
+ }, {
+ .compatible = "marvell,pxa910-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA910,
+ }, {
+ .compatible = "marvell,pxa168-usb-phy",
+ .data = (void *)PXA_USB_PHY_PXA168,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_usb_phy_of_match);
+
+static int pxa_usb_phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *resource;
+ struct pxa_usb_phy *pxa_usb_phy;
+ struct phy_provider *provider;
+ const struct of_device_id *of_id;
+
+ pxa_usb_phy = devm_kzalloc(dev, sizeof(struct pxa_usb_phy), GFP_KERNEL);
+ if (!pxa_usb_phy)
+ return -ENOMEM;
+
+ of_id = of_match_node(pxa_usb_phy_of_match, dev->of_node);
+ if (of_id)
+ pxa_usb_phy->version = (enum pxa_usb_phy_version)of_id->data;
+ else
+ pxa_usb_phy->version = PXA_USB_PHY_MMP2;
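+ /* no OF match data (e.g. legacy platform probe): default to MMP2 */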
+
+ resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pxa_usb_phy->base = devm_ioremap_resource(dev, resource);
+ if (IS_ERR(pxa_usb_phy->base)) {
+ dev_err(dev, "failed to remap PHY regs\n");
+ return PTR_ERR(pxa_usb_phy->base);
+ }
+
+ pxa_usb_phy->phy = devm_phy_create(dev, NULL, &pxa_usb_phy_ops);
+ if (IS_ERR(pxa_usb_phy->phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(pxa_usb_phy->phy);
+ }
+
+ phy_set_drvdata(pxa_usb_phy->phy, pxa_usb_phy);
+ provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(provider)) {
+ dev_err(dev, "failed to register PHY provider\n");
+ return PTR_ERR(provider);
+ }
+
+ if (!dev->of_node) {
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-udc");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "pxa-u2oehci");
+ phy_create_lookup(pxa_usb_phy->phy, "usb", "mv-otg");
+ }
+
+ dev_info(dev, "Marvell PXA USB PHY\n");
+ return 0;
+}
+
+static struct platform_driver pxa_usb_phy_driver = {
+ .probe = pxa_usb_phy_probe,
+ .driver = {
+ .name = "pxa-usb-phy",
+ .of_match_table = pxa_usb_phy_of_match,
+ },
+};
+module_platform_driver(pxa_usb_phy_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Marvell PXA USB PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 632a0e73ee10..32f7d34eb784 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -50,6 +50,23 @@ config PHY_QCOM_UFS
help
Support for UFS PHY on QCOM chipsets.
+if PHY_QCOM_UFS
+
+config PHY_QCOM_UFS_14NM
+ tristate
+ default PHY_QCOM_UFS
+ help
+ Support for 14nm UFS QMP phy present on QCOM chipsets.
+
+config PHY_QCOM_UFS_20NM
+ tristate
+ default PHY_QCOM_UFS
+ depends on BROKEN
+ help
+ Support for 20nm UFS QMP phy present on QCOM chipsets.
+
+endif
+
config PHY_QCOM_USB_HS
tristate "Qualcomm USB HS PHY module"
depends on USB_ULPI_BUS
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index deb831f453ae..c56efd3af205 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_QMP) += phy-qcom-qmp.o
obj-$(CONFIG_PHY_QCOM_QUSB2) += phy-qcom-qusb2.o
obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-14nm.o
-obj-$(CONFIG_PHY_QCOM_UFS) += phy-qcom-ufs-qmp-20nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_14NM) += phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS_20NM) += phy-qcom-ufs-qmp-20nm.o
obj-$(CONFIG_PHY_QCOM_USB_HS) += phy-qcom-usb-hs.o
obj-$(CONFIG_PHY_QCOM_USB_HSIC) += phy-qcom-usb-hsic.o
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index 4c470104a0d6..a83332411026 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -156,6 +156,11 @@ static const unsigned int qmp_v3_usb3phy_regs_layout[] = {
[QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170,
};
+static const unsigned int sdm845_ufsphy_regs_layout[] = {
+ [QPHY_START_CTRL] = 0x00,
+ [QPHY_PCS_READY_STATUS] = 0x160,
+};
+
static const struct qmp_phy_init_tbl msm8996_pcie_serdes_tbl[] = {
QMP_PHY_INIT_CFG(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x1c),
QMP_PHY_INIT_CFG(QSERDES_COM_CLK_ENABLE1, 0x10),
@@ -601,6 +606,83 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_uniphy_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_REFGEN_REQ_CONFIG2, 0x60),
};
+static const struct qmp_phy_init_tbl sdm845_ufsphy_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0xd5),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_RESETSM_CNTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL1, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_INITVAL2, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE0, 0xda),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE0, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE1, 0x98),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE1_MODE1, 0xc1),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE2_MODE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE1, 0x32),
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE1, 0x0f),
+
+ /* Rate B */
+ QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_LANE_MODE_1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_LVL, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_CNTRL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_SIGDET_DEGLITCH_CNTRL, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_INTERFACE_MODE, 0x40),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_TERM_BW, 0x5b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL4, 0x1b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SVS_SO_GAIN, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE, 0x4b),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_PI_CONTROLS, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+ QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_MODE_00, 0x59),
+};
+
+static const struct qmp_phy_init_tbl sdm845_ufsphy_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL2, 0x6e),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL, 0x0a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SYM_RESYNC_CTRL, 0x03),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_TX_MID_TERM_CTRL1, 0x43),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_CTRL1, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_MIN_HIBERN8_TIME, 0x9a),
+ QMP_PHY_INIT_CFG(QPHY_V3_PCS_MULTI_LANE_CTRL1, 0x02),
+};
/* struct qmp_phy_cfg - per-PHY initialization config */
struct qmp_phy_cfg {
@@ -649,9 +731,14 @@ struct qmp_phy_cfg {
/* true, if PHY has a separate DP_COM control block */
bool has_phy_dp_com_ctrl;
+ /* true, if PHY has secondary tx/rx lanes to be configured */
+ bool is_dual_lane_phy;
/* Register offset of secondary tx/rx lanes for USB DP combo PHY */
unsigned int tx_b_lane_offset;
unsigned int rx_b_lane_offset;
+
+ /* true, if PCS block has no separate SW_RESET register */
+ bool no_pcs_sw_reset;
};
/**
@@ -748,6 +835,10 @@ static const char * const qmp_v3_phy_clk_l[] = {
"aux", "cfg_ahb", "ref", "com_aux",
};
+static const char * const sdm845_ufs_phy_clk_l[] = {
+ "ref", "ref_aux",
+};
+
/* list of resets */
static const char * const msm8996_pciephy_reset_l[] = {
"phy", "common", "cfg",
@@ -758,7 +849,7 @@ static const char * const msm8996_usb3phy_reset_l[] = {
};
/* list of regulators */
-static const char * const msm8996_phy_vreg_l[] = {
+static const char * const qmp_phy_vreg_l[] = {
"vdda-phy", "vdda-pll",
};
@@ -778,8 +869,8 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_pciephy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_pciephy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = pciephy_regs_layout,
.start_ctrl = PCS_START | PLL_READY_GATE_EN,
@@ -809,8 +900,8 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(msm8996_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -870,8 +961,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -883,6 +974,7 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
.has_phy_dp_com_ctrl = true,
+ .is_dual_lane_phy = true,
.tx_b_lane_offset = 0x400,
.rx_b_lane_offset = 0x400,
};
@@ -903,8 +995,8 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l),
.reset_list = msm8996_usb3phy_reset_l,
.num_resets = ARRAY_SIZE(msm8996_usb3phy_reset_l),
- .vreg_list = msm8996_phy_vreg_l,
- .num_vregs = ARRAY_SIZE(msm8996_phy_vreg_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
.regs = qmp_v3_usb3phy_regs_layout,
.start_ctrl = SERDES_START | PCS_START,
@@ -916,6 +1008,35 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sdm845_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sdm845_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sdm845_ufsphy_serdes_tbl),
+ .tx_tbl = sdm845_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_tx_tbl),
+ .rx_tbl = sdm845_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sdm845_ufsphy_rx_tbl),
+ .pcs_tbl = sdm845_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sdm845_ufsphy_pcs_tbl),
+ .clk_list = sdm845_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sdm845_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .mask_pcs_ready = PCS_READY,
+
+ .is_dual_lane_phy = true,
+ .tx_b_lane_offset = 0x400,
+ .rx_b_lane_offset = 0x400,
+
+ .no_pcs_sw_reset = true,
+};
+
static void qcom_qmp_phy_configure(void __iomem *base,
const unsigned int *regs,
const struct qmp_phy_init_tbl tbl[],
@@ -935,10 +1056,12 @@ static void qcom_qmp_phy_configure(void __iomem *base,
}
}
-static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
+static int qcom_qmp_phy_com_init(struct qmp_phy *qphy)
{
+ struct qcom_qmp *qmp = qphy->qmp;
const struct qmp_phy_cfg *cfg = qmp->cfg;
void __iomem *serdes = qmp->serdes;
+ void __iomem *pcs = qphy->pcs;
void __iomem *dp_com = qmp->dp_com;
int ret, i;
@@ -979,10 +1102,6 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
goto err_rst;
}
- if (cfg->has_phy_com_ctrl)
- qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
- SW_PWRDN);
-
if (cfg->has_phy_dp_com_ctrl) {
qphy_setbits(dp_com, QPHY_V3_DP_COM_POWER_DOWN_CTRL,
SW_PWRDN);
@@ -1000,6 +1119,12 @@ static int qcom_qmp_phy_com_init(struct qcom_qmp *qmp)
SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET);
}
+ if (cfg->has_phy_com_ctrl)
+ qphy_setbits(serdes, cfg->regs[QPHY_COM_POWER_DOWN_CONTROL],
+ SW_PWRDN);
+ else
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+
/* Serdes configuration */
qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl,
cfg->serdes_tbl_num);
@@ -1090,7 +1215,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
dev_vdbg(qmp->dev, "Initializing QMP phy\n");
- ret = qcom_qmp_phy_com_init(qmp);
+ ret = qcom_qmp_phy_com_init(qphy);
if (ret)
return ret;
@@ -1112,22 +1237,31 @@ static int qcom_qmp_phy_init(struct phy *phy)
/* Tx, Rx, and PCS configurations */
qcom_qmp_phy_configure(tx, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num);
/* Configuration for other LANE for USB-DP combo PHY */
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(tx + cfg->tx_b_lane_offset, cfg->regs,
cfg->tx_tbl, cfg->tx_tbl_num);
qcom_qmp_phy_configure(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num);
- if (cfg->has_phy_dp_com_ctrl)
+ if (cfg->is_dual_lane_phy)
qcom_qmp_phy_configure(rx + cfg->rx_b_lane_offset, cfg->regs,
cfg->rx_tbl, cfg->rx_tbl_num);
qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num);
/*
+ * The UFS PHY requires software reset to be deasserted before the
+ * serdes is started. For UFS PHYs without software reset control
+ * bits, defer starting the serdes until the power-on callback.
+ */
+ if ((cfg->type == PHY_TYPE_UFS) && cfg->no_pcs_sw_reset)
+ goto out;
+
+ /*
* Pull out PHY from POWER DOWN state.
* This is active low enable signal to power-down PHY.
*/
- qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
+ if (cfg->type == PHY_TYPE_PCIE)
+ qphy_setbits(pcs, QPHY_POWER_DOWN_CONTROL, cfg->pwrdn_ctrl);
if (cfg->has_pwrdn_delay)
usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max);
@@ -1151,6 +1285,7 @@ static int qcom_qmp_phy_init(struct phy *phy)
}
qmp->phy_initialized = true;
+out:
return ret;
err_pcs_ready:
@@ -1173,7 +1308,8 @@ static int qcom_qmp_phy_exit(struct phy *phy)
clk_disable_unprepare(qphy->pipe_clk);
/* PHY reset */
- qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
+ if (!cfg->no_pcs_sw_reset)
+ qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET);
/* stop SerDes and Phy-Coding-Sublayer */
qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
@@ -1191,6 +1327,44 @@ static int qcom_qmp_phy_exit(struct phy *phy)
return 0;
}
+static int qcom_qmp_phy_poweron(struct phy *phy)
+{
+ struct qmp_phy *qphy = phy_get_drvdata(phy);
+ struct qcom_qmp *qmp = qphy->qmp;
+ const struct qmp_phy_cfg *cfg = qmp->cfg;
+ void __iomem *pcs = qphy->pcs;
+ void __iomem *status;
+ unsigned int mask, val;
+ int ret = 0;
+
+ if (cfg->type != PHY_TYPE_UFS)
+ return 0;
+
+ /*
+ * For a UFS PHY that has no software reset control, start the
+ * serdes only when the UFS driver explicitly calls phy_power_on
+ * after it has deasserted the software reset.
+ */
+ if (cfg->no_pcs_sw_reset && !qmp->phy_initialized &&
+ (qmp->init_count != 0)) {
+ /* start SerDes and Phy-Coding-Sublayer */
+ qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl);
+
+ status = pcs + cfg->regs[QPHY_PCS_READY_STATUS];
+ mask = cfg->mask_pcs_ready;
+
+ ret = readl_poll_timeout(status, val, !(val & mask), 1,
+ PHY_INIT_COMPLETE_TIMEOUT);
+ if (ret) {
+ dev_err(qmp->dev, "phy initialization timed out\n");
+ return ret;
+ }
+ qmp->phy_initialized = true;
+ }
+
+ return ret;
+}
+
static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode)
{
struct qmp_phy *qphy = phy_get_drvdata(phy);
@@ -1400,7 +1574,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
ret = of_property_read_string(np, "clock-output-names", &init.name);
if (ret) {
- dev_err(qmp->dev, "%s: No clock-output-names\n", np->name);
+ dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np);
return ret;
}
@@ -1420,6 +1594,7 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np)
static const struct phy_ops qcom_qmp_phy_gen_ops = {
.init = qcom_qmp_phy_init,
.exit = qcom_qmp_phy_exit,
+ .power_on = qcom_qmp_phy_poweron,
.set_mode = qcom_qmp_phy_set_mode,
.owner = THIS_MODULE,
};
@@ -1522,6 +1697,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
}, {
.compatible = "qcom,sdm845-qmp-usb3-uni-phy",
.data = &qmp_v3_usb3_uniphy_cfg,
+ }, {
+ .compatible = "qcom,sdm845-qmp-ufs-phy",
+ .data = &sdm845_ufsphy_cfg,
},
{ },
};
@@ -1586,7 +1764,9 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev)
ret = qcom_qmp_phy_vreg_init(dev);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index 5d78d43ba9fc..d201cc307151 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -184,6 +184,8 @@
#define QSERDES_V3_COM_VCO_TUNE2_MODE0 0x0f8
#define QSERDES_V3_COM_VCO_TUNE1_MODE1 0x0fc
#define QSERDES_V3_COM_VCO_TUNE2_MODE1 0x100
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL1 0x104
+#define QSERDES_V3_COM_VCO_TUNE_INITVAL2 0x108
#define QSERDES_V3_COM_VCO_TUNE_TIMER1 0x11c
#define QSERDES_V3_COM_VCO_TUNE_TIMER2 0x120
#define QSERDES_V3_COM_CLK_SELECT 0x138
@@ -211,8 +213,13 @@
/* Only for QMP V3 PHY - RX registers */
#define QSERDES_V3_RX_UCDR_SO_GAIN_HALF 0x00c
#define QSERDES_V3_RX_UCDR_SO_GAIN 0x014
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_HALF 0x024
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN_QUARTER 0x028
+#define QSERDES_V3_RX_UCDR_SVS_SO_GAIN 0x02c
#define QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN 0x030
#define QSERDES_V3_RX_UCDR_SO_SATURATION_AND_ENABLE 0x034
+#define QSERDES_V3_RX_UCDR_FASTLOCK_COUNT_LOW 0x03c
+#define QSERDES_V3_RX_UCDR_PI_CONTROLS 0x044
#define QSERDES_V3_RX_RX_TERM_BW 0x07c
#define QSERDES_V3_RX_VGA_CAL_CNTRL1 0x0bc
#define QSERDES_V3_RX_VGA_CAL_CNTRL2 0x0c0
@@ -239,6 +246,8 @@
#define QPHY_V3_PCS_TXMGN_V3 0x018
#define QPHY_V3_PCS_TXMGN_V4 0x01c
#define QPHY_V3_PCS_TXMGN_LS 0x020
+#define QPHY_V3_PCS_TX_LARGE_AMP_DRV_LVL 0x02c
+#define QPHY_V3_PCS_TX_SMALL_AMP_DRV_LVL 0x034
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V0 0x024
#define QPHY_V3_PCS_TXDEEMPH_M3P5DB_V0 0x028
#define QPHY_V3_PCS_TXDEEMPH_M6DB_V1 0x02c
@@ -275,6 +284,12 @@
#define QPHY_V3_PCS_FLL_CNT_VAL_L 0x0cc
#define QPHY_V3_PCS_FLL_CNT_VAL_H_TOL 0x0d0
#define QPHY_V3_PCS_FLL_MAN_CODE 0x0d4
+#define QPHY_V3_PCS_RX_SYM_RESYNC_CTRL 0x134
+#define QPHY_V3_PCS_RX_MIN_HIBERN8_TIME 0x138
+#define QPHY_V3_PCS_RX_SIGDET_CTRL1 0x13c
+#define QPHY_V3_PCS_RX_SIGDET_CTRL2 0x140
+#define QPHY_V3_PCS_TX_MID_TERM_CTRL1 0x1bc
+#define QPHY_V3_PCS_MULTI_LANE_CTRL1 0x1c4
#define QPHY_V3_PCS_RX_SIGDET_LVL 0x1d8
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG1 0x20c
#define QPHY_V3_PCS_REFGEN_REQ_CONFIG2 0x210
diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
index e70e425f26f5..9ce531194f8a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
+++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
@@ -800,7 +800,9 @@ static int qusb2_phy_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, num, qphy->vregs);
if (ret) {
- dev_err(dev, "failed to get regulator supplies\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get regulator supplies: %d\n",
+ ret);
return ret;
}
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-i.h b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
index 822c83b8efcd..681644e43248 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-i.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-i.h
@@ -17,9 +17,9 @@
#include <linux/module.h>
#include <linux/clk.h>
+#include <linux/phy/phy.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5493ea51282..f2979ccad00a 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -431,56 +431,6 @@ static void ufs_qcom_phy_disable_ref_clk(struct ufs_qcom_phy *phy)
}
}
-#define UFS_REF_CLK_EN (1 << 5)
-
-static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
-{
- struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
- if (phy->dev_ref_clk_ctrl_mmio &&
- (enable ^ phy->is_dev_ref_clk_enabled)) {
- u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
-
- if (enable)
- temp |= UFS_REF_CLK_EN;
- else
- temp &= ~UFS_REF_CLK_EN;
-
- /*
- * If we are here to disable this clock immediately after
- * entering into hibern8, we need to make sure that device
- * ref_clk is active atleast 1us after the hibern8 enter.
- */
- if (!enable)
- udelay(1);
-
- writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
- /* ensure that ref_clk is enabled/disabled before we return */
- wmb();
- /*
- * If we call hibern8 exit after this, we need to make sure that
- * device ref_clk is stable for atleast 1us before the hibern8
- * exit command.
- */
- if (enable)
- udelay(1);
-
- phy->is_dev_ref_clk_enabled = enable;
- }
-}
-
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
-
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
-{
- ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
-
/* Turn ON M-PHY RMMI interface clocks */
static int ufs_qcom_phy_enable_iface_clk(struct ufs_qcom_phy *phy)
{
diff --git a/drivers/phy/renesas/Kconfig b/drivers/phy/renesas/Kconfig
index 4bd390c79d21..e340a925bbb1 100644
--- a/drivers/phy/renesas/Kconfig
+++ b/drivers/phy/renesas/Kconfig
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Phy drivers for Renesas platforms
#
diff --git a/drivers/phy/renesas/Makefile b/drivers/phy/renesas/Makefile
index 4b76fc439ed6..b599ff8a4349 100644
--- a/drivers/phy/renesas/Makefile
+++ b/drivers/phy/renesas/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_RCAR_GEN2) += phy-rcar-gen2.o
obj-$(CONFIG_PHY_RCAR_GEN3_PCIE) += phy-rcar-gen3-pcie.o
obj-$(CONFIG_PHY_RCAR_GEN3_USB2) += phy-rcar-gen3-usb2.o
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 97d4dd6ea924..72eeb066912d 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen2 PHY driver
*
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
index fb8f05e39cf7..d0f412c25981 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB2.0 PHY driver
*
@@ -6,10 +7,6 @@
* This is based on the phy-rcar-gen2 driver:
* Copyright (C) 2014 Renesas Solutions Corp.
* Copyright (C) 2014 Cogent Embedded, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/extcon-provider.h>
@@ -81,18 +78,29 @@
#define USB2_ADPCTRL_IDPULLUP BIT(5) /* 1 = ID sampling is enabled */
#define USB2_ADPCTRL_DRVVBUS BIT(4)
-#define RCAR_GEN3_PHY_HAS_DEDICATED_PINS 1
-
struct rcar_gen3_chan {
void __iomem *base;
struct extcon_dev *extcon;
struct phy *phy;
struct regulator *vbus;
struct work_struct work;
+ enum usb_dr_mode dr_mode;
bool extcon_host;
- bool has_otg_pins;
+ bool is_otg_channel;
+ bool uses_otg_pins;
};
+/*
+ * Combinations of is_otg_channel and uses_otg_pins:
+ *
+ * Parameters || Behaviors
+ * is_otg_channel | uses_otg_pins || irqs | role sysfs
+ * ---------------------+---------------++--------------+------------
+ * true | true || enabled | enabled
+ * true | false || disabled | enabled
+ * false | any || disabled | disabled
+ */
+
static void rcar_gen3_phy_usb2_work(struct work_struct *work)
{
struct rcar_gen3_chan *ch = container_of(work, struct rcar_gen3_chan,
@@ -147,6 +155,18 @@ static void rcar_gen3_enable_vbus_ctrl(struct rcar_gen3_chan *ch, int vbus)
writel(val, usb2_base + USB2_ADPCTRL);
}
+static void rcar_gen3_control_otg_irq(struct rcar_gen3_chan *ch, int enable)
+{
+ void __iomem *usb2_base = ch->base;
+ u32 val = readl(usb2_base + USB2_OBINTEN);
+
+ if (ch->uses_otg_pins && enable)
+ val |= USB2_OBINT_BITS;
+ else
+ val &= ~USB2_OBINT_BITS;
+ writel(val, usb2_base + USB2_OBINTEN);
+}
+
static void rcar_gen3_init_for_host(struct rcar_gen3_chan *ch)
{
rcar_gen3_set_linectrl(ch, 1, 1);
@@ -192,20 +212,19 @@ static void rcar_gen3_init_for_a_peri(struct rcar_gen3_chan *ch)
static void rcar_gen3_init_from_a_peri_to_a_host(struct rcar_gen3_chan *ch)
{
- void __iomem *usb2_base = ch->base;
- u32 val;
+ rcar_gen3_control_otg_irq(ch, 0);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val & ~USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
-
- rcar_gen3_enable_vbus_ctrl(ch, 0);
+ rcar_gen3_enable_vbus_ctrl(ch, 1);
rcar_gen3_init_for_host(ch);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
}
static bool rcar_gen3_check_id(struct rcar_gen3_chan *ch)
{
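+ /* without dedicated OTG pins, derive the ID state from dr_mode */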
+ if (!ch->uses_otg_pins)
+ return ch->dr_mode != USB_DR_MODE_HOST;
+
return !!(readl(ch->base + USB2_ADPCTRL) & USB2_ADPCTRL_IDDIG);
}
@@ -237,7 +256,7 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
bool is_b_device;
enum phy_mode cur_mode, new_mode;
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
if (!strncmp(buf, "host", strlen("host")))
@@ -275,7 +294,7 @@ static ssize_t role_show(struct device *dev, struct device_attribute *attr,
{
struct rcar_gen3_chan *ch = dev_get_drvdata(dev);
- if (!ch->has_otg_pins || !ch->phy->init_count)
+ if (!ch->is_otg_channel || !ch->phy->init_count)
return -EIO;
return sprintf(buf, "%s\n", rcar_gen3_is_host(ch) ? "host" :
@@ -291,8 +310,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch)
val = readl(usb2_base + USB2_VBCTRL);
writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL);
writel(USB2_OBINT_BITS, usb2_base + USB2_OBINTSTA);
- val = readl(usb2_base + USB2_OBINTEN);
- writel(val | USB2_OBINT_BITS, usb2_base + USB2_OBINTEN);
+ rcar_gen3_control_otg_irq(ch, 1);
val = readl(usb2_base + USB2_ADPCTRL);
writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL);
val = readl(usb2_base + USB2_LINECTRL1);
@@ -314,7 +332,7 @@ static int rcar_gen3_phy_usb2_init(struct phy *p)
writel(USB2_OC_TIMSET_INIT, usb2_base + USB2_OC_TIMSET);
/* Initialize otg part */
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
rcar_gen3_init_otg(channel);
return 0;
@@ -388,21 +406,10 @@ static irqreturn_t rcar_gen3_phy_usb2_irq(int irq, void *_ch)
}
static const struct of_device_id rcar_gen3_phy_usb2_match_table[] = {
- {
- .compatible = "renesas,usb2-phy-r8a7795",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a7796",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,usb2-phy-r8a77965",
- .data = (void *)RCAR_GEN3_PHY_HAS_DEDICATED_PINS,
- },
- {
- .compatible = "renesas,rcar-gen3-usb2-phy",
- },
+ { .compatible = "renesas,usb2-phy-r8a7795" },
+ { .compatible = "renesas,usb2-phy-r8a7796" },
+ { .compatible = "renesas,usb2-phy-r8a77965" },
+ { .compatible = "renesas,rcar-gen3-usb2-phy" },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_gen3_phy_usb2_match_table);
@@ -445,10 +452,13 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "No irq handler (%d)\n", irq);
}
- if (of_usb_get_dr_mode_by_phy(dev->of_node, 0) == USB_DR_MODE_OTG) {
+ channel->dr_mode = of_usb_get_dr_mode_by_phy(dev->of_node, 0);
+ if (channel->dr_mode != USB_DR_MODE_UNKNOWN) {
int ret;
- channel->has_otg_pins = (uintptr_t)of_device_get_match_data(dev);
+ channel->is_otg_channel = true;
+ channel->uses_otg_pins = !of_property_read_bool(dev->of_node,
+ "renesas,no-otg-pins");
channel->extcon = devm_extcon_dev_allocate(dev,
rcar_gen3_phy_cable);
if (IS_ERR(channel->extcon))
@@ -490,7 +500,7 @@ static int rcar_gen3_phy_usb2_probe(struct platform_device *pdev)
dev_err(dev, "Failed to register PHY provider\n");
ret = PTR_ERR(provider);
goto error;
- } else if (channel->has_otg_pins) {
+ } else if (channel->is_otg_channel) {
int ret;
ret = device_create_file(dev, &dev_attr_role);
@@ -510,7 +520,7 @@ static int rcar_gen3_phy_usb2_remove(struct platform_device *pdev)
{
struct rcar_gen3_chan *channel = platform_get_drvdata(pdev);
- if (channel->has_otg_pins)
+ if (channel->is_otg_channel)
device_remove_file(&pdev->dev, &dev_attr_role);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb3.c b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
index 88c83c9b8ff9..566b4cf4ff38 100644
--- a/drivers/phy/renesas/phy-rcar-gen3-usb3.c
+++ b/drivers/phy/renesas/phy-rcar-gen3-usb3.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Car Gen3 for USB3.0 PHY driver
*
* Copyright (C) 2017 Renesas Electronics Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/clk.h>
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 0e15119ddfc6..990204a46eb6 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -15,6 +15,14 @@ config PHY_ROCKCHIP_EMMC
help
Enable this to support the Rockchip EMMC PHY.
+config PHY_ROCKCHIP_INNO_HDMI
+ tristate "Rockchip INNO HDMI PHY Driver"
+ depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this to support the Rockchip Innosilicon HDMI PHY.
+
config PHY_ROCKCHIP_INNO_USB2
tristate "Rockchip INNO USB2PHY Driver"
depends on (ARCH_ROCKCHIP || COMPILE_TEST) && OF
diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile
index 7f149d989046..fd21cbaf40dd 100644
--- a/drivers/phy/rockchip/Makefile
+++ b/drivers/phy/rockchip/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o
obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o
+obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o
obj-$(CONFIG_PHY_ROCKCHIP_INNO_USB2) += phy-rockchip-inno-usb2.o
obj-$(CONFIG_PHY_ROCKCHIP_PCIE) += phy-rockchip-pcie.o
obj-$(CONFIG_PHY_ROCKCHIP_TYPEC) += phy-rockchip-typec.o
diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
index b237360f95f6..19bf84f0bc67 100644
--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
+++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
@@ -337,8 +337,8 @@ static int rockchip_emmc_phy_probe(struct platform_device *pdev)
return -ENOMEM;
if (of_property_read_u32(dev->of_node, "reg", &reg_offset)) {
- dev_err(dev, "missing reg property in node %s\n",
- dev->of_node->name);
+ dev_err(dev, "missing reg property in node %pOFn\n",
+ dev->of_node);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
new file mode 100644
index 000000000000..b10a84cab4a7
--- /dev/null
+++ b/drivers/phy/rockchip/phy-rockchip-inno-hdmi.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2017 Rockchip Electronics Co. Ltd.
+ *
+ * Author: Zheng Yang <zhengyang@rock-chips.com>
+ * Heiko Stuebner <heiko@sntech.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/phy/phy.h>
+#include <linux/slab.h>
+
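+/* place the value x into the register bit-field [h:l] */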
+#define UPDATE(x, h, l) (((x) << (l)) & GENMASK((h), (l)))
+
+/* REG: 0x00 */
+#define RK3228_PRE_PLL_REFCLK_SEL_PCLK BIT(0)
+/* REG: 0x01 */
+#define RK3228_BYPASS_RXSENSE_EN BIT(2)
+#define RK3228_BYPASS_PWRON_EN BIT(1)
+#define RK3228_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3228_BYPASS_PDATA_EN BIT(4)
+#define RK3228_PDATAEN_DISABLE BIT(0)
+/* REG: 0x03 */
+#define RK3228_BYPASS_AUTO_TERM_RES_CAL BIT(7)
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_14_8(x) UPDATE(x, 6, 0)
+/* REG: 0x04 */
+#define RK3228_AUTO_TERM_RES_CAL_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xaa */
+#define RK3228_POST_PLL_CTRL_MANUAL BIT(0)
+/* REG: 0xe0 */
+#define RK3228_POST_PLL_POWER_DOWN BIT(5)
+#define RK3228_PRE_PLL_POWER_DOWN BIT(4)
+#define RK3228_RXSENSE_CLK_CH_ENABLE BIT(3)
+#define RK3228_RXSENSE_DATA_CH2_ENABLE BIT(2)
+#define RK3228_RXSENSE_DATA_CH1_ENABLE BIT(1)
+#define RK3228_RXSENSE_DATA_CH0_ENABLE BIT(0)
+/* REG: 0xe1 */
+#define RK3228_BANDGAP_ENABLE BIT(4)
+#define RK3228_TMDS_DRIVER_ENABLE GENMASK(3, 0)
+/* REG: 0xe2 */
+#define RK3228_PRE_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_PRE_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_PCLK_VCO_DIV_5_MASK BIT(5)
+#define RK3228_PCLK_VCO_DIV_5(x) UPDATE(x, 5, 5)
+#define RK3228_PRE_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xe3 */
+#define RK3228_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xe4 */
+#define RK3228_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3228_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xe5 */
+#define RK3228_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3228_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3228_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xe6 */
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 5, 4)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 3, 2)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(1, 0)
+#define RK3228_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 1, 0)
+/* REG: 0xe8 */
+#define RK3228_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xe9 */
+#define RK3228_POST_PLL_POST_DIV_ENABLE UPDATE(3, 7, 6)
+#define RK3228_POST_PLL_PRE_DIV_MASK GENMASK(4, 0)
+#define RK3228_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xea */
+#define RK3228_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xeb */
+#define RK3228_POST_PLL_FB_DIV_8_MASK BIT(7)
+#define RK3228_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3228_POST_PLL_POST_DIV_MASK GENMASK(5, 4)
+#define RK3228_POST_PLL_POST_DIV(x) UPDATE(x, 5, 4)
+#define RK3228_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xee */
+#define RK3228_TMDS_CH_TA_ENABLE GENMASK(7, 4)
+/* REG: 0xef */
+#define RK3228_TMDS_CLK_CH_TA(x) UPDATE(x, 7, 6)
+#define RK3228_TMDS_DATA_CH2_TA(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_TA(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_TA(x) UPDATE(x, 1, 0)
+/* REG: 0xf0 */
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS_MASK GENMASK(5, 4)
+#define RK3228_TMDS_DATA_CH2_PRE_EMPHASIS(x) UPDATE(x, 5, 4)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS_MASK GENMASK(3, 2)
+#define RK3228_TMDS_DATA_CH1_PRE_EMPHASIS(x) UPDATE(x, 3, 2)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS_MASK GENMASK(1, 0)
+#define RK3228_TMDS_DATA_CH0_PRE_EMPHASIS(x) UPDATE(x, 1, 0)
+/* REG: 0xf1 */
+#define RK3228_TMDS_CLK_CH_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH2_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+/* REG: 0xf2 */
+#define RK3228_TMDS_DATA_CH1_OUTPUT_SWING(x) UPDATE(x, 7, 4)
+#define RK3228_TMDS_DATA_CH0_OUTPUT_SWING(x) UPDATE(x, 3, 0)
+
+/* REG: 0x01 */
+#define RK3328_BYPASS_RXSENSE_EN BIT(2)
+#define RK3328_BYPASS_POWERON_EN BIT(1)
+#define RK3328_BYPASS_PLLPD_EN BIT(0)
+/* REG: 0x02 */
+#define RK3328_INT_POL_HIGH BIT(7)
+#define RK3328_BYPASS_PDATA_EN BIT(4)
+#define RK3328_PDATA_EN BIT(0)
+/* REG:0x05 */
+#define RK3328_INT_TMDS_CLK(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D2(x) UPDATE(x, 3, 0)
+/* REG:0x07 */
+#define RK3328_INT_TMDS_D1(x) UPDATE(x, 7, 4)
+#define RK3328_INT_TMDS_D0(x) UPDATE(x, 3, 0)
+/* for all RK3328_INT_TMDS_*, ESD_DET as defined in 0xc8-0xcb */
+#define RK3328_INT_AGND_LOW_PULSE_LOCKED BIT(3)
+#define RK3328_INT_RXSENSE_LOW_PULSE_LOCKED BIT(2)
+#define RK3328_INT_VSS_AGND_ESD_DET BIT(1)
+#define RK3328_INT_AGND_VSS_ESD_DET BIT(0)
+/* REG: 0xa0 */
+#define RK3328_PCLK_VCO_DIV_5_MASK BIT(1)
+#define RK3328_PCLK_VCO_DIV_5(x) UPDATE(x, 1, 1)
+#define RK3328_PRE_PLL_POWER_DOWN BIT(0)
+/* REG: 0xa1 */
+#define RK3328_PRE_PLL_PRE_DIV_MASK GENMASK(5, 0)
+#define RK3328_PRE_PLL_PRE_DIV(x) UPDATE(x, 5, 0)
+/* REG: 0xa2 */
+/* unset means center spread */
+#define RK3328_SPREAD_SPECTRUM_MOD_DOWN BIT(7)
+#define RK3328_SPREAD_SPECTRUM_MOD_DISABLE BIT(6)
+#define RK3328_PRE_PLL_FRAC_DIV_DISABLE UPDATE(3, 5, 4)
+#define RK3328_PRE_PLL_FB_DIV_11_8_MASK GENMASK(3, 0)
+#define RK3328_PRE_PLL_FB_DIV_11_8(x) UPDATE((x) >> 8, 3, 0)
+/* REG: 0xa3 */
+#define RK3328_PRE_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xa4*/
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C_MASK GENMASK(1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_C(x) UPDATE(x, 1, 0)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B_MASK GENMASK(3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_B(x) UPDATE(x, 3, 2)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A_MASK GENMASK(5, 4)
+#define RK3328_PRE_PLL_TMDSCLK_DIV_A(x) UPDATE(x, 5, 4)
+/* REG: 0xa5 */
+#define RK3328_PRE_PLL_PCLK_DIV_B_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_B_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_B(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_A_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_A(x) UPDATE(x, 4, 0)
+/* REG: 0xa6 */
+#define RK3328_PRE_PLL_PCLK_DIV_C_SHIFT 5
+#define RK3328_PRE_PLL_PCLK_DIV_C_MASK GENMASK(6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_C(x) UPDATE(x, 6, 5)
+#define RK3328_PRE_PLL_PCLK_DIV_D_MASK GENMASK(4, 0)
+#define RK3328_PRE_PLL_PCLK_DIV_D(x) UPDATE(x, 4, 0)
+/* REG: 0xa9 */
+#define RK3328_PRE_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xaa */
+#define RK3328_POST_PLL_POST_DIV_ENABLE GENMASK(3, 2)
+#define RK3328_POST_PLL_REFCLK_SEL_TMDS BIT(1)
+#define RK3328_POST_PLL_POWER_DOWN BIT(0)
+/* REG:0xab */
+#define RK3328_POST_PLL_FB_DIV_8(x) UPDATE((x) >> 8, 7, 7)
+#define RK3328_POST_PLL_PRE_DIV(x) UPDATE(x, 4, 0)
+/* REG: 0xac */
+#define RK3328_POST_PLL_FB_DIV_7_0(x) UPDATE(x, 7, 0)
+/* REG: 0xad */
+#define RK3328_POST_PLL_POST_DIV_MASK GENMASK(1, 0)
+#define RK3328_POST_PLL_POST_DIV_2 0x0
+#define RK3328_POST_PLL_POST_DIV_4 0x1
+#define RK3328_POST_PLL_POST_DIV_8 0x3
+/* REG: 0xaf */
+#define RK3328_POST_PLL_LOCK_STATUS BIT(0)
+/* REG: 0xb0 */
+#define RK3328_BANDGAP_ENABLE BIT(2)
+/* REG: 0xb2 */
+#define RK3328_TMDS_CLK_DRIVER_EN BIT(3)
+#define RK3328_TMDS_D2_DRIVER_EN BIT(2)
+#define RK3328_TMDS_D1_DRIVER_EN BIT(1)
+#define RK3328_TMDS_D0_DRIVER_EN BIT(0)
+#define RK3328_TMDS_DRIVER_ENABLE (RK3328_TMDS_CLK_DRIVER_EN | \
+ RK3328_TMDS_D2_DRIVER_EN | \
+ RK3328_TMDS_D1_DRIVER_EN | \
+ RK3328_TMDS_D0_DRIVER_EN)
+/* REG:0xc5 */
+#define RK3328_BYPASS_TERM_RESISTOR_CALIB BIT(7)
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(x) UPDATE((x) >> 8, 6, 0)
+/* REG:0xc6 */
+#define RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(x) UPDATE(x, 7, 0)
+/* REG:0xc7 */
+#define RK3328_TERM_RESISTOR_50 UPDATE(0, 2, 1)
+#define RK3328_TERM_RESISTOR_62_5 UPDATE(1, 2, 1)
+#define RK3328_TERM_RESISTOR_75 UPDATE(2, 2, 1)
+#define RK3328_TERM_RESISTOR_100 UPDATE(3, 2, 1)
+/* REG 0xc8 - 0xcb */
+#define RK3328_ESD_DETECT_MASK GENMASK(7, 6)
+#define RK3328_ESD_DETECT_340MV (0x0 << 6)
+#define RK3328_ESD_DETECT_280MV (0x1 << 6)
+#define RK3328_ESD_DETECT_260MV (0x2 << 6)
+#define RK3328_ESD_DETECT_240MV (0x3 << 6)
+/* resistors can be used in parallel */
+#define RK3328_TMDS_TERM_RESIST_MASK GENMASK(5, 0)
+#define RK3328_TMDS_TERM_RESIST_75 BIT(5)
+#define RK3328_TMDS_TERM_RESIST_150 BIT(4)
+#define RK3328_TMDS_TERM_RESIST_300 BIT(3)
+#define RK3328_TMDS_TERM_RESIST_600 BIT(2)
+#define RK3328_TMDS_TERM_RESIST_1000 BIT(1)
+#define RK3328_TMDS_TERM_RESIST_2000 BIT(0)
+/* REG: 0xd1 */
+#define RK3328_PRE_PLL_FRAC_DIV_23_16(x) UPDATE((x) >> 16, 7, 0)
+/* REG: 0xd2 */
+#define RK3328_PRE_PLL_FRAC_DIV_15_8(x) UPDATE((x) >> 8, 7, 0)
+/* REG: 0xd3 */
+#define RK3328_PRE_PLL_FRAC_DIV_7_0(x) UPDATE(x, 7, 0)
+
+struct inno_hdmi_phy_drv_data;
+
+struct inno_hdmi_phy {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+
+ struct phy *phy;
+ struct clk *sysclk;
+ struct clk *refoclk;
+ struct clk *refpclk;
+
+ /* platform data */
+ const struct inno_hdmi_phy_drv_data *plat_data;
+ int chip_version;
+
+ /* clk provider */
+ struct clk_hw hw;
+ struct clk *phyclk;
+ unsigned long pixclock;
+};
+
+struct pre_pll_config {
+ unsigned long pixclock;
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 tmds_div_a;
+ u8 tmds_div_b;
+ u8 tmds_div_c;
+ u8 pclk_div_a;
+ u8 pclk_div_b;
+ u8 pclk_div_c;
+ u8 pclk_div_d;
+ u8 vco_div_5_en;
+ u32 fracdiv;
+};
+
+struct post_pll_config {
+ unsigned long tmdsclock;
+ u8 prediv;
+ u16 fbdiv;
+ u8 postdiv;
+ u8 version;
+};
+
+struct phy_config {
+ unsigned long tmdsclock;
+ u8 regs[14];
+};
+
+struct inno_hdmi_phy_ops {
+ int (*init)(struct inno_hdmi_phy *inno);
+ int (*power_on)(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg);
+ void (*power_off)(struct inno_hdmi_phy *inno);
+};
+
+struct inno_hdmi_phy_drv_data {
+ const struct inno_hdmi_phy_ops *ops;
+ const struct clk_ops *clk_ops;
+ const struct phy_config *phy_cfg_table;
+};
+
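+/*
+ * Columns, in struct pre_pll_config order: pixclock, tmdsclock, prediv,
+ * fbdiv, tmds_div_a/b/c, pclk_div_a/b/c/d, vco_div_5_en, fracdiv.
+ */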
+static const struct pre_pll_config pre_pll_cfg_table[] = {
+ { 27000000, 27000000, 1, 90, 3, 2, 2, 10, 3, 3, 4, 0, 0},
+ { 27000000, 33750000, 1, 90, 1, 3, 3, 10, 3, 3, 4, 0, 0},
+ { 40000000, 40000000, 1, 80, 2, 2, 2, 12, 2, 2, 2, 0, 0},
+ { 59341000, 59341000, 1, 98, 3, 1, 2, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 59400000, 1, 99, 3, 1, 1, 1, 3, 3, 4, 0, 0},
+ { 59341000, 74176250, 1, 98, 0, 3, 3, 1, 3, 3, 4, 0, 0xE6AE6B},
+ { 59400000, 74250000, 1, 99, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ { 74176000, 74176000, 1, 98, 1, 2, 2, 1, 2, 3, 4, 0, 0xE6AE6B},
+ { 74250000, 74250000, 1, 99, 1, 2, 2, 1, 2, 3, 4, 0, 0},
+ { 74176000, 92720000, 4, 494, 1, 2, 2, 1, 3, 3, 4, 0, 0x816817},
+ { 74250000, 92812500, 4, 495, 1, 2, 2, 1, 3, 3, 4, 0, 0},
+ {148352000, 148352000, 1, 98, 1, 1, 1, 1, 2, 2, 2, 0, 0xE6AE6B},
+ {148500000, 148500000, 1, 99, 1, 1, 1, 1, 2, 2, 2, 0, 0},
+ {148352000, 185440000, 4, 494, 0, 2, 2, 1, 3, 2, 2, 0, 0x816817},
+ {148500000, 185625000, 4, 495, 0, 2, 2, 1, 3, 2, 2, 0, 0},
+ {296703000, 296703000, 1, 98, 0, 1, 1, 1, 0, 2, 2, 0, 0xE6AE6B},
+ {297000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 2, 0, 0},
+ {296703000, 370878750, 4, 494, 1, 2, 0, 1, 3, 1, 1, 0, 0x816817},
+ {297000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 0, 0},
+ {593407000, 296703500, 1, 98, 0, 1, 1, 1, 0, 2, 1, 0, 0xE6AE6B},
+ {594000000, 297000000, 1, 99, 0, 1, 1, 1, 0, 2, 1, 0, 0},
+ {593407000, 370879375, 4, 494, 1, 2, 0, 1, 3, 1, 1, 1, 0x816817},
+ {594000000, 371250000, 4, 495, 1, 2, 0, 1, 3, 1, 1, 1, 0},
+ {593407000, 593407000, 1, 98, 0, 2, 0, 1, 0, 1, 1, 0, 0xE6AE6B},
+ {594000000, 594000000, 1, 99, 0, 2, 0, 1, 0, 1, 1, 0, 0},
+ { /* sentinel */ }
+};
+
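+/* columns: upper tmdsclock bound, prediv, fbdiv, postdiv, chip version mask */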
+static const struct post_pll_config post_pll_cfg_table[] = {
+ {33750000, 1, 40, 8, 1},
+ {33750000, 1, 80, 8, 2},
+ {74250000, 1, 40, 8, 1},
+ {74250000, 18, 80, 8, 2},
+ {148500000, 2, 40, 4, 3},
+ {297000000, 4, 40, 2, 3},
+ {594000000, 8, 40, 1, 3},
+ { /* sentinel */ }
+};
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3228_phy_cfg[] = {
+ { 165000000, {
+ 0xaa, 0x00, 0x44, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 340000000, {
+ 0xaa, 0x15, 0x6a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, {
+ 594000000, {
+ 0xaa, 0x15, 0x7a, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ }, { /* sentinel */ },
+};
+
+/* phy tuning values for an undocumented set of registers */
+static const struct phy_config rk3328_phy_cfg[] = {
+ { 165000000, {
+ 0x07, 0x0a, 0x0a, 0x0a, 0x00, 0x00, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, {
+ 340000000, {
+ 0x0b, 0x0d, 0x0d, 0x0d, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x3f, 0xac, 0xcc, 0xcd, 0xdd,
+ },
+ }, {
+ 594000000, {
+ 0x10, 0x1a, 0x1a, 0x1a, 0x07, 0x15, 0x08, 0x08, 0x08,
+ 0x00, 0xac, 0xcc, 0xcc, 0xcc,
+ },
+ }, { /* sentinel */ },
+};
+
+static inline struct inno_hdmi_phy *to_inno_hdmi_phy(struct clk_hw *hw)
+{
+ return container_of(hw, struct inno_hdmi_phy, hw);
+}
+
+/*
+ * The register description of the IP block does not use any distinct names
+ * but instead the databook simply numbers the registers in one-increments.
+ * As the registers are laid out with a 32bit stride, the inno_* functions
+ * translate the databook register numbers to the actual register addresses.
+ */
+static inline void inno_write(struct inno_hdmi_phy *inno, u32 reg, u8 val)
+{
+ regmap_write(inno->regmap, reg * 4, val);
+}
+
+static inline u8 inno_read(struct inno_hdmi_phy *inno, u32 reg)
+{
+ u32 val;
+
+ regmap_read(inno->regmap, reg * 4, &val);
+
+ return val;
+}
+
+static inline void inno_update_bits(struct inno_hdmi_phy *inno, u8 reg,
+ u8 mask, u8 val)
+{
+ regmap_update_bits(inno->regmap, reg * 4, mask, val);
+}
+
+#define inno_poll(inno, reg, val, cond, sleep_us, timeout_us) \
+ regmap_read_poll_timeout((inno)->regmap, (reg) * 4, val, cond, \
+ sleep_us, timeout_us)
+
+static unsigned long inno_hdmi_phy_get_tmdsclk(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ int bus_width = phy_get_bus_width(inno->phy);
+
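+ /* deep-color bus widths scale the tmds clock by bits-per-channel / 8 */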
+ switch (bus_width) {
+ case 4:
+ case 5:
+ case 6:
+ case 10:
+ case 12:
+ case 16:
+ return (u64)rate * bus_width / 8;
+ default:
+ return rate;
+ }
+}
+
+static irqreturn_t inno_hdmi_phy_rk3328_hardirq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+ int intr_stat1, intr_stat2, intr_stat3;
+
+ intr_stat1 = inno_read(inno, 0x04);
+ intr_stat2 = inno_read(inno, 0x06);
+ intr_stat3 = inno_read(inno, 0x08);
+
+ if (intr_stat1)
+ inno_write(inno, 0x04, intr_stat1);
+ if (intr_stat2)
+ inno_write(inno, 0x06, intr_stat2);
+ if (intr_stat3)
+ inno_write(inno, 0x08, intr_stat3);
+
+ if (intr_stat1 || intr_stat2 || intr_stat3)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t inno_hdmi_phy_rk3328_irq(int irq, void *dev_id)
+{
+ struct inno_hdmi_phy *inno = dev_id;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ usleep_range(10, 20);
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ return IRQ_HANDLED;
+}
+
+static int inno_hdmi_phy_power_on(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+ const struct post_pll_config *cfg = post_pll_cfg_table;
+ const struct phy_config *phy_cfg = inno->plat_data->phy_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno,
+ inno->pixclock);
+ int ret;
+
+ if (!tmdsclock) {
+ dev_err(inno->dev, "TMDS clock is zero!\n");
+ return -EINVAL;
+ }
+
+ if (!inno->plat_data->ops->power_on)
+ return -EINVAL;
+
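+ /* select the first configs whose tmdsclock limit fits the target rate */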
+ for (; cfg->tmdsclock != 0; cfg++)
+ if (tmdsclock <= cfg->tmdsclock &&
+ cfg->version & inno->chip_version)
+ break;
+
+ for (; phy_cfg->tmdsclock != 0; phy_cfg++)
+ if (tmdsclock <= phy_cfg->tmdsclock)
+ break;
+
+ if (cfg->tmdsclock == 0 || phy_cfg->tmdsclock == 0)
+ return -EINVAL;
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power On\n");
+
+ ret = clk_prepare_enable(inno->phyclk);
+ if (ret)
+ return ret;
+
+ ret = inno->plat_data->ops->power_on(inno, cfg, phy_cfg);
+ if (ret) {
+ clk_disable_unprepare(inno->phyclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inno_hdmi_phy_power_off(struct phy *phy)
+{
+ struct inno_hdmi_phy *inno = phy_get_drvdata(phy);
+
+ if (!inno->plat_data->ops->power_off)
+ return -EINVAL;
+
+ inno->plat_data->ops->power_off(inno);
+
+ clk_disable_unprepare(inno->phyclk);
+
+ dev_dbg(inno->dev, "Inno HDMI PHY Power Off\n");
+
+ return 0;
+}
+
+static const struct phy_ops inno_hdmi_phy_ops = {
+ .owner = THIS_MODULE,
+ .power_on = inno_hdmi_phy_power_on,
+ .power_off = inno_hdmi_phy_power_off,
+};
+
+static const
+struct pre_pll_config *inno_hdmi_phy_get_pre_pll_cfg(struct inno_hdmi_phy *inno,
+ unsigned long rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && cfg->tmdsclock == tmdsclock)
+ break;
+
+ if (cfg->pixclock == 0)
+ return ERR_PTR(-EINVAL);
+
+ return cfg;
+}
+
+static int inno_hdmi_phy_rk3228_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xe0) & RK3228_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3228_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3228_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 nd, no_a, no_b, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xe2) & RK3228_PRE_PLL_PRE_DIV_MASK;
+ nf = (inno_read(inno, 0xe2) & RK3228_PRE_PLL_FB_DIV_8_MASK) << 1;
+ nf |= inno_read(inno, 0xe3);
+ vco = parent_rate * nf;
+
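+ /* pclk is either vco/(nd * 5) or divided down via the a/b/d dividers */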
+ if (inno_read(inno, 0xe2) & RK3228_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_A_MASK;
+ if (!no_a)
+ no_a = 1;
+ no_b = inno_read(inno, 0xe4) & RK3228_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3228_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_d = inno_read(inno, 0xe5) & RK3228_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
+
+static long inno_hdmi_phy_rk3228_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate && !cfg->fracdiv)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3228_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 v;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ /* Power down PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN);
+
+ inno_update_bits(inno, 0xe2, RK3228_PRE_PLL_FB_DIV_8_MASK |
+ RK3228_PCLK_VCO_DIV_5_MASK |
+ RK3228_PRE_PLL_PRE_DIV_MASK,
+ RK3228_PRE_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3228_PCLK_VCO_DIV_5(cfg->vco_div_5_en) |
+ RK3228_PRE_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xe3, RK3228_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_update_bits(inno, 0xe4, RK3228_PRE_PLL_PCLK_DIV_B_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_A_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b) |
+ RK3228_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a));
+ inno_update_bits(inno, 0xe5, RK3228_PRE_PLL_PCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_PCLK_DIV_D_MASK,
+ RK3228_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3228_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_update_bits(inno, 0xe6, RK3228_PRE_PLL_TMDSCLK_DIV_C_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A_MASK |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B_MASK,
+ RK3228_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3228_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+
+ /* Power up PRE-PLL */
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xe8, v, v & RK3228_PRE_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3228_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3228_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3228_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3228_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3228_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3228_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3228_clk_set_rate,
+};
+
+static int inno_hdmi_phy_rk3328_clk_is_prepared(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ u8 status;
+
+ status = inno_read(inno, 0xa0) & RK3328_PRE_PLL_POWER_DOWN;
+ return status ? 0 : 1;
+}
+
+static int inno_hdmi_phy_rk3328_clk_prepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_clk_unprepare(struct clk_hw *hw)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+}
+
+static
+unsigned long inno_hdmi_phy_rk3328_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ unsigned long frac;
+ u8 nd, no_a, no_b, no_c, no_d;
+ u64 vco;
+ u16 nf;
+
+ nd = inno_read(inno, 0xa1) & RK3328_PRE_PLL_PRE_DIV_MASK;
+ nf = ((inno_read(inno, 0xa2) & RK3328_PRE_PLL_FB_DIV_11_8_MASK) << 8);
+ nf |= inno_read(inno, 0xa3);
+ vco = parent_rate * nf;
+
+ if (!(inno_read(inno, 0xa2) & RK3328_PRE_PLL_FRAC_DIV_DISABLE)) {
+ frac = inno_read(inno, 0xd3) |
+ (inno_read(inno, 0xd2) << 8) |
+ (inno_read(inno, 0xd1) << 16);
+ vco += DIV_ROUND_CLOSEST(parent_rate * frac, (1 << 24));
+ }
+
+ if (inno_read(inno, 0xa0) & RK3328_PCLK_VCO_DIV_5_MASK) {
+ do_div(vco, nd * 5);
+ } else {
+ no_a = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_A_MASK;
+ no_b = inno_read(inno, 0xa5) & RK3328_PRE_PLL_PCLK_DIV_B_MASK;
+ no_b >>= RK3328_PRE_PLL_PCLK_DIV_B_SHIFT;
+ no_b += 2;
+ no_c = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_C_MASK;
+ no_c >>= RK3328_PRE_PLL_PCLK_DIV_C_SHIFT;
+ no_c = 1 << no_c;
+ no_d = inno_read(inno, 0xa6) & RK3328_PRE_PLL_PCLK_DIV_D_MASK;
+
+ do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
+ }
+
+ inno->pixclock = vco;
+ dev_dbg(inno->dev, "%s rate %lu\n", __func__, inno->pixclock);
+
+ return vco;
+}
+
+static long inno_hdmi_phy_rk3328_clk_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+
+ for (; cfg->pixclock != 0; cfg++)
+ if (cfg->pixclock == rate)
+ break;
+
+ if (cfg->pixclock == 0)
+ return -EINVAL;
+
+ return cfg->pixclock;
+}
+
+static int inno_hdmi_phy_rk3328_clk_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct inno_hdmi_phy *inno = to_inno_hdmi_phy(hw);
+ const struct pre_pll_config *cfg = pre_pll_cfg_table;
+ unsigned long tmdsclock = inno_hdmi_phy_get_tmdsclk(inno, rate);
+ u32 val;
+ int ret;
+
+ dev_dbg(inno->dev, "%s rate %lu tmdsclk %lu\n",
+ __func__, rate, tmdsclock);
+
+ cfg = inno_hdmi_phy_get_pre_pll_cfg(inno, rate);
+ if (IS_ERR(cfg))
+ return PTR_ERR(cfg);
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN,
+ RK3328_PRE_PLL_POWER_DOWN);
+
+ /* Configure pre-pll */
+ inno_update_bits(inno, 0xa0, RK3328_PCLK_VCO_DIV_5_MASK,
+ RK3328_PCLK_VCO_DIV_5(cfg->vco_div_5_en));
+ inno_write(inno, 0xa1, RK3328_PRE_PLL_PRE_DIV(cfg->prediv));
+
+ val = RK3328_SPREAD_SPECTRUM_MOD_DISABLE;
+ if (!cfg->fracdiv)
+ val |= RK3328_PRE_PLL_FRAC_DIV_DISABLE;
+ inno_write(inno, 0xa2, RK3328_PRE_PLL_FB_DIV_11_8(cfg->fbdiv) | val);
+ inno_write(inno, 0xa3, RK3328_PRE_PLL_FB_DIV_7_0(cfg->fbdiv));
+ inno_write(inno, 0xa5, RK3328_PRE_PLL_PCLK_DIV_A(cfg->pclk_div_a) |
+ RK3328_PRE_PLL_PCLK_DIV_B(cfg->pclk_div_b));
+ inno_write(inno, 0xa6, RK3328_PRE_PLL_PCLK_DIV_C(cfg->pclk_div_c) |
+ RK3328_PRE_PLL_PCLK_DIV_D(cfg->pclk_div_d));
+ inno_write(inno, 0xa4, RK3328_PRE_PLL_TMDSCLK_DIV_C(cfg->tmds_div_c) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_A(cfg->tmds_div_a) |
+ RK3328_PRE_PLL_TMDSCLK_DIV_B(cfg->tmds_div_b));
+ inno_write(inno, 0xd3, RK3328_PRE_PLL_FRAC_DIV_7_0(cfg->fracdiv));
+ inno_write(inno, 0xd2, RK3328_PRE_PLL_FRAC_DIV_15_8(cfg->fracdiv));
+ inno_write(inno, 0xd1, RK3328_PRE_PLL_FRAC_DIV_23_16(cfg->fracdiv));
+
+ inno_update_bits(inno, 0xa0, RK3328_PRE_PLL_POWER_DOWN, 0);
+
+ /* Wait for Pre-PLL lock */
+ ret = inno_poll(inno, 0xa9, val, val & RK3328_PRE_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Pre-PLL locking failed\n");
+ return ret;
+ }
+
+ inno->pixclock = rate;
+
+ return 0;
+}
+
+static const struct clk_ops inno_hdmi_phy_rk3328_clk_ops = {
+ .prepare = inno_hdmi_phy_rk3328_clk_prepare,
+ .unprepare = inno_hdmi_phy_rk3328_clk_unprepare,
+ .is_prepared = inno_hdmi_phy_rk3328_clk_is_prepared,
+ .recalc_rate = inno_hdmi_phy_rk3328_clk_recalc_rate,
+ .round_rate = inno_hdmi_phy_rk3328_clk_round_rate,
+ .set_rate = inno_hdmi_phy_rk3328_clk_set_rate,
+};
+
+static int inno_hdmi_phy_clk_register(struct inno_hdmi_phy *inno)
+{
+ struct device *dev = inno->dev;
+ struct device_node *np = dev->of_node;
+ struct clk_init_data init;
+ const char *parent_name;
+ int ret;
+
+ parent_name = __clk_get_name(inno->refoclk);
+
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ init.flags = 0;
+ init.name = "pin_hd20_pclk";
+ init.ops = inno->plat_data->clk_ops;
+
+ /* optional override of the clock name */
+ of_property_read_string(np, "clock-output-names", &init.name);
+
+ inno->hw.init = &init;
+
+ inno->phyclk = devm_clk_register(dev, &inno->hw);
+ if (IS_ERR(inno->phyclk)) {
+ ret = PTR_ERR(inno->phyclk);
+ dev_err(dev, "failed to register clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_clk_add_provider(np, of_clk_src_simple_get, inno->phyclk);
+ if (ret) {
+ dev_err(dev, "failed to register clock provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int inno_hdmi_phy_rk3228_init(struct inno_hdmi_phy *inno)
+{
+ /*
+ * Use the phy-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3228_BYPASS_RXSENSE_EN |
+ RK3228_BYPASS_PWRON_EN |
+ RK3228_BYPASS_PLLPD_EN);
+ inno_update_bits(inno, 0x02, RK3228_BYPASS_PDATA_EN,
+ RK3228_BYPASS_PDATA_EN);
+
+ /* manual power down post-PLL */
+ inno_update_bits(inno, 0xaa, RK3228_POST_PLL_CTRL_MANUAL,
+ RK3228_POST_PLL_CTRL_MANUAL);
+
+ inno->chip_version = 1;
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3228_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE,
+ RK3228_PDATAEN_DISABLE);
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN,
+ RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN);
+
+ /* Post-PLL update */
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_PRE_DIV_MASK,
+ RK3228_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_FB_DIV_8_MASK,
+ RK3228_POST_PLL_FB_DIV_8(cfg->fbdiv));
+ inno_write(inno, 0xea, RK3228_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+
+ if (cfg->postdiv == 1) {
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ 0);
+ } else {
+ int div = cfg->postdiv / 2 - 1;
+
+ inno_update_bits(inno, 0xe9, RK3228_POST_PLL_POST_DIV_ENABLE,
+ RK3228_POST_PLL_POST_DIV_ENABLE);
+ inno_update_bits(inno, 0xeb, RK3228_POST_PLL_POST_DIV_MASK,
+ RK3228_POST_PLL_POST_DIV(div));
+ }
+
+ for (v = 0; v < 4; v++)
+ inno_write(inno, 0xef + v, phy_cfg->regs[v]);
+
+ inno_update_bits(inno, 0xe0, RK3228_PRE_PLL_POWER_DOWN |
+ RK3228_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE,
+ RK3228_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE,
+ RK3228_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xeb, v, v & RK3228_POST_PLL_LOCK_STATUS,
+ 100, 100000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3228_PDATAEN_DISABLE, 0);
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3228_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xe1, RK3228_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xe1, RK3228_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xe0, RK3228_POST_PLL_POWER_DOWN,
+ RK3228_POST_PLL_POWER_DOWN);
+}
+
+static const struct inno_hdmi_phy_ops rk3228_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3228_init,
+ .power_on = inno_hdmi_phy_rk3228_power_on,
+ .power_off = inno_hdmi_phy_rk3228_power_off,
+};
+
+static int inno_hdmi_phy_rk3328_init(struct inno_hdmi_phy *inno)
+{
+ struct nvmem_cell *cell;
+ unsigned char *efuse_buf;
+ size_t len;
+
+ /*
+ * Use the phy-internal registers to control the
+ * rxsense/poweron/pllpd/pdataen signals.
+ */
+ inno_write(inno, 0x01, RK3328_BYPASS_RXSENSE_EN |
+ RK3328_BYPASS_POWERON_EN |
+ RK3328_BYPASS_PLLPD_EN);
+ inno_write(inno, 0x02, RK3328_INT_POL_HIGH | RK3328_BYPASS_PDATA_EN |
+ RK3328_PDATA_EN);
+
+ /* Disable phy irq */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+
+ /* try to read the chip-version */
+ inno->chip_version = 1;
+ cell = nvmem_cell_get(inno->dev, "cpu-version");
+ if (IS_ERR(cell)) {
+ if (PTR_ERR(cell) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ return 0;
+ }
+
+ efuse_buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(efuse_buf))
+ return 0;
+ if (len == 1)
+ inno->chip_version = efuse_buf[0] + 1;
+ kfree(efuse_buf);
+
+ return 0;
+}
+
+static int
+inno_hdmi_phy_rk3328_power_on(struct inno_hdmi_phy *inno,
+ const struct post_pll_config *cfg,
+ const struct phy_config *phy_cfg)
+{
+ int ret;
+ u32 v;
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ inno_write(inno, 0xac, RK3328_POST_PLL_FB_DIV_7_0(cfg->fbdiv));
+ if (cfg->postdiv == 1) {
+ inno_write(inno, 0xaa, RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ } else {
+ v = (cfg->postdiv / 2) - 1;
+ v &= RK3328_POST_PLL_POST_DIV_MASK;
+ inno_write(inno, 0xad, v);
+ inno_write(inno, 0xab, RK3328_POST_PLL_FB_DIV_8(cfg->fbdiv) |
+ RK3328_POST_PLL_PRE_DIV(cfg->prediv));
+ inno_write(inno, 0xaa, RK3328_POST_PLL_POST_DIV_ENABLE |
+ RK3328_POST_PLL_REFCLK_SEL_TMDS);
+ }
+
+ for (v = 0; v < 14; v++)
+ inno_write(inno, 0xb5 + v, phy_cfg->regs[v]);
+
+ /* set ESD detection threshold for TMDS CLK, D2, D1 and D0 */
+ for (v = 0; v < 4; v++)
+ inno_update_bits(inno, 0xc8 + v, RK3328_ESD_DETECT_MASK,
+ RK3328_ESD_DETECT_340MV);
+
+ if (phy_cfg->tmdsclock > 340000000) {
+ /* Set termination resistor to 100ohm */
+ v = clk_get_rate(inno->sysclk) / 100000;
+ inno_write(inno, 0xc5, RK3328_TERM_RESISTOR_CALIB_SPEED_14_8(v)
+ | RK3328_BYPASS_TERM_RESISTOR_CALIB);
+ inno_write(inno, 0xc6, RK3328_TERM_RESISTOR_CALIB_SPEED_7_0(v));
+ inno_write(inno, 0xc7, RK3328_TERM_RESISTOR_100);
+ inno_update_bits(inno, 0xc5,
+ RK3328_BYPASS_TERM_RESISTOR_CALIB, 0);
+ } else {
+ inno_write(inno, 0xc5, RK3328_BYPASS_TERM_RESISTOR_CALIB);
+
+ /* clk termination resistor is 50ohm (parallel resistors) */
+ if (phy_cfg->tmdsclock > 165000000)
+ inno_update_bits(inno, 0xc8,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_75 |
+ RK3328_TMDS_TERM_RESIST_150);
+
+ /* data termination resistor for D2, D1 and D0 is 150ohm */
+ for (v = 0; v < 3; v++)
+ inno_update_bits(inno, 0xc9 + v,
+ RK3328_TMDS_TERM_RESIST_MASK,
+ RK3328_TMDS_TERM_RESIST_150);
+ }
+
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE,
+ RK3328_BANDGAP_ENABLE);
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE,
+ RK3328_TMDS_DRIVER_ENABLE);
+
+ /* Wait for post PLL lock */
+ ret = inno_poll(inno, 0xaf, v, v & RK3328_POST_PLL_LOCK_STATUS,
+ 1000, 10000);
+ if (ret) {
+ dev_err(inno->dev, "Post-PLL locking failed\n");
+ return ret;
+ }
+
+ if (phy_cfg->tmdsclock > 340000000)
+ msleep(100);
+
+ inno_update_bits(inno, 0x02, RK3328_PDATA_EN, RK3328_PDATA_EN);
+
+ /* Enable PHY IRQ */
+ inno_write(inno, 0x05, RK3328_INT_TMDS_CLK(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D2(RK3328_INT_VSS_AGND_ESD_DET));
+ inno_write(inno, 0x07, RK3328_INT_TMDS_D1(RK3328_INT_VSS_AGND_ESD_DET)
+ | RK3328_INT_TMDS_D0(RK3328_INT_VSS_AGND_ESD_DET));
+ return 0;
+}
+
+static void inno_hdmi_phy_rk3328_power_off(struct inno_hdmi_phy *inno)
+{
+ inno_update_bits(inno, 0xb2, RK3328_TMDS_DRIVER_ENABLE, 0);
+ inno_update_bits(inno, 0xb0, RK3328_BANDGAP_ENABLE, 0);
+ inno_update_bits(inno, 0xaa, RK3328_POST_PLL_POWER_DOWN,
+ RK3328_POST_PLL_POWER_DOWN);
+
+ /* Disable PHY IRQ */
+ inno_write(inno, 0x05, 0);
+ inno_write(inno, 0x07, 0);
+}
+
+static const struct inno_hdmi_phy_ops rk3328_hdmi_phy_ops = {
+ .init = inno_hdmi_phy_rk3328_init,
+ .power_on = inno_hdmi_phy_rk3328_power_on,
+ .power_off = inno_hdmi_phy_rk3328_power_off,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3228_hdmi_phy_drv_data = {
+ .ops = &rk3228_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3228_clk_ops,
+ .phy_cfg_table = rk3228_phy_cfg,
+};
+
+static const struct inno_hdmi_phy_drv_data rk3328_hdmi_phy_drv_data = {
+ .ops = &rk3328_hdmi_phy_ops,
+ .clk_ops = &inno_hdmi_phy_rk3328_clk_ops,
+ .phy_cfg_table = rk3328_phy_cfg,
+};
+
+static const struct regmap_config inno_hdmi_phy_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x400,
+};
+
+static void inno_hdmi_phy_action(void *data)
+{
+ struct inno_hdmi_phy *inno = data;
+
+ clk_disable_unprepare(inno->refpclk);
+ clk_disable_unprepare(inno->sysclk);
+}
+
+static int inno_hdmi_phy_probe(struct platform_device *pdev)
+{
+ struct inno_hdmi_phy *inno;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ inno = devm_kzalloc(&pdev->dev, sizeof(*inno), GFP_KERNEL);
+ if (!inno)
+ return -ENOMEM;
+
+ inno->dev = &pdev->dev;
+
+ inno->plat_data = of_device_get_match_data(inno->dev);
+ if (!inno->plat_data || !inno->plat_data->ops)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(inno->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ inno->sysclk = devm_clk_get(inno->dev, "sysclk");
+ if (IS_ERR(inno->sysclk)) {
+ ret = PTR_ERR(inno->sysclk);
+ dev_err(inno->dev, "failed to get sysclk: %d\n", ret);
+ return ret;
+ }
+
+ inno->refpclk = devm_clk_get(inno->dev, "refpclk");
+ if (IS_ERR(inno->refpclk)) {
+ ret = PTR_ERR(inno->refpclk);
+ dev_err(inno->dev, "failed to get ref clock: %d\n", ret);
+ return ret;
+ }
+
+ inno->refoclk = devm_clk_get(inno->dev, "refoclk");
+ if (IS_ERR(inno->refoclk)) {
+ ret = PTR_ERR(inno->refoclk);
+ dev_err(inno->dev, "failed to get oscillator-ref clock: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(inno->sysclk);
+ if (ret) {
+ dev_err(inno->dev, "Cannot enable inno phy sysclk: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * The refpclk needs to stay enabled, at least on the rk3328,
+ * for reasons that are still unknown.
+ */
+ ret = clk_prepare_enable(inno->refpclk);
+ if (ret) {
+ dev_err(inno->dev, "failed to enable refpclk\n");
+ clk_disable_unprepare(inno->sysclk);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(inno->dev, inno_hdmi_phy_action,
+ inno);
+ if (ret)
+ return ret;
+
+ inno->regmap = devm_regmap_init_mmio(inno->dev, regs,
+ &inno_hdmi_phy_regmap_config);
+ if (IS_ERR(inno->regmap))
+ return PTR_ERR(inno->regmap);
+
+ /* only the newer rk3328 hdmiphy has an interrupt */
+ inno->irq = platform_get_irq(pdev, 0);
+ if (inno->irq > 0) {
+ ret = devm_request_threaded_irq(inno->dev, inno->irq,
+ inno_hdmi_phy_rk3328_hardirq,
+ inno_hdmi_phy_rk3328_irq,
+ IRQF_SHARED,
+ dev_name(inno->dev), inno);
+ if (ret)
+ return ret;
+ }
+
+ inno->phy = devm_phy_create(inno->dev, NULL, &inno_hdmi_phy_ops);
+ if (IS_ERR(inno->phy)) {
+ dev_err(inno->dev, "failed to create HDMI PHY\n");
+ return PTR_ERR(inno->phy);
+ }
+
+ phy_set_drvdata(inno->phy, inno);
+ phy_set_bus_width(inno->phy, 8);
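+ /* default to a bus width of 8 bits per channel */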
+
+ if (inno->plat_data->ops->init) {
+ ret = inno->plat_data->ops->init(inno);
+ if (ret)
+ return ret;
+ }
+
+ ret = inno_hdmi_phy_clk_register(inno);
+ if (ret)
+ return ret;
+
+ phy_provider = devm_of_phy_provider_register(inno->dev,
+ of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static int inno_hdmi_phy_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+
+ return 0;
+}
+
+static const struct of_device_id inno_hdmi_phy_of_match[] = {
+ {
+ .compatible = "rockchip,rk3228-hdmi-phy",
+ .data = &rk3228_hdmi_phy_drv_data
+ }, {
+ .compatible = "rockchip,rk3328-hdmi-phy",
+ .data = &rk3328_hdmi_phy_drv_data
+ }, { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, inno_hdmi_phy_of_match);
+
+static struct platform_driver inno_hdmi_phy_driver = {
+ .probe = inno_hdmi_phy_probe,
+ .remove = inno_hdmi_phy_remove,
+ .driver = {
+ .name = "inno-hdmi-phy",
+ .of_match_table = inno_hdmi_phy_of_match,
+ },
+};
+module_platform_driver(inno_hdmi_phy_driver);
+
+MODULE_AUTHOR("Zheng Yang <zhengyang@rock-chips.com>");
+MODULE_DESCRIPTION("Innosilion HDMI 2.0 Transmitter PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 5049dac79bd0..24bd2717abdb 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -1116,8 +1116,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (of_property_read_u32(np, "reg", &reg)) {
- dev_err(dev, "the reg property is not assigned in %s node\n",
- np->name);
+ dev_err(dev, "the reg property is not assigned in %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1143,8 +1143,8 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
}
if (!rphy->phy_cfg) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c
index 76a4b58ec771..c57e496f0b0c 100644
--- a/drivers/phy/rockchip/phy-rockchip-typec.c
+++ b/drivers/phy/rockchip/phy-rockchip-typec.c
@@ -1145,8 +1145,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
}
if (!tcphy->port_cfgs) {
- dev_err(dev, "no phy-config can be matched with %s node\n",
- np->name);
+ dev_err(dev, "no phy-config can be matched with %pOFn node\n",
+ np);
return -EINVAL;
}
@@ -1186,8 +1186,8 @@ static int rockchip_typec_phy_probe(struct platform_device *pdev)
continue;
if (IS_ERR(phy)) {
- dev_err(dev, "failed to create phy: %s\n",
- child_np->name);
+ dev_err(dev, "failed to create phy: %pOFn\n",
+ child_np);
pm_runtime_disable(dev);
return PTR_ERR(phy);
}
diff --git a/drivers/phy/rockchip/phy-rockchip-usb.c b/drivers/phy/rockchip/phy-rockchip-usb.c
index 3378eeb7a562..b2899c744ad9 100644
--- a/drivers/phy/rockchip/phy-rockchip-usb.c
+++ b/drivers/phy/rockchip/phy-rockchip-usb.c
@@ -36,7 +36,22 @@ static int enable_usb_uart;
#define HIWORD_UPDATE(val, mask) \
((val) | (mask) << 16)
-#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0 0x00
+#define UOC_CON0_SIDDQ BIT(13)
+#define UOC_CON0_DISABLE BIT(4)
+#define UOC_CON0_COMMON_ON_N BIT(0)
+
+#define UOC_CON2 0x08
+#define UOC_CON2_SOFT_CON_SEL BIT(2)
+
+#define UOC_CON3 0x0c
+/* bits present on rk3188 and rk3288 phys */
+#define UOC_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
+#define UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
+#define UOC_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
+#define UOC_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
+#define UOC_CON3_UTMI_OPMODE_MASK (3 << 1)
+#define UOC_CON3_UTMI_SUSPENDN BIT(0)
struct rockchip_usb_phys {
int reg;
@@ -46,7 +61,8 @@ struct rockchip_usb_phys {
struct rockchip_usb_phy_base;
struct rockchip_usb_phy_pdata {
struct rockchip_usb_phys *phys;
- int (*init_usb_uart)(struct regmap *grf);
+ int (*init_usb_uart)(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata);
int usb_uart_phy;
};
@@ -208,8 +224,8 @@ static int rockchip_usb_phy_init(struct rockchip_usb_phy_base *base,
rk_phy->np = child;
if (of_property_read_u32(child, "reg", &reg_offset)) {
- dev_err(base->dev, "missing reg property in node %s\n",
- child->name);
+ dev_err(base->dev, "missing reg property in node %pOFn\n",
+ child);
return -EINVAL;
}
@@ -313,28 +329,88 @@ static const struct rockchip_usb_phy_pdata rk3066a_pdata = {
},
};
+static int __init rockchip_init_usb_uart_common(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ int regoffs = pdata->phys[pdata->usb_uart_phy].reg;
+ int ret;
+ u32 val;
+
+ /*
+ * COMMON_ON and DISABLE settings are described in the TRM,
+ * but were not present in the original code.
+ * Also disable the analog phy components to save power.
+ */
+ val = HIWORD_UPDATE(UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ,
+ UOC_CON0_COMMON_ON_N
+ | UOC_CON0_DISABLE
+ | UOC_CON0_SIDDQ);
+ ret = regmap_write(grf, regoffs + UOC_CON0, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON2_SOFT_CON_SEL,
+ UOC_CON2_SOFT_CON_SEL);
+ ret = regmap_write(grf, regoffs + UOC_CON2, val);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(UOC_CON3_UTMI_OPMODE_NODRIVING
+ | UOC_CON3_UTMI_XCVRSEELCT_FSTRANSC
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED,
+ UOC_CON3_UTMI_SUSPENDN
+ | UOC_CON3_UTMI_OPMODE_MASK
+ | UOC_CON3_UTMI_XCVRSEELCT_MASK
+ | UOC_CON3_UTMI_TERMSEL_FULLSPEED);
+ ret = regmap_write(grf, regoffs + UOC_CON3, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+#define RK3188_UOC0_CON0 0x10c
+#define RK3188_UOC0_CON0_BYPASSSEL BIT(9)
+#define RK3188_UOC0_CON0_BYPASSDMEN BIT(8)
+
+/*
+ * Enable the bypass of uart2 data through the otg usb phy.
+ * See the description of the rk3288 variant for details.
+ */
+static int __init rk3188_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
+{
+ u32 val;
+ int ret;
+
+ ret = rockchip_init_usb_uart_common(grf, pdata);
+ if (ret)
+ return ret;
+
+ val = HIWORD_UPDATE(RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN,
+ RK3188_UOC0_CON0_BYPASSSEL
+ | RK3188_UOC0_CON0_BYPASSDMEN);
+ ret = regmap_write(grf, RK3188_UOC0_CON0, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct rockchip_usb_phy_pdata rk3188_pdata = {
.phys = (struct rockchip_usb_phys[]){
{ .reg = 0x10c, .pll_name = "sclk_otgphy0_480m" },
{ .reg = 0x11c, .pll_name = "sclk_otgphy1_480m" },
{ /* sentinel */ }
},
+ .init_usb_uart = rk3188_init_usb_uart,
+ .usb_uart_phy = 0,
};
-#define RK3288_UOC0_CON0 0x320
-#define RK3288_UOC0_CON0_COMMON_ON_N BIT(0)
-#define RK3288_UOC0_CON0_DISABLE BIT(4)
-
-#define RK3288_UOC0_CON2 0x328
-#define RK3288_UOC0_CON2_SOFT_CON_SEL BIT(2)
-
#define RK3288_UOC0_CON3 0x32c
-#define RK3288_UOC0_CON3_UTMI_SUSPENDN BIT(0)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING (1 << 1)
-#define RK3288_UOC0_CON3_UTMI_OPMODE_MASK (3 << 1)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC (1 << 3)
-#define RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK (3 << 3)
-#define RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED BIT(5)
#define RK3288_UOC0_CON3_BYPASSDMEN BIT(6)
#define RK3288_UOC0_CON3_BYPASSSEL BIT(7)
@@ -353,40 +429,13 @@ static const struct rockchip_usb_phy_pdata rk3188_pdata = {
*
* The actual code in the vendor kernel does some things differently.
*/
-static int __init rk3288_init_usb_uart(struct regmap *grf)
+static int __init rk3288_init_usb_uart(struct regmap *grf,
+ const struct rockchip_usb_phy_pdata *pdata)
{
u32 val;
int ret;
- /*
- * COMMON_ON and DISABLE settings are described in the TRM,
- * but were not present in the original code.
- * Also disable the analog phy components to save power.
- */
- val = HIWORD_UPDATE(RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ,
- RK3288_UOC0_CON0_COMMON_ON_N
- | RK3288_UOC0_CON0_DISABLE
- | UOC_CON0_SIDDQ);
- ret = regmap_write(grf, RK3288_UOC0_CON0, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON2_SOFT_CON_SEL,
- RK3288_UOC0_CON2_SOFT_CON_SEL);
- ret = regmap_write(grf, RK3288_UOC0_CON2, val);
- if (ret)
- return ret;
-
- val = HIWORD_UPDATE(RK3288_UOC0_CON3_UTMI_OPMODE_NODRIVING
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_FSTRANSC
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED,
- RK3288_UOC0_CON3_UTMI_SUSPENDN
- | RK3288_UOC0_CON3_UTMI_OPMODE_MASK
- | RK3288_UOC0_CON3_UTMI_XCVRSEELCT_MASK
- | RK3288_UOC0_CON3_UTMI_TERMSEL_FULLSPEED);
- ret = regmap_write(grf, RK3288_UOC0_CON3, val);
+ ret = rockchip_init_usb_uart_common(grf, pdata);
if (ret)
return ret;
@@ -516,7 +565,7 @@ static int __init rockchip_init_usb_uart(void)
return PTR_ERR(grf);
}
- ret = data->init_usb_uart(grf);
+ ret = data->init_usb_uart(grf, data);
if (ret) {
pr_err("%s: could not init usb_uart, %d\n", __func__, ret);
enable_usb_uart = 0;
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
new file mode 100644
index 000000000000..467e8147972b
--- /dev/null
+++ b/drivers/phy/socionext/Kconfig
@@ -0,0 +1,34 @@
+#
+# PHY drivers for Socionext platforms.
+#
+
+config PHY_UNIPHIER_USB2
+ tristate "UniPhier USB2 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ select MFD_SYSCON
+ help
+ Enable this to support the USB PHY implemented on the USB2
+ controller of UniPhier SoCs. This driver provides an interface to
+ interact with the USB 2.0 PHY that is part of the UniPhier SoC.
+ On Pro4 it is necessary to specify this USB2 PHY instead of the
+ USB3 HS-PHY.
+
+config PHY_UNIPHIER_USB3
+ tristate "UniPhier USB3 PHY driver"
+ depends on ARCH_UNIPHIER || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+ Enable this to support the USB PHY implemented in the USB3
+ controller of UniPhier SoCs. This controller supports USB 3.0 and
+ lower speeds.
+
+config PHY_UNIPHIER_PCIE
+ tristate "Uniphier PHY driver for PCIe controller"
+ depends on (ARCH_UNIPHIER || COMPILE_TEST) && OF
+ default PCIE_UNIPHIER
+ select GENERIC_PHY
+ help
+ Enable this to support the PHY implemented in the PCIe controller
+ on UniPhier SoCs. This driver supports the LD20 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/Makefile b/drivers/phy/socionext/Makefile
new file mode 100644
index 000000000000..7dc9095b5bb7
--- /dev/null
+++ b/drivers/phy/socionext/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the phy drivers.
+#
+
+obj-$(CONFIG_PHY_UNIPHIER_USB2) += phy-uniphier-usb2.o
+obj-$(CONFIG_PHY_UNIPHIER_USB3) += phy-uniphier-usb3hs.o phy-uniphier-usb3ss.o
+obj-$(CONFIG_PHY_UNIPHIER_PCIE) += phy-uniphier-pcie.o
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
new file mode 100644
index 000000000000..93ffbd2940fa
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-pcie.c - PHY driver for UniPhier PCIe controller
+ * Copyright 2018, Socionext Inc.
+ * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/resource.h>
+
+/* PHY */
+#define PCL_PHY_TEST_I 0x2000
+#define PCL_PHY_TEST_O 0x2004
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PCL_PHY_RESET 0x200c
+#define PCL_PHY_RESET_N_MNMODE BIT(8) /* =1:manual */
+#define PCL_PHY_RESET_N BIT(0) /* =1:deassert */
+
+/* SG */
+#define SG_USBPCIESEL 0x590
+#define SG_USBPCIESEL_PCIE BIT(0)
+
+#define PCL_PHY_R00 0
+#define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */
+#define PCL_PHY_R06 6
+#define RX_EQ_ADJ GENMASK(5, 0) /* EQ adjustment value */
+#define RX_EQ_ADJ_VAL 0
+#define PCL_PHY_R26 26
+#define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */
+#define VCO_CTRL_INIT_VAL 5
+
+struct uniphier_pciephy_priv {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *clk;
+ struct reset_control *rst;
+ const struct uniphier_pciephy_soc_data *data;
+};
+
+struct uniphier_pciephy_soc_data {
+ bool has_syscon;
+};
+
+static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + PCL_PHY_TEST_I);
+ readl(priv->base + PCL_PHY_TEST_O);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
+
+static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
+ u32 reg, u32 mask, u32 param)
+{
+ u32 val;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ val = readl(priv->base + PCL_PHY_TEST_O);
+
+ /* update value (assumes TESTO returns the current data in its low bits) */
+ val &= ~mask;
+ val |= mask & param;
+ val = FIELD_PREP(TESTI_DAT_MASK, val);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
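+ /* pulse WR_EN to latch the new value into the register */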
+ uniphier_pciephy_testio_write(priv, val);
+ uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_pciephy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, reg);
+ uniphier_pciephy_testio_write(priv, val);
+ readl(priv->base + PCL_PHY_TEST_O);
+}
+
+static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val &= ~PCL_PHY_RESET_N;
+ val |= PCL_PHY_RESET_N_MNMODE;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static void uniphier_pciephy_deassert(struct uniphier_pciephy_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->base + PCL_PHY_RESET);
+ val |= PCL_PHY_RESET_N_MNMODE | PCL_PHY_RESET_N;
+ writel(val, priv->base + PCL_PHY_RESET);
+}
+
+static int uniphier_pciephy_init(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ uniphier_pciephy_set_param(priv, PCL_PHY_R00,
+ RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
+ uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
+ FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
+ uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
+ FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
+ usleep_range(1, 10);
+
+ uniphier_pciephy_deassert(priv);
+ usleep_range(1, 10);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+}
+
+static int uniphier_pciephy_exit(struct phy *phy)
+{
+ struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
+
+ uniphier_pciephy_assert(priv);
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_pciephy_ops = {
+ .init = uniphier_pciephy_init,
+ .exit = uniphier_pciephy_exit,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_pciephy_probe(struct platform_device *pdev)
+{
+ struct uniphier_pciephy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data))
+ return -EINVAL;
+
+ priv->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_pciephy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "socionext,syscon");
+ if (!IS_ERR(regmap) && priv->data->has_syscon)
+ regmap_update_bits(regmap, SG_USBPCIESEL,
+ SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
+ .has_syscon = true,
+};
+
+static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
+ .has_syscon = false,
+};
+
+static const struct of_device_id uniphier_pciephy_match[] = {
+ {
+ .compatible = "socionext,uniphier-ld20-pcie-phy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-pcie-phy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, uniphier_pciephy_match);
+
+static struct platform_driver uniphier_pciephy_driver = {
+ .probe = uniphier_pciephy_probe,
+ .driver = {
+ .name = "uniphier-pcie-phy",
+ .of_match_table = uniphier_pciephy_match,
+ },
+};
+module_platform_driver(uniphier_pciephy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb2.c b/drivers/phy/socionext/phy-uniphier-usb2.c
new file mode 100644
index 000000000000..3f2086ed4fe4
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb2.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb2.c - PHY driver for UniPhier USB2 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SG_USBPHY1CTRL 0x500
+#define SG_USBPHY1CTRL2 0x504
+#define SG_USBPHY2CTRL 0x508
+#define SG_USBPHY2CTRL2 0x50c /* LD11 */
+#define SG_USBPHY12PLL 0x50c /* Pro4 */
+#define SG_USBPHY3CTRL 0x510
+#define SG_USBPHY3CTRL2 0x514
+#define SG_USBPHY4CTRL 0x518 /* Pro4 */
+#define SG_USBPHY4CTRL2 0x51c /* Pro4 */
+#define SG_USBPHY34PLL 0x51c /* Pro4 */
+
+struct uniphier_u2phy_param {
+ u32 offset;
+ u32 value;
+};
+
+struct uniphier_u2phy_soc_data {
+ struct uniphier_u2phy_param config0;
+ struct uniphier_u2phy_param config1;
+};
+
+struct uniphier_u2phy_priv {
+ struct regmap *regmap;
+ struct phy *phy;
+ struct regulator *vbus;
+ const struct uniphier_u2phy_soc_data *data;
+ struct uniphier_u2phy_priv *next;
+};
+
+static int uniphier_u2phy_power_on(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+ int ret = 0;
+
+ if (priv->vbus)
+ ret = regulator_enable(priv->vbus);
+
+ return ret;
+}
+
+static int uniphier_u2phy_power_off(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ return 0;
+}
+
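+/* apply the SoC-specific config pair through the parent system controller */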
+static int uniphier_u2phy_init(struct phy *phy)
+{
+ struct uniphier_u2phy_priv *priv = phy_get_drvdata(phy);
+
+ if (!priv->data)
+ return 0;
+
+ regmap_write(priv->regmap, priv->data->config0.offset,
+ priv->data->config0.value);
+ regmap_write(priv->regmap, priv->data->config1.offset,
+ priv->data->config1.value);
+
+ return 0;
+}
+
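+/* walk the chained PHY instances to find the one whose node matches the request */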
+static struct phy *uniphier_u2phy_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct uniphier_u2phy_priv *priv = dev_get_drvdata(dev);
+
+ while (priv && args->np != priv->phy->dev.of_node)
+ priv = priv->next;
+
+ if (!priv) {
+ dev_err(dev, "Failed to find appropriate phy\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return priv->phy;
+}
+
+static const struct phy_ops uniphier_u2phy_ops = {
+ .init = uniphier_u2phy_init,
+ .power_on = uniphier_u2phy_power_on,
+ .power_off = uniphier_u2phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u2phy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent, *child;
+ struct uniphier_u2phy_priv *priv = NULL, *next = NULL;
+ struct phy_provider *phy_provider;
+ struct regmap *regmap;
+ const struct uniphier_u2phy_soc_data *data;
+ int ret, data_idx, ndatas;
+
+ data = of_device_get_match_data(dev);
+ if (WARN_ON(!data))
+ return -EINVAL;
+
+ /* count the SoC data entries; the array ends with a zero sentinel */
+ for (ndatas = 0; data[ndatas].config0.offset; ndatas++)
+ ;
+
+ parent = of_get_parent(dev->of_node);
+ regmap = syscon_node_to_regmap(parent);
+ of_node_put(parent);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to get regmap\n");
+ return PTR_ERR(regmap);
+ }
+
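+ /* create one PHY instance per child node and chain them for the xlate lookup */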
+ for_each_child_of_node(dev->of_node, child) {
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto out_put_child;
+ }
+ priv->regmap = regmap;
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER) {
+ ret = PTR_ERR(priv->vbus);
+ goto out_put_child;
+ }
+ priv->vbus = NULL;
+ }
+
+ priv->phy = devm_phy_create(dev, child, &uniphier_u2phy_ops);
+ if (IS_ERR(priv->phy)) {
+ dev_err(dev, "Failed to create phy\n");
+ ret = PTR_ERR(priv->phy);
+ goto out_put_child;
+ }
+
+ ret = of_property_read_u32(child, "reg", &data_idx);
+ if (ret) {
+ dev_err(dev, "Failed to get reg property\n");
+ goto out_put_child;
+ }
+
+ if (data_idx < ndatas)
+ priv->data = &data[data_idx];
+ else
+ dev_warn(dev, "No phy configuration: %s\n",
+ child->full_name);
+
+ phy_set_drvdata(priv->phy, priv);
+ priv->next = next;
+ next = priv;
+ }
+
+ dev_set_drvdata(dev, priv);
+ phy_provider = devm_of_phy_provider_register(dev,
+ uniphier_u2phy_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+
+out_put_child:
+ of_node_put(child);
+
+ return ret;
+}
+
+static const struct uniphier_u2phy_soc_data uniphier_pro4_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY12PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ {
+ .config0 = { SG_USBPHY4CTRL, 0x05142400 },
+ .config1 = { SG_USBPHY34PLL, 0x00010010 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct uniphier_u2phy_soc_data uniphier_ld11_data[] = {
+ {
+ .config0 = { SG_USBPHY1CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY1CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY2CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY2CTRL2, 0x00000106 },
+ },
+ {
+ .config0 = { SG_USBPHY3CTRL, 0x82280000 },
+ .config1 = { SG_USBPHY3CTRL2, 0x00000106 },
+ },
+ { /* sentinel */ }
+};
+
+static const struct of_device_id uniphier_u2phy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb2-phy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld11-usb2-phy",
+ .data = &uniphier_ld11_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u2phy_match);
+
+static struct platform_driver uniphier_u2phy_driver = {
+ .probe = uniphier_u2phy_probe,
+ .driver = {
+ .name = "uniphier-usb2-phy",
+ .of_match_table = uniphier_u2phy_match,
+ },
+};
+module_platform_driver(uniphier_u2phy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier PHY driver for USB2 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
new file mode 100644
index 000000000000..b1b048be6166
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3hs.c - HS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#define HSPHY_CFG0 0x0
+#define HSPHY_CFG0_HS_I_MASK GENMASK(31, 28)
+#define HSPHY_CFG0_HSDISC_MASK GENMASK(27, 26)
+#define HSPHY_CFG0_SWING_MASK GENMASK(17, 16)
+#define HSPHY_CFG0_SEL_T_MASK GENMASK(15, 12)
+#define HSPHY_CFG0_RTERM_MASK GENMASK(7, 6)
+#define HSPHY_CFG0_TRIMMASK (HSPHY_CFG0_HS_I_MASK \
+ | HSPHY_CFG0_SEL_T_MASK \
+ | HSPHY_CFG0_RTERM_MASK)
+
+#define HSPHY_CFG1 0x4
+#define HSPHY_CFG1_DAT_EN BIT(29)
+#define HSPHY_CFG1_ADR_EN BIT(28)
+#define HSPHY_CFG1_ADR_MASK GENMASK(27, 16)
+#define HSPHY_CFG1_DAT_MASK GENMASK(23, 16)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define LS_SLEW PHY_F(10, 6, 6) /* LS mode slew rate */
+#define FS_LS_DRV PHY_F(10, 5, 5) /* FS/LS slew rate */
+
+#define MAX_PHY_PARAMS 2
+
+struct uniphier_u3hsphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3hsphy_trim_param {
+ unsigned int rterm;
+ unsigned int sel_t;
+ unsigned int hs_i;
+};
+
+#define trim_param_is_valid(p) ((p)->rterm || (p)->sel_t || (p)->hs_i)
+
+struct uniphier_u3hsphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_parent, *clk_ext;
+ struct reset_control *rst, *rst_parent;
+ struct regulator *vbus;
+ const struct uniphier_u3hsphy_soc_data *data;
+};
+
+struct uniphier_u3hsphy_soc_data {
+ int nparams;
+ const struct uniphier_u3hsphy_param param[MAX_PHY_PARAMS];
+ u32 config0;
+ u32 config1;
+ void (*trim_func)(struct uniphier_u3hsphy_priv *priv, u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt);
+};
+
+static void uniphier_u3hsphy_trim_ld20(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ *pconfig &= ~HSPHY_CFG0_RTERM_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_RTERM_MASK, pt->rterm);
+
+ *pconfig &= ~HSPHY_CFG0_SEL_T_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_SEL_T_MASK, pt->sel_t);
+
+ *pconfig &= ~HSPHY_CFG0_HS_I_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HS_I_MASK, pt->hs_i);
+}
+
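+/* read one trimming parameter from the named nvmem cell */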
+static int uniphier_u3hsphy_get_nvparam(struct uniphier_u3hsphy_priv *priv,
+ const char *name, unsigned int *val)
+{
+ struct nvmem_cell *cell;
+ u8 *buf;
+
+ cell = devm_nvmem_cell_get(priv->dev, name);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, NULL);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ *val = *buf;
+
+ kfree(buf);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_get_nvparams(struct uniphier_u3hsphy_priv *priv,
+ struct uniphier_u3hsphy_trim_param *pt)
+{
+ int ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "rterm", &pt->rterm);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "sel_t", &pt->sel_t);
+ if (ret)
+ return ret;
+
+ ret = uniphier_u3hsphy_get_nvparam(priv, "hs_i", &pt->hs_i);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_update_config(struct uniphier_u3hsphy_priv *priv,
+ u32 *pconfig)
+{
+ struct uniphier_u3hsphy_trim_param trim;
+ int ret, trimmed = 0;
+
+ if (priv->data->trim_func) {
+ ret = uniphier_u3hsphy_get_nvparams(priv, &trim);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ /*
+ * Call trim_func only when trimming parameters that aren't
+ * all-zero could be read; all-zero parameters mean nothing
+ * has been written to nvmem.
+ */
+ if (!ret && trim_param_is_valid(&trim)) {
+ priv->data->trim_func(priv, pconfig, &trim);
+ trimmed = 1;
+ } else {
+ dev_dbg(priv->dev, "can't get parameter from nvmem\n");
+ }
+ }
+
+ /* use default parameters without trimming values */
+ if (!trimmed) {
+ *pconfig &= ~HSPHY_CFG0_HSDISC_MASK;
+ *pconfig |= FIELD_PREP(HSPHY_CFG0_HSDISC_MASK, 3);
+ }
+
+ return 0;
+}
+
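+/*
+ * Indirect PHY register write: latch the register number with an ADR_EN
+ * pulse on HSPHY_CFG1, then latch the field data with a DAT_EN pulse.
+ */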
+static void uniphier_u3hsphy_set_param(struct uniphier_u3hsphy_priv *priv,
+ const struct uniphier_u3hsphy_param *p)
+{
+ u32 val;
+ u32 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_MASK;
+ val |= FIELD_PREP(HSPHY_CFG1_ADR_MASK, p->field.reg_no)
+ | HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_ADR_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~FIELD_PREP(HSPHY_CFG1_DAT_MASK, field_mask);
+ data = field_mask & (p->value << p->field.lsb);
+ val |= FIELD_PREP(HSPHY_CFG1_DAT_MASK, data) | HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+
+ val = readl(priv->base + HSPHY_CFG1);
+ val &= ~HSPHY_CFG1_DAT_EN;
+ writel(val, priv->base + HSPHY_CFG1);
+}
+
+static int uniphier_u3hsphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3hsphy_init(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+ u32 config0, config1;
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_disable;
+
+ if (!priv->data->config0 && !priv->data->config1)
+ return 0;
+
+ config0 = priv->data->config0;
+ config1 = priv->data->config1;
+
+ ret = uniphier_u3hsphy_update_config(priv, &config0);
+ if (ret)
+ goto out_rst_assert;
+
+ writel(config0, priv->base + HSPHY_CFG0);
+ writel(config1, priv->base + HSPHY_CFG1);
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3hsphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3hsphy_exit(struct phy *phy)
+{
+ struct uniphier_u3hsphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3hsphy_ops = {
+ .init = uniphier_u3hsphy_init,
+ .exit = uniphier_u3hsphy_exit,
+ .power_on = uniphier_u3hsphy_power_on,
+ .power_off = uniphier_u3hsphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3hsphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3hsphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
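+ /* the external PHY clock is optional; its absence (-ENOENT) is not an error */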
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3hsphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs2_data = {
+ .nparams = 0,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_ld20_data = {
+ .nparams = 2,
+ .param = {
+ { LS_SLEW, 1 },
+ { FS_LS_DRV, 1 },
+ },
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct uniphier_u3hsphy_soc_data uniphier_pxs3_data = {
+ .nparams = 0,
+ .trim_func = uniphier_u3hsphy_trim_ld20,
+ .config0 = 0x92316680,
+ .config1 = 0x00000106,
+};
+
+static const struct of_device_id uniphier_u3hsphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-hsphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-hsphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-hsphy",
+ .data = &uniphier_pxs3_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match);
+
+static struct platform_driver uniphier_u3hsphy_driver = {
+ .probe = uniphier_u3hsphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-hsphy",
+ .of_match_table = uniphier_u3hsphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3hsphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier HS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
new file mode 100644
index 000000000000..4be95679c7d8
--- /dev/null
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-uniphier-usb3ss.c - SS-PHY driver for Socionext UniPhier USB3 controller
+ * Copyright 2015-2018 Socionext Inc.
+ * Author:
+ * Kunihiko Hayashi <hayashi.kunihiko@socionext.com>
+ * Contributors:
+ * Motoya Tanigawa <tanigawa.motoya@socionext.com>
+ * Masami Hiramatsu <masami.hiramatsu@linaro.org>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+
+#define SSPHY_TESTI 0x0
+#define SSPHY_TESTO 0x4
+#define TESTI_DAT_MASK GENMASK(13, 6)
+#define TESTI_ADR_MASK GENMASK(5, 1)
+#define TESTI_WR_EN BIT(0)
+
+#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
+
+#define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */
+#define CDR_CPF_TRIM PHY_F(8, 3, 0) /* RxPLL charge pump current 2 */
+#define TX_PLL_TRIM PHY_F(9, 3, 0) /* TxPLL charge pump current */
+#define BGAP_TRIM PHY_F(11, 3, 0) /* Bandgap voltage */
+#define CDR_TRIM PHY_F(13, 6, 5) /* Clock Data Recovery setting */
+#define VCO_CTRL PHY_F(26, 7, 4) /* VCO control */
+#define VCOPLL_CTRL PHY_F(27, 2, 0) /* TxPLL VCO tuning */
+#define VCOPLL_CM PHY_F(28, 1, 0) /* TxPLL voltage */
+
+#define MAX_PHY_PARAMS 7
+
+struct uniphier_u3ssphy_param {
+ struct {
+ int reg_no;
+ int msb;
+ int lsb;
+ } field;
+ u8 value;
+};
+
+struct uniphier_u3ssphy_priv {
+ struct device *dev;
+ void __iomem *base;
+ struct clk *clk, *clk_ext, *clk_parent, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
+ struct regulator *vbus;
+ const struct uniphier_u3ssphy_soc_data *data;
+};
+
+struct uniphier_u3ssphy_soc_data {
+ bool is_legacy;
+ int nparams;
+ const struct uniphier_u3ssphy_param param[MAX_PHY_PARAMS];
+};
+
+static void uniphier_u3ssphy_testio_write(struct uniphier_u3ssphy_priv *priv,
+ u32 data)
+{
+ /* need to read TESTO twice after accessing TESTI */
+ writel(data, priv->base + SSPHY_TESTI);
+ readl(priv->base + SSPHY_TESTO);
+ readl(priv->base + SSPHY_TESTO);
+}
+
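+/*
+ * Rewrite one bit-field of an indirect PHY register: read the current
+ * value back through TESTI/TESTO, compose the new field value, then
+ * strobe TESTI_WR_EN to commit it.
+ */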
+static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv,
+ const struct uniphier_u3ssphy_param *p)
+{
+ u32 val;
+ u8 field_mask = GENMASK(p->field.msb, p->field.lsb);
+ u8 data;
+
+ /* read previous data */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ val = readl(priv->base + SSPHY_TESTO);
+
+ /* update value */
+ val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask);
+ data = field_mask & (p->value << p->field.lsb);
+ val = FIELD_PREP(TESTI_DAT_MASK, data);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN);
+ uniphier_u3ssphy_testio_write(priv, val);
+
+ /* read current data as dummy */
+ val = FIELD_PREP(TESTI_DAT_MASK, 1);
+ val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
+ uniphier_u3ssphy_testio_write(priv, val);
+ readl(priv->base + SSPHY_TESTO);
+}
+
+static int uniphier_u3ssphy_power_on(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_ext);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ goto out_clk_ext_disable;
+
+ ret = reset_control_deassert(priv->rst);
+ if (ret)
+ goto out_clk_disable;
+
+ if (priv->vbus) {
+ ret = regulator_enable(priv->vbus);
+ if (ret)
+ goto out_rst_assert;
+ }
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
+out_clk_ext_disable:
+ clk_disable_unprepare(priv->clk_ext);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_power_off(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ if (priv->vbus)
+ regulator_disable(priv->vbus);
+
+ reset_control_assert(priv->rst);
+ clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(priv->clk_ext);
+
+ return 0;
+}
+
+static int uniphier_u3ssphy_init(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+ int i, ret;
+
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->clk_parent_gio);
+ if (ret)
+ goto out_clk_disable;
+
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
+ if (ret)
+ goto out_rst_assert;
+
+ if (priv->data->is_legacy)
+ return 0;
+
+ for (i = 0; i < priv->data->nparams; i++)
+ uniphier_u3ssphy_set_param(priv, &priv->data->param[i]);
+
+ return 0;
+
+out_rst_assert:
+ reset_control_assert(priv->rst_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk_parent);
+
+ return ret;
+}
+
+static int uniphier_u3ssphy_exit(struct phy *phy)
+{
+ struct uniphier_u3ssphy_priv *priv = phy_get_drvdata(phy);
+
+ reset_control_assert(priv->rst_parent_gio);
+ reset_control_assert(priv->rst_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
+ clk_disable_unprepare(priv->clk_parent);
+
+ return 0;
+}
+
+static const struct phy_ops uniphier_u3ssphy_ops = {
+ .init = uniphier_u3ssphy_init,
+ .exit = uniphier_u3ssphy_exit,
+ .power_on = uniphier_u3ssphy_power_on,
+ .power_off = uniphier_u3ssphy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int uniphier_u3ssphy_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct uniphier_u3ssphy_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->dev = dev;
+ priv->data = of_device_get_match_data(dev);
+ if (WARN_ON(!priv->data ||
+ priv->data->nparams > MAX_PHY_PARAMS))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
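+ /* legacy SoCs (Pro4) use the parent GIO clock/reset; others have dedicated PHY lines */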
+ if (!priv->data->is_legacy) {
+ priv->clk = devm_clk_get(dev, "phy");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ priv->clk_ext = devm_clk_get(dev, "phy-ext");
+ if (IS_ERR(priv->clk_ext)) {
+ if (PTR_ERR(priv->clk_ext) == -ENOENT)
+ priv->clk_ext = NULL;
+ else
+ return PTR_ERR(priv->clk_ext);
+ }
+
+ priv->rst = devm_reset_control_get_shared(dev, "phy");
+ if (IS_ERR(priv->rst))
+ return PTR_ERR(priv->rst);
+ } else {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+ }
+
+ priv->clk_parent = devm_clk_get(dev, "link");
+ if (IS_ERR(priv->clk_parent))
+ return PTR_ERR(priv->clk_parent);
+
+ priv->rst_parent = devm_reset_control_get_shared(dev, "link");
+ if (IS_ERR(priv->rst_parent))
+ return PTR_ERR(priv->rst_parent);
+
+ priv->vbus = devm_regulator_get_optional(dev, "vbus");
+ if (IS_ERR(priv->vbus)) {
+ if (PTR_ERR(priv->vbus) == -EPROBE_DEFER)
+ return PTR_ERR(priv->vbus);
+ priv->vbus = NULL;
+ }
+
+ phy = devm_phy_create(dev, dev->of_node, &uniphier_u3ssphy_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pro4_data = {
+ .is_legacy = true,
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_pxs2_data = {
+ .is_legacy = false,
+ .nparams = 7,
+ .param = {
+ { CDR_CPD_TRIM, 10 },
+ { CDR_CPF_TRIM, 3 },
+ { TX_PLL_TRIM, 5 },
+ { BGAP_TRIM, 9 },
+ { CDR_TRIM, 2 },
+ { VCOPLL_CTRL, 7 },
+ { VCOPLL_CM, 1 },
+ },
+};
+
+static const struct uniphier_u3ssphy_soc_data uniphier_ld20_data = {
+ .is_legacy = false,
+ .nparams = 3,
+ .param = {
+ { CDR_CPD_TRIM, 6 },
+ { CDR_TRIM, 2 },
+ { VCO_CTRL, 5 },
+ },
+};
+
+static const struct of_device_id uniphier_u3ssphy_match[] = {
+ {
+ .compatible = "socionext,uniphier-pro4-usb3-ssphy",
+ .data = &uniphier_pro4_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs2-usb3-ssphy",
+ .data = &uniphier_pxs2_data,
+ },
+ {
+ .compatible = "socionext,uniphier-ld20-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ {
+ .compatible = "socionext,uniphier-pxs3-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match);
+
+static struct platform_driver uniphier_u3ssphy_driver = {
+ .probe = uniphier_u3ssphy_probe,
+ .driver = {
+ .name = "uniphier-usb3-ssphy",
+ .of_match_table = uniphier_u3ssphy_match,
+ },
+};
+
+module_platform_driver(uniphier_u3ssphy_driver);
+
+MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>");
+MODULE_DESCRIPTION("UniPhier SS-PHY driver for USB3 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index de1b4ebe4de2..5b3b8863363e 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -115,8 +115,8 @@ int tegra_xusb_lane_parse_dt(struct tegra_xusb_lane *lane,
err = match_string(lane->soc->funcs, lane->soc->num_funcs, function);
if (err < 0) {
- dev_err(dev, "invalid function \"%s\" for lane \"%s\"\n",
- function, np->name);
+ dev_err(dev, "invalid function \"%s\" for lane \"%pOFn\"\n",
+ function, np);
return err;
}
diff --git a/drivers/phy/ti/phy-twl4030-usb.c b/drivers/phy/ti/phy-twl4030-usb.c
index a44680d64f9b..c267afb68f07 100644
--- a/drivers/phy/ti/phy-twl4030-usb.c
+++ b/drivers/phy/ti/phy-twl4030-usb.c
@@ -144,6 +144,7 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
+static irqreturn_t twl4030_usb_irq(int irq, void *_twl);
/*
* If VBUS is valid or ID is ground, then we know a
* cable is present and we need to be runtime-enabled
@@ -395,6 +396,33 @@ static void __twl4030_phy_power(struct twl4030_usb *twl, int on)
WARN_ON(twl4030_usb_write_verify(twl, PHY_PWR_CTRL, pwr) < 0);
}
+static int __maybe_unused twl4030_usb_suspend(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ /*
+ * Runtime PM must be enabled again before the IRQ is handled on
+ * resume, so disable the IRQ here to keep it from firing early.
+ * Note: wakeup on USB plug works independently of this.
+ */
+ dev_dbg(twl->dev, "%s\n", __func__);
+ disable_irq(twl->irq);
+
+ return 0;
+}
+
+static int __maybe_unused twl4030_usb_resume(struct device *dev)
+{
+ struct twl4030_usb *twl = dev_get_drvdata(dev);
+
+ dev_dbg(twl->dev, "%s\n", __func__);
+ enable_irq(twl->irq);
+ /* check whether cable status changed */
+ twl4030_usb_irq(0, twl);
+
+ return 0;
+}
+
static int __maybe_unused twl4030_usb_runtime_suspend(struct device *dev)
{
struct twl4030_usb *twl = dev_get_drvdata(dev);
@@ -655,6 +683,7 @@ static const struct phy_ops ops = {
static const struct dev_pm_ops twl4030_usb_pm_ops = {
SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend,
twl4030_usb_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(twl4030_usb_suspend, twl4030_usb_resume)
};
static int twl4030_usb_probe(struct platform_device *pdev)
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 2da567540c2d..7c639006252e 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
@@ -46,7 +47,6 @@
* exchange is properly mapped during a transfer.
*/
-
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/interrupt.h>
@@ -59,10 +59,11 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/goldfish.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/acpi.h>
+#include <linux/bug.h>
+#include "goldfish_pipe_qemu.h"
/*
* Update this when something changes in the driver's behavior so the host
@@ -73,71 +74,6 @@ enum {
PIPE_CURRENT_DEVICE_VERSION = 2
};
-/*
- * IMPORTANT: The following constants must match the ones used and defined
- * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
- */
-
-/* List of bitflags returned in status of CMD_POLL command */
-enum PipePollFlags {
- PIPE_POLL_IN = 1 << 0,
- PIPE_POLL_OUT = 1 << 1,
- PIPE_POLL_HUP = 1 << 2
-};
-
-/* Possible status values used to signal errors - see goldfish_pipe_error_convert */
-enum PipeErrors {
- PIPE_ERROR_INVAL = -1,
- PIPE_ERROR_AGAIN = -2,
- PIPE_ERROR_NOMEM = -3,
- PIPE_ERROR_IO = -4
-};
-
-/* Bit-flags used to signal events from the emulator */
-enum PipeWakeFlags {
- PIPE_WAKE_CLOSED = 1 << 0, /* emulator closed pipe */
- PIPE_WAKE_READ = 1 << 1, /* pipe can now be read from */
- PIPE_WAKE_WRITE = 1 << 2 /* pipe can now be written to */
-};
-
-/* Bit flags for the 'flags' field */
-enum PipeFlagsBits {
- BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
- BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
- BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
-};
-
-enum PipeRegs {
- PIPE_REG_CMD = 0,
-
- PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
- PIPE_REG_SIGNAL_BUFFER = 8,
- PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
-
- PIPE_REG_OPEN_BUFFER_HIGH = 20,
- PIPE_REG_OPEN_BUFFER = 24,
-
- PIPE_REG_VERSION = 36,
-
- PIPE_REG_GET_SIGNALLED = 48,
-};
-
-enum PipeCmdCode {
- PIPE_CMD_OPEN = 1, /* to be used by the pipe device itself */
- PIPE_CMD_CLOSE,
- PIPE_CMD_POLL,
- PIPE_CMD_WRITE,
- PIPE_CMD_WAKE_ON_WRITE,
- PIPE_CMD_READ,
- PIPE_CMD_WAKE_ON_READ,
-
- /*
- * TODO(zyy): implement a deferred read/write execution to allow
- * parallel processing of pipe operations on the host.
- */
- PIPE_CMD_WAKE_ON_DONE_IO,
-};
-
enum {
MAX_BUFFERS_PER_COMMAND = 336,
MAX_SIGNALLED_PIPES = 64,
@@ -145,14 +81,12 @@ enum {
};
struct goldfish_pipe_dev;
-struct goldfish_pipe;
-struct goldfish_pipe_command;
/* A per-pipe command structure, shared with the host */
struct goldfish_pipe_command {
- s32 cmd; /* PipeCmdCode, guest -> host */
- s32 id; /* pipe id, guest -> host */
- s32 status; /* command execution status, host -> guest */
+ s32 cmd; /* PipeCmdCode, guest -> host */
+ s32 id; /* pipe id, guest -> host */
+ s32 status; /* command execution status, host -> guest */
s32 reserved; /* to pad to 64-bit boundary */
union {
/* Parameters for PIPE_CMD_{READ,WRITE} */
@@ -184,19 +118,21 @@ struct open_command_param {
/* Device-level set of buffers shared with the host */
struct goldfish_pipe_dev_buffers {
struct open_command_param open_command_params;
- struct signalled_pipe_buffer signalled_pipe_buffers[
- MAX_SIGNALLED_PIPES];
+ struct signalled_pipe_buffer
+ signalled_pipe_buffers[MAX_SIGNALLED_PIPES];
};
/* This data type models a given pipe instance */
struct goldfish_pipe {
/* pipe ID - index into goldfish_pipe_dev::pipes array */
u32 id;
+
/* The wake flags pipe is waiting for
* Note: not protected with any lock, uses atomic operations
* and barriers to make it thread-safe.
*/
unsigned long flags;
+
/* wake flags host have signalled,
* - protected by goldfish_pipe_dev::lock
*/
@@ -220,8 +156,12 @@ struct goldfish_pipe {
/* A wake queue for sleeping until host signals an event */
wait_queue_head_t wake_queue;
+
/* Pointer to the parent goldfish_pipe_dev instance */
struct goldfish_pipe_dev *dev;
+
+ /* A buffer of pages, too large to fit into a stack frame */
+ struct page *pages[MAX_BUFFERS_PER_COMMAND];
};
/* The global driver data. Holds a reference to the i/o page used to
@@ -229,6 +169,9 @@ struct goldfish_pipe {
* waiting to be awoken.
*/
struct goldfish_pipe_dev {
+ /* A magic number to check if this is an instance of this struct */
+ void *magic;
+
/*
* Global device spinlock. Protects the following members:
* - pipes, pipes_capacity
@@ -261,15 +204,22 @@ struct goldfish_pipe_dev {
/* Head of a doubly linked list of signalled pipes */
struct goldfish_pipe *first_signalled_pipe;
+ /* ptr to platform device's device struct */
+ struct device *pdev_dev;
+
/* Some device-specific data */
int irq;
int version;
unsigned char __iomem *base;
-};
-static struct goldfish_pipe_dev pipe_dev[1] = {};
+ /* an irq tasklet to run goldfish_interrupt_task */
+ struct tasklet_struct irq_tasklet;
-static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+ struct miscdevice miscdev;
+};
+
+static int goldfish_pipe_cmd_locked(struct goldfish_pipe *pipe,
+ enum PipeCmdCode cmd)
{
pipe->command_buffer->cmd = cmd;
/* failure by default */
@@ -278,13 +228,13 @@ static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
return pipe->command_buffer->status;
}
-static int goldfish_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
+static int goldfish_pipe_cmd(struct goldfish_pipe *pipe, enum PipeCmdCode cmd)
{
int status;
if (mutex_lock_interruptible(&pipe->lock))
return PIPE_ERROR_IO;
- status = goldfish_cmd_locked(pipe, cmd);
+ status = goldfish_pipe_cmd_locked(pipe, cmd);
mutex_unlock(&pipe->lock);
return status;
}
@@ -307,10 +257,12 @@ static int goldfish_pipe_error_convert(int status)
}
}
-static int pin_user_pages(unsigned long first_page, unsigned long last_page,
- unsigned int last_page_size, int is_write,
- struct page *pages[MAX_BUFFERS_PER_COMMAND],
- unsigned int *iter_last_page_size)
+static int pin_user_pages(unsigned long first_page,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ int is_write,
+ struct page *pages[MAX_BUFFERS_PER_COMMAND],
+ unsigned int *iter_last_page_size)
{
int ret;
int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
@@ -322,18 +274,18 @@ static int pin_user_pages(unsigned long first_page, unsigned long last_page,
*iter_last_page_size = last_page_size;
}
- ret = get_user_pages_fast(
- first_page, requested_pages, !is_write, pages);
+ ret = get_user_pages_fast(first_page, requested_pages, !is_write,
+ pages);
if (ret <= 0)
return -EFAULT;
if (ret < requested_pages)
*iter_last_page_size = PAGE_SIZE;
- return ret;
+ return ret;
}
static void release_user_pages(struct page **pages, int pages_count,
- int is_write, s32 consumed_size)
+ int is_write, s32 consumed_size)
{
int i;
@@ -345,12 +297,15 @@ static void release_user_pages(struct page **pages, int pages_count,
}
/* Populate the call parameters, merging adjacent pages together */
-static void populate_rw_params(
- struct page **pages, int pages_count,
- unsigned long address, unsigned long address_end,
- unsigned long first_page, unsigned long last_page,
- unsigned int iter_last_page_size, int is_write,
- struct goldfish_pipe_command *command)
+static void populate_rw_params(struct page **pages,
+ int pages_count,
+ unsigned long address,
+ unsigned long address_end,
+ unsigned long first_page,
+ unsigned long last_page,
+ unsigned int iter_last_page_size,
+ int is_write,
+ struct goldfish_pipe_command *command)
{
/*
* Process the first page separately - it's the only page that
@@ -382,55 +337,59 @@ static void populate_rw_params(
}
static int transfer_max_buffers(struct goldfish_pipe *pipe,
- unsigned long address, unsigned long address_end, int is_write,
- unsigned long last_page, unsigned int last_page_size,
- s32 *consumed_size, int *status)
+ unsigned long address,
+ unsigned long address_end,
+ int is_write,
+ unsigned long last_page,
+ unsigned int last_page_size,
+ s32 *consumed_size,
+ int *status)
{
- static struct page *pages[MAX_BUFFERS_PER_COMMAND];
unsigned long first_page = address & PAGE_MASK;
unsigned int iter_last_page_size;
- int pages_count = pin_user_pages(first_page, last_page,
- last_page_size, is_write,
- pages, &iter_last_page_size);
-
- if (pages_count < 0)
- return pages_count;
+ int pages_count;
/* Serialize access to the pipe command buffers */
if (mutex_lock_interruptible(&pipe->lock))
return -ERESTARTSYS;
- populate_rw_params(pages, pages_count, address, address_end,
- first_page, last_page, iter_last_page_size, is_write,
- pipe->command_buffer);
+ pages_count = pin_user_pages(first_page, last_page,
+ last_page_size, is_write,
+ pipe->pages, &iter_last_page_size);
+ if (pages_count < 0) {
+ mutex_unlock(&pipe->lock);
+ return pages_count;
+ }
+
+ populate_rw_params(pipe->pages, pages_count, address, address_end,
+ first_page, last_page, iter_last_page_size, is_write,
+ pipe->command_buffer);
/* Transfer the data */
- *status = goldfish_cmd_locked(pipe,
+ *status = goldfish_pipe_cmd_locked(pipe,
is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ);
*consumed_size = pipe->command_buffer->rw_params.consumed_size;
- release_user_pages(pages, pages_count, is_write, *consumed_size);
+ release_user_pages(pipe->pages, pages_count, is_write, *consumed_size);
mutex_unlock(&pipe->lock);
-
return 0;
}
static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
{
- u32 wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
+ u32 wake_bit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
- set_bit(wakeBit, &pipe->flags);
+ set_bit(wake_bit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- (void)goldfish_cmd(pipe,
+ goldfish_pipe_cmd(pipe,
is_write ? PIPE_CMD_WAKE_ON_WRITE : PIPE_CMD_WAKE_ON_READ);
- while (test_bit(wakeBit, &pipe->flags)) {
- if (wait_event_interruptible(
- pipe->wake_queue,
- !test_bit(wakeBit, &pipe->flags)))
+ while (test_bit(wake_bit, &pipe->flags)) {
+ if (wait_event_interruptible(pipe->wake_queue,
+ !test_bit(wake_bit, &pipe->flags)))
return -ERESTARTSYS;
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -441,7 +400,9 @@ static int wait_for_host_signal(struct goldfish_pipe *pipe, int is_write)
}
static ssize_t goldfish_pipe_read_write(struct file *filp,
- char __user *buffer, size_t bufflen, int is_write)
+ char __user *buffer,
+ size_t bufflen,
+ int is_write)
{
struct goldfish_pipe *pipe = filp->private_data;
int count = 0, ret = -EINVAL;
@@ -456,7 +417,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
return 0;
/* Check the buffer range for access */
if (unlikely(!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
- buffer, bufflen)))
+ buffer, bufflen)))
return -EFAULT;
address = (unsigned long)buffer;
@@ -469,8 +430,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
int status;
ret = transfer_max_buffers(pipe, address, address_end, is_write,
- last_page, last_page_size, &consumed_size,
- &status);
+ last_page, last_page_size,
+ &consumed_size, &status);
if (ret < 0)
break;
@@ -496,7 +457,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
* err.
*/
if (status != PIPE_ERROR_AGAIN)
- pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n",
+ dev_err_ratelimited(pipe->dev->pdev_dev,
+ "backend error %d on %s\n",
status, is_write ? "write" : "read");
break;
}
@@ -522,19 +484,21 @@ static ssize_t goldfish_pipe_read_write(struct file *filp,
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
- size_t bufflen, loff_t *ppos)
+ size_t bufflen, loff_t *ppos)
{
return goldfish_pipe_read_write(filp, buffer, bufflen,
- /* is_write */ 0);
+ /* is_write */ 0);
}
static ssize_t goldfish_pipe_write(struct file *filp,
- const char __user *buffer, size_t bufflen,
- loff_t *ppos)
+ const char __user *buffer, size_t bufflen,
+ loff_t *ppos)
{
- return goldfish_pipe_read_write(filp,
- /* cast away the const */(char __user *)buffer, bufflen,
- /* is_write */ 1);
+ /* cast away the const */
+ char __user *no_const_buffer = (char __user *)buffer;
+
+ return goldfish_pipe_read_write(filp, no_const_buffer, bufflen,
+ /* is_write */ 1);
}
static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
@@ -545,7 +509,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &pipe->wake_queue, wait);
- status = goldfish_cmd(pipe, PIPE_CMD_POLL);
+ status = goldfish_pipe_cmd(pipe, PIPE_CMD_POLL);
if (status < 0)
return -ERESTARTSYS;
@@ -562,7 +526,7 @@ static __poll_t goldfish_pipe_poll(struct file *filp, poll_table *wait)
}
static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
- u32 id, u32 flags)
+ u32 id, u32 flags)
{
struct goldfish_pipe *pipe;
@@ -574,8 +538,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
return;
pipe->signalled_flags |= flags;
- if (pipe->prev_signalled || pipe->next_signalled
- || dev->first_signalled_pipe == pipe)
+ if (pipe->prev_signalled || pipe->next_signalled ||
+ dev->first_signalled_pipe == pipe)
return; /* already in the list */
pipe->next_signalled = dev->first_signalled_pipe;
if (dev->first_signalled_pipe)
@@ -584,7 +548,8 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev,
}
static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev,
- struct goldfish_pipe *pipe) {
+ struct goldfish_pipe *pipe)
+{
if (pipe->prev_signalled)
pipe->prev_signalled->next_signalled = pipe->next_signalled;
if (pipe->next_signalled)
@@ -623,10 +588,10 @@ static struct goldfish_pipe *signalled_pipes_pop_front(
return pipe;
}
-static void goldfish_interrupt_task(unsigned long unused)
+static void goldfish_interrupt_task(unsigned long dev_addr)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
/* Iterate over the signalled pipes and wake them one by one */
+ struct goldfish_pipe_dev *dev = (struct goldfish_pipe_dev *)dev_addr;
struct goldfish_pipe *pipe;
int wakes;
@@ -646,7 +611,9 @@ static void goldfish_interrupt_task(unsigned long unused)
wake_up_interruptible(&pipe->wake_queue);
}
}
-static DECLARE_TASKLET(goldfish_interrupt_tasklet, goldfish_interrupt_task, 0);
+
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev);
/*
* The general idea of the interrupt handling:
@@ -668,7 +635,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
unsigned long flags;
struct goldfish_pipe_dev *dev = dev_id;
- if (dev != pipe_dev)
+ if (dev->magic != &goldfish_pipe_device_deinit)
return IRQ_NONE;
/* Request the signalled pipes from the device */
@@ -689,7 +656,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&dev->lock, flags);
- tasklet_schedule(&goldfish_interrupt_tasklet);
+ tasklet_schedule(&dev->irq_tasklet);
return IRQ_HANDLED;
}
@@ -702,7 +669,10 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
return id;
{
- /* Reallocate the array */
+ /* Reallocate the array.
+ * Since get_free_pipe_id_locked runs with interrupts disabled,
+ * we don't want to make calls that could lead to sleep.
+ */
u32 new_capacity = 2 * dev->pipes_capacity;
struct goldfish_pipe **pipes =
kcalloc(new_capacity, sizeof(*pipes), GFP_ATOMIC);
@@ -717,6 +687,14 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
return id;
}
+/* A helper function to get the instance of goldfish_pipe_dev from file */
+static struct goldfish_pipe_dev *to_goldfish_pipe_dev(struct file *file)
+{
+ struct miscdevice *miscdev = file->private_data;
+
+ return container_of(miscdev, struct goldfish_pipe_dev, miscdev);
+}
+
/**
* goldfish_pipe_open - open a channel to the AVD
* @inode: inode of device
@@ -730,14 +708,15 @@ static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev)
*/
static int goldfish_pipe_open(struct inode *inode, struct file *file)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev = to_goldfish_pipe_dev(file);
unsigned long flags;
int id;
int status;
/* Allocate new pipe kernel object */
struct goldfish_pipe *pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
- if (pipe == NULL)
+
+ if (!pipe)
return -ENOMEM;
pipe->dev = dev;
@@ -748,6 +727,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
* Command buffer needs to be allocated on its own page to make sure
* it is physically contiguous in host's address space.
*/
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE);
pipe->command_buffer =
(struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL);
if (!pipe->command_buffer) {
@@ -772,7 +752,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file)
MAX_BUFFERS_PER_COMMAND;
dev->buffers->open_command_params.command_buffer_ptr =
(u64)(unsigned long)__pa(pipe->command_buffer);
- status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN);
+ status = goldfish_pipe_cmd_locked(pipe, PIPE_CMD_OPEN);
spin_unlock_irqrestore(&dev->lock, flags);
if (status < 0)
goto err_cmd;
@@ -798,7 +778,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp)
struct goldfish_pipe_dev *dev = pipe->dev;
/* The guest is closing the channel, so tell the emulator right now */
- (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE);
+ goldfish_pipe_cmd(pipe, PIPE_CMD_CLOSE);
spin_lock_irqsave(&dev->lock, flags);
dev->pipes[pipe->id] = NULL;
@@ -820,36 +800,55 @@ static const struct file_operations goldfish_pipe_fops = {
.release = goldfish_pipe_release,
};
-static struct miscdevice goldfish_pipe_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "goldfish_pipe",
- .fops = &goldfish_pipe_fops,
-};
+static void init_miscdevice(struct miscdevice *miscdev)
+{
+ memset(miscdev, 0, sizeof(*miscdev));
+
+ miscdev->minor = MISC_DYNAMIC_MINOR;
+ miscdev->name = "goldfish_pipe";
+ miscdev->fops = &goldfish_pipe_fops;
+}
+
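+/* pass a guest buffer's physical address to the host, high word first */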
+static void write_pa_addr(void *addr, void __iomem *portl, void __iomem *porth)
+{
+ const unsigned long paddr = __pa(addr);
+
+ writel(upper_32_bits(paddr), porth);
+ writel(lower_32_bits(paddr), portl);
+}
-static int goldfish_pipe_device_init(struct platform_device *pdev)
+static int goldfish_pipe_device_init(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
{
- char *page;
- struct goldfish_pipe_dev *dev = pipe_dev;
- int err = devm_request_irq(&pdev->dev, dev->irq,
- goldfish_pipe_interrupt,
- IRQF_SHARED, "goldfish_pipe", dev);
+ int err;
+
+ tasklet_init(&dev->irq_tasklet, &goldfish_interrupt_task,
+ (unsigned long)dev);
+
+ err = devm_request_irq(&pdev->dev, dev->irq,
+ goldfish_pipe_interrupt,
+ IRQF_SHARED, "goldfish_pipe", dev);
if (err) {
dev_err(&pdev->dev, "unable to allocate IRQ for v2\n");
return err;
}
- err = misc_register(&goldfish_pipe_dev);
+ init_miscdevice(&dev->miscdev);
+ err = misc_register(&dev->miscdev);
if (err) {
dev_err(&pdev->dev, "unable to register v2 device\n");
return err;
}
+ dev->pdev_dev = &pdev->dev;
dev->first_signalled_pipe = NULL;
dev->pipes_capacity = INITIAL_PIPES_CAPACITY;
dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes),
- GFP_KERNEL);
- if (!dev->pipes)
+ GFP_KERNEL);
+ if (!dev->pipes) {
+ misc_deregister(&dev->miscdev);
return -ENOMEM;
+ }
/*
* We're going to pass two buffers, open_command_params and
@@ -857,75 +856,67 @@ static int goldfish_pipe_device_init(struct platform_device *pdev)
* needs to be contained in a single physical page. The easiest choice
* is to just allocate a page and place the buffers in it.
*/
- if (WARN_ON(sizeof(*dev->buffers) > PAGE_SIZE))
- return -ENOMEM;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
+ BUILD_BUG_ON(sizeof(struct goldfish_pipe_dev_buffers) > PAGE_SIZE);
+ dev->buffers = (struct goldfish_pipe_dev_buffers *)
+ __get_free_page(GFP_KERNEL);
+ if (!dev->buffers) {
kfree(dev->pipes);
+ misc_deregister(&dev->miscdev);
return -ENOMEM;
}
- dev->buffers = (struct goldfish_pipe_dev_buffers *)page;
/* Send the buffer addresses to the host */
- {
- u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers);
-
- writel((u32)(unsigned long)(paddr >> 32),
- dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr,
- dev->base + PIPE_REG_SIGNAL_BUFFER);
- writel((u32)MAX_SIGNALLED_PIPES,
- dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
-
- paddr = __pa(&dev->buffers->open_command_params);
- writel((u32)(unsigned long)(paddr >> 32),
- dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
- writel((u32)(unsigned long)paddr,
- dev->base + PIPE_REG_OPEN_BUFFER);
- }
+ write_pa_addr(&dev->buffers->signalled_pipe_buffers,
+ dev->base + PIPE_REG_SIGNAL_BUFFER,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH);
+
+ writel(MAX_SIGNALLED_PIPES,
+ dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT);
+
+ write_pa_addr(&dev->buffers->open_command_params,
+ dev->base + PIPE_REG_OPEN_BUFFER,
+ dev->base + PIPE_REG_OPEN_BUFFER_HIGH);
+
+ platform_set_drvdata(pdev, dev);
return 0;
}
-static void goldfish_pipe_device_deinit(struct platform_device *pdev)
+static void goldfish_pipe_device_deinit(struct platform_device *pdev,
+ struct goldfish_pipe_dev *dev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
-
- misc_deregister(&goldfish_pipe_dev);
+ misc_deregister(&dev->miscdev);
+ tasklet_kill(&dev->irq_tasklet);
kfree(dev->pipes);
free_page((unsigned long)dev->buffers);
}
static int goldfish_pipe_probe(struct platform_device *pdev)
{
- int err;
struct resource *r;
- struct goldfish_pipe_dev *dev = pipe_dev;
+ struct goldfish_pipe_dev *dev;
- if (WARN_ON(sizeof(struct goldfish_pipe_command) > PAGE_SIZE))
+ dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
return -ENOMEM;
- /* not thread safe, but this should not happen */
- WARN_ON(dev->base != NULL);
-
+ dev->magic = &goldfish_pipe_device_deinit;
spin_lock_init(&dev->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL || resource_size(r) < PAGE_SIZE) {
+ if (!r || resource_size(r) < PAGE_SIZE) {
dev_err(&pdev->dev, "can't allocate i/o page\n");
return -EINVAL;
}
dev->base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
- if (dev->base == NULL) {
+ if (!dev->base) {
dev_err(&pdev->dev, "ioremap failed\n");
return -EINVAL;
}
r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (r == NULL) {
- err = -EINVAL;
- goto error;
- }
+ if (!r)
+ return -EINVAL;
+
dev->irq = r->start;
/*
@@ -935,25 +926,19 @@ static int goldfish_pipe_probe(struct platform_device *pdev)
* reading device version back: this allows the host implementation to
* detect the old driver (if there was no version write before read).
*/
- writel((u32)PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
+ writel(PIPE_DRIVER_VERSION, dev->base + PIPE_REG_VERSION);
dev->version = readl(dev->base + PIPE_REG_VERSION);
if (WARN_ON(dev->version < PIPE_CURRENT_DEVICE_VERSION))
return -EINVAL;
- err = goldfish_pipe_device_init(pdev);
- if (!err)
- return 0;
-
-error:
- dev->base = NULL;
- return err;
+ return goldfish_pipe_device_init(pdev, dev);
}
static int goldfish_pipe_remove(struct platform_device *pdev)
{
- struct goldfish_pipe_dev *dev = pipe_dev;
- goldfish_pipe_device_deinit(pdev);
- dev->base = NULL;
+ struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev);
+
+ goldfish_pipe_device_deinit(pdev, dev);
return 0;
}
@@ -981,4 +966,4 @@ static struct platform_driver goldfish_pipe_driver = {
module_platform_driver(goldfish_pipe_driver);
MODULE_AUTHOR("David Turner <digit@google.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/goldfish/goldfish_pipe_qemu.h b/drivers/platform/goldfish/goldfish_pipe_qemu.h
new file mode 100644
index 000000000000..b4d78c108afd
--- /dev/null
+++ b/drivers/platform/goldfish/goldfish_pipe_qemu.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * IMPORTANT: The following constants must match the ones used and defined in
+ * external/qemu/include/hw/misc/goldfish_pipe.h
+ */
+
+#ifndef GOLDFISH_PIPE_QEMU_H
+#define GOLDFISH_PIPE_QEMU_H
+
+/* List of bitflags returned in status of CMD_POLL command */
+enum PipePollFlags {
+ PIPE_POLL_IN = 1 << 0,
+ PIPE_POLL_OUT = 1 << 1,
+ PIPE_POLL_HUP = 1 << 2
+};
+
+/* Possible status values used to signal errors */
+enum PipeErrors {
+ PIPE_ERROR_INVAL = -1,
+ PIPE_ERROR_AGAIN = -2,
+ PIPE_ERROR_NOMEM = -3,
+ PIPE_ERROR_IO = -4
+};
+
+/* Bit-flags used to signal events from the emulator */
+enum PipeWakeFlags {
+ /* emulator closed pipe */
+ PIPE_WAKE_CLOSED = 1 << 0,
+
+ /* pipe can now be read from */
+ PIPE_WAKE_READ = 1 << 1,
+
+ /* pipe can now be written to */
+ PIPE_WAKE_WRITE = 1 << 2,
+
+ /* unlock this pipe's DMA buffer */
+ PIPE_WAKE_UNLOCK_DMA = 1 << 3,
+
+ /* unlock DMA buffer of the pipe shared to this pipe */
+ PIPE_WAKE_UNLOCK_DMA_SHARED = 1 << 4,
+};
+
+/* Possible pipe closing reasons */
+enum PipeCloseReason {
+ /* guest sent a close command */
+ PIPE_CLOSE_GRACEFUL = 0,
+
+ /* guest rebooted, we're closing the pipes */
+ PIPE_CLOSE_REBOOT = 1,
+
+ /* close old pipes on snapshot load */
+ PIPE_CLOSE_LOAD_SNAPSHOT = 2,
+
+ /* some unrecoverable error on the pipe */
+ PIPE_CLOSE_ERROR = 3,
+};
+
+/* Bit flags for the 'flags' field */
+enum PipeFlagsBits {
+ BIT_CLOSED_ON_HOST = 0, /* pipe closed by host */
+ BIT_WAKE_ON_WRITE = 1, /* want to be woken on writes */
+ BIT_WAKE_ON_READ = 2, /* want to be woken on reads */
+};
+
+enum PipeRegs {
+ PIPE_REG_CMD = 0,
+
+ PIPE_REG_SIGNAL_BUFFER_HIGH = 4,
+ PIPE_REG_SIGNAL_BUFFER = 8,
+ PIPE_REG_SIGNAL_BUFFER_COUNT = 12,
+
+ PIPE_REG_OPEN_BUFFER_HIGH = 20,
+ PIPE_REG_OPEN_BUFFER = 24,
+
+ PIPE_REG_VERSION = 36,
+
+ PIPE_REG_GET_SIGNALLED = 48,
+};
+
+enum PipeCmdCode {
+ /* to be used by the pipe device itself */
+ PIPE_CMD_OPEN = 1,
+
+ PIPE_CMD_CLOSE,
+ PIPE_CMD_POLL,
+ PIPE_CMD_WRITE,
+ PIPE_CMD_WAKE_ON_WRITE,
+ PIPE_CMD_READ,
+ PIPE_CMD_WAKE_ON_READ,
+
+ /*
+ * TODO(zyy): implement a deferred read/write execution to allow
+ * parallel processing of pipe operations on the host.
+ */
+ PIPE_CMD_WAKE_ON_DONE_IO,
+};
+
+#endif /* GOLDFISH_PIPE_QEMU_H */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c314f5..bdac939de223 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -867,6 +867,8 @@ config INTEL_CHT_INT33FE
tristate "Intel Cherry Trail ACPI INT33FE Driver"
depends on X86 && ACPI && I2C && REGULATOR
depends on CHARGER_BQ24190=y || (CHARGER_BQ24190=m && m)
+ depends on USB_ROLES_INTEL_XHCI=y || (USB_ROLES_INTEL_XHCI=m && m)
+ depends on TYPEC_MUX_PI3USB30532=y || (TYPEC_MUX_PI3USB30532=m && m)
---help---
This driver adds support for the INT33FE ACPI device found on
some Intel Cherry Trail devices.
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 2d6e272315a8..93ee2d5466f8 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -254,7 +254,7 @@ struct asus_wmi {
int asus_hwmon_num_fans;
int asus_hwmon_pwm;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct mutex wmi_lock;
struct workqueue_struct *hotplug_workqueue;
@@ -753,7 +753,7 @@ static void asus_rfkill_hotplug(struct asus_wmi *asus)
if (asus->wlan.rfkill)
rfkill_set_sw_state(asus->wlan.rfkill, blocked);
- if (asus->hotplug_slot) {
+ if (asus->hotplug_slot.ops) {
bus = pci_find_bus(0, 1);
if (!bus) {
pr_warn("Unable to find PCI bus 1?\n");
@@ -858,7 +858,8 @@ static void asus_unregister_rfkill_notifier(struct asus_wmi *asus, char *node)
static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct asus_wmi *asus = hotplug_slot->private;
+ struct asus_wmi *asus = container_of(hotplug_slot,
+ struct asus_wmi, hotplug_slot);
int result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_WLAN);
if (result < 0)
@@ -868,8 +869,7 @@ static int asus_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops asus_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops asus_hotplug_slot_ops = {
.get_adapter_status = asus_get_adapter_status,
.get_power_status = asus_get_adapter_status,
};
@@ -899,21 +899,9 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
INIT_WORK(&asus->hotplug_work, asus_hotplug_work);
- asus->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!asus->hotplug_slot)
- goto error_slot;
-
- asus->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!asus->hotplug_slot->info)
- goto error_info;
+ asus->hotplug_slot.ops = &asus_hotplug_slot_ops;
- asus->hotplug_slot->private = asus;
- asus->hotplug_slot->ops = &asus_hotplug_slot_ops;
- asus_get_adapter_status(asus->hotplug_slot,
- &asus->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(asus->hotplug_slot, bus, 0, "asus-wifi");
+ ret = pci_hp_register(&asus->hotplug_slot, bus, 0, "asus-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -922,11 +910,7 @@ static int asus_setup_pci_hotplug(struct asus_wmi *asus)
return 0;
error_register:
- kfree(asus->hotplug_slot->info);
-error_info:
- kfree(asus->hotplug_slot);
- asus->hotplug_slot = NULL;
-error_slot:
+ asus->hotplug_slot.ops = NULL;
destroy_workqueue(asus->hotplug_workqueue);
error_workqueue:
return ret;
@@ -1054,11 +1038,8 @@ static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
* asus_unregister_rfkill_notifier()
*/
asus_rfkill_hotplug(asus);
- if (asus->hotplug_slot) {
- pci_hp_deregister(asus->hotplug_slot);
- kfree(asus->hotplug_slot->info);
- kfree(asus->hotplug_slot);
- }
+ if (asus->hotplug_slot.ops)
+ pci_hp_deregister(&asus->hotplug_slot);
if (asus->hotplug_workqueue)
destroy_workqueue(asus->hotplug_workqueue);
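This conversion, and the matching eeepc-laptop one below, follow the same idiom: embed struct hotplug_slot in the driver state instead of allocating it, treat a non-NULL .ops as the "slot is registered" flag, and recover the enclosing structure with container_of() in place of the removed ->private pointer. A generic sketch of the pattern (the demo_* names are illustrative):

#include <linux/pci_hotplug.h>

struct demo_driver {			/* stands in for asus_wmi / eeepc_laptop */
	struct hotplug_slot slot;	/* embedded: no separate kzalloc()/kfree() */
};

static int demo_get_status(struct hotplug_slot *slot, u8 *value)
{
	/* container_of() replaces the old slot->private back-pointer */
	struct demo_driver *drv = container_of(slot, struct demo_driver, slot);

	*value = drv->slot.ops ? 1 : 0;	/* registered iff .ops is set */
	return 0;
}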
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index a4bbf6ecd1f0..e6946a9beb5a 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -177,7 +177,7 @@ struct eeepc_laptop {
struct rfkill *wwan3g_rfkill;
struct rfkill *wimax_rfkill;
- struct hotplug_slot *hotplug_slot;
+ struct hotplug_slot hotplug_slot;
struct mutex hotplug_lock;
struct led_classdev tpd_led;
@@ -582,7 +582,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
mutex_lock(&eeepc->hotplug_lock);
pci_lock_rescan_remove();
- if (!eeepc->hotplug_slot)
+ if (!eeepc->hotplug_slot.ops)
goto out_unlock;
port = acpi_get_pci_dev(handle);
@@ -715,8 +715,11 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
u8 *value)
{
- struct eeepc_laptop *eeepc = hotplug_slot->private;
- int val = get_acpi(eeepc, CM_ASL_WLAN);
+ struct eeepc_laptop *eeepc;
+ int val;
+
+ eeepc = container_of(hotplug_slot, struct eeepc_laptop, hotplug_slot);
+ val = get_acpi(eeepc, CM_ASL_WLAN);
if (val == 1 || val == 0)
*value = val;
@@ -726,8 +729,7 @@ static int eeepc_get_adapter_status(struct hotplug_slot *hotplug_slot,
return 0;
}
-static struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
- .owner = THIS_MODULE,
+static const struct hotplug_slot_ops eeepc_hotplug_slot_ops = {
.get_adapter_status = eeepc_get_adapter_status,
.get_power_status = eeepc_get_adapter_status,
};
@@ -742,21 +744,9 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return -ENODEV;
}
- eeepc->hotplug_slot = kzalloc(sizeof(struct hotplug_slot), GFP_KERNEL);
- if (!eeepc->hotplug_slot)
- goto error_slot;
+ eeepc->hotplug_slot.ops = &eeepc_hotplug_slot_ops;
- eeepc->hotplug_slot->info = kzalloc(sizeof(struct hotplug_slot_info),
- GFP_KERNEL);
- if (!eeepc->hotplug_slot->info)
- goto error_info;
-
- eeepc->hotplug_slot->private = eeepc;
- eeepc->hotplug_slot->ops = &eeepc_hotplug_slot_ops;
- eeepc_get_adapter_status(eeepc->hotplug_slot,
- &eeepc->hotplug_slot->info->adapter_status);
-
- ret = pci_hp_register(eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
+ ret = pci_hp_register(&eeepc->hotplug_slot, bus, 0, "eeepc-wifi");
if (ret) {
pr_err("Unable to register hotplug slot - %d\n", ret);
goto error_register;
@@ -765,11 +755,7 @@ static int eeepc_setup_pci_hotplug(struct eeepc_laptop *eeepc)
return 0;
error_register:
- kfree(eeepc->hotplug_slot->info);
-error_info:
- kfree(eeepc->hotplug_slot);
- eeepc->hotplug_slot = NULL;
-error_slot:
+ eeepc->hotplug_slot.ops = NULL;
return ret;
}
@@ -830,11 +816,8 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
eeepc->wlan_rfkill = NULL;
}
- if (eeepc->hotplug_slot) {
- pci_hp_deregister(eeepc->hotplug_slot);
- kfree(eeepc->hotplug_slot->info);
- kfree(eeepc->hotplug_slot);
- }
+ if (eeepc->hotplug_slot.ops)
+ pci_hp_deregister(&eeepc->hotplug_slot);
if (eeepc->bluetooth_rfkill) {
rfkill_unregister(eeepc->bluetooth_rfkill);
diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c
index 7166f1cf8a1d..f40b1c192106 100644
--- a/drivers/platform/x86/intel_cht_int33fe.c
+++ b/drivers/platform/x86/intel_cht_int33fe.c
@@ -35,7 +35,7 @@ struct cht_int33fe_data {
struct i2c_client *fusb302;
struct i2c_client *pi3usb30532;
/* These contain a list_head and so must be per device */
- struct device_connection connections[3];
+ struct device_connection connections[5];
};
/*
@@ -175,19 +175,20 @@ static int cht_int33fe_probe(struct platform_device *pdev)
return -EPROBE_DEFER; /* Wait for i2c-adapter to load */
}
- data->connections[0].endpoint[0] = "i2c-fusb302";
+ data->connections[0].endpoint[0] = "port0";
data->connections[0].endpoint[1] = "i2c-pi3usb30532";
data->connections[0].id = "typec-switch";
- data->connections[1].endpoint[0] = "i2c-fusb302";
+ data->connections[1].endpoint[0] = "port0";
data->connections[1].endpoint[1] = "i2c-pi3usb30532";
data->connections[1].id = "typec-mux";
- data->connections[2].endpoint[0] = "i2c-fusb302";
- data->connections[2].endpoint[1] = "intel_xhci_usb_sw-role-switch";
- data->connections[2].id = "usb-role-switch";
+ data->connections[2].endpoint[0] = "port0";
+ data->connections[2].endpoint[1] = "i2c-pi3usb30532";
+ data->connections[2].id = "idff01m01";
+ data->connections[3].endpoint[0] = "i2c-fusb302";
+ data->connections[3].endpoint[1] = "intel_xhci_usb_sw-role-switch";
+ data->connections[3].id = "usb-role-switch";
- device_connection_add(&data->connections[0]);
- device_connection_add(&data->connections[1]);
- device_connection_add(&data->connections[2]);
+ device_connections_add(data->connections);
memset(&board_info, 0, sizeof(board_info));
strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE);
@@ -218,9 +219,7 @@ out_unregister_max17047:
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */
}
@@ -234,9 +233,7 @@ static int cht_int33fe_remove(struct platform_device *pdev)
if (data->max17047)
i2c_unregister_device(data->max17047);
- device_connection_remove(&data->connections[2]);
- device_connection_remove(&data->connections[1]);
- device_connection_remove(&data->connections[0]);
+ device_connections_remove(data->connections);
return 0;
}
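Note the array grows from 3 to 5 entries while only four (indices 0-3) are populated. That is consistent with the new plural helpers walking the array until they hit a zero-filled terminator entry, roughly as sketched below; the loop is an assumption about device_connections_add(), not its actual implementation:

/* Assumed iteration contract: stop at the first entry with no endpoint. */
static void demo_connections_add(struct device_connection *cons)
{
	struct device_connection *c;

	for (c = cons; c->endpoint[0]; c++)
		device_connection_add(c);	/* the singular helper used before */
}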
diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
index 0206cce328b3..2b686c55b717 100644
--- a/drivers/power/reset/at91-sama5d2_shdwc.c
+++ b/drivers/power/reset/at91-sama5d2_shdwc.c
@@ -19,6 +19,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk/at91_pmc.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -69,7 +70,10 @@ struct shdwc_config {
struct shdwc {
const struct shdwc_config *cfg;
- void __iomem *at91_shdwc_base;
+ struct clk *sclk;
+ void __iomem *shdwc_base;
+ void __iomem *mpddrc_base;
+ void __iomem *pmc_base;
};
/*
@@ -77,8 +81,6 @@ struct shdwc {
* since pm_power_off itself is global.
*/
static struct shdwc *at91_shdwc;
-static struct clk *sclk;
-static void __iomem *mpddrc_base;
static const unsigned long long sdwc_dbc_period[] = {
0, 3, 32, 512, 4096, 32768,
@@ -90,7 +92,7 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
u32 reg;
char *reason = "unknown";
- reg = readl(shdw->at91_shdwc_base + AT91_SHDW_SR);
+ reg = readl(shdw->shdwc_base + AT91_SHDW_SR);
dev_dbg(&pdev->dev, "%s: status = %#x\n", __func__, reg);
@@ -108,12 +110,6 @@ static void __init at91_wakeup_status(struct platform_device *pdev)
static void at91_poweroff(void)
{
- writel(AT91_SHDW_KEY | AT91_SHDW_SHDW,
- at91_shdwc->at91_shdwc_base + AT91_SHDW_CR);
-}
-
-static void at91_lpddr_poweroff(void)
-{
asm volatile(
/* Align to cache lines */
".balign 32\n\t"
@@ -122,16 +118,29 @@ static void at91_lpddr_poweroff(void)
" ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
/* Power down SDRAM0 */
+ " tst %0, #0\n\t"
+ " beq 1f\n\t"
" str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+
+ /* Switch the master clock source to slow clock. */
+ "1: ldr r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ " bic r6, r6, #" __stringify(AT91_PMC_CSS) "\n\t"
+ " str r6, [%4, #" __stringify(AT91_PMC_MCKR) "]\n\t"
+ /* Wait for clock switch. */
+ "2: ldr r6, [%4, #" __stringify(AT91_PMC_SR) "]\n\t"
+ " tst r6, #" __stringify(AT91_PMC_MCKRDY) "\n\t"
+ " beq 2b\n\t"
+
/* Shutdown CPU */
" str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
" b .\n\t"
:
- : "r" (mpddrc_base),
+ : "r" (at91_shdwc->mpddrc_base),
"r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
- "r" (at91_shdwc->at91_shdwc_base),
- "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ "r" (at91_shdwc->shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW),
+ "r" (at91_shdwc->pmc_base)
: "r6");
}
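For readability, a C-level paraphrase of the inline assembly above; the real sequence must stay in a single cache-aligned asm block so it runs without touching SDRAM after the power-down store, so this is illustrative only:

static void demo_poweroff_sequence(struct shdwc *s)
{
	u32 mckr;

	/* Power down LPDDR SDRAM, if an MPDDRC controller was mapped. */
	if (s->mpddrc_base)
		writel(AT91_DDRSDRC_LPDDR2_PWOFF,
		       s->mpddrc_base + AT91_DDRSDRC_LPR);

	/* Switch the master clock source to the slow clock... */
	mckr = readl(s->pmc_base + AT91_PMC_MCKR);
	writel(mckr & ~AT91_PMC_CSS, s->pmc_base + AT91_PMC_MCKR);

	/* ...and wait for the switch to complete. */
	while (!(readl(s->pmc_base + AT91_PMC_SR) & AT91_PMC_MCKRDY))
		;

	/* Finally, write the keyed shutdown command. */
	writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, s->shdwc_base + AT91_SHDW_CR);
}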
@@ -213,10 +222,10 @@ static void at91_shdwc_dt_configure(struct platform_device *pdev)
mode |= SHDW_RTCWKEN(shdw->cfg);
dev_dbg(&pdev->dev, "%s: mode = %#x\n", __func__, mode);
- writel(mode, shdw->at91_shdwc_base + AT91_SHDW_MR);
+ writel(mode, shdw->shdwc_base + AT91_SHDW_MR);
input = at91_shdwc_get_wakeup_input(pdev, np);
- writel(input, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(input, shdw->shdwc_base + AT91_SHDW_WUIR);
}
static const struct shdwc_config sama5d2_shdwc_config = {
@@ -246,6 +255,9 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
if (!pdev->dev.of_node)
return -ENODEV;
+ if (at91_shdwc)
+ return -EBUSY;
+
at91_shdwc = devm_kzalloc(&pdev->dev, sizeof(*at91_shdwc), GFP_KERNEL);
if (!at91_shdwc)
return -ENOMEM;
@@ -253,20 +265,20 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, at91_shdwc);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- at91_shdwc->at91_shdwc_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(at91_shdwc->at91_shdwc_base)) {
+ at91_shdwc->shdwc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(at91_shdwc->shdwc_base)) {
dev_err(&pdev->dev, "Could not map reset controller address\n");
- return PTR_ERR(at91_shdwc->at91_shdwc_base);
+ return PTR_ERR(at91_shdwc->shdwc_base);
}
match = of_match_node(at91_shdwc_of_match, pdev->dev.of_node);
at91_shdwc->cfg = match->data;
- sclk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(sclk))
- return PTR_ERR(sclk);
+ at91_shdwc->sclk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(at91_shdwc->sclk))
+ return PTR_ERR(at91_shdwc->sclk);
- ret = clk_prepare_enable(sclk);
+ ret = clk_prepare_enable(at91_shdwc->sclk);
if (ret) {
dev_err(&pdev->dev, "Could not enable slow clock\n");
return ret;
@@ -276,41 +288,70 @@ static int __init at91_shdwc_probe(struct platform_device *pdev)
at91_shdwc_dt_configure(pdev);
- pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-pmc");
+ if (!np) {
+ ret = -ENODEV;
+ goto clk_disable;
+ }
+
+ at91_shdwc->pmc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!at91_shdwc->pmc_base) {
+ ret = -ENOMEM;
+ goto clk_disable;
+ }
np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
- if (!np)
- return 0;
+ if (!np) {
+ ret = -ENODEV;
+ goto unmap;
+ }
- mpddrc_base = of_iomap(np, 0);
+ at91_shdwc->mpddrc_base = of_iomap(np, 0);
of_node_put(np);
- if (!mpddrc_base)
- return 0;
+ if (!at91_shdwc->mpddrc_base) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
- ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
- if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
- (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
- pm_power_off = at91_lpddr_poweroff;
- else
- iounmap(mpddrc_base);
+ pm_power_off = at91_poweroff;
+
+ ddr_type = readl(at91_shdwc->mpddrc_base + AT91_DDRSDRC_MDR) &
+ AT91_DDRSDRC_MD;
+ if (ddr_type != AT91_DDRSDRC_MD_LPDDR2 &&
+ ddr_type != AT91_DDRSDRC_MD_LPDDR3) {
+ iounmap(at91_shdwc->mpddrc_base);
+ at91_shdwc->mpddrc_base = NULL;
+ }
return 0;
+
+unmap:
+ iounmap(at91_shdwc->pmc_base);
+clk_disable:
+ clk_disable_unprepare(at91_shdwc->sclk);
+
+ return ret;
}
static int __exit at91_shdwc_remove(struct platform_device *pdev)
{
struct shdwc *shdw = platform_get_drvdata(pdev);
- if (pm_power_off == at91_poweroff ||
- pm_power_off == at91_lpddr_poweroff)
+ if (pm_power_off == at91_poweroff)
pm_power_off = NULL;
/* Reset values to disable wake-up features */
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_MR);
- writel(0, shdw->at91_shdwc_base + AT91_SHDW_WUIR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_MR);
+ writel(0, shdw->shdwc_base + AT91_SHDW_WUIR);
+
+ if (shdw->mpddrc_base)
+ iounmap(shdw->mpddrc_base);
+ iounmap(shdw->pmc_base);
- clk_disable_unprepare(sclk);
+ clk_disable_unprepare(shdw->sclk);
return 0;
}
diff --git a/drivers/power/reset/qcom-pon.c b/drivers/power/reset/qcom-pon.c
index 0c4caaa7e88f..3fa1642d4c54 100644
--- a/drivers/power/reset/qcom-pon.c
+++ b/drivers/power/reset/qcom-pon.c
@@ -74,6 +74,7 @@ static int pm8916_pon_probe(struct platform_device *pdev)
static const struct of_device_id pm8916_pon_id_table[] = {
{ .compatible = "qcom,pm8916-pon" },
+ { .compatible = "qcom,pms405-pon" },
{ }
};
MODULE_DEVICE_TABLE(of, pm8916_pon_id_table);
diff --git a/drivers/power/reset/rmobile-reset.c b/drivers/power/reset/rmobile-reset.c
index e6569df76941..bd3b396558e0 100644
--- a/drivers/power/reset/rmobile-reset.c
+++ b/drivers/power/reset/rmobile-reset.c
@@ -1,11 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Renesas R-Mobile Reset Driver
*
* Copyright (C) 2014 Glider bvba
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
*/
#include <linux/io.h>
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index ff6dab0bf0dd..f27cf0709500 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -645,4 +645,11 @@ config CHARGER_CROS_USBPD
what is connected to USB PD ports from the EC and converts
that into power_supply properties.
+config CHARGER_SC2731
+ tristate "Spreadtrum SC2731 charger driver"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ help
+ Say Y here to enable support for battery charging with SC2731
+ PMIC chips.
+
endif # POWER_SUPPLY
diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile
index a26b402c45d9..767105b88d00 100644
--- a/drivers/power/supply/Makefile
+++ b/drivers/power/supply/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_CHARGER_TPS65217) += tps65217_charger.o
obj-$(CONFIG_AXP288_FUEL_GAUGE) += axp288_fuel_gauge.o
obj-$(CONFIG_AXP288_CHARGER) += axp288_charger.o
obj-$(CONFIG_CHARGER_CROS_USBPD) += cros_usbpd-charger.o
+obj-$(CONFIG_CHARGER_SC2731) += sc2731_charger.o
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 02356f9b5f22..776102c31305 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2433,17 +2433,14 @@ static ssize_t charge_full_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_full;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_full);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_full %lu", ret, charge_full);
-
- if (!ret) {
- di->bat_cap.max_mah = (int) charge_full;
- ret = count;
- }
- return ret;
+ di->bat_cap.max_mah = (int) charge_full;
+ return count;
}
static ssize_t charge_now_show(struct ab8500_fg *di, char *buf)
@@ -2455,20 +2452,16 @@ static ssize_t charge_now_store(struct ab8500_fg *di, const char *buf,
size_t count)
{
unsigned long charge_now;
- ssize_t ret;
+ int ret;
ret = kstrtoul(buf, 10, &charge_now);
+ if (ret)
+ return ret;
- dev_dbg(di->dev, "Ret %zd charge_now %lu was %d",
- ret, charge_now, di->bat_cap.prev_mah);
-
- if (!ret) {
- di->bat_cap.user_mah = (int) charge_now;
- di->flags.user_cap = true;
- ret = count;
- queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
- }
- return ret;
+ di->bat_cap.user_mah = (int) charge_now;
+ di->flags.user_cap = true;
+ queue_delayed_work(di->fg_wq, &di->fg_periodic_work, 0);
+ return count;
}
static struct ab8500_fg_sysfs_entry charge_full_attr =
@@ -2582,11 +2575,12 @@ static ssize_t ab8505_powercut_flagtime_write(struct device *dev,
const char *buf, size_t count)
{
int ret;
- long unsigned reg_value;
+ int reg_value;
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (1.98s) - 127 (15.625ms) for flagtime\n");
@@ -2636,7 +2630,9 @@ static ssize_t ab8505_powercut_maxtime_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7F) {
dev_err(dev, "Incorrect parameter, echo 0 (0.0s) - 127 (1.98s) for maxtime\n");
goto fail;
@@ -2684,7 +2680,9 @@ static ssize_t ab8505_powercut_restart_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0xF) {
dev_err(dev, "Incorrect parameter, echo 0 - 15 for number of restart\n");
goto fail;
@@ -2777,7 +2775,9 @@ static ssize_t ab8505_powercut_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x1) {
dev_err(dev, "Incorrect parameter, echo 0/1 to disable/enable Pcut feature\n");
goto fail;
@@ -2849,7 +2849,9 @@ static ssize_t ab8505_powercut_debounce_write(struct device *dev,
struct power_supply *psy = dev_get_drvdata(dev);
struct ab8500_fg *di = power_supply_get_drvdata(psy);
- reg_value = simple_strtoul(buf, NULL, 10);
+ if (kstrtoint(buf, 10, &reg_value))
+ goto fail;
+
if (reg_value > 0x7) {
dev_err(dev, "Incorrect parameter, echo 0 to 7 for debounce setting\n");
goto fail;
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 8e2c41ded171..70b90db5ae38 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -32,6 +32,7 @@
#define BQ25890_IRQ_PIN "bq25890_irq"
#define BQ25890_ID 3
+#define BQ25896_ID 0
enum bq25890_fields {
F_EN_HIZ, F_EN_ILIM, F_IILIM, /* Reg00 */
@@ -153,8 +154,8 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CONV_RATE] = REG_FIELD(0x02, 6, 6),
[F_BOOSTF] = REG_FIELD(0x02, 5, 5),
[F_ICO_EN] = REG_FIELD(0x02, 4, 4),
- [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3),
- [F_MAXC_EN] = REG_FIELD(0x02, 2, 2),
+ [F_HVDCP_EN] = REG_FIELD(0x02, 3, 3), // reserved on BQ25896
+ [F_MAXC_EN] = REG_FIELD(0x02, 2, 2), // reserved on BQ25896
[F_FORCE_DPM] = REG_FIELD(0x02, 1, 1),
[F_AUTO_DPDM_EN] = REG_FIELD(0x02, 0, 0),
/* REG03 */
@@ -163,6 +164,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_OTG_CFG] = REG_FIELD(0x03, 5, 5),
[F_CHG_CFG] = REG_FIELD(0x03, 4, 4),
[F_SYSVMIN] = REG_FIELD(0x03, 1, 3),
+ /* MIN_VBAT_SEL on BQ25896 */
/* REG04 */
[F_PUMPX_EN] = REG_FIELD(0x04, 7, 7),
[F_ICHG] = REG_FIELD(0x04, 0, 6),
@@ -181,7 +183,7 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_CHG_TMR] = REG_FIELD(0x07, 1, 2),
[F_JEITA_ISET] = REG_FIELD(0x07, 0, 0),
/* REG08 */
- [F_BATCMP] = REG_FIELD(0x08, 6, 7),
+ [F_BATCMP] = REG_FIELD(0x08, 6, 7), // 5-7 on BQ25896
[F_VCLAMP] = REG_FIELD(0x08, 2, 4),
[F_TREG] = REG_FIELD(0x08, 0, 1),
/* REG09 */
@@ -195,12 +197,13 @@ static const struct reg_field bq25890_reg_fields[] = {
[F_PUMPX_DN] = REG_FIELD(0x09, 0, 0),
/* REG0A */
[F_BOOSTV] = REG_FIELD(0x0A, 4, 7),
+ /* PFM_OTG_DIS 3 on BQ25896 */
[F_BOOSTI] = REG_FIELD(0x0A, 0, 2),
/* REG0B */
[F_VBUS_STAT] = REG_FIELD(0x0B, 5, 7),
[F_CHG_STAT] = REG_FIELD(0x0B, 3, 4),
[F_PG_STAT] = REG_FIELD(0x0B, 2, 2),
- [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1),
+ [F_SDP_STAT] = REG_FIELD(0x0B, 1, 1), // reserved on BQ25896
[F_VSYS_STAT] = REG_FIELD(0x0B, 0, 0),
/* REG0C */
[F_WD_FAULT] = REG_FIELD(0x0C, 7, 7),
@@ -244,10 +247,7 @@ enum bq25890_table_ids {
/* range tables */
TBL_ICHG,
TBL_ITERM,
- TBL_IPRECHG,
TBL_VREG,
- TBL_BATCMP,
- TBL_VCLAMP,
TBL_BOOSTV,
TBL_SYSVMIN,
@@ -287,8 +287,6 @@ static const union {
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
- [TBL_BATCMP] = { .rt = {0, 140, 20} }, /* mOhm */
- [TBL_VCLAMP] = { .rt = {0, 224000, 32000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
@@ -401,6 +399,16 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->strval = BQ25890_MANUFACTURER;
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ if (bq->chip_id == BQ25890_ID)
+ val->strval = "BQ25890";
+ else if (bq->chip_id == BQ25896_ID)
+ val->strval = "BQ25896";
+ else
+ val->strval = "UNKNOWN";
+
+ break;
+
case POWER_SUPPLY_PROP_ONLINE:
val->intval = state.online;
break;
@@ -453,6 +461,15 @@ static int bq25890_power_supply_get_property(struct power_supply *psy,
val->intval = bq25890_find_val(bq->init_data.iterm, TBL_ITERM);
break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = bq25890_field_read(bq, F_SYSV); /* read measured value */
+ if (ret < 0)
+ return ret;
+
+ /* converted_val = 2.304V + ADC_val * 20mV (table 10.3.15) */
+ val->intval = 2304000 + ret * 20000;
+ break;
+
default:
return -EINVAL;
}
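Worked example of the conversion above: a raw F_SYSV reading of 55 yields 2304000 + 55 * 20000 = 3404000 uV, i.e. roughly 3.4 V on the system rail.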
@@ -608,30 +625,40 @@ static int bq25890_hw_init(struct bq25890_device *bq)
};
ret = bq25890_chip_reset(bq);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Reset failed %d\n", ret);
return ret;
+ }
/* disable watchdog */
ret = bq25890_field_write(bq, F_WD, 0);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Disabling watchdog failed %d\n", ret);
return ret;
+ }
/* initialize currents/voltages and other parameters */
for (i = 0; i < ARRAY_SIZE(init_data); i++) {
ret = bq25890_field_write(bq, init_data[i].id,
init_data[i].value);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Writing init data failed %d\n", ret);
return ret;
+ }
}
/* Configure ADC for continuous conversions. This does not enable it. */
ret = bq25890_field_write(bq, F_CONV_RATE, 1);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Config ADC failed %d\n", ret);
return ret;
+ }
ret = bq25890_get_chip_state(bq, &state);
- if (ret < 0)
+ if (ret < 0) {
+ dev_dbg(bq->dev, "Get state failed %d\n", ret);
return ret;
+ }
mutex_lock(&bq->lock);
bq->state = state;
@@ -642,6 +669,7 @@ static int bq25890_hw_init(struct bq25890_device *bq)
static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_MANUFACTURER,
+ POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_ONLINE,
POWER_SUPPLY_PROP_HEALTH,
@@ -650,6 +678,7 @@ static enum power_supply_property bq25890_power_supply_props[] = {
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
static char *bq25890_charger_supplied_to[] = {
@@ -767,6 +796,9 @@ static int bq25890_fw_read_u32_props(struct bq25890_device *bq)
if (props[i].optional)
continue;
+ dev_err(bq->dev, "Unable to read property %d %s\n", ret,
+ props[i].name);
+
return ret;
}
@@ -840,7 +872,7 @@ static int bq25890_probe(struct i2c_client *client,
return bq->chip_id;
}
- if (bq->chip_id != BQ25890_ID) {
+ if ((bq->chip_id != BQ25890_ID) && (bq->chip_id != BQ25896_ID)) {
dev_err(dev, "Chip with ID=%d, not supported!\n", bq->chip_id);
return -ENODEV;
}
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index f022e1b550df..6dbbe95844a3 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -432,6 +432,7 @@ static u8
[BQ27XXX_REG_AP] = 0x18,
BQ27XXX_DM_REG_ROWS,
};
+#define bq27411_regs bq27421_regs
#define bq27425_regs bq27421_regs
#define bq27426_regs bq27421_regs
#define bq27441_regs bq27421_regs
@@ -665,6 +666,7 @@ static enum power_supply_property bq27421_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_MANUFACTURER,
};
+#define bq27411_props bq27421_props
#define bq27425_props bq27421_props
#define bq27426_props bq27421_props
#define bq27441_props bq27421_props
@@ -725,6 +727,12 @@ static struct bq27xxx_dm_reg bq27545_dm_regs[] = {
#define bq27545_dm_regs 0
#endif
+static struct bq27xxx_dm_reg bq27411_dm_regs[] = {
+ [BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 32767 },
+ [BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
+ [BQ27XXX_DM_TERMINATE_VOLTAGE] = { 82, 16, 2, 2800, 3700 },
+};
+
static struct bq27xxx_dm_reg bq27421_dm_regs[] = {
[BQ27XXX_DM_DESIGN_CAPACITY] = { 82, 10, 2, 0, 8000 },
[BQ27XXX_DM_DESIGN_ENERGY] = { 82, 12, 2, 0, 32767 },
@@ -802,6 +810,7 @@ static struct {
[BQ27546] = BQ27XXX_DATA(bq27546, 0 , BQ27XXX_O_OTDC),
[BQ27742] = BQ27XXX_DATA(bq27742, 0 , BQ27XXX_O_OTDC),
[BQ27545] = BQ27XXX_DATA(bq27545, 0x04143672, BQ27XXX_O_OTDC),
+ [BQ27411] = BQ27XXX_DATA(bq27411, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27421] = BQ27XXX_DATA(bq27421, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
[BQ27425] = BQ27XXX_DATA(bq27425, 0x04143672, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP),
[BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM),
diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c
index 40069128ad44..2677c38a8a42 100644
--- a/drivers/power/supply/bq27xxx_battery_i2c.c
+++ b/drivers/power/supply/bq27xxx_battery_i2c.c
@@ -247,6 +247,7 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = {
{ "bq27546", BQ27546 },
{ "bq27742", BQ27742 },
{ "bq27545", BQ27545 },
+ { "bq27411", BQ27411 },
{ "bq27421", BQ27421 },
{ "bq27425", BQ27425 },
{ "bq27426", BQ27426 },
@@ -279,6 +280,7 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = {
{ .compatible = "ti,bq27546" },
{ .compatible = "ti,bq27742" },
{ .compatible = "ti,bq27545" },
+ { .compatible = "ti,bq27411" },
{ .compatible = "ti,bq27421" },
{ .compatible = "ti,bq27425" },
{ .compatible = "ti,bq27426" },
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 688a16bacfbb..7e9c3984ef6a 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -12,8 +12,12 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
-#define CHARGER_DIR_NAME "CROS_USBPD_CHARGER%d"
-#define CHARGER_DIR_NAME_LENGTH sizeof(CHARGER_DIR_NAME)
+#define CHARGER_USBPD_DIR_NAME "CROS_USBPD_CHARGER%d"
+#define CHARGER_DEDICATED_DIR_NAME "CROS_DEDICATED_CHARGER"
+#define CHARGER_DIR_NAME_LENGTH (sizeof(CHARGER_USBPD_DIR_NAME) >= \
+ sizeof(CHARGER_DEDICATED_DIR_NAME) ? \
+ sizeof(CHARGER_USBPD_DIR_NAME) : \
+ sizeof(CHARGER_DEDICATED_DIR_NAME))
#define CHARGER_CACHE_UPDATE_DELAY msecs_to_jiffies(500)
#define CHARGER_MANUFACTURER_MODEL_LENGTH 32
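The reworked CHARGER_DIR_NAME_LENGTH evaluates to the larger of the two template sizes at compile time (sizeof includes the trailing NUL), so one fixed buffer can hold either name. Presumably it backs an array along these lines; the struct here is an assumption for illustration:

struct demo_port {
	char name[CHARGER_DIR_NAME_LENGTH];	/* fits either name template */
};

Both sprintf(port->name, CHARGER_USBPD_DIR_NAME, i) and sprintf(port->name, CHARGER_DEDICATED_DIR_NAME) in the probe hunk below then fit without overflow.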
@@ -42,6 +46,7 @@ struct charger_data {
struct cros_ec_dev *ec_dev;
struct cros_ec_device *ec_device;
int num_charger_ports;
+ int num_usbpd_ports;
int num_registered_psy;
struct port_data *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block notifier;
@@ -58,6 +63,12 @@ static enum power_supply_property cros_usbpd_charger_props[] = {
POWER_SUPPLY_PROP_USB_TYPE
};
+static enum power_supply_property cros_usbpd_dedicated_charger_props[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+};
+
static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_UNKNOWN,
POWER_SUPPLY_USB_TYPE_SDP,
@@ -69,6 +80,11 @@ static enum power_supply_usb_type cros_usbpd_charger_usb_types[] = {
POWER_SUPPLY_USB_TYPE_APPLE_BRICK_ID
};
+static bool cros_usbpd_charger_port_is_dedicated(struct port_data *port)
+{
+ return port->port_number >= port->charger->num_usbpd_ports;
+}
+
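Concretely: with num_usbpd_ports = 2 and num_charger_ports = 3, ports 0 and 1 are USB PD while port 2 (>= 2) is the dedicated charger, matching the probe-time invariant enforced below (num_usbpd_ports <= num_charger_ports <= num_usbpd_ports + 1).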
static int cros_usbpd_charger_ec_command(struct charger_data *charger,
unsigned int version,
unsigned int command,
@@ -103,6 +119,23 @@ static int cros_usbpd_charger_ec_command(struct charger_data *charger,
static int cros_usbpd_charger_get_num_ports(struct charger_data *charger)
{
+ struct ec_response_charge_port_count resp;
+ int ret;
+
+ ret = cros_usbpd_charger_ec_command(charger, 0,
+ EC_CMD_CHARGE_PORT_COUNT,
+ NULL, 0, &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_err(charger->dev,
+ "Unable to get the number of ports (err:0x%x)\n", ret);
+ return ret;
+ }
+
+ return resp.port_count;
+}
+
+static int cros_usbpd_charger_get_usbpd_num_ports(struct charger_data *charger)
+{
struct ec_response_usb_pd_ports resp;
int ret;
@@ -246,7 +279,10 @@ static int cros_usbpd_charger_get_power_info(struct port_data *port)
port->psy_usb_type = POWER_SUPPLY_USB_TYPE_SDP;
}
- port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
+ if (cros_usbpd_charger_port_is_dedicated(port))
+ port->psy_desc.type = POWER_SUPPLY_TYPE_MAINS;
+ else
+ port->psy_desc.type = POWER_SUPPLY_TYPE_USB;
dev_dbg(dev,
"Port %d: type=%d vmax=%d vnow=%d cmax=%d clim=%d pmax=%d\n",
@@ -281,7 +317,8 @@ static int cros_usbpd_charger_get_port_status(struct port_data *port,
if (ret < 0)
return ret;
- ret = cros_usbpd_charger_get_discovery_info(port);
+ if (!cros_usbpd_charger_port_is_dedicated(port))
+ ret = cros_usbpd_charger_get_discovery_info(port);
port->last_update = jiffies;
return ret;
@@ -378,12 +415,10 @@ static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
{
struct cros_ec_device *ec_device;
struct charger_data *charger;
- struct device *dev;
u32 host_event;
charger = container_of(nb, struct charger_data, notifier);
ec_device = charger->ec_device;
- dev = charger->dev;
host_event = cros_ec_get_host_event(ec_device);
if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
@@ -426,17 +461,56 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
platform_set_drvdata(pd, charger);
+ /*
+ * We need to know the number of USB PD ports in order to know whether
+ * there is a dedicated port. The dedicated port will always be
+ * after the USB PD ports, and there should be only one.
+ */
+ charger->num_usbpd_ports =
+ cros_usbpd_charger_get_usbpd_num_ports(charger);
+ if (charger->num_usbpd_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD.
+ * Log a message, but no need to warn.
+ */
+ dev_info(dev, "No USB PD charging ports found\n");
+ }
+
charger->num_charger_ports = cros_usbpd_charger_get_num_ports(charger);
- if (charger->num_charger_ports <= 0) {
+ if (charger->num_charger_ports < 0) {
/*
* This can happen on a system that doesn't support USB PD.
* Log a message, but no need to warn.
+ * Older ECs do not support the above command; in that case,
+ * set the number of charger ports equal to the number of
+ * USB PD ports.
+ */
+ dev_info(dev, "Could not get charger port count\n");
+ charger->num_charger_ports = charger->num_usbpd_ports;
+ }
+
+ if (charger->num_charger_ports <= 0) {
+ /*
+ * This can happen on a system that doesn't support USB PD and
+ * doesn't have a dedicated port.
+ * Log a message, but no need to warn.
*/
dev_info(dev, "No charging ports found\n");
ret = -ENODEV;
goto fail_nowarn;
}
+ /*
+ * Sanity checks on the number of ports:
+ * there should be at most one dedicated port.
+ */
+ if (charger->num_charger_ports < charger->num_usbpd_ports ||
+ charger->num_charger_ports > (charger->num_usbpd_ports + 1)) {
+ dev_err(dev, "Unexpected number of charge port count\n");
+ ret = -EPROTO;
+ goto fail_nowarn;
+ }
+
for (i = 0; i < charger->num_charger_ports; i++) {
struct power_supply_config psy_cfg = {};
@@ -448,22 +522,33 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
port->charger = charger;
port->port_number = i;
- sprintf(port->name, CHARGER_DIR_NAME, i);
psy_desc = &port->psy_desc;
- psy_desc->name = port->name;
- psy_desc->type = POWER_SUPPLY_TYPE_USB;
psy_desc->get_property = cros_usbpd_charger_get_prop;
psy_desc->external_power_changed =
cros_usbpd_charger_power_changed;
- psy_desc->properties = cros_usbpd_charger_props;
- psy_desc->num_properties =
- ARRAY_SIZE(cros_usbpd_charger_props);
- psy_desc->usb_types = cros_usbpd_charger_usb_types;
- psy_desc->num_usb_types =
- ARRAY_SIZE(cros_usbpd_charger_usb_types);
psy_cfg.drv_data = port;
+ if (cros_usbpd_charger_port_is_dedicated(port)) {
+ sprintf(port->name, CHARGER_DEDICATED_DIR_NAME);
+ psy_desc->type = POWER_SUPPLY_TYPE_MAINS;
+ psy_desc->properties =
+ cros_usbpd_dedicated_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_dedicated_charger_props);
+ } else {
+ sprintf(port->name, CHARGER_USBPD_DIR_NAME, i);
+ psy_desc->type = POWER_SUPPLY_TYPE_USB;
+ psy_desc->properties = cros_usbpd_charger_props;
+ psy_desc->num_properties =
+ ARRAY_SIZE(cros_usbpd_charger_props);
+ psy_desc->usb_types = cros_usbpd_charger_usb_types;
+ psy_desc->num_usb_types =
+ ARRAY_SIZE(cros_usbpd_charger_usb_types);
+ }
+
+ psy_desc->name = port->name;
+
psy = devm_power_supply_register_no_ws(dev, psy_desc,
&psy_cfg);
if (IS_ERR(psy)) {
diff --git a/drivers/power/supply/ds2780_battery.c b/drivers/power/supply/ds2780_battery.c
index 370e9109342b..cad14ba1b648 100644
--- a/drivers/power/supply/ds2780_battery.c
+++ b/drivers/power/supply/ds2780_battery.c
@@ -829,5 +829,5 @@ module_platform_driver(ds2780_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2780-battery");
diff --git a/drivers/power/supply/ds2781_battery.c b/drivers/power/supply/ds2781_battery.c
index d1b5a19aae7c..5e794607f732 100644
--- a/drivers/power/supply/ds2781_battery.c
+++ b/drivers/power/supply/ds2781_battery.c
@@ -829,6 +829,6 @@ module_platform_driver(ds2781_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>");
-MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC driver");
MODULE_ALIAS("platform:ds2781-battery");
diff --git a/drivers/power/supply/ds2782_battery.c b/drivers/power/supply/ds2782_battery.c
index a1b7e0592245..019c58493e3d 100644
--- a/drivers/power/supply/ds2782_battery.c
+++ b/drivers/power/supply/ds2782_battery.c
@@ -471,5 +471,5 @@ static struct i2c_driver ds278x_battery_driver = {
module_i2c_driver(ds278x_battery_driver);
MODULE_AUTHOR("Ryan Mallon");
-MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauage IC driver");
+MODULE_DESCRIPTION("Maxim/Dallas DS2782 Stand-Alone Fuel Gauge IC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/max14577_charger.c b/drivers/power/supply/max14577_charger.c
index 449fc56f09eb..8a59feac6468 100644
--- a/drivers/power/supply/max14577_charger.c
+++ b/drivers/power/supply/max14577_charger.c
@@ -1,19 +1,9 @@
-/*
- * max14577_charger.c - Battery charger driver for the Maxim 14577/77836
- *
- * Copyright (C) 2013,2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max14577_charger.c - Battery charger driver for the Maxim 14577/77836
+//
+// Copyright (C) 2013,2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
index 33c40f79d23d..91cafc7bed30 100644
--- a/drivers/power/supply/max17040_battery.c
+++ b/drivers/power/supply/max17040_battery.c
@@ -1,14 +1,10 @@
-/*
- * max17040_battery.c
- * fuel-gauge systems for lithium-ion (Li+) batteries
- *
- * Copyright (C) 2009 Samsung Electronics
- * Minkyu Kang <mk7.kang@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// max17040_battery.c
+// fuel-gauge systems for lithium-ion (Li+) batteries
+//
+// Copyright (C) 2009 Samsung Electronics
+// Minkyu Kang <mk7.kang@samsung.com>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index 1a568df383db..2a8d75e5e930 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -1,26 +1,12 @@
-/*
- * Fuel gauge driver for Maxim 17042 / 8966 / 8997
- * Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * This driver is based on max17040_battery.c
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Fuel gauge driver for Maxim 17042 / 8966 / 8997
+// Note that Maxim 8966 and 8997 are mfd and this is its subdevice.
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
+//
+// This driver is based on max17040_battery.c
#include <linux/acpi.h>
#include <linux/init.h>
diff --git a/drivers/power/supply/max77693_charger.c b/drivers/power/supply/max77693_charger.c
index 749c7926e3c9..a2c5c9858639 100644
--- a/drivers/power/supply/max77693_charger.c
+++ b/drivers/power/supply/max77693_charger.c
@@ -1,19 +1,9 @@
-/*
- * max77693_charger.c - Battery charger driver for the Maxim 77693
- *
- * Copyright (C) 2014 Samsung Electronics
- * Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max77693_charger.c - Battery charger driver for the Maxim 77693
+//
+// Copyright (C) 2014 Samsung Electronics
+// Krzysztof Kozlowski <krzk@kernel.org>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/power/supply/max8925_power.c b/drivers/power/supply/max8925_power.c
index 3b94620ce5c1..39b4d5b6ac39 100644
--- a/drivers/power/supply/max8925_power.c
+++ b/drivers/power/supply/max8925_power.c
@@ -124,6 +124,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
dev_dbg(chip->dev, "Battery temperature is out of range\n");
+ /* Fall through */
case MAX8925_IRQ_VCHG_DC_OVP:
dev_dbg(chip->dev, "Error detection\n");
__set_charger(info, 0);
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index c73fb4221695..f5e84cd47924 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
- *
- * Copyright (C) 2011 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8997_charger.c - Power supply consumer driver for the Maxim 8997/8966
+//
+// Copyright (C) 2011 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c
index cad7d1a8feec..9a926c7c0f22 100644
--- a/drivers/power/supply/max8998_charger.c
+++ b/drivers/power/supply/max8998_charger.c
@@ -1,23 +1,9 @@
-/*
- * max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
- *
- * Copyright (C) 2009-2010 Samsung Electronics
- * MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// max8998_charger.c - Power supply consumer driver for the Maxim 8998/LP3974
+//
+// Copyright (C) 2009-2010 Samsung Electronics
+// MyungJoo Ham <myungjoo.ham@samsung.com>
#include <linux/err.h>
#include <linux/module.h>
@@ -86,7 +72,7 @@ static const struct power_supply_desc max8998_battery_desc = {
static int max8998_battery_probe(struct platform_device *pdev)
{
struct max8998_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8998_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8998_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
struct max8998_battery_data *max8998;
struct i2c_client *i2c;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index 6170ed8b6854..dce24f596160 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -131,7 +131,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV && ret != -EAGAIN)
- dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ dev_err_ratelimited(dev,
+ "driver failed to report `%s' property: %zd\n",
attr->attr.name, ret);
return ret;
}
diff --git a/drivers/power/supply/sc2731_charger.c b/drivers/power/supply/sc2731_charger.c
new file mode 100644
index 000000000000..525a820537bf
--- /dev/null
+++ b/drivers/power/supply/sc2731_charger.c
@@ -0,0 +1,504 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2018 Spreadtrum Communications Inc.
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/usb/phy.h>
+#include <linux/regmap.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+
+/* PMIC global registers definition */
+#define SC2731_CHARGE_STATUS 0xedc
+#define SC2731_CHARGE_FULL BIT(4)
+#define SC2731_MODULE_EN1 0xc0c
+#define SC2731_CHARGE_EN BIT(5)
+
+/* SC2731 switch charger registers definition */
+#define SC2731_CHG_CFG0 0x0
+#define SC2731_CHG_CFG1 0x4
+#define SC2731_CHG_CFG2 0x8
+#define SC2731_CHG_CFG3 0xc
+#define SC2731_CHG_CFG4 0x10
+#define SC2731_CHG_CFG5 0x28
+
+/* SC2731_CHG_CFG0 register definition */
+#define SC2731_PRECHG_RNG_SHIFT 11
+#define SC2731_PRECHG_RNG_MASK GENMASK(12, 11)
+
+#define SC2731_TERMINATION_VOL_MASK GENMASK(2, 1)
+#define SC2731_TERMINATION_VOL_SHIFT 1
+#define SC2731_TERMINATION_VOL_CAL_MASK GENMASK(8, 3)
+#define SC2731_TERMINATION_VOL_CAL_SHIFT 3
+#define SC2731_TERMINATION_CUR_MASK GENMASK(2, 0)
+
+#define SC2731_CC_EN BIT(13)
+#define SC2731_CHARGER_PD BIT(0)
+
+/* SC2731_CHG_CFG1 register definition */
+#define SC2731_CUR_MASK GENMASK(5, 0)
+
+/* SC2731_CHG_CFG5 register definition */
+#define SC2731_CUR_LIMIT_SHIFT 8
+#define SC2731_CUR_LIMIT_MASK GENMASK(9, 8)
+
+/* Default current definition (unit is mA) */
+#define SC2731_CURRENT_LIMIT_100 100
+#define SC2731_CURRENT_LIMIT_500 500
+#define SC2731_CURRENT_LIMIT_900 900
+#define SC2731_CURRENT_LIMIT_2000 2000
+#define SC2731_CURRENT_PRECHG 450
+#define SC2731_CURRENT_STEP 50
+
+struct sc2731_charger_info {
+ struct device *dev;
+ struct regmap *regmap;
+ struct usb_phy *usb_phy;
+ struct notifier_block usb_notify;
+ struct power_supply *psy_usb;
+ struct mutex lock;
+ bool charging;
+ u32 base;
+};
+
+static void sc2731_charger_stop_charge(struct sc2731_charger_info *info)
+{
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, 0);
+
+ regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, SC2731_CHARGER_PD);
+}
+
+static int sc2731_charger_start_charge(struct sc2731_charger_info *info)
+{
+ int ret;
+
+ /* Enable charger constant current mode */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CC_EN, SC2731_CC_EN);
+ if (ret)
+ return ret;
+
+ /* Start charging */
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_CHARGER_PD, 0);
+}
+
+static int sc2731_charger_set_current_limit(struct sc2731_charger_info *info,
+ u32 limit)
+{
+ u32 val;
+
+ if (limit <= SC2731_CURRENT_LIMIT_100)
+ val = 0;
+ else if (limit <= SC2731_CURRENT_LIMIT_500)
+ val = 3;
+ else if (limit <= SC2731_CURRENT_LIMIT_900)
+ val = 2;
+ else
+ val = 1;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG5,
+ SC2731_CUR_LIMIT_MASK,
+ val << SC2731_CUR_LIMIT_SHIFT);
+}
+
+static int sc2731_charger_set_current(struct sc2731_charger_info *info, u32 cur)
+{
+ u32 val;
+ int ret;
+
+ if (cur > SC2731_CURRENT_LIMIT_2000)
+ cur = SC2731_CURRENT_LIMIT_2000;
+ else if (cur < SC2731_CURRENT_PRECHG)
+ cur = SC2731_CURRENT_PRECHG;
+
+ /* Calculate the step value, each step is 50 mA */
+ val = (cur - SC2731_CURRENT_PRECHG) / SC2731_CURRENT_STEP;
+
+ /* Set the pre-charge current to 450 mA */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_PRECHG_RNG_MASK,
+ 0x3 << SC2731_PRECHG_RNG_SHIFT);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG1,
+ SC2731_CUR_MASK, val);
+}
+
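Worked example for the step math above: a request of 1000 mA falls inside the [450, 2000] mA clamp and encodes as (1000 - 450) / 50 = 11 in SC2731_CUR_MASK; sc2731_charger_get_current() below inverts this as 11 * 50 + 450 = 1000 mA.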
+static int sc2731_charger_get_status(struct sc2731_charger_info *info)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(info->regmap, SC2731_CHARGE_STATUS, &val);
+ if (ret)
+ return ret;
+
+ if (val & SC2731_CHARGE_FULL)
+ return POWER_SUPPLY_STATUS_FULL;
+
+ return POWER_SUPPLY_STATUS_CHARGING;
+}
+
+static int sc2731_charger_get_current(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG1, &val);
+ if (ret)
+ return ret;
+
+ val &= SC2731_CUR_MASK;
+ *cur = val * SC2731_CURRENT_STEP + SC2731_CURRENT_PRECHG;
+
+ return 0;
+}
+
+static int sc2731_charger_get_current_limit(struct sc2731_charger_info *info,
+ u32 *cur)
+{
+ int ret;
+ u32 val;
+
+ ret = regmap_read(info->regmap, info->base + SC2731_CHG_CFG5, &val);
+ if (ret)
+ return ret;
+
+ val = (val & SC2731_CUR_LIMIT_MASK) >> SC2731_CUR_LIMIT_SHIFT;
+
+ switch (val) {
+ case 0:
+ *cur = SC2731_CURRENT_LIMIT_100;
+ break;
+
+ case 1:
+ *cur = SC2731_CURRENT_LIMIT_2000;
+ break;
+
+ case 2:
+ *cur = SC2731_CURRENT_LIMIT_900;
+ break;
+
+ case 3:
+ *cur = SC2731_CURRENT_LIMIT_500;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
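Note the field encoding shared by the two limit helpers is not monotonic: 0 -> 100 mA, 3 -> 500 mA, 2 -> 900 mA, 1 -> 2000 mA. A table-driven equivalent makes the pairing explicit (a sketch, not a proposed change):

/* Field value -> input current limit in mA, per the switch above. */
static const u32 demo_sc2731_limit_ma[4] = { 100, 2000, 900, 500 };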
+static int
+sc2731_charger_usb_set_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret;
+
+ mutex_lock(&info->lock);
+
+ if (!info->charging) {
+ mutex_unlock(&info->lock);
+ return -ENODEV;
+ }
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ ret = sc2731_charger_set_current(info, val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set charge current failed\n");
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = sc2731_charger_set_current_limit(info,
+ val->intval / 1000);
+ if (ret < 0)
+ dev_err(info->dev, "set input current limit failed\n");
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_usb_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct sc2731_charger_info *info = power_supply_get_drvdata(psy);
+ int ret = 0;
+ u32 cur;
+
+ mutex_lock(&info->lock);
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (info->charging)
+ val->intval = sc2731_charger_get_status(info);
+ else
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ if (!info->charging) {
+ val->intval = 0;
+ } else {
+ ret = sc2731_charger_get_current_limit(info, &cur);
+ if (ret)
+ goto out;
+
+ val->intval = cur * 1000;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_property_is_writeable(struct power_supply *psy,
+ enum power_supply_property psp)
+{
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT:
+ case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT:
+ ret = 1;
+ break;
+
+ default:
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static enum power_supply_property sc2731_usb_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT,
+ POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT,
+};
+
+static const struct power_supply_desc sc2731_charger_desc = {
+ .name = "sc2731_charger",
+ .type = POWER_SUPPLY_TYPE_USB,
+ .properties = sc2731_usb_props,
+ .num_properties = ARRAY_SIZE(sc2731_usb_props),
+ .get_property = sc2731_charger_usb_get_property,
+ .set_property = sc2731_charger_usb_set_property,
+ .property_is_writeable = sc2731_charger_property_is_writeable,
+};
+
+static int sc2731_charger_usb_change(struct notifier_block *nb,
+ unsigned long limit, void *data)
+{
+ struct sc2731_charger_info *info =
+ container_of(nb, struct sc2731_charger_info, usb_notify);
+ int ret = 0;
+
+ mutex_lock(&info->lock);
+
+ if (limit > 0) {
+ /* Set the current limit and start charging */
+ ret = sc2731_charger_set_current_limit(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_set_current(info, limit);
+ if (ret)
+ goto out;
+
+ ret = sc2731_charger_start_charge(info);
+ if (ret)
+ goto out;
+
+ info->charging = true;
+ } else {
+ /* Stop charging */
+ info->charging = false;
+ sc2731_charger_stop_charge(info);
+ }
+
+out:
+ mutex_unlock(&info->lock);
+ return ret;
+}
+
+static int sc2731_charger_hw_init(struct sc2731_charger_info *info)
+{
+ struct power_supply_battery_info bat_info = { };
+ u32 term_current, term_voltage, cur_val, vol_val;
+ int ret;
+
+ /* Enable charger module */
+ ret = regmap_update_bits(info->regmap, SC2731_MODULE_EN1,
+ SC2731_CHARGE_EN, SC2731_CHARGE_EN);
+ if (ret)
+ return ret;
+
+ ret = power_supply_get_battery_info(info->psy_usb, &bat_info);
+ if (ret) {
+ dev_warn(info->dev, "no battery information is supplied\n");
+
+ /*
+ * If no battery information is supplied, we should set
+ * default charge termination current to 120 mA, and default
+ * charge termination voltage to 4.35V.
+ */
+ cur_val = 0x2;
+ vol_val = 0x1;
+ } else {
+ term_current = bat_info.charge_term_current_ua / 1000;
+
+ if (term_current <= 90)
+ cur_val = 0;
+ else if (term_current >= 265)
+ cur_val = 0x7;
+ else
+ cur_val = ((term_current - 90) / 25) + 1;
+
+ term_voltage = bat_info.constant_charge_voltage_max_uv / 1000;
+
+ if (term_voltage > 4500)
+ term_voltage = 4500;
+
+ if (term_voltage > 4200)
+ vol_val = (term_voltage - 4200) / 100;
+ else
+ vol_val = 0;
+ }
+
+ /* Set charge termination current */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG2,
+ SC2731_TERMINATION_CUR_MASK, cur_val);
+ if (ret)
+ goto error;
+
+ /* Set charge termination voltage */
+ ret = regmap_update_bits(info->regmap, info->base + SC2731_CHG_CFG0,
+ SC2731_TERMINATION_VOL_MASK |
+ SC2731_TERMINATION_VOL_CAL_MASK,
+ (vol_val << SC2731_TERMINATION_VOL_SHIFT) |
+ (0x6 << SC2731_TERMINATION_VOL_CAL_SHIFT));
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ regmap_update_bits(info->regmap, SC2731_MODULE_EN1, SC2731_CHARGE_EN, 0);
+ return ret;
+}
+
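The fallback constants line up with the else-branch mapping above: a 120 mA termination current encodes as ((120 - 90) / 25) + 1 = 2, the default cur_val of 0x2, and a 4.35 V termination voltage as (4350 - 4200) / 100 = 1, the default vol_val of 0x1.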
+static int sc2731_charger_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct sc2731_charger_info *info;
+ struct power_supply_config charger_cfg = { };
+ int ret;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ mutex_init(&info->lock);
+ info->dev = &pdev->dev;
+
+ info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!info->regmap) {
+ dev_err(&pdev->dev, "failed to get charger regmap\n");
+ return -ENODEV;
+ }
+
+ ret = of_property_read_u32(np, "reg", &info->base);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get register address\n");
+ return -ENODEV;
+ }
+
+ charger_cfg.drv_data = info;
+ charger_cfg.of_node = np;
+ info->psy_usb = devm_power_supply_register(&pdev->dev,
+ &sc2731_charger_desc,
+ &charger_cfg);
+ if (IS_ERR(info->psy_usb)) {
+ dev_err(&pdev->dev, "failed to register power supply\n");
+ return PTR_ERR(info->psy_usb);
+ }
+
+ ret = sc2731_charger_hw_init(info);
+ if (ret)
+ return ret;
+
+ info->usb_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "phys", 0);
+ if (IS_ERR(info->usb_phy)) {
+ dev_err(&pdev->dev, "failed to find USB phy\n");
+ return PTR_ERR(info->usb_phy);
+ }
+
+ info->usb_notify.notifier_call = sc2731_charger_usb_change;
+ ret = usb_register_notifier(info->usb_phy, &info->usb_notify);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sc2731_charger_remove(struct platform_device *pdev)
+{
+ struct sc2731_charger_info *info = platform_get_drvdata(pdev);
+
+ usb_unregister_notifier(info->usb_phy, &info->usb_notify);
+
+ return 0;
+}
+
+static const struct of_device_id sc2731_charger_of_match[] = {
+ { .compatible = "sprd,sc2731-charger", },
+ { }
+};
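+
+/*
+ * Sketch of a matching devicetree node (node name and phy phandle are
+ * illustrative):
+ *
+ * charger@0 {
+ * compatible = "sprd,sc2731-charger";
+ * reg = <0x0>;
+ * phys = <&ssphy>;
+ * };
+ */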
+
+static struct platform_driver sc2731_charger_driver = {
+ .driver = {
+ .name = "sc2731-charger",
+ .of_match_table = sc2731_charger_of_match,
+ },
+ .probe = sc2731_charger_probe,
+ .remove = sc2731_charger_remove,
+};
+
+module_platform_driver(sc2731_charger_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC2731 Charger Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c
index bbcaee56db9d..0e202d4273fb 100644
--- a/drivers/power/supply/twl4030_charger.c
+++ b/drivers/power/supply/twl4030_charger.c
@@ -420,7 +420,8 @@ static void twl4030_current_worker(struct work_struct *data)
if (v < USB_MIN_VOLT) {
/* Back up and stop adjusting. */
- bci->usb_cur -= USB_CUR_STEP;
+ if (bci->usb_cur >= USB_CUR_STEP)
+ bci->usb_cur -= USB_CUR_STEP;
bci->usb_cur_target = bci->usb_cur;
} else if (bci->usb_cur >= bci->usb_cur_target ||
bci->usb_cur + USB_CUR_STEP > USB_MAX_CURRENT) {
@@ -439,6 +440,7 @@ static void twl4030_current_worker(struct work_struct *data)
static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
{
int ret;
+ u32 reg;
if (bci->usb_mode == CHARGE_OFF)
enable = false;
@@ -452,14 +454,38 @@ static int twl4030_charger_enable_usb(struct twl4030_bci *bci, bool enable)
bci->usb_enabled = 1;
}
- if (bci->usb_mode == CHARGE_AUTO)
+ if (bci->usb_mode == CHARGE_AUTO) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC |
+ TWL4030_TBATOR2 | TWL4030_TBATOR1 |
+ TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
/* forcing the field BCIAUTOUSB (BOOT_BCI[1]) to 1 */
ret = twl4030_clear_set_boot_bci(0, TWL4030_BCIAUTOUSB);
+ }
/* forcing USBFASTMCHG(BCIMFSTS4[2]) to 1 */
ret = twl4030_clear_set(TWL_MODULE_MAIN_CHARGE, 0,
TWL4030_USBFASTMCHG, TWL4030_BCIMFSTS4);
if (bci->usb_mode == CHARGE_LINEAR) {
+ /* Enable interrupts now. */
+ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_TBATOR2 |
+ TWL4030_TBATOR1 | TWL4030_BATSTS);
+ ret = twl_i2c_write_u8(TWL4030_MODULE_INTERRUPTS, reg,
+ TWL4030_INTERRUPTS_BCIIMR1A);
+ if (ret < 0) {
+ dev_err(bci->dev,
+ "failed to unmask interrupts: %d\n",
+ ret);
+ return ret;
+ }
twl4030_clear_set_boot_bci(TWL4030_BCIAUTOAC|TWL4030_CVENAC, 0);
/* Watch dog key: WOVF acknowledge */
ret = twl_i2c_write_u8(TWL_MODULE_MAIN_CHARGE, 0x33,
@@ -996,12 +1022,13 @@ static int twl4030_bci_probe(struct platform_device *pdev)
if (bci->dev->of_node) {
struct device_node *phynode;
- phynode = of_find_compatible_node(bci->dev->of_node->parent,
- NULL, "ti,twl4030-usb");
+ phynode = of_get_compatible_child(bci->dev->of_node->parent,
+ "ti,twl4030-usb");
if (phynode) {
bci->usb_nb.notifier_call = twl4030_bci_usb_ncb;
bci->transceiver = devm_usb_get_phy_by_node(
bci->dev, phynode, &bci->usb_nb);
+ of_node_put(phynode);
if (IS_ERR(bci->transceiver)) {
ret = PTR_ERR(bci->transceiver);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/reset/reset-imx7.c b/drivers/reset/reset-imx7.c
index 97d9f08271c5..77911fa8f31d 100644
--- a/drivers/reset/reset-imx7.c
+++ b/drivers/reset/reset-imx7.c
@@ -67,6 +67,7 @@ static const struct imx7_src_signal imx7_src_signals[IMX7_RESET_NUM] = {
[IMX7_RESET_PCIEPHY] = { SRC_PCIEPHY_RCR, BIT(2) | BIT(1) },
[IMX7_RESET_PCIEPHY_PERST] = { SRC_PCIEPHY_RCR, BIT(3) },
[IMX7_RESET_PCIE_CTRL_APPS_EN] = { SRC_PCIEPHY_RCR, BIT(6) },
+ [IMX7_RESET_PCIE_CTRL_APPS_TURNOFF] = { SRC_PCIEPHY_RCR, BIT(11) },
[IMX7_RESET_DDRC_PRST] = { SRC_DDRC_RCR, BIT(0) },
[IMX7_RESET_DDRC_CORE_RST] = { SRC_DDRC_RCR, BIT(1) },
};
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index fd5e215c66b7..6ccd93d0b1cb 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -15,3 +15,7 @@ obj-$(CONFIG_ZCRYPT) += zcrypt_cex2c.o zcrypt_cex2a.o zcrypt_cex4.o
# pkey kernel module
pkey-objs := pkey_api.o
obj-$(CONFIG_PKEY) += pkey.o
+
+# adjunct processor matrix
+vfio_ap-objs := vfio_ap_drv.o vfio_ap_ops.o
+obj-$(CONFIG_VFIO_AP) += vfio_ap.o
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
new file mode 100644
index 000000000000..7667b38728f0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * VFIO based AP device driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_ROOT_NAME "vfio_ap"
+#define VFIO_AP_DEV_TYPE_NAME "ap_matrix"
+#define VFIO_AP_DEV_NAME "matrix"
+
+MODULE_AUTHOR("IBM Corporation");
+MODULE_DESCRIPTION("VFIO AP device driver, Copyright IBM Corp. 2018");
+MODULE_LICENSE("GPL v2");
+
+static struct ap_driver vfio_ap_drv;
+
+static struct device_type vfio_ap_dev_type = {
+ .name = VFIO_AP_DEV_TYPE_NAME,
+};
+
+struct ap_matrix_dev *matrix_dev;
+
+/*
+ * Only type 10 adapters (CEX4 and later) are supported
+ * by the AP matrix device driver.
+ */
+static struct ap_device_id ap_queue_ids[] = {
+ { .dev_type = AP_DEVICE_TYPE_CEX4,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX5,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { .dev_type = AP_DEVICE_TYPE_CEX6,
+ .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE },
+ { /* end of sibling */ },
+};
+
+MODULE_DEVICE_TABLE(vfio_ap, ap_queue_ids);
+
+static int vfio_ap_queue_dev_probe(struct ap_device *apdev)
+{
+ return 0;
+}
+
+static void vfio_ap_queue_dev_remove(struct ap_device *apdev)
+{
+ /* Nothing to do yet */
+}
+
+static void vfio_ap_matrix_dev_release(struct device *dev)
+{
+ struct ap_matrix_dev *matrix_dev = dev_get_drvdata(dev);
+
+ kfree(matrix_dev);
+}
+
+static int vfio_ap_matrix_dev_create(void)
+{
+ int ret;
+ struct device *root_device;
+
+ root_device = root_device_register(VFIO_AP_ROOT_NAME);
+ if (IS_ERR(root_device))
+ return PTR_ERR(root_device);
+
+ matrix_dev = kzalloc(sizeof(*matrix_dev), GFP_KERNEL);
+ if (!matrix_dev) {
+ ret = -ENOMEM;
+ goto matrix_alloc_err;
+ }
+
+ /* Fill in config info via PQAP(QCI), if available */
+ if (test_facility(12)) {
+ ret = ap_qci(&matrix_dev->info);
+ if (ret) {
+ kfree(matrix_dev);
+ goto matrix_alloc_err;
+ }
+ }
+
+ mutex_init(&matrix_dev->lock);
+ INIT_LIST_HEAD(&matrix_dev->mdev_list);
+
+ matrix_dev->device.type = &vfio_ap_dev_type;
+ dev_set_name(&matrix_dev->device, "%s", VFIO_AP_DEV_NAME);
+ matrix_dev->device.parent = root_device;
+ matrix_dev->device.release = vfio_ap_matrix_dev_release;
+ matrix_dev->device.driver = &vfio_ap_drv.driver;
+
+ ret = device_register(&matrix_dev->device);
+ if (ret)
+ goto matrix_reg_err;
+
+ return 0;
+
+matrix_reg_err:
+ put_device(&matrix_dev->device);
+matrix_alloc_err:
+ root_device_unregister(root_device);
+
+ return ret;
+}
+
+static void vfio_ap_matrix_dev_destroy(void)
+{
+ device_unregister(&matrix_dev->device);
+ root_device_unregister(matrix_dev->device.parent);
+}
+
+static int __init vfio_ap_init(void)
+{
+ int ret;
+
+ /* If there are no AP instructions, there is nothing to pass through. */
+ if (!ap_instructions_available())
+ return -ENODEV;
+
+ ret = vfio_ap_matrix_dev_create();
+ if (ret)
+ return ret;
+
+ memset(&vfio_ap_drv, 0, sizeof(vfio_ap_drv));
+ vfio_ap_drv.probe = vfio_ap_queue_dev_probe;
+ vfio_ap_drv.remove = vfio_ap_queue_dev_remove;
+ vfio_ap_drv.ids = ap_queue_ids;
+
+ ret = ap_driver_register(&vfio_ap_drv, THIS_MODULE, VFIO_AP_DRV_NAME);
+ if (ret) {
+ vfio_ap_matrix_dev_destroy();
+ return ret;
+ }
+
+ ret = vfio_ap_mdev_register();
+ if (ret) {
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit vfio_ap_exit(void)
+{
+ vfio_ap_mdev_unregister();
+ ap_driver_unregister(&vfio_ap_drv);
+ vfio_ap_matrix_dev_destroy();
+}
+
+module_init(vfio_ap_init);
+module_exit(vfio_ap_exit);
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
new file mode 100644
index 000000000000..272ef427dcc0
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Adjunct processor matrix VFIO device driver callbacks.
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ * Pierre Morel <pmorel@linux.ibm.com>
+ */
+#include <linux/string.h>
+#include <linux/vfio.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/bitops.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/kvm.h>
+#include <asm/zcrypt.h>
+
+#include "vfio_ap_private.h"
+
+#define VFIO_AP_MDEV_TYPE_HWVIRT "passthrough"
+#define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device"
+
+static void vfio_ap_matrix_init(struct ap_config_info *info,
+ struct ap_matrix *matrix)
+{
+ matrix->apm_max = info->apxa ? info->Na : 63;
+ matrix->aqm_max = info->apxa ? info->Nd : 15;
+ matrix->adm_max = info->apxa ? info->Nd : 15;
+}
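+
+/*
+ * Example: without the APXA facility the architected limits apply, so the
+ * highest usable APID is 63 and the highest APQI/domain ID is 15; with
+ * APXA, the QCI-reported Na/Nd values (up to 255) are used instead.
+ */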
+
+static int vfio_ap_mdev_create(struct kobject *kobj, struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (atomic_dec_if_positive(&matrix_dev->available_instances) < 0)
+ return -EPERM;
+
+ matrix_mdev = kzalloc(sizeof(*matrix_mdev), GFP_KERNEL);
+ if (!matrix_mdev) {
+ atomic_inc(&matrix_dev->available_instances);
+ return -ENOMEM;
+ }
+
+ vfio_ap_matrix_init(&matrix_dev->info, &matrix_mdev->matrix);
+ mdev_set_drvdata(mdev, matrix_mdev);
+ mutex_lock(&matrix_dev->lock);
+ list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_remove(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ mutex_lock(&matrix_dev->lock);
+ list_del(&matrix_mdev->node);
+ mutex_unlock(&matrix_dev->lock);
+
+ kfree(matrix_mdev);
+ mdev_set_drvdata(mdev, NULL);
+ atomic_inc(&matrix_dev->available_instances);
+
+ return 0;
+}
+
+static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_AP_MDEV_NAME_HWVIRT);
+}
+
+static MDEV_TYPE_ATTR_RO(name);
+
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
+{
+ return sprintf(buf, "%d\n",
+ atomic_read(&matrix_dev->available_instances));
+}
+
+static MDEV_TYPE_ATTR_RO(available_instances);
+
+static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", VFIO_DEVICE_API_AP_STRING);
+}
+
+static MDEV_TYPE_ATTR_RO(device_api);
+
+static struct attribute *vfio_ap_mdev_type_attrs[] = {
+ &mdev_type_attr_name.attr,
+ &mdev_type_attr_device_api.attr,
+ &mdev_type_attr_available_instances.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_hwvirt_type_group = {
+ .name = VFIO_AP_MDEV_TYPE_HWVIRT,
+ .attrs = vfio_ap_mdev_type_attrs,
+};
+
+static struct attribute_group *vfio_ap_mdev_type_groups[] = {
+ &vfio_ap_mdev_hwvirt_type_group,
+ NULL,
+};
+
+struct vfio_ap_queue_reserved {
+ unsigned long *apid;
+ unsigned long *apqi;
+ bool reserved;
+};
+
+/**
+ * vfio_ap_has_queue
+ *
+ * @dev: an AP queue device
+ * @data: a struct vfio_ap_queue_reserved reference
+ *
+ * Flags whether the AP queue device (@dev) has a queue ID containing the APQN,
+ * apid or apqi specified in @data:
+ *
+ * - If @data contains both an apid and apqi value, then @data will be flagged
+ * as reserved if the APID and APQI fields for the AP queue device match
+ *
+ * - If @data contains only an apid value, @data will be flagged as
+ * reserved if the APID field in the AP queue device matches
+ *
+ * - If @data contains only an apqi value, @data will be flagged as
+ * reserved if the APQI field in the AP queue device matches
+ *
+ * Returns 0 if the function completed successfully. Returns -EINVAL if
+ * @data contains neither an apid nor an apqi.
+ */
+static int vfio_ap_has_queue(struct device *dev, void *data)
+{
+ struct vfio_ap_queue_reserved *qres = data;
+ struct ap_queue *ap_queue = to_ap_queue(dev);
+ ap_qid_t qid;
+ unsigned long id;
+
+ if (qres->apid && qres->apqi) {
+ qid = AP_MKQID(*qres->apid, *qres->apqi);
+ if (qid == ap_queue->qid)
+ qres->reserved = true;
+ } else if (qres->apid && !qres->apqi) {
+ id = AP_QID_CARD(ap_queue->qid);
+ if (id == *qres->apid)
+ qres->reserved = true;
+ } else if (!qres->apid && qres->apqi) {
+ id = AP_QID_QUEUE(ap_queue->qid);
+ if (id == *qres->apqi)
+ qres->reserved = true;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_verify_queue_reserved
+ *
+ * @apid: an AP adapter ID
+ * @apqi: an AP queue index
+ *
+ * Verifies that the AP queue with @apid/@apqi is reserved by the VFIO AP device
+ * driver according to the following rules:
+ *
+ * - If both @apid and @apqi are not NULL, then there must be an AP queue
+ * device bound to the vfio_ap driver with the APQN identified by @apid and
+ * @apqi
+ *
+ * - If only @apid is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apid
+ *
+ * - If only @apqi is not NULL, then there must be an AP queue device bound
+ * to the vfio_ap driver with an APQN containing @apqi
+ *
+ * Returns 0 if the AP queue is reserved; otherwise, returns -EADDRNOTAVAIL.
+ */
+static int vfio_ap_verify_queue_reserved(unsigned long *apid,
+ unsigned long *apqi)
+{
+ int ret;
+ struct vfio_ap_queue_reserved qres;
+
+ qres.apid = apid;
+ qres.apqi = apqi;
+ qres.reserved = false;
+
+ ret = driver_for_each_device(matrix_dev->device.driver, NULL, &qres,
+ vfio_ap_has_queue);
+ if (ret)
+ return ret;
+
+ if (qres.reserved)
+ return 0;
+
+ return -EADDRNOTAVAIL;
+}
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apid(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apid)
+{
+ int ret;
+ unsigned long apqi;
+ unsigned long nbits = matrix_mdev->matrix.aqm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.aqm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(&apid, NULL);
+
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vfio_ap_mdev_verify_no_sharing
+ *
+ * Verifies that the APQNs derived from the cross product of the AP adapter IDs
+ * and AP queue indexes comprising the AP matrix are not configured for another
+ * mediated device. AP queue sharing is not allowed.
+ *
+ * @matrix_mdev: the mediated matrix device
+ *
+ * Returns 0 if the APQNs are not shared; otherwise, returns -EADDRINUSE.
+ */
+static int vfio_ap_mdev_verify_no_sharing(struct ap_matrix_mdev *matrix_mdev)
+{
+ struct ap_matrix_mdev *lstdev;
+ DECLARE_BITMAP(apm, AP_DEVICES);
+ DECLARE_BITMAP(aqm, AP_DOMAINS);
+
+ list_for_each_entry(lstdev, &matrix_dev->mdev_list, node) {
+ if (matrix_mdev == lstdev)
+ continue;
+
+ memset(apm, 0, sizeof(apm));
+ memset(aqm, 0, sizeof(aqm));
+
+ /*
+ * We work on full longs, as we can only exclude the leftover
+ * bits in non-inverse order. The leftover is all zeros.
+ */
+ if (!bitmap_and(apm, matrix_mdev->matrix.apm,
+ lstdev->matrix.apm, AP_DEVICES))
+ continue;
+
+ if (!bitmap_and(aqm, matrix_mdev->matrix.aqm,
+ lstdev->matrix.aqm, AP_DOMAINS))
+ continue;
+
+ return -EADDRINUSE;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_adapter attribute
+ * @buf: a buffer containing the AP adapter number (APID) to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and sets the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APID is not a valid number
+ *
+ * 2. -ENODEV
+ * The APID exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APQIs have yet been assigned, the APID is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APID being assigned
+ * and the APQIs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ /*
+ * Set the bit in the AP mask (APM) corresponding to the AP adapter
+ * number (APID). The bits in the mask, from most significant to least
+ * significant bit, correspond to APIDs 0-255.
+ */
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apid(matrix_mdev, apid);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apid, matrix_mdev->matrix.apm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apid, matrix_mdev->matrix.apm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_adapter);
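+
+/*
+ * Example usage (sketch; <uuid> names a previously created mediated
+ * device under the matrix device):
+ *
+ * echo 0x05 > /sys/devices/vfio_ap/matrix/<uuid>/assign_adapter
+ */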
+
+/**
+ * unassign_adapter_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_adapter attribute
+ * @buf: a buffer containing the adapter number (APID) to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APID from @buf and clears the corresponding bit in the mediated
+ * matrix device's APM.
+ *
+ * Returns the number of bytes processed if the APID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APID is not a number
+ * -ENODEV if the APID exceeds the maximum value configured for the
+ * system
+ */
+static ssize_t unassign_adapter_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of adapter */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apid);
+ if (ret)
+ return ret;
+
+ if (apid > matrix_mdev->matrix.apm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apid, matrix_mdev->matrix.apm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_adapter);
+
+static int
+vfio_ap_mdev_verify_queues_reserved_for_apqi(struct ap_matrix_mdev *matrix_mdev,
+ unsigned long apqi)
+{
+ int ret;
+ unsigned long apid;
+ unsigned long nbits = matrix_mdev->matrix.apm_max + 1;
+
+ if (find_first_bit_inv(matrix_mdev->matrix.apm, nbits) >= nbits)
+ return vfio_ap_verify_queue_reserved(NULL, &apqi);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, nbits) {
+ ret = vfio_ap_verify_queue_reserved(&apid, &apqi);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * assign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and sets the corresponding bit in the mediated
+ * matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * returns one of the following errors:
+ *
+ * 1. -EINVAL
+ * The APQI is not a valid number
+ *
+ * 2. -ENODEV
+ * The APQI exceeds the maximum value configured for the system
+ *
+ * 3. -EADDRNOTAVAIL
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is not bound to the vfio_ap device
+ * driver; or, if no APIDs have yet been assigned, the APQI is not
+ * contained in an APQN bound to the vfio_ap device driver.
+ *
+ * 4. -EADDRINUSE
+ * An APQN derived from the cross product of the APQI being assigned
+ * and the APIDs previously assigned is being used by another mediated
+ * matrix device
+ */
+static ssize_t assign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_apqi = matrix_mdev->matrix.aqm_max;
+
+ /* If the guest is running, disallow assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+ if (apqi > max_apqi)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+
+ ret = vfio_ap_mdev_verify_queues_reserved_for_apqi(matrix_mdev, apqi);
+ if (ret)
+ goto done;
+
+ set_bit_inv(apqi, matrix_mdev->matrix.aqm);
+
+ ret = vfio_ap_mdev_verify_no_sharing(matrix_mdev);
+ if (ret)
+ goto share_err;
+
+ ret = count;
+ goto done;
+
+share_err:
+ clear_bit_inv(apqi, matrix_mdev->matrix.aqm);
+done:
+ mutex_unlock(&matrix_dev->lock);
+
+ return ret;
+}
+static DEVICE_ATTR_WO(assign_domain);
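+
+/*
+ * Example usage (sketch): assigning usage domain 4 to the same mediated
+ * device exposes APQN 05.0004 to the guest once adapter 5 is also
+ * assigned:
+ *
+ * echo 4 > /sys/devices/vfio_ap/matrix/<uuid>/assign_domain
+ */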
+
+/**
+ * unassign_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_domain attribute
+ * @buf: a buffer containing the AP queue index (APQI) of the domain to
+ * be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the APQI from @buf and clears the corresponding bit in the
+ * mediated matrix device's AQM.
+ *
+ * Returns the number of bytes processed if the APQI is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the APQI is not a number
+ * -ENODEV if the APQI exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long apqi;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow un-assignment of domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &apqi);
+ if (ret)
+ return ret;
+
+ if (apqi > matrix_mdev->matrix.aqm_max)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv((unsigned long)apqi, matrix_mdev->matrix.aqm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_domain);
+
+/**
+ * assign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's assign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be assigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and sets the corresponding bit in the mediated
+ * matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t assign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long id;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ /* If the guest is running, disallow assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &id);
+ if (ret)
+ return ret;
+
+ if (id > matrix_mdev->matrix.adm_max)
+ return -ENODEV;
+
+ /* Set the bit in the ADM (bitmask) corresponding to the AP control
+ * domain number (id). The bits in the mask, from most significant to
+ * least significant, correspond to IDs 0 up to one less than the
+ * number of control domains that can be assigned.
+ */
+ mutex_lock(&matrix_dev->lock);
+ set_bit_inv(id, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(assign_control_domain);
+
+/**
+ * unassign_control_domain_store
+ *
+ * @dev: the matrix device
+ * @attr: the mediated matrix device's unassign_control_domain attribute
+ * @buf: a buffer containing the domain ID to be unassigned
+ * @count: the number of bytes in @buf
+ *
+ * Parses the domain ID from @buf and clears the corresponding bit in the
+ * mediated matrix device's ADM.
+ *
+ * Returns the number of bytes processed if the domain ID is valid; otherwise,
+ * returns one of the following errors:
+ * -EINVAL if the ID is not a number
+ * -ENODEV if the ID exceeds the maximum value configured for the system
+ */
+static ssize_t unassign_control_domain_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ unsigned long domid;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ /* If the guest is running, disallow un-assignment of control domain */
+ if (matrix_mdev->kvm)
+ return -EBUSY;
+
+ ret = kstrtoul(buf, 0, &domid);
+ if (ret)
+ return ret;
+ if (domid > max_domid)
+ return -ENODEV;
+
+ mutex_lock(&matrix_dev->lock);
+ clear_bit_inv(domid, matrix_mdev->matrix.adm);
+ mutex_unlock(&matrix_dev->lock);
+
+ return count;
+}
+static DEVICE_ATTR_WO(unassign_control_domain);
+
+static ssize_t control_domains_show(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ unsigned long id;
+ int nchars = 0;
+ int n;
+ char *bufpos = buf;
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long max_domid = matrix_mdev->matrix.adm_max;
+
+ mutex_lock(&matrix_dev->lock);
+ for_each_set_bit_inv(id, matrix_mdev->matrix.adm, max_domid + 1) {
+ n = sprintf(bufpos, "%04lx\n", id);
+ bufpos += n;
+ nchars += n;
+ }
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(control_domains);
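+
+/*
+ * Example output (illustrative): with control domains 4 and 0x47
+ * assigned, reading control_domains yields:
+ *
+ * 0004
+ * 0047
+ */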
+
+static ssize_t matrix_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ char *bufpos = buf;
+ unsigned long apid;
+ unsigned long apqi;
+ unsigned long apid1;
+ unsigned long apqi1;
+ unsigned long napm_bits = matrix_mdev->matrix.apm_max + 1;
+ unsigned long naqm_bits = matrix_mdev->matrix.aqm_max + 1;
+ int nchars = 0;
+ int n;
+
+ apid1 = find_first_bit_inv(matrix_mdev->matrix.apm, napm_bits);
+ apqi1 = find_first_bit_inv(matrix_mdev->matrix.aqm, naqm_bits);
+
+ mutex_lock(&matrix_dev->lock);
+
+ if ((apid1 < napm_bits) && (apqi1 < naqm_bits)) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ naqm_bits) {
+ n = sprintf(bufpos, "%02lx.%04lx\n", apid,
+ apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+ } else if (apid1 < napm_bits) {
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, napm_bits) {
+ n = sprintf(bufpos, "%02lx.\n", apid);
+ bufpos += n;
+ nchars += n;
+ }
+ } else if (apqi1 < naqm_bits) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, naqm_bits) {
+ n = sprintf(bufpos, ".%04lx\n", apqi);
+ bufpos += n;
+ nchars += n;
+ }
+ }
+
+ mutex_unlock(&matrix_dev->lock);
+
+ return nchars;
+}
+static DEVICE_ATTR_RO(matrix);
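+
+/*
+ * Example output (illustrative): with APID 5 and APQIs 4 and 0x47
+ * assigned, reading the matrix attribute yields:
+ *
+ * 05.0004
+ * 05.0047
+ */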
+
+static struct attribute *vfio_ap_mdev_attrs[] = {
+ &dev_attr_assign_adapter.attr,
+ &dev_attr_unassign_adapter.attr,
+ &dev_attr_assign_domain.attr,
+ &dev_attr_unassign_domain.attr,
+ &dev_attr_assign_control_domain.attr,
+ &dev_attr_unassign_control_domain.attr,
+ &dev_attr_control_domains.attr,
+ &dev_attr_matrix.attr,
+ NULL,
+};
+
+static struct attribute_group vfio_ap_mdev_attr_group = {
+ .attrs = vfio_ap_mdev_attrs
+};
+
+static const struct attribute_group *vfio_ap_mdev_attr_groups[] = {
+ &vfio_ap_mdev_attr_group,
+ NULL
+};
+
+/**
+ * vfio_ap_mdev_set_kvm
+ *
+ * @matrix_mdev: a mediated matrix device
+ * @kvm: reference to KVM instance
+ *
+ * Verifies no other mediated matrix device has @kvm and sets a reference to
+ * it in @matrix_mdev->kvm.
+ *
+ * Returns 0 if no other mediated matrix device has a reference to @kvm;
+ * otherwise, returns -EPERM.
+ */
+static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev,
+ struct kvm *kvm)
+{
+ struct ap_matrix_mdev *m;
+
+ mutex_lock(&matrix_dev->lock);
+
+ list_for_each_entry(m, &matrix_dev->mdev_list, node) {
+ if ((m != matrix_mdev) && (m->kvm == kvm)) {
+ mutex_unlock(&matrix_dev->lock);
+ return -EPERM;
+ }
+ }
+
+ matrix_mdev->kvm = kvm;
+ mutex_unlock(&matrix_dev->lock);
+
+ return 0;
+}
+
+static int vfio_ap_mdev_group_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int ret;
+ struct ap_matrix_mdev *matrix_mdev;
+
+ if (action != VFIO_GROUP_NOTIFY_SET_KVM)
+ return NOTIFY_OK;
+
+ matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier);
+
+ if (!data) {
+ matrix_mdev->kvm = NULL;
+ return NOTIFY_OK;
+ }
+
+ ret = vfio_ap_mdev_set_kvm(matrix_mdev, data);
+ if (ret)
+ return NOTIFY_DONE;
+
+ /* If there is no CRYCB pointer, then we can't copy the masks */
+ if (!matrix_mdev->kvm->arch.crypto.crycbd)
+ return NOTIFY_DONE;
+
+ kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.adm);
+
+ return NOTIFY_OK;
+}
+
+static int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi,
+ unsigned int retry)
+{
+ struct ap_queue_status status;
+
+ do {
+ status = ap_zapq(AP_MKQID(apid, apqi));
+ switch (status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ return 0;
+ case AP_RESPONSE_RESET_IN_PROGRESS:
+ case AP_RESPONSE_BUSY:
+ msleep(20);
+ break;
+ default:
+ /* things are really broken, give up */
+ return -EIO;
+ }
+ } while (retry--);
+
+ return -EBUSY;
+}
+
+static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev)
+{
+ int ret;
+ int rc = 0;
+ unsigned long apid, apqi;
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ for_each_set_bit_inv(apid, matrix_mdev->matrix.apm,
+ matrix_mdev->matrix.apm_max + 1) {
+ for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm,
+ matrix_mdev->matrix.aqm_max + 1) {
+ ret = vfio_ap_mdev_reset_queue(apid, apqi, 1);
+ /*
+ * Regardless of whether a queue turns out to be busy or
+ * not operational, we need to continue resetting the
+ * remaining queues.
+ */
+ if (ret)
+ rc = ret;
+ }
+ }
+
+ return rc;
+}
+
+static int vfio_ap_mdev_open(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+ unsigned long events;
+ int ret;
+
+ if (!try_module_get(THIS_MODULE))
+ return -ENODEV;
+
+ matrix_mdev->group_notifier.notifier_call = vfio_ap_mdev_group_notifier;
+ events = VFIO_GROUP_NOTIFY_SET_KVM;
+
+ ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &events, &matrix_mdev->group_notifier);
+ if (ret) {
+ module_put(THIS_MODULE);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vfio_ap_mdev_release(struct mdev_device *mdev)
+{
+ struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev);
+
+ if (matrix_mdev->kvm)
+ kvm_arch_crypto_clear_masks(matrix_mdev->kvm);
+
+ vfio_ap_mdev_reset_queues(mdev);
+ vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
+ &matrix_mdev->group_notifier);
+ matrix_mdev->kvm = NULL;
+ module_put(THIS_MODULE);
+}
+
+static int vfio_ap_mdev_get_device_info(unsigned long arg)
+{
+ unsigned long minsz;
+ struct vfio_device_info info;
+
+ minsz = offsetofend(struct vfio_device_info, num_irqs);
+
+ if (copy_from_user(&info, (void __user *)arg, minsz))
+ return -EFAULT;
+
+ if (info.argsz < minsz)
+ return -EINVAL;
+
+ info.flags = VFIO_DEVICE_FLAGS_AP | VFIO_DEVICE_FLAGS_RESET;
+ info.num_regions = 0;
+ info.num_irqs = 0;
+
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
+}
+
+static ssize_t vfio_ap_mdev_ioctl(struct mdev_device *mdev,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret;
+
+ switch (cmd) {
+ case VFIO_DEVICE_GET_INFO:
+ ret = vfio_ap_mdev_get_device_info(arg);
+ break;
+ case VFIO_DEVICE_RESET:
+ ret = vfio_ap_mdev_reset_queues(mdev);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct mdev_parent_ops vfio_ap_matrix_ops = {
+ .owner = THIS_MODULE,
+ .supported_type_groups = vfio_ap_mdev_type_groups,
+ .mdev_attr_groups = vfio_ap_mdev_attr_groups,
+ .create = vfio_ap_mdev_create,
+ .remove = vfio_ap_mdev_remove,
+ .open = vfio_ap_mdev_open,
+ .release = vfio_ap_mdev_release,
+ .ioctl = vfio_ap_mdev_ioctl,
+};
+
+int vfio_ap_mdev_register(void)
+{
+ atomic_set(&matrix_dev->available_instances, MAX_ZDEV_ENTRIES_EXT);
+
+ return mdev_register_device(&matrix_dev->device, &vfio_ap_matrix_ops);
+}
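+
+/*
+ * Example userspace flow (sketch; the UUID is illustrative): a mediated
+ * matrix device is created by writing a UUID to the create attribute of
+ * the supported type:
+ *
+ * echo "ad67a6da-0662-11e9-a865-0800200c9a66" > /sys/devices/vfio_ap/\
+ * matrix/mdev_supported_types/vfio_ap-passthrough/create
+ */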
+
+void vfio_ap_mdev_unregister(void)
+{
+ mdev_unregister_device(&matrix_dev->device);
+}
diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h
new file mode 100644
index 000000000000..5675492233c7
--- /dev/null
+++ b/drivers/s390/crypto/vfio_ap_private.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Private data and functions for adjunct processor VFIO matrix driver.
+ *
+ * Author(s): Tony Krowiak <akrowiak@linux.ibm.com>
+ * Halil Pasic <pasic@linux.ibm.com>
+ *
+ * Copyright IBM Corp. 2018
+ */
+
+#ifndef _VFIO_AP_PRIVATE_H_
+#define _VFIO_AP_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/mdev.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+
+#include "ap_bus.h"
+
+#define VFIO_AP_MODULE_NAME "vfio_ap"
+#define VFIO_AP_DRV_NAME "vfio_ap"
+
+/**
+ * struct ap_matrix_dev - the AP matrix device structure
+ * @device: generic device structure associated with the AP matrix device
+ * @available_instances: number of mediated matrix devices that can be created
+ * @info: the struct containing the output from the PQAP(QCI) instruction
+ * @mdev_list: the list of mediated matrix devices created
+ * @lock: mutex for locking the AP matrix device. This lock will be
+ * taken every time we fiddle with state managed by the vfio_ap
+ * driver, be it using @mdev_list or writing the state of a
+ * single ap_matrix_mdev device. It's quite coarse but we don't
+ * expect much contention.
+ */
+struct ap_matrix_dev {
+ struct device device;
+ atomic_t available_instances;
+ struct ap_config_info info;
+ struct list_head mdev_list;
+ struct mutex lock;
+};
+
+extern struct ap_matrix_dev *matrix_dev;
+
+/**
+ * struct ap_matrix - matrix of adapters, domains and control domains
+ *
+ * The AP matrix is comprised of three bit masks identifying the adapters,
+ * queues (domains) and control domains that belong to an AP matrix. The bits
+ * in each mask, from most significant to least significant bit, correspond to
+ * IDs 0 to 255. When a bit is set, the corresponding ID belongs to the matrix.
+ *
+ * @apm_max: max adapter number in @apm
+ * @apm: identifies the AP adapters in the matrix
+ * @aqm_max: max domain number in @aqm
+ * @aqm: identifies the AP queues (domains) in the matrix
+ * @adm_max: max domain number in @adm
+ * @adm: identifies the AP control domains in the matrix
+ */
+struct ap_matrix {
+ unsigned long apm_max;
+ DECLARE_BITMAP(apm, 256);
+ unsigned long aqm_max;
+ DECLARE_BITMAP(aqm, 256);
+ unsigned long adm_max;
+ DECLARE_BITMAP(adm, 256);
+};
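+
+/*
+ * Example: set_bit_inv(5, matrix->apm) marks adapter 5 as belonging to
+ * the matrix by setting the sixth most significant bit of the mask.
+ */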
+
+/**
+ * struct ap_matrix_mdev - the mediated matrix device structure
+ * @node: allows the ap_matrix_mdev struct to be added to a list
+ * @matrix: the adapters, usage domains and control domains assigned to the
+ * mediated matrix device.
+ * @group_notifier: notifier block used for specifying callback function for
+ * handling the VFIO_GROUP_NOTIFY_SET_KVM event
+ * @kvm: the struct holding guest's state
+ */
+struct ap_matrix_mdev {
+ struct list_head node;
+ struct ap_matrix matrix;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+};
+
+extern int vfio_ap_mdev_register(void);
+extern void vfio_ap_mdev_unregister(void);
+
+#endif /* _VFIO_AP_PRIVATE_H_ */
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index c0631895154e..f96ec68af2e5 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -515,8 +515,8 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_unmap;
- pci_set_dma_seg_boundary(pdev, SZ_1M - 1);
- pci_set_dma_max_seg_size(pdev, SZ_1M);
+ dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
+ dma_set_max_seg_size(&pdev->dev, SZ_1M);
pci_set_master(pdev);
ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 27521fc3ef5a..05293babb031 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -518,7 +518,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -526,7 +527,8 @@ static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
goto out;
}
@@ -1027,16 +1029,16 @@ out:
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- sizeof(TW_Command_Full)*TW_Q_LENGTH,
- tw_dev->command_packet_virt[0],
- tw_dev->command_packet_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command_Full) * TW_Q_LENGTH,
+ tw_dev->command_packet_virt[0],
+ tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
- TW_SECTOR_SIZE*TW_Q_LENGTH,
- tw_dev->generic_buffer_virt[0],
- tw_dev->generic_buffer_phys[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ TW_SECTOR_SIZE * TW_Q_LENGTH,
+ tw_dev->generic_buffer_virt[0],
+ tw_dev->generic_buffer_phys[0]);
kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */
@@ -2015,14 +2017,12 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -2237,14 +2237,12 @@ static int twa_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twa_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 40c1e6e64f58..266bdac75304 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -644,8 +644,8 @@ static int twl_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
unsigned long *cpu_addr;
int retval = 1;
- cpu_addr = pci_zalloc_consistent(tw_dev->tw_pci_dev, size * TW_Q_LENGTH,
- &dma_handle);
+ cpu_addr = dma_zalloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (!cpu_addr) {
TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
goto out;
@@ -899,19 +899,19 @@ out:
static void twl_free_device_extension(TW_Device_Extension *tw_dev)
{
if (tw_dev->command_packet_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Full)*TW_Q_LENGTH,
tw_dev->command_packet_virt[0],
tw_dev->command_packet_phys[0]);
if (tw_dev->generic_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
TW_SECTOR_SIZE*TW_Q_LENGTH,
tw_dev->generic_buffer_virt[0],
tw_dev->generic_buffer_phys[0]);
if (tw_dev->sense_buffer_virt[0])
- pci_free_consistent(tw_dev->tw_pci_dev,
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
sizeof(TW_Command_Apache_Header)*
TW_Q_LENGTH,
tw_dev->sense_buffer_virt[0],
@@ -1571,14 +1571,12 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
if (!host) {
@@ -1805,14 +1803,12 @@ static int twl_resume(struct pci_dev *pdev)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
- retval = -ENODEV;
- goto out_disable_device;
- }
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
+ TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
+ retval = -ENODEV;
+ goto out_disable_device;
+ }
/* Initialize the card */
if (twl_reset_sequence(tw_dev, 0)) {
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 471366945bd4..a58257645e94 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -834,15 +834,17 @@ static int tw_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
dprintk(KERN_NOTICE "3w-xxxx: tw_allocate_memory()\n");
- cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
+ cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+ size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
- printk(KERN_WARNING "3w-xxxx: pci_alloc_consistent() failed.\n");
+ printk(KERN_WARNING "3w-xxxx: dma_alloc_coherent() failed.\n");
return 1;
}
if ((unsigned long)cpu_addr % (tw_dev->tw_pci_dev->device == TW_DEVICE_ID ? TW_ALIGNMENT_6000 : TW_ALIGNMENT_7000)) {
printk(KERN_WARNING "3w-xxxx: Couldn't allocate correctly aligned memory.\n");
- pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
+ cpu_addr, dma_handle);
return 1;
}
@@ -1062,10 +1064,16 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
/* Free command packet and generic buffer memory */
if (tw_dev->command_packet_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Command)*TW_Q_LENGTH, tw_dev->command_packet_virtual_address[0], tw_dev->command_packet_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Command) * TW_Q_LENGTH,
+ tw_dev->command_packet_virtual_address[0],
+ tw_dev->command_packet_physical_address[0]);
if (tw_dev->alignment_virtual_address[0])
- pci_free_consistent(tw_dev->tw_pci_dev, sizeof(TW_Sector)*TW_Q_LENGTH, tw_dev->alignment_virtual_address[0], tw_dev->alignment_physical_address[0]);
+ dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+ sizeof(TW_Sector) * TW_Q_LENGTH,
+ tw_dev->alignment_virtual_address[0],
+ tw_dev->alignment_physical_address[0]);
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
@@ -2260,7 +2268,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_set_master(pdev);
- retval = pci_set_dma_mask(pdev, TW_DMA_MASK);
+ retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
printk(KERN_WARNING "3w-xxxx: Failed to set dma mask.");
goto out_disable_device;
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 69e80c1ed1ca..bd87fbacfbc7 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -230,7 +230,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_DMA_MASK DMA_BIT_MASK(32)
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
diff --git a/drivers/scsi/53c700.h b/drivers/scsi/53c700.h
index 0c9a100af667..05fe439b66af 100644
--- a/drivers/scsi/53c700.h
+++ b/drivers/scsi/53c700.h
@@ -90,7 +90,7 @@ struct NCR_700_Device_Parameters {
/* The SYNC negotiation sequence looks like:
*
* If DEV_NEGOTIATED_SYNC not set, tack an SDTR message on to the
- * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTATION
+ * initial identify for the device and set DEV_BEGIN_SYNC_NEGOTIATION
* If we get an SDTR reply, work out the SXFER parameters, squirrel
* them away here, clear DEV_BEGIN_SYNC_NEGOTIATION and set
* DEV_NEGOTIATED_SYNC. If we get a REJECT msg, squirrel
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 0d4ffe0ae306..9cee941f97d6 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -201,8 +201,8 @@ static bool __init blogic_create_initccbs(struct blogic_adapter *adapter)
dma_addr_t blkp;
while (adapter->alloc_ccbs < adapter->initccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL) {
blogic_err("UNABLE TO ALLOCATE CCB GROUP - DETACHING\n",
adapter);
@@ -227,15 +227,16 @@ static void blogic_destroy_ccbs(struct blogic_adapter *adapter)
next_ccb = ccb->next_all;
if (ccb->allocgrp_head) {
if (lastccb)
- pci_free_consistent(adapter->pci_device,
+ dma_free_coherent(&adapter->pci_device->dev,
lastccb->allocgrp_size, lastccb,
lastccb->allocgrp_head);
lastccb = ccb;
}
}
if (lastccb)
- pci_free_consistent(adapter->pci_device, lastccb->allocgrp_size,
- lastccb, lastccb->allocgrp_head);
+ dma_free_coherent(&adapter->pci_device->dev,
+ lastccb->allocgrp_size, lastccb,
+ lastccb->allocgrp_head);
}
@@ -256,8 +257,8 @@ static void blogic_create_addlccbs(struct blogic_adapter *adapter,
if (addl_ccbs <= 0)
return;
while (adapter->alloc_ccbs - prev_alloc < addl_ccbs) {
- blk_pointer = pci_alloc_consistent(adapter->pci_device,
- blk_size, &blkp);
+ blk_pointer = dma_alloc_coherent(&adapter->pci_device->dev,
+ blk_size, &blkp, GFP_KERNEL);
if (blk_pointer == NULL)
break;
blogic_init_ccbs(adapter, blk_pointer, blk_size, blkp);
@@ -318,8 +319,8 @@ static void blogic_dealloc_ccb(struct blogic_ccb *ccb, int dma_unmap)
if (ccb->command != NULL)
scsi_dma_unmap(ccb->command);
if (dma_unmap)
- pci_unmap_single(adapter->pci_device, ccb->sensedata,
- ccb->sense_datalen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->pci_device->dev, ccb->sensedata,
+ ccb->sense_datalen, DMA_FROM_DEVICE);
ccb->command = NULL;
ccb->status = BLOGIC_CCB_FREE;
@@ -712,7 +713,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -895,7 +896,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -952,7 +953,7 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
if (pci_enable_device(pci_device))
continue;
- if (pci_set_dma_mask(pci_device, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pci_device->dev, DMA_BIT_MASK(32)))
continue;
bus = pci_device->bus->number;
@@ -2040,7 +2041,7 @@ static void blogic_relres(struct blogic_adapter *adapter)
Release any allocated memory structs not released elsewhere
*/
if (adapter->mbox_space)
- pci_free_consistent(adapter->pci_device, adapter->mbox_sz,
+ dma_free_coherent(&adapter->pci_device->dev, adapter->mbox_sz,
adapter->mbox_space, adapter->mbox_space_handle);
pci_dev_put(adapter->pci_device);
adapter->mbox_space = NULL;
@@ -2092,8 +2093,9 @@ static bool blogic_initadapter(struct blogic_adapter *adapter)
Initialize the Outgoing and Incoming Mailbox pointers.
*/
adapter->mbox_sz = adapter->mbox_count * (sizeof(struct blogic_outbox) + sizeof(struct blogic_inbox));
- adapter->mbox_space = pci_alloc_consistent(adapter->pci_device,
- adapter->mbox_sz, &adapter->mbox_space_handle);
+ adapter->mbox_space = dma_alloc_coherent(&adapter->pci_device->dev,
+ adapter->mbox_sz, &adapter->mbox_space_handle,
+ GFP_KERNEL);
if (adapter->mbox_space == NULL)
return blogic_failure(adapter, "MAILBOX ALLOCATION");
adapter->first_outbox = (struct blogic_outbox *) adapter->mbox_space;
@@ -3183,9 +3185,9 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
memcpy(ccb->cdb, cdb, cdblen);
ccb->sense_datalen = SCSI_SENSE_BUFFERSIZE;
ccb->command = command;
- sense_buf = pci_map_single(adapter->pci_device,
+ sense_buf = dma_map_single(&adapter->pci_device->dev,
command->sense_buffer, ccb->sense_datalen,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&adapter->pci_device->dev, sense_buf)) {
blogic_err("DMA mapping for sense data buffer failed\n",
adapter);
diff --git a/drivers/scsi/FlashPoint.c b/drivers/scsi/FlashPoint.c
index 867b864f5047..0f17bd51088a 100644
--- a/drivers/scsi/FlashPoint.c
+++ b/drivers/scsi/FlashPoint.c
@@ -2944,7 +2944,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
}
if (currSCCB->Lun == 0x00) {
- if ((currSCCB->Sccb_scsistat == SELECT_SN_ST)) {
+ if (currSCCB->Sccb_scsistat == SELECT_SN_ST) {
currTar_Info->TarStatus |=
(unsigned char)SYNC_SUPPORTED;
@@ -2953,8 +2953,8 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
~EE_SYNC_MASK;
}
- else if ((currSCCB->Sccb_scsistat ==
- SELECT_WN_ST)) {
+ else if (currSCCB->Sccb_scsistat ==
+ SELECT_WN_ST) {
currTar_Info->TarStatus =
(currTar_Info->
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 7c097006c54d..70988c381268 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -42,6 +42,9 @@ config SCSI_DMA
bool
default n
+config SCSI_ESP_PIO
+ bool
+
config SCSI_NETLINK
bool
default n
@@ -557,6 +560,36 @@ config SCSI_FLASHPOINT
substantial, so users of MultiMaster Host Adapters may not
wish to include it.
+config SCSI_MYRB
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (Block Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ older, block-based interface.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrb.
+
+config SCSI_MYRS
+ tristate "Mylex DAC960/DAC1100 PCI RAID Controller (SCSI Interface)"
+ depends on PCI
+ select RAID_ATTRS
+ help
+ This driver adds support for the Mylex DAC960, AcceleRAID, and
+ eXtremeRAID PCI RAID controllers. This driver supports the
+ newer, SCSI-based interface only.
+ This driver is a reimplementation of the original DAC960
+ driver. If you have used the DAC960 driver you should enable
+ this module.
+
+ To compile this driver as a module, choose M here: the
+ module will be called myrs.
+
config VMWARE_PVSCSI
tristate "VMware PVSCSI driver support"
depends on PCI && SCSI && X86
@@ -1332,6 +1365,7 @@ config SCSI_ZORRO_ESP
tristate "Zorro ESP SCSI support"
depends on ZORRO && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
Support for various NCR53C9x (ESP) based SCSI controllers on Zorro
expansion boards for the Amiga.
@@ -1374,6 +1408,7 @@ config SCSI_MAC_ESP
tristate "Macintosh NCR53c9[46] SCSI"
depends on MAC && SCSI
select SCSI_SPI_ATTRS
+ select SCSI_ESP_PIO
help
This is the NCR 53c9x SCSI controller found on most of the 68040
based Macintoshes.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 6d71b2a9592b..fcb41ae329c4 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,6 +106,8 @@ obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
obj-$(CONFIG_SCSI_MESH) += mesh.o
obj-$(CONFIG_SCSI_MAC53C94) += mac53c94.o
+obj-$(CONFIG_SCSI_MYRB) += myrb.o
+obj-$(CONFIG_SCSI_MYRS) += myrs.o
obj-$(CONFIG_BLK_DEV_3W_XXXX_RAID) += 3w-xxxx.o
obj-$(CONFIG_SCSI_3W_9XXX) += 3w-9xxx.o
obj-$(CONFIG_SCSI_3W_SAS) += 3w-sas.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 90ea0f5d9bdb..8429c855701f 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -131,6 +131,7 @@
static int do_abort(struct Scsi_Host *);
static void do_reset(struct Scsi_Host *);
+static void bus_reset_cleanup(struct Scsi_Host *);
/**
* initialize_SCp - init the scsi pointer field
@@ -513,16 +514,15 @@ static void complete_cmd(struct Scsi_Host *instance,
if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
- if ((cmd->result & 0xff) != SAM_STAT_GOOD) {
+ if (status_byte(cmd->result) != GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
- set_host_byte(cmd, DID_ERROR);
- } else
+ } else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
+ set_driver_byte(cmd, DRIVER_SENSE);
+ }
hostdata->sensing = NULL;
}
- hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
-
cmd->scsi_done(cmd);
}
@@ -884,7 +884,14 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
/* Probably Bus Reset */
NCR5380_read(RESET_PARITY_INTERRUPT_REG);
- dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ if (sr & SR_RST) {
+ /* Certainly Bus Reset */
+ shost_printk(KERN_WARNING, instance,
+ "bus reset interrupt\n");
+ bus_reset_cleanup(instance);
+ } else {
+ dsprintk(NDEBUG_INTR, instance, "unknown interrupt\n");
+ }
#ifdef SUN3_SCSI_VME
dregs->csr |= CSR_DMA_ENABLE;
#endif
@@ -902,20 +909,16 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-/*
- * Function : int NCR5380_select(struct Scsi_Host *instance,
- * struct scsi_cmnd *cmd)
- *
- * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command,
- * including ARBITRATION, SELECTION, and initial message out for
- * IDENTIFY and queue messages.
+/**
+ * NCR5380_select - attempt arbitration and selection for a given command
+ * @instance: the Scsi_Host instance
+ * @cmd: the scsi_cmnd to execute
*
- * Inputs : instance - instantiation of the 5380 driver on which this
- * target lives, cmd - SCSI command to execute.
+ * This routine establishes an I_T_L nexus for a SCSI command. This involves
+ * ARBITRATION, SELECTION and MESSAGE OUT phases and an IDENTIFY message.
*
- * Returns cmd if selection failed but should be retried,
- * NULL if selection failed and should not be retried, or
- * NULL if selection succeeded (hostdata->connected == cmd).
+ * Returns true if the operation should be retried, false otherwise.
*
* Side effects :
* If bus busy, arbitration failed, etc, NCR5380_select() will exit
@@ -923,16 +926,15 @@ static irqreturn_t __maybe_unused NCR5380_intr(int irq, void *dev_id)
* SELECT_ENABLE will be set appropriately, the NCR5380
* will cease to drive any SCSI bus signals.
*
- * If successful : I_T_L or I_T_L_Q nexus will be established,
- * instance->connected will be set to cmd.
+ * If successful : the I_T_L nexus will be established, and
+ * hostdata->connected will be set to cmd.
* SELECT interrupt will be disabled.
*
* If failed (no target) : cmd->scsi_done() will be called, and the
* cmd->result host byte set to DID_BAD_TARGET.
*/
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
- struct scsi_cmnd *cmd)
+static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
__releases(&hostdata->lock) __acquires(&hostdata->lock)
{
struct NCR5380_hostdata *hostdata = shost_priv(instance);
@@ -940,6 +942,9 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
unsigned char *data;
int len;
int err;
+ bool ret = true;
+ bool can_disconnect = instance->irq != NO_IRQ &&
+ cmd->cmnd[0] != REQUEST_SENSE;
NCR5380_dprint(NDEBUG_ARBITRATION, instance);
dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -948,7 +953,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
/*
* Arbitration and selection phases are slow and involve dropping the
* lock, so we have to watch out for EH. An exception handler may
- * change 'selecting' to NULL. This function will then return NULL
+ * change 'selecting' to NULL. This function will then return false
* so that the caller will forget about 'cmd'. (During information
* transfer phases, EH may change 'connected' to NULL.)
*/
@@ -984,7 +989,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
/* Command was aborted */
NCR5380_write(MODE_REG, MR_BASE);
- goto out;
+ return false;
}
if (err < 0) {
NCR5380_write(MODE_REG, MR_BASE);
@@ -1033,7 +1038,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
if (!hostdata->selecting) {
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
- goto out;
+ return false;
}
dsprintk(NDEBUG_ARBITRATION, instance, "won arbitration\n");
@@ -1116,13 +1121,16 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
spin_lock_irq(&hostdata->lock);
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
+
/* Can't touch cmd if it has been reclaimed by the scsi ML */
- if (hostdata->selecting) {
- cmd->result = DID_BAD_TARGET << 16;
- complete_cmd(instance, cmd);
- dsprintk(NDEBUG_SELECTION, instance, "target did not respond within 250ms\n");
- cmd = NULL;
- }
+ if (!hostdata->selecting)
+ return false;
+
+ cmd->result = DID_BAD_TARGET << 16;
+ complete_cmd(instance, cmd);
+ dsprintk(NDEBUG_SELECTION, instance,
+ "target did not respond within 250ms\n");
+ ret = false;
goto out;
}
@@ -1155,12 +1163,12 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
}
if (!hostdata->selecting) {
do_abort(instance);
- goto out;
+ return false;
}
dsprintk(NDEBUG_SELECTION, instance, "target %d selected, going into MESSAGE OUT phase.\n",
scmd_id(cmd));
- tmp[0] = IDENTIFY(((instance->irq == NO_IRQ) ? 0 : 1), cmd->device->lun);
+ tmp[0] = IDENTIFY(can_disconnect, cmd->device->lun);
len = 1;
data = tmp;
@@ -1171,7 +1179,7 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
dsprintk(NDEBUG_SELECTION, instance, "IDENTIFY message transfer failed\n");
- cmd = NULL;
+ ret = false;
goto out;
}
@@ -1186,13 +1194,13 @@ static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *instance,
initialize_SCp(cmd);
- cmd = NULL;
+ ret = false;
out:
if (!hostdata->selecting)
return false;
hostdata->selecting = NULL;
- return cmd;
+ return ret;
}
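With the bool contract above, callers retry only on a true return; a
minimal caller sketch (illustrative only, assuming a requeue helper
such as the driver's requeue_cmd()):

	if (!NCR5380_select(instance, cmd)) {
		/* cmd completed, was reclaimed by EH, or is now connected */
		return;
	}
	/* lost arbitration or selection failed: put cmd back for retry */
	requeue_cmd(hostdata, cmd);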
/*
@@ -1711,6 +1719,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
return;
#endif
case PHASE_DATAIN:
@@ -1793,6 +1802,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
cmd, scmd_id(cmd), cmd->device->lun);
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result &= ~0xffff;
cmd->result |= cmd->SCp.Status;
@@ -1951,6 +1961,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
NCR5380_transfer_pio(instance, &phase, &len, &data);
if (msgout == ABORT) {
hostdata->connected = NULL;
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
cmd->result = DID_ERROR << 16;
complete_cmd(instance, cmd);
maybe_release_dma_irq(instance);
@@ -2014,8 +2025,11 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(MODE_REG, MR_BASE);
target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-
- dsprintk(NDEBUG_RESELECTION, instance, "reselect\n");
+ if (!target_mask || target_mask & (target_mask - 1)) {
+ shost_printk(KERN_WARNING, instance,
+ "reselect: bad target_mask 0x%02x\n", target_mask);
+ return;
+ }
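The added sanity check requires exactly one bit set in target_mask:
x & (x - 1) clears the lowest set bit, so it is nonzero precisely when
two or more bits are set. As a standalone sketch of the same test:

	/* True iff exactly one bit of x is set (the check used above). */
	static inline bool has_single_bit(u8 x)
	{
		return x != 0 && (x & (x - 1)) == 0;
	}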
/*
* At this point, we have detected that our SCSI ID is on the bus,
@@ -2029,6 +2043,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY);
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_SEL, 0, 2 * HZ) < 0) {
+ shost_printk(KERN_ERR, instance, "reselect: !SEL timeout\n");
NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
return;
}
@@ -2040,6 +2055,10 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
if (NCR5380_poll_politely(hostdata,
STATUS_REG, SR_REQ, SR_REQ, 2 * HZ) < 0) {
+ if ((NCR5380_read(STATUS_REG) & (SR_BSY | SR_SEL)) == 0)
+ /* BUS FREE phase */
+ return;
+ shost_printk(KERN_ERR, instance, "reselect: REQ timeout\n");
do_abort(instance);
return;
}
@@ -2101,13 +2120,16 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
dsprintk(NDEBUG_RESELECTION | NDEBUG_QUEUES, instance,
"reselect: removed %p from disconnected queue\n", tmp);
} else {
+ int target = ffs(target_mask) - 1;
+
shost_printk(KERN_ERR, instance, "target bitmask 0x%02x lun %d not in disconnected queue.\n",
target_mask, lun);
/*
* Since we have an established nexus that we can't do anything
* with, we must abort it.
*/
- do_abort(instance);
+ if (do_abort(instance) == 0)
+ hostdata->busy[target] &= ~(1 << lun);
return;
}
@@ -2272,15 +2294,16 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
if (list_del_cmd(&hostdata->autosense, cmd)) {
dsprintk(NDEBUG_ABORT, instance,
"abort: removed %p from sense queue\n", cmd);
- set_host_byte(cmd, DID_ERROR);
complete_cmd(instance, cmd);
}
out:
if (result == FAILED)
dsprintk(NDEBUG_ABORT, instance, "abort: failed to abort %p\n", cmd);
- else
+ else {
+ hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);
dsprintk(NDEBUG_ABORT, instance, "abort: successfully aborted %p\n", cmd);
+ }
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
@@ -2290,31 +2313,12 @@ out:
}
-/**
- * NCR5380_host_reset - reset the SCSI host
- * @cmd: SCSI command undergoing EH
- *
- * Returns SUCCESS
- */
-
-static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+static void bus_reset_cleanup(struct Scsi_Host *instance)
{
- struct Scsi_Host *instance = cmd->device->host;
struct NCR5380_hostdata *hostdata = shost_priv(instance);
int i;
- unsigned long flags;
struct NCR5380_cmd *ncmd;
- spin_lock_irqsave(&hostdata->lock, flags);
-
-#if (NDEBUG & NDEBUG_ANY)
- scmd_printk(KERN_INFO, cmd, __func__);
-#endif
- NCR5380_dprint(NDEBUG_ANY, instance);
- NCR5380_dprint_phase(NDEBUG_ANY, instance);
-
- do_reset(instance);
-
/* reset NCR registers */
NCR5380_write(MODE_REG, MR_BASE);
NCR5380_write(TARGET_COMMAND_REG, 0);
@@ -2326,11 +2330,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
* commands!
*/
- if (list_del_cmd(&hostdata->unissued, cmd)) {
- cmd->result = DID_RESET << 16;
- cmd->scsi_done(cmd);
- }
-
if (hostdata->selecting) {
hostdata->selecting->result = DID_RESET << 16;
complete_cmd(instance, hostdata->selecting);
@@ -2348,7 +2347,6 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
list_for_each_entry(ncmd, &hostdata->autosense, list) {
struct scsi_cmnd *cmd = NCR5380_to_scmd(ncmd);
- set_host_byte(cmd, DID_RESET);
cmd->scsi_done(cmd);
}
INIT_LIST_HEAD(&hostdata->autosense);
@@ -2365,6 +2363,41 @@ static int NCR5380_host_reset(struct scsi_cmnd *cmd)
queue_work(hostdata->work_q, &hostdata->main_task);
maybe_release_dma_irq(instance);
+}
+
+/**
+ * NCR5380_host_reset - reset the SCSI host
+ * @cmd: SCSI command undergoing EH
+ *
+ * Returns SUCCESS
+ */
+
+static int NCR5380_host_reset(struct scsi_cmnd *cmd)
+{
+ struct Scsi_Host *instance = cmd->device->host;
+ struct NCR5380_hostdata *hostdata = shost_priv(instance);
+ unsigned long flags;
+ struct NCR5380_cmd *ncmd;
+
+ spin_lock_irqsave(&hostdata->lock, flags);
+
+#if (NDEBUG & NDEBUG_ANY)
+ shost_printk(KERN_INFO, instance, __func__);
+#endif
+ NCR5380_dprint(NDEBUG_ANY, instance);
+ NCR5380_dprint_phase(NDEBUG_ANY, instance);
+
+ list_for_each_entry(ncmd, &hostdata->unissued, list) {
+ struct scsi_cmnd *scmd = NCR5380_to_scmd(ncmd);
+
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ INIT_LIST_HEAD(&hostdata->unissued);
+
+ do_reset(instance);
+ bus_reset_cleanup(instance);
+
spin_unlock_irqrestore(&hostdata->lock, flags);
return SUCCESS;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 31096a0b0fdd..efca509b92b0 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -275,7 +275,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
static void NCR5380_main(struct work_struct *work);
static const char *NCR5380_info(struct Scsi_Host *instance);
static void NCR5380_reselect(struct Scsi_Host *instance);
-static struct scsi_cmnd *NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
+static bool NCR5380_select(struct Scsi_Host *, struct scsi_cmnd *);
static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase, int *count, unsigned char **data);
static int NCR5380_poll_politely2(struct NCR5380_hostdata *,
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 23b17621b6d2..00072ed9540b 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1094,7 +1094,7 @@ static int inia100_probe_one(struct pci_dev *pdev,
if (pci_enable_device(pdev))
goto out;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "Unable to set 32bit DMA "
"on inia100 adapter, ignoring.\n");
goto out_disable_device;
@@ -1124,7 +1124,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
- host->scb_virt = pci_zalloc_consistent(pdev, sz, &host->scb_phys);
+ host->scb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->scb_phys,
+ GFP_KERNEL);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
@@ -1132,7 +1133,8 @@ static int inia100_probe_one(struct pci_dev *pdev,
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
- host->escb_virt = pci_zalloc_consistent(pdev, sz, &host->escb_phys);
+ host->escb_virt = dma_zalloc_coherent(&pdev->dev, sz, &host->escb_phys,
+ GFP_KERNEL);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
@@ -1177,10 +1179,12 @@ static int inia100_probe_one(struct pci_dev *pdev,
out_free_irq:
free_irq(shost->irq, shost);
out_free_escb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
out_free_scb_array:
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
out_host_put:
scsi_host_put(shost);
@@ -1200,9 +1204,11 @@ static void inia100_remove_one(struct pci_dev *pdev)
scsi_remove_host(shost);
free_irq(shost->irq, shost);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
- pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
+ dma_free_coherent(&pdev->dev,
+ ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
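The pci_*_consistent() to dma_*_coherent() conversions in this series
all follow the same pairing; a minimal sketch of the generic pattern
(assumed context: a struct device *dev and a buffer size sz):

	dma_addr_t handle;
	void *va = dma_alloc_coherent(dev, sz, &handle, GFP_KERNEL);

	if (!va)
		return -ENOMEM;
	/* ... the device uses 'handle', the CPU uses 'va' ... */
	dma_free_coherent(dev, sz, va, handle);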
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 6e356325d8d9..bd7f352c28f3 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -3480,7 +3480,6 @@ int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
static void aac_srb_callback(void *context, struct fib * fibptr)
{
- struct aac_dev *dev;
struct aac_srb_reply *srbreply;
struct scsi_cmnd *scsicmd;
@@ -3491,8 +3490,6 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
BUG_ON(fibptr == NULL);
- dev = fibptr->dev;
-
srbreply = (struct aac_srb_reply *) fib_data(fibptr);
scsicmd->sense_buffer[0] = '\0'; /* Initialize sense valid flag to false */
@@ -3921,13 +3918,11 @@ static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr = 0;
@@ -3963,14 +3958,12 @@ static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
- struct aac_dev *dev;
unsigned long byte_count = 0;
u64 addr;
int nseg;
struct scatterlist *sg;
int i;
- dev = (struct aac_dev *)scsicmd->device->host->hostdata;
// Get rid of old data
psg->count = 0;
psg->sg[0].addr[0] = 0;
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022a823d..1e77d96a18f2 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -2586,9 +2586,7 @@ int aac_acquire_irq(struct aac_dev *dev)
void aac_free_irq(struct aac_dev *dev)
{
int i;
- int cpu;
- cpu = cpumask_first(cpu_online_mask);
if (aac_is_src(dev)) {
if (dev->max_msix > 1) {
for (i = 0; i < dev->max_msix; i++)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 04443577d48b..2d4e4ddc5ace 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1747,7 +1747,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_sectors = (shost->sg_tablesize * 8) + 112;
}
- error = pci_set_dma_max_seg_size(pdev,
+ error = dma_set_max_seg_size(&pdev->dev,
(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
(shost->max_sectors << 9) : 65536);
if (error)
@@ -2055,8 +2055,6 @@ static void aac_pci_resume(struct pci_dev *pdev)
struct scsi_device *sdev = NULL;
struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (aac_adapter_ioremap(aac, aac->base_size)) {
dev_err(&pdev->dev, "aacraid: ioremap failed\n");
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 713f69033f20..223ef6f4e258 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -5949,7 +5949,6 @@ static void adv_async_callback(ADV_DVC_VAR *adv_dvc_varp, uchar code)
static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
{
struct asc_board *boardp = adv_dvc_varp->drv_ptr;
- u32 srb_tag;
adv_req_t *reqp;
adv_sgblk_t *sgblkp;
struct scsi_cmnd *scp;
@@ -5965,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
* completed. The adv_req_t structure actually contains the
* completed ADV_SCSI_REQ_Q structure.
*/
- srb_tag = le32_to_cpu(scsiqp->srb_tag);
scp = scsi_host_find_tag(boardp->shost, scsiqp->srb_tag);
ASC_DBG(1, "scp 0x%p\n", scp);
@@ -6448,7 +6446,7 @@ static void AscIsrChipHalted(ASC_DVC_VAR *asc_dvc)
sdtr_data =
AscCalSDTRData(asc_dvc, ext_msg.xfer_period,
ext_msg.req_ack_offset);
- if ((sdtr_data == 0xFF)) {
+ if (sdtr_data == 0xFF) {
q_cntl |= QC_MSG_OUT;
asc_dvc->init_sdtr &= ~target_id;
diff --git a/drivers/scsi/aic7xxx/aic7770.c b/drivers/scsi/aic7xxx/aic7770.c
index 5000bd69c13f..176704b24e6a 100644
--- a/drivers/scsi/aic7xxx/aic7770.c
+++ b/drivers/scsi/aic7xxx/aic7770.c
@@ -42,15 +42,9 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
#define ID_AIC7770 0x04907770
#define ID_AHA_274x 0x04907771
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index 31f2bb9d7146..9a515551641c 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -607,9 +607,6 @@ struct scb {
ahd_io_ctx_t io_ctx;
struct ahd_softc *ahd_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct map_node *hscb_map;
struct map_node *sg_map;
@@ -1056,9 +1053,6 @@ struct ahd_completion
struct ahd_softc {
bus_space_tag_t tags[2];
bus_space_handle_t bshs[2];
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 2d82ec85753e..9ee75c9a9aa1 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -40,16 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
-
/***************************** Lookup Tables **********************************/
static const char *const ahd_chip_names[] =
@@ -59,7 +52,6 @@ static const char *const ahd_chip_names[] =
"aic7902",
"aic7901A"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
/*
* Hardware error codes.
@@ -6172,17 +6164,11 @@ ahd_free(struct ahd_softc *ahd)
case 2:
ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
-#endif
ahd_platform_free(ahd);
ahd_fini_scbdata(ahd);
for (i = 0; i < AHD_NUM_TARGETS; i++) {
@@ -6934,9 +6920,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
u_int col_tag;
-#ifndef __linux__
- int error;
-#endif
next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC);
if (next_scb == NULL)
@@ -6970,15 +6953,6 @@ ahd_alloc_scbs(struct ahd_softc *ahd)
next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg);
next_scb->ahd_softc = ahd;
next_scb->flags = SCB_FLAG_NONE;
-#ifndef __linux__
- error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0) {
- kfree(next_scb);
- kfree(pdata);
- break;
- }
-#endif
next_scb->hscb->tag = ahd_htole16(scb_data->numscbs);
col_tag = scb_data->numscbs ^ 0x100;
next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag);
@@ -7091,24 +7065,6 @@ ahd_init(struct ahd_softc *ahd)
if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0)
ahd->features &= ~AHD_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHD_NSEG,
- /*maxsegsz*/AHD_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahd->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahd->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
index cc9bd26f5d1a..8397ae93f7dd 100644
--- a/drivers/scsi/aic7xxx/aic79xx_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
@@ -41,14 +41,8 @@
* $Id: //depot/aic7xxx/aic7xxx/aic79xx_pci.c#92 $
*/
-#ifdef __linux__
#include "aic79xx_osm.h"
#include "aic79xx_inline.h"
-#else
-#include <dev/aic7xxx/aic79xx_osm.h>
-#include <dev/aic7xxx/aic79xx_inline.h>
-#endif
-
#include "aic79xx_pci.h"
static inline uint64_t
@@ -294,13 +288,11 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
int
ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
{
- struct scb_data *shared_scb_data;
u_int command;
uint32_t devconfig;
uint16_t subvendor;
int error;
- shared_scb_data = NULL;
ahd->description = entry->name;
/*
* Record if this is an HP board.
diff --git a/drivers/scsi/aic7xxx/aic7xxx.h b/drivers/scsi/aic7xxx/aic7xxx.h
index 4ce4e903a759..5614921b4041 100644
--- a/drivers/scsi/aic7xxx/aic7xxx.h
+++ b/drivers/scsi/aic7xxx/aic7xxx.h
@@ -568,9 +568,6 @@ struct scb {
ahc_io_ctx_t io_ctx;
struct ahc_softc *ahc_softc;
scb_flag flags;
-#ifndef __linux__
- bus_dmamap_t dmamap;
-#endif
struct scb_platform_data *platform_data;
struct sg_map_node *sg_map;
struct ahc_dma_seg *sg_list;
@@ -906,9 +903,6 @@ typedef void ahc_callback_t (void *);
struct ahc_softc {
bus_space_tag_t tag;
bus_space_handle_t bsh;
-#ifndef __linux__
- bus_dma_tag_t buffer_dmat; /* dmat for buffer I/O */
-#endif
struct scb_data *scb_data;
struct scb *next_queued_scb;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index 9e85a7ef9c8e..cc9e41967ce4 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -64,15 +64,9 @@
* bit to be sent from the chip.
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
/*
* Right now, we only have to read the SEEPROM. But we make it easier to
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 915a34f141e4..f3362f4ab16e 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -40,15 +40,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx.c#155 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aicasm/aicasm_insformat.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aicasm/aicasm_insformat.h>
-#endif
/***************************** Lookup Tables **********************************/
static const char *const ahc_chip_names[] = {
@@ -67,7 +61,6 @@ static const char *const ahc_chip_names[] = {
"aic7892",
"aic7899"
};
-static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
/*
* Hardware error codes.
@@ -4509,17 +4502,11 @@ ahc_free(struct ahc_softc *ahc)
case 2:
ahc_dma_tag_destroy(ahc, ahc->shared_data_dmat);
case 1:
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->buffer_dmat);
-#endif
break;
case 0:
break;
}
-#ifndef __linux__
- ahc_dma_tag_destroy(ahc, ahc->parent_dmat);
-#endif
ahc_platform_free(ahc);
ahc_fini_scbdata(ahc);
for (i = 0; i < AHC_NUM_TARGETS; i++) {
@@ -5005,9 +4992,7 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
newcount = min(newcount, (AHC_SCB_MAX_ALLOC - scb_data->numscbs));
for (i = 0; i < newcount; i++) {
struct scb_platform_data *pdata;
-#ifndef __linux__
- int error;
-#endif
+
pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC);
if (pdata == NULL)
break;
@@ -5021,12 +5006,6 @@ ahc_alloc_scbs(struct ahc_softc *ahc)
next_scb->sg_list_phys = physaddr + sizeof(struct ahc_dma_seg);
next_scb->ahc_softc = ahc;
next_scb->flags = SCB_FREE;
-#ifndef __linux__
- error = ahc_dmamap_create(ahc, ahc->buffer_dmat, /*flags*/0,
- &next_scb->dmamap);
- if (error != 0)
- break;
-#endif
next_scb->hscb = &scb_data->hscbs[scb_data->numscbs];
next_scb->hscb->tag = ahc->scb_data->numscbs;
SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs,
@@ -5325,24 +5304,6 @@ ahc_init(struct ahc_softc *ahc)
if ((AHC_TMODE_ENABLE & (0x1 << ahc->unit)) == 0)
ahc->features &= ~AHC_TARGETMODE;
-#ifndef __linux__
- /* DMA tag for mapping buffers into device visible space. */
- if (ahc_dma_tag_create(ahc, ahc->parent_dmat, /*alignment*/1,
- /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1,
- /*lowaddr*/ahc->flags & AHC_39BIT_ADDRESSING
- ? (dma_addr_t)0x7FFFFFFFFFULL
- : BUS_SPACE_MAXADDR_32BIT,
- /*highaddr*/BUS_SPACE_MAXADDR,
- /*filter*/NULL, /*filterarg*/NULL,
- /*maxsize*/(AHC_NSEG - 1) * PAGE_SIZE,
- /*nsegments*/AHC_NSEG,
- /*maxsegsz*/AHC_MAXTRANSFER_SIZE,
- /*flags*/BUS_DMA_ALLOCNOW,
- &ahc->buffer_dmat) != 0) {
- return (ENOMEM);
- }
-#endif
-
ahc->init_level++;
/*
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c
index 673e826d7adb..656f680c7802 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c
@@ -42,16 +42,9 @@
* $Id: //depot/aic7xxx/aic7xxx/aic7xxx_pci.c#79 $
*/
-#ifdef __linux__
#include "aic7xxx_osm.h"
#include "aic7xxx_inline.h"
#include "aic7xxx_93cx6.h"
-#else
-#include <dev/aic7xxx/aic7xxx_osm.h>
-#include <dev/aic7xxx/aic7xxx_inline.h>
-#include <dev/aic7xxx/aic7xxx_93cx6.h>
-#endif
-
#include "aic7xxx_pci.h"
static inline uint64_t
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.h b/drivers/scsi/aic7xxx/aicasm/aicasm.h
index 51678dd46ff7..716a2aefc925 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#ifndef TRUE
#define TRUE 1
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
index f1586a437906..924d55a8acbf 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
index 708326df0766..8c0479865f04 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_gram.y
@@ -52,11 +52,7 @@
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
index c0457b8c3b77..98e9959c6907 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_macro_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
index 93c8667cd704..c78d4f68eea5 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
@@ -51,11 +51,7 @@
#include <stdio.h>
#include <string.h>
#include <sysexits.h>
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
#include "aicasm.h"
#include "aicasm_symbol.h"
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
index 232aff1fe784..975fcfcc0d8f 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
@@ -44,11 +44,7 @@
#include <sys/types.h>
-#ifdef __linux__
#include "aicdb.h"
-#else
-#include <db.h>
-#endif
#include <fcntl.h>
#include <inttypes.h>
#include <regex.h>
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
index 34bbcad7f83f..7bf7fd5953ac 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
@@ -42,11 +42,7 @@
* $FreeBSD$
*/
-#ifdef __linux__
#include "../queue.h"
-#else
-#include <sys/queue.h>
-#endif
typedef enum {
UNINITIALIZED,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f35918..41c4d8abdd4a 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -771,13 +771,8 @@ static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto Err_remove;
err = -ENODEV;
- if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
- ;
- else if (!pci_set_dma_mask(dev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)))
- ;
- else {
+ if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
asd_printk("no suitable DMA mask for %s\n", pci_name(dev));
goto Err_remove;
}
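This is the usual probe-time fallback idiom: request 64-bit DMA first
and fall back to 32-bit, failing the probe if neither mask is usable.
A minimal sketch, assuming a struct pci_dev *pdev in a probe routine:

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;	/* no usable DMA addressing */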
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 22873ce8bbfa..91ea87dfb700 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -724,9 +724,11 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask &= ~SAS_SPEED_60_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SAS_SPEED_15_DIS;
}
@@ -734,6 +736,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->min_sas_lrate) {
case SAS_LINK_RATE_6_0_GBPS:
*speed_mask |= SAS_SPEED_30_DIS;
+ /* fall through */
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask |= SAS_SPEED_15_DIS;
default:
@@ -745,6 +748,7 @@ static void set_speed_mask(u8 *speed_mask, struct asd_phy_desc *pd)
switch (pd->max_sata_lrate) {
case SAS_LINK_RATE_3_0_GBPS:
*speed_mask &= ~SATA_SPEED_30_DIS;
+ /* fall through */
default:
case SAS_LINK_RATE_1_5_GBPS:
*speed_mask &= ~SATA_SPEED_15_DIS;
@@ -803,6 +807,7 @@ void asd_build_control_phy(struct asd_ascb *ascb, int phy_id, u8 subfunc)
/* link reset retries, this should be nominal */
control_phy->link_reset_retries = 10;
+ /* fall through */
case RELEASE_SPINUP_HOLD: /* 0x02 */
/* decide the func_mask */
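The annotations mark a cumulative fall-through idiom: each higher link
rate also clears the disable bits for every lower rate. A sketch with
hypothetical RATE_x and DIS_x names (not the driver's identifiers):

	switch (max_rate) {
	case RATE_6_0:
		mask &= ~DIS_60;
		/* fall through */
	case RATE_3_0:
		mask &= ~DIS_30;
		/* fall through */
	case RATE_1_5:
		mask &= ~DIS_15;
	}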
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index cdd4ab683be9..7fea344531f6 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -42,13 +42,13 @@ static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num)
spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags);
}
-/* PCI_DMA_... to our direction translation.
+/* DMA_... to our direction translation.
*/
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static int asd_map_scatterlist(struct sas_task *task,
@@ -60,12 +60,12 @@ static int asd_map_scatterlist(struct sas_task *task,
struct scatterlist *sc;
int num_sg, res;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return 0;
if (task->num_scatter == 0) {
void *p = task->scatter;
- dma_addr_t dma = pci_map_single(asd_ha->pcidev, p,
+ dma_addr_t dma = dma_map_single(&asd_ha->pcidev->dev, p,
task->total_xfer_len,
task->data_dir);
sg_arr[0].bus_addr = cpu_to_le64((u64)dma);
@@ -79,7 +79,7 @@ static int asd_map_scatterlist(struct sas_task *task,
if (sas_protocol_ata(task->task_proto))
num_sg = task->num_scatter;
else
- num_sg = pci_map_sg(asd_ha->pcidev, task->scatter,
+ num_sg = dma_map_sg(&asd_ha->pcidev->dev, task->scatter,
task->num_scatter, task->data_dir);
if (num_sg == 0)
return -ENOMEM;
@@ -126,8 +126,8 @@ static int asd_map_scatterlist(struct sas_task *task,
return 0;
err_unmap:
if (sas_protocol_ata(task->task_proto))
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
return res;
}
@@ -136,21 +136,21 @@ static void asd_unmap_scatterlist(struct asd_ascb *ascb)
struct asd_ha_struct *asd_ha = ascb->ha;
struct sas_task *task = ascb->uldd_task;
- if (task->data_dir == PCI_DMA_NONE)
+ if (task->data_dir == DMA_NONE)
return;
if (task->num_scatter == 0) {
dma_addr_t dma = (dma_addr_t)
le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr);
- pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len,
- task->data_dir);
+ dma_unmap_single(&ascb->ha->pcidev->dev, dma,
+ task->total_xfer_len, task->data_dir);
return;
}
asd_free_coherent(asd_ha, ascb->sg_arr);
if (task->task_proto != SAS_PROTOCOL_STP)
- pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter,
- task->data_dir);
+ dma_unmap_sg(&asd_ha->pcidev->dev, task->scatter,
+ task->num_scatter, task->data_dir);
}
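Both paths pair each dma_map_sg() with a matching dma_unmap_sg() on the
same device, scatterlist, entry count and direction; a minimal sketch
of the pattern (assumed context: struct device *dev, struct scatterlist
*sgl, int nents):

	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (!mapped)
		return -ENOMEM;	/* dma_map_sg() returns 0 on failure */
	/* ... program the hardware with the mapped segments ... */
	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);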
/* ---------- Task complete tasklet ---------- */
@@ -436,10 +436,10 @@ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task,
struct domain_device *dev = task->dev;
struct scb *scb;
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_map_sg(&asd_ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
scb = ascb->scb;
@@ -471,10 +471,10 @@ static void asd_unbuild_smp_ascb(struct asd_ascb *a)
struct sas_task *task = a->uldd_task;
BUG_ON(!task);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
- pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_req, 1,
+ DMA_TO_DEVICE);
+ dma_unmap_sg(&a->ha->pcidev->dev, &task->smp_task.smp_resp, 1,
+ DMA_FROM_DEVICE);
}
/* ---------- SSP ---------- */
diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c
index d81ca66e24d6..27c0a4a937d9 100644
--- a/drivers/scsi/am53c974.c
+++ b/drivers/scsi/am53c974.c
@@ -96,9 +96,7 @@ static void pci_esp_dma_drain(struct esp *esp);
static inline struct pci_esp_priv *pci_esp_get_priv(struct esp *esp)
{
- struct pci_dev *pdev = esp->dev;
-
- return pci_get_drvdata(pdev);
+ return dev_get_drvdata(esp->dev);
}
static void pci_esp_write8(struct esp *esp, u8 val, unsigned long reg)
@@ -116,30 +114,6 @@ static void pci_esp_write32(struct esp *esp, u32 val, unsigned long reg)
return iowrite32(val, esp->regs + (reg * 4UL));
}
-static dma_addr_t pci_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return pci_map_single(esp->dev, buf, sz, dir);
-}
-
-static int pci_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return pci_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void pci_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- pci_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void pci_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- pci_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int pci_esp_irq_pending(struct esp *esp)
{
struct pci_esp_priv *pep = pci_esp_get_priv(esp);
@@ -295,10 +269,6 @@ static u32 pci_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
static const struct esp_driver_ops pci_esp_ops = {
.esp_write8 = pci_esp_write8,
.esp_read8 = pci_esp_read8,
- .map_single = pci_esp_map_single,
- .map_sg = pci_esp_map_sg,
- .unmap_single = pci_esp_unmap_single,
- .unmap_sg = pci_esp_unmap_sg,
.irq_pending = pci_esp_irq_pending,
.reset_dma = pci_esp_reset_dma,
.dma_drain = pci_esp_dma_drain,
@@ -375,18 +345,18 @@ static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
static void dc390_check_eeprom(struct esp *esp)
{
+ struct pci_dev *pdev = to_pci_dev(esp->dev);
u8 EEbuf[128];
u16 *ptr = (u16 *)EEbuf, wval = 0;
int i;
- dc390_read_eeprom((struct pci_dev *)esp->dev, ptr);
+ dc390_read_eeprom(pdev, ptr);
for (i = 0; i < DC390_EEPROM_LEN; i++, ptr++)
wval += *ptr;
/* no Tekram EEprom found */
if (wval != 0x1234) {
- struct pci_dev *pdev = esp->dev;
dev_printk(KERN_INFO, &pdev->dev,
"No valid Tekram EEprom found\n");
return;
@@ -411,7 +381,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
return -ENODEV;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
dev_printk(KERN_INFO, &pdev->dev,
"failed to set 32bit DMA mask\n");
goto fail_disable_device;
@@ -435,7 +405,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
esp = shost_priv(shost);
esp->host = shost;
- esp->dev = pdev;
+ esp->dev = &pdev->dev;
esp->ops = &pci_esp_ops;
/*
* The am53c974 HBA has a design flaw of generating
@@ -467,8 +437,8 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
pci_set_master(pdev);
- esp->command_block = pci_alloc_consistent(pdev, 16,
- &esp->command_block_dma);
+ esp->command_block = dma_alloc_coherent(&pdev->dev, 16,
+ &esp->command_block_dma, GFP_KERNEL);
if (!esp->command_block) {
dev_printk(KERN_ERR, &pdev->dev,
"failed to allocate command block\n");
@@ -498,7 +468,7 @@ static int pci_esp_probe_one(struct pci_dev *pdev,
/* Assume 40MHz clock */
esp->cfreq = 40000000;
- err = scsi_esp_register(esp, &pdev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
@@ -508,8 +478,8 @@ fail_free_irq:
free_irq(pdev->irq, esp);
fail_unmap_command_block:
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
fail_unmap_regs:
pci_iounmap(pdev, esp->regs);
fail_release_regions:
@@ -532,8 +502,8 @@ static void pci_esp_remove_one(struct pci_dev *pdev)
scsi_esp_unregister(esp);
free_irq(pdev->irq, esp);
pci_set_drvdata(pdev, NULL);
- pci_free_consistent(pdev, 16, esp->command_block,
- esp->command_block_dma);
+ dma_free_coherent(&pdev->dev, 16, esp->command_block,
+ esp->command_block_dma);
pci_iounmap(pdev, esp->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 12316ef4c893..d4404eea24fb 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1317,13 +1317,10 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
- int id, lun;
if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
struct scsi_cmnd *abortcmd = pCCB->pcmd;
if (abortcmd) {
- id = abortcmd->device->id;
- lun = abortcmd->device->lun;
abortcmd->result |= DID_ABORT << 16;
arcmsr_ccb_complete(pCCB);
printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n",
@@ -1798,7 +1795,7 @@ static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1811,7 +1808,7 @@ static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, acb->host->host_no);
}
}
@@ -1824,7 +1821,7 @@ static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE
- "arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
+ "arcmsr%d: wait 'stop adapter background rebuild' timeout\n"
, pACB->host->host_no);
}
return;
@@ -1837,7 +1834,7 @@ static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB))
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
@@ -1850,7 +1847,7 @@ static void arcmsr_hbaE_stop_bgrb(struct AdapterControlBlock *pACB)
pACB->out_doorbell ^= ARCMSR_HBEMU_DRV2IOP_MESSAGE_CMD_DONE;
writel(pACB->out_doorbell, &reg->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
- pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+ pr_notice("arcmsr%d: wait 'stop adapter background rebuild' "
"timeout\n", pACB->host->host_no);
}
}
@@ -3927,7 +3924,7 @@ static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", acb->host->host_no);
+ rebuild' timeout \n", acb->host->host_no);
}
}
@@ -3938,7 +3935,7 @@ static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n",acb->host->host_no);
+ rebuild' timeout \n",acb->host->host_no);
}
}
@@ -3950,7 +3947,7 @@ static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
- rebulid' timeout \n", pACB->host->host_no);
+ rebuild' timeout \n", pACB->host->host_no);
}
return;
}
@@ -3963,7 +3960,7 @@ static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout\n", pACB->host->host_no);
+ "background rebuild' timeout\n", pACB->host->host_no);
}
}
@@ -3977,7 +3974,7 @@ static void arcmsr_hbaE_start_bgrb(struct AdapterControlBlock *pACB)
writel(pACB->out_doorbell, &pmu->iobound_doorbell);
if (!arcmsr_hbaE_wait_msgint_ready(pACB)) {
pr_notice("arcmsr%d: wait 'start adapter "
- "background rebulid' timeout \n", pACB->host->host_no);
+ "background rebuild' timeout \n", pACB->host->host_no);
}
}
@@ -4135,9 +4132,9 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
pci_read_config_byte(acb->pdev, i, &value[i]);
}
/* hardware reset signal */
- if ((acb->dev_id == 0x1680)) {
+ if (acb->dev_id == 0x1680) {
writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
- } else if ((acb->dev_id == 0x1880)) {
+ } else if (acb->dev_id == 0x1880) {
do {
count++;
writel(0xF, &pmuC->write_sequence);
@@ -4161,7 +4158,7 @@ static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
} while (((readl(&pmuE->host_diagnostic_3xxx) &
ARCMSR_ARC1884_DiagWrite_ENABLE) == 0) && (count < 5));
writel(ARCMSR_ARC188X_RESET_ADAPTER, &pmuE->host_diagnostic_3xxx);
- } else if ((acb->dev_id == 0x1214)) {
+ } else if (acb->dev_id == 0x1214) {
writel(0x20, pmuD->reset_request);
} else {
pci_write_config_byte(acb->pdev, 0x84, 0x20);
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 8996d2329e11..802d15018ec0 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1193,7 +1193,7 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- pci_free_consistent(atp_dev->pdev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -1205,7 +1205,7 @@ static int atp870u_init_tables(struct Scsi_Host *host)
int c,k;
for(c=0;c < 2;c++) {
for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = pci_alloc_consistent(atp_dev->pdev, 1024, &(atp_dev->id[c][k].prd_bus));
+ atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
if (!atp_dev->id[c][k].prd_table) {
printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
@@ -1509,7 +1509,7 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto fail;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
err = -EIO;
goto disable_device;
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index c10aac4dbc5e..0a6972ee94d7 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -520,7 +520,7 @@ int beiscsi_process_mcc_compl(struct be_ctrl_info *ctrl,
**/
tag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(ctrl->pdev, tag_mem->size,
+ dma_free_coherent(&ctrl->pdev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -1269,12 +1269,12 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
struct be_sge *sge = nonembedded_sgl(wrb);
int status = 0;
- nonemb_cmd.va = pci_alloc_consistent(ctrl->pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&ctrl->pdev->dev,
sizeof(struct be_mgmt_controller_attributes),
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BG_%d : pci_alloc_consistent failed in %s\n",
+ "BG_%d : dma_alloc_coherent failed in %s\n",
__func__);
return -ENOMEM;
}
@@ -1314,7 +1314,7 @@ int beiscsi_check_supported_fw(struct be_ctrl_info *ctrl,
"BG_%d : Failed in beiscsi_check_supported_fw\n");
mutex_unlock(&ctrl->mbox_lock);
if (nonemb_cmd.va)
- pci_free_consistent(ctrl->pdev, nonemb_cmd.size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return status;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index c8f0a2144b44..96b96e2ab91a 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -771,7 +771,7 @@ int beiscsi_get_host_param(struct Scsi_Host *shost,
status = beiscsi_get_initiator_name(phba, buf, false);
if (status < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : Retreiving Initiator Name Failed\n");
+ "BS_%d : Retrieving Initiator Name Failed\n");
status = 0;
}
}
@@ -1071,9 +1071,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
else
req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
req_memsize,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -1091,7 +1091,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return -EAGAIN;
@@ -1104,8 +1104,9 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
"BS_%d : mgmt_open_connection Failed");
if (ret != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
- nonemb_cmd.va, nonemb_cmd.dma);
+ dma_free_coherent(&phba->ctrl.pdev->dev,
+ nonemb_cmd.size, nonemb_cmd.va,
+ nonemb_cmd.dma);
beiscsi_free_ep(beiscsi_ep);
return ret;
@@ -1118,7 +1119,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_open_connection Success\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
}
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 3660059784f7..effb6fc95af4 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -511,18 +511,9 @@ static int beiscsi_enable_pci(struct pci_dev *pcidev)
}
pci_set_master(pcidev);
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
if (ret) {
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
- goto pci_region_release;
- } else {
- ret = pci_set_consistent_dma_mask(pcidev,
- DMA_BIT_MASK(32));
- }
- } else {
- ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
goto pci_region_release;
@@ -550,9 +541,8 @@ static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
if (status)
return status;
mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
- mbox_mem_alloc->va = pci_alloc_consistent(pdev,
- mbox_mem_alloc->size,
- &mbox_mem_alloc->dma);
+ mbox_mem_alloc->va = dma_alloc_coherent(&pdev->dev,
+ mbox_mem_alloc->size, &mbox_mem_alloc->dma, GFP_KERNEL);
if (!mbox_mem_alloc->va) {
beiscsi_unmap_pci_function(phba);
return -ENOMEM;
@@ -1866,7 +1856,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
{
struct be_queue_info *cq;
struct sol_cqe *sol;
- struct dmsg_cqe *dmsg;
unsigned int total = 0;
unsigned int num_processed = 0;
unsigned short code = 0, cid = 0;
@@ -1939,7 +1928,6 @@ unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
"BM_%d : Received %s[%d] on CID : %d\n",
cqe_desc[code], code, cid);
- dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
break;
case UNSOL_HDR_NOTIFY:
@@ -2304,11 +2292,11 @@ static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
/* Map addr only if there is data_count */
if (dsp_value) {
- io_task->mtask_addr = pci_map_single(phba->pcidev,
+ io_task->mtask_addr = dma_map_single(&phba->pcidev->dev,
task->data,
task->data_count,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(phba->pcidev,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&phba->pcidev->dev,
io_task->mtask_addr))
return -ENOMEM;
io_task->mtask_data_count = task->data_count;
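Streaming mappings must be validated with dma_mapping_error() before
the address is handed to hardware, as above; a minimal sketch of the
pattern (assumed context: struct device *dev, void *buf, size_t len):

	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;
	/* ... hardware consumes 'addr' ... */
	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);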
@@ -2519,10 +2507,9 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
BEISCSI_MAX_FRAGS_INIT);
curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
do {
- mem_arr->virtual_address = pci_alloc_consistent(
- phba->pcidev,
- curr_alloc_size,
- &bus_add);
+ mem_arr->virtual_address =
+ dma_alloc_coherent(&phba->pcidev->dev,
+ curr_alloc_size, &bus_add, GFP_KERNEL);
if (!mem_arr->virtual_address) {
if (curr_alloc_size <= BE_MIN_MEM_SIZE)
goto free_mem;
@@ -2560,7 +2547,7 @@ free_mem:
mem_descr->num_elements = j;
while ((i) || (j)) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].
virtual_address,
@@ -3031,9 +3018,9 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
phwi_context->be_eq[i].phba = phba;
- eq_vaddress = pci_alloc_consistent(phba->pcidev,
+ eq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_eq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!eq_vaddress) {
ret = -ENOMEM;
goto create_eq_error;
@@ -3069,7 +3056,7 @@ create_eq_error:
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_eq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_eq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3097,9 +3084,9 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
pbe_eq->cq = cq;
pbe_eq->phba = phba;
mem = &cq->dma_mem;
- cq_vaddress = pci_alloc_consistent(phba->pcidev,
+ cq_vaddress = dma_alloc_coherent(&phba->pcidev->dev,
num_cq_pages * PAGE_SIZE,
- &paddr);
+ &paddr, GFP_KERNEL);
if (!cq_vaddress) {
ret = -ENOMEM;
goto create_cq_error;
@@ -3134,7 +3121,7 @@ create_cq_error:
cq = &phwi_context->be_cq[i];
mem = &cq->dma_mem;
if (mem->va)
- pci_free_consistent(phba->pcidev, num_cq_pages
+ dma_free_coherent(&phba->pcidev->dev, num_cq_pages
* PAGE_SIZE,
mem->va, mem->dma);
}
@@ -3326,7 +3313,7 @@ static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
{
struct be_dma_mem *mem = &q->dma_mem;
if (mem->va) {
- pci_free_consistent(phba->pcidev, mem->size,
+ dma_free_coherent(&phba->pcidev->dev, mem->size,
mem->va, mem->dma);
mem->va = NULL;
}
@@ -3341,7 +3328,8 @@ static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
+ mem->va = dma_zalloc_coherent(&phba->pcidev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -3479,7 +3467,7 @@ static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
&ctrl->ptag_state[tag].tag_state)) {
ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
if (ptag_mem->size) {
- pci_free_consistent(ctrl->pdev,
+ dma_free_coherent(&ctrl->pdev->dev,
ptag_mem->size,
ptag_mem->va,
ptag_mem->dma);
@@ -3880,7 +3868,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
mem_descr->mem_array[j - 1].size,
mem_descr->mem_array[j - 1].virtual_address,
(unsigned long)mem_descr->mem_array[j - 1].
@@ -4255,10 +4243,10 @@ beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
}
if (io_task->mtask_addr) {
- pci_unmap_single(phba->pcidev,
+ dma_unmap_single(&phba->pcidev->dev,
io_task->mtask_addr,
io_task->mtask_data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
io_task->mtask_addr = 0;
}
}
@@ -4852,9 +4840,9 @@ static int beiscsi_bsg_request(struct bsg_job *job)
switch (bsg_req->msgcode) {
case ISCSI_BSG_HST_VENDOR:
- nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
job->request_payload.payload_len,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma, GFP_KERNEL);
if (nonemb_cmd.va == NULL) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
@@ -4867,7 +4855,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : MBX Tag Allocation Failed\n");
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
}
@@ -4881,7 +4869,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
clear_bit(MCC_TAG_STATE_RUNNING,
&phba->ctrl.ptag_state[tag].tag_state);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EIO;
}
@@ -4898,7 +4886,7 @@ static int beiscsi_bsg_request(struct bsg_job *job)
bsg_reply->result = status;
bsg_job_done(job, bsg_reply->result,
bsg_reply->reply_payload_rcv_len);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
@@ -5529,7 +5517,6 @@ static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
return PCI_ERS_RESULT_DISCONNECT;
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
return PCI_ERS_RESULT_RECOVERED;
}
@@ -5755,7 +5742,7 @@ free_twq:
beiscsi_cleanup_port(phba);
beiscsi_free_mem(phba);
free_port:
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
@@ -5799,7 +5786,7 @@ static void beiscsi_remove(struct pci_dev *pcidev)
/* ctrl uninit */
beiscsi_unmap_pci_function(phba);
- pci_free_consistent(phba->pcidev,
+ dma_free_coherent(&phba->pcidev->dev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
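
The be2iscsi hunks above are a mechanical switch from the legacy pci_* DMA wrappers to the generic DMA API. A minimal sketch of the pattern on hypothetical variables follows; one semantic detail worth noting is that pci_alloc_consistent() implied GFP_ATOMIC, whereas the converted call sites pass GFP_KERNEL explicitly, which is valid because these paths run in process context:

    /* before: pci_dev-based compat wrappers, implicit GFP_ATOMIC */
    va = pci_alloc_consistent(pdev, size, &dma);
    pci_free_consistent(pdev, size, va, dma);

    /* after: generic DMA API on the underlying struct device */
    va = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
    dma_free_coherent(&pdev->dev, size, va, dma);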
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 8fdc07b6c686..ca7b7bbc8371 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -284,7 +284,7 @@ static int beiscsi_exec_nemb_cmd(struct beiscsi_hba *phba,
return rc;
free_cmd:
- pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
+ dma_free_coherent(&ctrl->pdev->dev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
return rc;
}
@@ -293,7 +293,8 @@ static int beiscsi_prep_nemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *cmd,
u8 subsystem, u8 opcode, u32 size)
{
- cmd->va = pci_zalloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
+ cmd->va = dma_zalloc_coherent(&phba->ctrl.pdev->dev, size, &cmd->dma,
+ GFP_KERNEL);
if (!cmd->va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BG_%d : Failed to allocate memory for if info\n");
@@ -315,7 +316,7 @@ static void __beiscsi_eq_delay_compl(struct beiscsi_hba *phba, unsigned int tag)
__beiscsi_mcc_compl_status(phba, tag, NULL, NULL);
tag_mem = &phba->ctrl.ptag_state[tag].tag_mem_state;
if (tag_mem->size) {
- pci_free_consistent(phba->pcidev, tag_mem->size,
+ dma_free_coherent(&phba->pcidev->dev, tag_mem->size,
tag_mem->va, tag_mem->dma);
tag_mem->size = 0;
}
@@ -761,7 +762,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
"BG_%d : Memory Allocation Failure\n");
/* Free the DMA memory for the IOCTL issuing */
- pci_free_consistent(phba->ctrl.pdev,
+ dma_free_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -780,7 +781,7 @@ int beiscsi_if_get_info(struct beiscsi_hba *phba, int ip_type,
ioctl_size += sizeof(struct be_cmd_req_hdr);
/* Free the previous allocated DMA memory */
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va,
nonemb_cmd.dma);
@@ -869,7 +870,7 @@ static void beiscsi_boot_process_compl(struct beiscsi_hba *phba,
status);
boot_work = 0;
}
- pci_free_consistent(phba->ctrl.pdev, bs->nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, bs->nonemb_cmd.size,
bs->nonemb_cmd.va, bs->nonemb_cmd.dma);
bs->nonemb_cmd.va = NULL;
break;
@@ -1012,9 +1013,10 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba)
nonemb_cmd = &phba->boot_struct.nonemb_cmd;
nonemb_cmd->size = sizeof(struct be_cmd_get_session_resp);
- nonemb_cmd->va = pci_alloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd->va = dma_alloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd->size,
- &nonemb_cmd->dma);
+ &nonemb_cmd->dma,
+ GFP_KERNEL);
if (!nonemb_cmd->va) {
mutex_unlock(&ctrl->mbox_lock);
return 0;
@@ -1508,9 +1510,10 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
return -EINVAL;
nonemb_cmd.size = sizeof(union be_invldt_cmds_params);
- nonemb_cmd.va = pci_zalloc_consistent(phba->ctrl.pdev,
+ nonemb_cmd.va = dma_zalloc_coherent(&phba->ctrl.pdev->dev,
nonemb_cmd.size,
- &nonemb_cmd.dma);
+ &nonemb_cmd.dma,
+ GFP_KERNEL);
if (!nonemb_cmd.va) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
"BM_%d : invldt_cmds_params alloc failed\n");
@@ -1521,7 +1524,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
wrb = alloc_mcc_wrb(phba, &tag);
if (!wrb) {
mutex_unlock(&ctrl->mbox_lock);
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -ENOMEM;
}
@@ -1548,7 +1551,7 @@ int beiscsi_mgmt_invalidate_icds(struct beiscsi_hba *phba,
rc = beiscsi_mccq_compl_wait(phba, tag, NULL, &nonemb_cmd);
if (rc != -EBUSY)
- pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
+ dma_free_coherent(&phba->ctrl.pdev->dev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return rc;
}
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 3d0c96a5c873..c19c26e0e405 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -1453,7 +1453,7 @@ union bfa_aen_data_u {
struct bfa_aen_entry_s {
struct list_head qe;
enum bfa_aen_category aen_category;
- u32 aen_type;
+ int aen_type;
union bfa_aen_data_u aen_data;
u64 aen_tv_sec;
u64 aen_tv_usec;
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index d3b00a475aeb..2de5d514e99c 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -190,27 +190,6 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
fchs->ox_id = ox_id;
}
-enum fc_parse_status
-fc_els_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
- struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
-
- len = len;
-
- switch (els_cmd->els_code) {
- case FC_ELS_LS_RJT:
- if (ls_rjt->reason_code == FC_LS_RJT_RSN_LOGICAL_BUSY)
- return FC_PARSE_BUSY;
- else
- return FC_PARSE_FAILURE;
-
- case FC_ELS_ACC:
- return FC_PARSE_OK;
- }
- return FC_PARSE_OK;
-}
-
static void
fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id)
{
@@ -831,18 +810,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
}
u16
-fc_logo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
wwn_t port_name, wwn_t node_name, u16 pdu_size)
{
@@ -908,40 +875,6 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (prlo->command != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_FAILURE;
-
- if (prlo->prlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_FAILURE;
- }
- return FC_PARSE_OK;
-
-}
-
-u16
fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
{
@@ -972,47 +905,6 @@ fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
}
u16
-fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_tprlo_acc_s *tprlo = (struct fc_tprlo_acc_s *) (fchs + 1);
- int num_pages = 0;
- int page = 0;
-
- len = len;
-
- if (tprlo->command != FC_ELS_ACC)
- return FC_PARSE_ACC_INVAL;
-
- num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
-
- for (page = 0; page < num_pages; page++) {
- if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
- return FC_PARSE_NOT_FCP;
- if (tprlo->tprlo_acc_params[page].opa_valid != 0)
- return FC_PARSE_OPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].rpa_valid != 0)
- return FC_PARSE_RPAFLAG_INVAL;
- if (tprlo->tprlo_acc_params[page].orig_process_assc != 0)
- return FC_PARSE_OPA_INVAL;
- if (tprlo->tprlo_acc_params[page].resp_process_assc != 0)
- return FC_PARSE_RPA_INVAL;
- }
- return FC_PARSE_OK;
-}
-
-enum fc_parse_status
-fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
-{
- struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-
- len = len;
- if (els_cmd->els_code != FC_ELS_ACC)
- return FC_PARSE_FAILURE;
-
- return FC_PARSE_OK;
-}
-
-u16
fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, __be16 ox_id,
u32 reason_code, u32 reason_expl)
{
diff --git a/drivers/scsi/bfa/bfa_fcbuild.h b/drivers/scsi/bfa/bfa_fcbuild.h
index b109a8813401..ac08d0b5b89a 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.h
+++ b/drivers/scsi/bfa/bfa_fcbuild.h
@@ -163,7 +163,6 @@ enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
u32 s_id, u16 ox_id, u16 rrq_oxid);
-enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
u16 fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
u16 ox_id, u8 *name);
@@ -276,8 +275,6 @@ void fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
void fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id);
-enum fc_parse_status fc_els_rsp_parse(struct fchs_s *fchs, int len);
-
enum fc_parse_status fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
wwn_t port_name);
@@ -297,8 +294,6 @@ u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
u32 d_id, u32 s_id, __be16 ox_id, int num_pages);
-u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, wwn_t port_name, wwn_t node_name,
u16 pdu_size);
@@ -308,14 +303,10 @@ u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages);
-u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
u32 tpr_id);
-u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
-
u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
__be16 ox_id, u32 reason_code, u32 reason_expl);
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index bd7e6a6fc1f1..911efc98d1fd 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1569,8 +1569,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
if (pci_set_dma_mask(bfad->pcidev, DMA_BIT_MASK(32)) != 0)
goto out_disable_device;
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
if (restart_bfa(bfad) == -1)
goto out_disable_device;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index e61ed8dad0b4..bd4ac187fd8e 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -143,7 +143,7 @@ struct bfad_im_s {
static inline void bfad_im_post_vendor_event(struct bfa_aen_entry_s *entry,
struct bfad_s *drv, int cnt,
enum bfa_aen_category cat,
- enum bfa_ioc_aen_event evt)
+ int evt)
{
struct timespec64 ts;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 27c8d6ba05bb..cd160f2ec75d 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -432,7 +432,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
struct fcoe_rcv_info *fr;
struct fcoe_percpu_s *bg;
struct sk_buff *tmp_skb;
- unsigned short oxid;
interface = container_of(ptype, struct bnx2fc_interface,
fcoe_packet_type);
@@ -466,8 +465,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
fh = (struct fc_frame_header *) skb_transport_header(skb);
- oxid = ntohs(fh->fh_ox_id);
-
fr = fcoe_dev_from_skb(skb);
fr->fr_dev = lport;
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index ed2dae657964..1a458ce08210 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -210,11 +210,8 @@ csio_pci_init(struct pci_dev *pdev, int *bars)
pci_set_master(pdev);
pci_try_set_mwi(pdev);
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- } else {
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto err_release_regions;
}
@@ -1102,7 +1099,6 @@ csio_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
/* Bring HW s/m to ready state.
* but don't resume IOs.
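
dma_set_mask_and_coherent(), used in csio_pci_init() above, folds the old pci_set_dma_mask()/pci_set_consistent_dma_mask() pair into one call. Its definition in include/linux/dma-mapping.h is approximately:

    static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
    {
            int rc = dma_set_mask(dev, mask);

            if (rc == 0)
                    dma_set_coherent_mask(dev, mask);
            return rc;
    }

which is why the 64-then-32 fallback reads naturally as a single short-circuit condition.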
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index cc5611efc7a9..66e58f0a75dc 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -1845,8 +1845,8 @@ csio_ln_fdmi_init(struct csio_lnode *ln)
/* Allocate Dma buffers for FDMI response Payload */
dma_buf = &ln->mgmt_req->dma_buf;
dma_buf->len = 2048;
- dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
- &dma_buf->paddr);
+ dma_buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, dma_buf->len,
+ &dma_buf->paddr, GFP_KERNEL);
if (!dma_buf->vaddr) {
csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
kfree(ln->mgmt_req);
@@ -1873,7 +1873,7 @@ csio_ln_fdmi_exit(struct csio_lnode *ln)
dma_buf = &ln->mgmt_req->dma_buf;
if (dma_buf->vaddr)
- pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_free_coherent(&hw->pdev->dev, dma_buf->len, dma_buf->vaddr,
dma_buf->paddr);
kfree(ln->mgmt_req);
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index dab0d3f9bee1..8c15b7acb4b7 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -2349,8 +2349,8 @@ csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
}
/* Allocate Dma buffers for DDP */
- ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
- &ddp_desc->paddr);
+ ddp_desc->vaddr = dma_alloc_coherent(&hw->pdev->dev, unit_size,
+ &ddp_desc->paddr, GFP_KERNEL);
if (!ddp_desc->vaddr) {
csio_err(hw,
"SCSI response DMA buffer (ddp) allocation"
@@ -2372,8 +2372,8 @@ no_mem:
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
@@ -2399,8 +2399,8 @@ csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
list_for_each(tmp, &scm->ddp_freelist) {
ddp_desc = (struct csio_dma_buf *) tmp;
tmp = csio_list_prev(tmp);
- pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
- ddp_desc->paddr);
+ dma_free_coherent(&hw->pdev->dev, ddp_desc->len,
+ ddp_desc->vaddr, ddp_desc->paddr);
list_del_init(&ddp_desc->list);
kfree(ddp_desc);
}
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index 5022e82ccc4f..dc12933533d5 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -124,8 +124,8 @@ csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
while (n--) {
buf->len = sge->sge_fl_buf_size[sreg];
- buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
- &buf->paddr);
+ buf->vaddr = dma_alloc_coherent(&hw->pdev->dev, buf->len,
+ &buf->paddr, GFP_KERNEL);
if (!buf->vaddr) {
csio_err(hw, "Could only fill %d buffers!\n", n + 1);
return -ENOMEM;
@@ -233,7 +233,8 @@ csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
q = wrm->q_arr[free_idx];
- q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart);
+ q->vstart = dma_zalloc_coherent(&hw->pdev->dev, qsz, &q->pstart,
+ GFP_KERNEL);
if (!q->vstart) {
csio_err(hw,
"Failed to allocate DMA memory for "
@@ -1703,14 +1704,14 @@ csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
buf = &q->un.fl.bufs[j];
if (!buf->vaddr)
continue;
- pci_free_consistent(hw->pdev, buf->len,
- buf->vaddr,
- buf->paddr);
+ dma_free_coherent(&hw->pdev->dev,
+ buf->len, buf->vaddr,
+ buf->paddr);
}
kfree(q->un.fl.bufs);
}
- pci_free_consistent(hw->pdev, q->size,
- q->vstart, q->pstart);
+ dma_free_coherent(&hw->pdev->dev, q->size,
+ q->vstart, q->pstart);
}
kfree(q);
}
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d5a869..064ef5735182 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -35,6 +35,11 @@ static unsigned int dbg_level;
#include "../libcxgbi.h"
+#ifdef CONFIG_CHELSIO_T4_DCB
+#include <net/dcbevent.h>
+#include "cxgb4_dcb.h"
+#endif
+
#define DRV_MODULE_NAME "cxgb4i"
#define DRV_MODULE_DESC "Chelsio T4-T6 iSCSI Driver"
#define DRV_MODULE_VERSION "0.9.5-ko"
@@ -155,6 +160,15 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
.session_recovery_timedout = iscsi_session_recovery_timedout,
};
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *, unsigned long, void *);
+
+static struct notifier_block cxgb4_dcb_change = {
+ .notifier_call = cxgb4_dcb_change_notify,
+};
+#endif
+
static struct scsi_transport_template *cxgb4i_stt;
/*
@@ -574,6 +588,9 @@ static inline int tx_flowc_wr_credits(int *nparamsp, int *flowclenp)
int nparams, flowclen16, flowclen;
nparams = FLOWC_WR_NPARAMS_MIN;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ nparams++;
+#endif
flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
flowclen16 = DIV_ROUND_UP(flowclen, 16);
flowclen = flowclen16 * 16;
@@ -595,6 +612,9 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
struct fw_flowc_wr *flowc;
int nparams, flowclen16, flowclen;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u16 vlan = ((struct l2t_entry *)csk->l2t)->vlan;
+#endif
flowclen16 = tx_flowc_wr_credits(&nparams, &flowclen);
skb = alloc_wr(flowclen, 0, GFP_ATOMIC);
flowc = (struct fw_flowc_wr *)skb->head;
@@ -622,6 +642,17 @@ static inline int send_tx_flowc_wr(struct cxgbi_sock *csk)
flowc->mnemval[8].val = 0;
flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
flowc->mnemval[8].val = 16384;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ flowc->mnemval[9].mnemonic = FW_FLOWC_MNEM_DCBPRIO;
+ if (vlan == CPL_L2T_VLAN_NONE) {
+ pr_warn_ratelimited("csk %u without VLAN Tag on DCB Link\n",
+ csk->tid);
+ flowc->mnemval[9].val = cpu_to_be32(0);
+ } else {
+ flowc->mnemval[9].val = cpu_to_be32((vlan & VLAN_PRIO_MASK) >>
+ VLAN_PRIO_SHIFT);
+ }
+#endif
set_wr_txq(skb, CPL_PRIORITY_DATA, csk->port_id);
@@ -1600,6 +1631,46 @@ static void release_offload_resources(struct cxgbi_sock *csk)
csk->dst = NULL;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static inline u8 get_iscsi_dcb_state(struct net_device *ndev)
+{
+ return ndev->dcbnl_ops->getstate(ndev);
+}
+
+static int select_priority(int pri_mask)
+{
+ if (!pri_mask)
+ return 0;
+ return (ffs(pri_mask) - 1);
+}
+
+static u8 get_iscsi_dcb_priority(struct net_device *ndev)
+{
+ int rv;
+ u8 caps;
+
+ struct dcb_app iscsi_dcb_app = {
+ .protocol = 3260
+ };
+
+ rv = (int)ndev->dcbnl_ops->getcap(ndev, DCB_CAP_ATTR_DCBX, &caps);
+ if (rv)
+ return 0;
+
+ if (caps & DCB_CAP_DCBX_VER_IEEE) {
+ iscsi_dcb_app.selector = IEEE_8021QAZ_APP_SEL_ANY;
+ rv = dcb_ieee_getapp_mask(ndev, &iscsi_dcb_app);
+ } else if (caps & DCB_CAP_DCBX_VER_CEE) {
+ iscsi_dcb_app.selector = DCB_APP_IDTYPE_PORTNUM;
+ rv = dcb_getapp(ndev, &iscsi_dcb_app);
+ }
+
+ log_debug(1 << CXGBI_DBG_ISCSI,
+ "iSCSI priority is set to %u\n", select_priority(rv));
+ return select_priority(rv);
+}
+#endif
+
static int init_act_open(struct cxgbi_sock *csk)
{
struct cxgbi_device *cdev = csk->cdev;
@@ -1613,7 +1684,9 @@ static int init_act_open(struct cxgbi_sock *csk)
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
-
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 priority = 0;
+#endif
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,0x%lx,%u.\n",
csk, csk->state, csk->flags, csk->tid);
@@ -1647,7 +1720,15 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ if (get_iscsi_dcb_state(ndev))
+ priority = get_iscsi_dcb_priority(ndev);
+
+ csk->dcb_priority = priority;
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, priority);
+#else
csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
+#endif
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource_without_clip;
@@ -2146,6 +2227,70 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
return 0;
}
+#ifdef CONFIG_CHELSIO_T4_DCB
+static int
+cxgb4_dcb_change_notify(struct notifier_block *self, unsigned long val,
+ void *data)
+{
+ int i, port = 0xFF;
+ struct net_device *ndev;
+ struct cxgbi_device *cdev = NULL;
+ struct dcb_app_type *iscsi_app = data;
+ struct cxgbi_ports_map *pmap;
+ u8 priority;
+
+ if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
+ if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
+ return NOTIFY_DONE;
+
+ priority = iscsi_app->app.priority;
+ } else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
+ if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
+ return NOTIFY_DONE;
+
+ if (!iscsi_app->app.priority)
+ return NOTIFY_DONE;
+
+ priority = ffs(iscsi_app->app.priority) - 1;
+ } else {
+ return NOTIFY_DONE;
+ }
+
+ if (iscsi_app->app.protocol != 3260)
+ return NOTIFY_DONE;
+
+ log_debug(1 << CXGBI_DBG_ISCSI, "iSCSI priority for ifid %d is %u\n",
+ iscsi_app->ifindex, priority);
+
+ ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ cdev = cxgbi_device_find_by_netdev_rcu(ndev, &port);
+
+ dev_put(ndev);
+ if (!cdev)
+ return NOTIFY_DONE;
+
+ pmap = &cdev->pmap;
+
+ for (i = 0; i < pmap->used; i++) {
+ if (pmap->port_csk[i]) {
+ struct cxgbi_sock *csk = pmap->port_csk[i];
+
+ if (csk->dcb_priority != priority) {
+ iscsi_conn_failure(csk->user_data,
+ ISCSI_ERR_CONN_FAILED);
+ pr_info("Restarting iSCSI connection %p with "
+ "priority %u->%u.\n", csk,
+ csk->dcb_priority, priority);
+ }
+ }
+ }
+ return NOTIFY_OK;
+}
+#endif
+
static int __init cxgb4i_init_module(void)
{
int rc;
@@ -2157,11 +2302,18 @@ static int __init cxgb4i_init_module(void)
return rc;
cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+#ifdef CONFIG_CHELSIO_T4_DCB
+ pr_info("%s dcb enabled.\n", DRV_MODULE_NAME);
+ register_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
return 0;
}
static void __exit cxgb4i_exit_module(void)
{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ unregister_dcbevent_notifier(&cxgb4_dcb_change);
+#endif
cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
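
The DCB support added above follows the standard notifier pattern: register on module init, ignore events that are not the iSCSI application entry (protocol 3260), and restart any offloaded connection whose priority changed. A condensed sketch with hypothetical names (my_dcb_notify, my_nb); only the register/unregister calls and the protocol check are taken from the patch:

    static int my_dcb_notify(struct notifier_block *nb, unsigned long val,
                             void *data)
    {
            struct dcb_app_type *app = data;

            if (app->app.protocol != 3260)  /* iSCSI well-known port */
                    return NOTIFY_DONE;
            /* look up the device by app->ifindex, then bounce every
             * csk whose dcb_priority no longer matches */
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = { .notifier_call = my_dcb_notify };

    register_dcbevent_notifier(&my_nb);    /* module init */
    unregister_dcbevent_notifier(&my_nb);  /* module exit */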
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e75343..5d5d8b50d842 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -120,6 +120,9 @@ struct cxgbi_sock {
int wr_max_cred;
int wr_cred;
int wr_una_cred;
+#ifdef CONFIG_CHELSIO_T4_DCB
+ u8 dcb_priority;
+#endif
unsigned char hcrc_len;
unsigned char dcrc_len;
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 1ed2cd82129d..8c55ec6e1827 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -753,105 +753,6 @@ static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
return NULL;
}
-
-static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
-{
- struct list_head *head = &acb->srb_free_list;
- struct ScsiReqBlk *srb = NULL;
-
- if (!list_empty(head)) {
- srb = list_entry(head->next, struct ScsiReqBlk, list);
- list_del(head->next);
- dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
- }
- return srb;
-}
-
-
-static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
- list_add_tail(&srb->list, &acb->srb_free_list);
-}
-
-
-static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_append(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_add_tail(&srb->list, &dcb->srb_going_list);
-}
-
-
-static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- struct ScsiReqBlk *i;
- struct ScsiReqBlk *tmp;
- dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
-
- list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
- if (i == srb) {
- list_del(&srb->list);
- break;
- }
-}
-
-
-static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_waiting_list);
-}
-
-
-static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
- struct ScsiReqBlk *srb)
-{
- dprintkdbg(DBG_0,
- "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
- srb->cmd, dcb->target_id, dcb->target_lun, srb);
- list_move(&srb->list, &dcb->srb_going_list);
-}
-
-
/* Sets the timer to wake us up */
static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
{
@@ -923,7 +824,7 @@ static void waiting_process_next(struct AdapterCtlBlk *acb)
/* Try to send to the bus */
if (!start_scsi(acb, pos, srb))
- srb_waiting_to_going_move(pos, srb);
+ list_move(&srb->list, &pos->srb_going_list);
else
waiting_set_timer(acb, HZ/50);
break;
@@ -960,15 +861,15 @@ static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
acb->active_dcb ||
(acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
return;
}
- if (!start_scsi(acb, dcb, srb))
- srb_going_append(dcb, srb);
- else {
- srb_waiting_insert(dcb, srb);
+ if (!start_scsi(acb, dcb, srb)) {
+ list_add_tail(&srb->list, &dcb->srb_going_list);
+ } else {
+ list_add(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 50);
}
}
@@ -1045,10 +946,8 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
sgp->length++;
}
- srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
- srb->segment_x,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
+ srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
@@ -1116,9 +1015,9 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd->scsi_done = done;
cmd->result = 0;
- srb = srb_get_free(acb);
- if (!srb)
- {
+ srb = list_first_entry_or_null(&acb->srb_free_list,
+ struct ScsiReqBlk, list);
+ if (!srb) {
/*
* Return 1 since we are unable to queue this command at this
* point in time.
@@ -1126,12 +1025,13 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
dprintkdbg(DBG_0, "queue_command: No free srb's\n");
return 1;
}
+ list_del(&srb->list);
build_srb(cmd, dcb, srb);
if (!list_empty(&dcb->srb_waiting_list)) {
/* append to waiting queue */
- srb_waiting_append(dcb, srb);
+ list_add_tail(&srb->list, &dcb->srb_waiting_list);
waiting_process_next(acb);
} else {
/* process immediately */
@@ -1376,11 +1276,11 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
srb = find_cmd(cmd, &dcb->srb_waiting_list);
if (srb) {
- srb_waiting_remove(dcb, srb);
+ list_del(&srb->list);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
cmd->result = DID_ABORT << 16;
return SUCCESS;
@@ -1969,14 +1869,15 @@ static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
xferred -= psge->length;
} else {
/* Partial SG entry done */
+ dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
psge->length -= xferred;
psge->address += xferred;
srb->sg_index = idx;
- pci_dma_sync_single_for_device(srb->dcb->
- acb->dev,
- srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
+ srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
break;
}
psge++;
@@ -3083,7 +2984,7 @@ static void disconnect(struct AdapterCtlBlk *acb)
goto disc1;
}
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
dprintkdbg(DBG_KG,
"disconnect: (0x%p) Retry\n",
srb->cmd);
@@ -3148,7 +3049,7 @@ static void reselect(struct AdapterCtlBlk *acb)
srb->state = SRB_READY;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
/* return; */
@@ -3271,9 +3172,8 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
- pci_unmap_single(acb->dev, srb->sg_bus_addr,
- SEGMENTX_LEN,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
+ DMA_TO_DEVICE);
dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
scsi_sg_count(cmd), scsi_bufflen(cmd));
/* unmap the sg segments */
@@ -3291,8 +3191,8 @@ static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
/* Unmap sense buffer */
dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
srb->segment_x[0].address);
- pci_unmap_single(acb->dev, srb->segment_x[0].address,
- srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
+ srb->segment_x[0].length, DMA_FROM_DEVICE);
/* Restore SG stuff */
srb->total_xfer_length = srb->xferred;
srb->segment_x[0].address =
@@ -3411,7 +3311,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
tempcnt--;
dcb->max_command = tempcnt;
free_tag(dcb, srb);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 20);
srb->adapter_status = 0;
srb->target_status = 0;
@@ -3447,14 +3347,12 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
}
}
- if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
- pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), dir);
-
ckc_only = 0;
/* Check Error Conditions */
ckc_e:
+ pci_unmap_srb(acb, srb);
+
if (cmd->cmnd[0] == INQUIRY) {
unsigned char *base = NULL;
struct ScsiInqData *ptr;
@@ -3498,16 +3396,14 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
cmd->cmnd[0], srb->total_xfer_length);
}
- srb_going_remove(dcb, srb);
- /* Add to free list */
- if (srb == acb->tmp_srb)
- dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
- else {
+ if (srb != acb->tmp_srb) {
+ /* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
cmd, cmd->result);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
+ } else {
+ dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
}
- pci_unmap_srb(acb, srb);
cmd->scsi_done(cmd);
waiting_process_next(acb);
@@ -3535,9 +3431,9 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
- srb_going_remove(dcb, srb);
+ list_del(&srb->list);
free_tag(dcb, srb);
- srb_free_insert(acb, srb);
+ list_add_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3565,8 +3461,7 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
- srb_waiting_remove(dcb, srb);
- srb_free_insert(acb, srb);
+ list_move_tail(&srb->list, &acb->srb_free_list);
p->result = result;
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
@@ -3692,9 +3587,9 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
/* Map sense buffer */
- srb->segment_x[0].address =
- pci_map_single(acb->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
cmd->sense_buffer, srb->segment_x[0].address,
SCSI_SENSE_BUFFERSIZE);
@@ -3705,7 +3600,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
dprintkl(KERN_DEBUG,
"request_sense: (0x%p) failed <%02i-%i>\n",
srb->cmd, dcb->target_id, dcb->target_lun);
- srb_going_to_waiting_move(dcb, srb);
+ list_move(&srb->list, &dcb->srb_waiting_list);
waiting_set_timer(acb, HZ / 100);
}
}
@@ -4392,7 +4287,7 @@ static void adapter_init_params(struct AdapterCtlBlk *acb)
/* link static array of srbs into the srb free list */
for (i = 0; i < acb->srb_count - 1; i++)
- srb_free_insert(acb, &acb->srb_array[i]);
+ list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
}
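
The dc395x helper removals work because every wrapper was a single list.h primitive plus a debug print. The correspondences applied in the hunks above, as a sketch:

    /* was srb_get_free(acb) */
    srb = list_first_entry_or_null(&acb->srb_free_list,
                                   struct ScsiReqBlk, list);
    if (srb)
            list_del(&srb->list);

    /* was srb_waiting_append(dcb, srb) */
    list_add_tail(&srb->list, &dcb->srb_waiting_list);

    /* was srb_going_to_waiting_move(dcb, srb) */
    list_move(&srb->list, &dcb->srb_waiting_list);

    /* was srb_free_insert(acb, srb) */
    list_add_tail(&srb->list, &acb->srb_free_list);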
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index c3fc34b9964d..ac7da9db7317 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -369,19 +369,28 @@ static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
struct scatterlist *sg = scsi_sglist(cmd);
- int dir = cmd->sc_data_direction;
- int total, i;
+ int total = 0, i;
- if (dir == DMA_NONE)
+ if (cmd->sc_data_direction == DMA_NONE)
return;
- spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ /*
+ * For pseudo DMA and PIO we need the virtual address instead of
+ * a dma address, so perform an identity mapping.
+ */
+ spriv->num_sg = scsi_sg_count(cmd);
+ for (i = 0; i < spriv->num_sg; i++) {
+ sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
+ total += sg_dma_len(&sg[i]);
+ }
+ } else {
+ spriv->num_sg = scsi_dma_map(cmd);
+ for (i = 0; i < spriv->num_sg; i++)
+ total += sg_dma_len(&sg[i]);
+ }
spriv->cur_residue = sg_dma_len(sg);
spriv->cur_sg = sg;
-
- total = 0;
- for (i = 0; i < spriv->u.num_sg; i++)
- total += sg_dma_len(&sg[i]);
spriv->tot_residue = total;
}
@@ -441,13 +450,8 @@ static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
- struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
- int dir = cmd->sc_data_direction;
-
- if (dir == DMA_NONE)
- return;
-
- esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ scsi_dma_unmap(cmd);
}
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
@@ -478,17 +482,6 @@ static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
spriv->tot_residue = ent->saved_tot_residue;
}
-static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
-{
- if (cmd->cmd_len == 6 ||
- cmd->cmd_len == 10 ||
- cmd->cmd_len == 12) {
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
- } else {
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
- }
-}
-
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
if (esp->rev > ESP100A) {
@@ -624,6 +617,26 @@ static void esp_free_lun_tag(struct esp_cmd_entry *ent,
}
}
+static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ ent->sense_ptr = ent->cmd->sense_buffer;
+ if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
+ ent->sense_dma = (uintptr_t)ent->sense_ptr;
+ return;
+ }
+
+ ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+}
+
+static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
+{
+ if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
+ dma_unmap_single(esp->dev, ent->sense_dma,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
+ ent->sense_ptr = NULL;
+}
+
/* When a contingent allegiance condition is created, we force feed a
* REQUEST_SENSE command to the device to fetch the sense data. I
* tried many other schemes, relying on the scsi error handling layer
@@ -645,12 +658,7 @@ static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
if (!ent->sense_ptr) {
esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
tgt, lun);
-
- ent->sense_ptr = cmd->sense_buffer;
- ent->sense_dma = esp->ops->map_single(esp,
- ent->sense_ptr,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
+ esp_map_sense(esp, ent);
}
ent->saved_sense_ptr = ent->sense_ptr;
@@ -717,10 +725,10 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
static void esp_maybe_execute_command(struct esp *esp)
{
struct esp_target_data *tp;
- struct esp_lun_data *lp;
struct scsi_device *dev;
struct scsi_cmnd *cmd;
struct esp_cmd_entry *ent;
+ bool select_and_stop = false;
int tgt, lun, i;
u32 val, start_cmd;
u8 *p;
@@ -743,7 +751,6 @@ static void esp_maybe_execute_command(struct esp *esp)
tgt = dev->id;
lun = dev->lun;
tp = &esp->target[tgt];
- lp = dev->hostdata;
list_move(&ent->list, &esp->active_cmds);
@@ -752,7 +759,8 @@ static void esp_maybe_execute_command(struct esp *esp)
esp_map_dma(esp, cmd);
esp_save_pointers(esp, ent);
- esp_check_command_len(esp, cmd);
+ if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
+ select_and_stop = true;
p = esp->command_block;
@@ -793,42 +801,22 @@ static void esp_maybe_execute_command(struct esp *esp)
tp->flags &= ~ESP_TGT_CHECK_NEGO;
}
- /* Process it like a slow command. */
- if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ /* If there are multiple message bytes, use Select and Stop */
+ if (esp->msg_out_len)
+ select_and_stop = true;
}
build_identify:
- /* If we don't have a lun-data struct yet, we're probing
- * so do not disconnect. Also, do not disconnect unless
- * we have a tag on this command.
- */
- if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
- *p++ = IDENTIFY(1, lun);
- else
- *p++ = IDENTIFY(0, lun);
+ *p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);
if (ent->tag[0] && esp->rev == ESP100) {
/* ESP100 lacks select w/atn3 command, use select
* and stop instead.
*/
- esp->flags |= ESP_FLAG_DOING_SLOWCMD;
+ select_and_stop = true;
}
- if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
- start_cmd = ESP_CMD_SELA;
- if (ent->tag[0]) {
- *p++ = ent->tag[0];
- *p++ = ent->tag[1];
-
- start_cmd = ESP_CMD_SA3;
- }
-
- for (i = 0; i < cmd->cmd_len; i++)
- *p++ = cmd->cmnd[i];
-
- esp->select_state = ESP_SELECT_BASIC;
- } else {
+ if (select_and_stop) {
esp->cmd_bytes_left = cmd->cmd_len;
esp->cmd_bytes_ptr = &cmd->cmnd[0];
@@ -843,6 +831,19 @@ build_identify:
start_cmd = ESP_CMD_SELAS;
esp->select_state = ESP_SELECT_MSGOUT;
+ } else {
+ start_cmd = ESP_CMD_SELA;
+ if (ent->tag[0]) {
+ *p++ = ent->tag[0];
+ *p++ = ent->tag[1];
+
+ start_cmd = ESP_CMD_SA3;
+ }
+
+ for (i = 0; i < cmd->cmd_len; i++)
+ *p++ = cmd->cmnd[i];
+
+ esp->select_state = ESP_SELECT_BASIC;
}
val = tgt;
if (esp->rev == FASHME)
@@ -902,9 +903,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
}
if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
/* Restore the message/status bytes to what we actually
* saw originally. Also, report that we are providing
@@ -965,7 +964,7 @@ static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_
cmd->scsi_done = done;
spriv = ESP_CMD_PRIV(cmd);
- spriv->u.dma_addr = ~(dma_addr_t)0x0;
+ spriv->num_sg = 0;
list_add_tail(&ent->list, &esp->queued_cmds);
@@ -1252,14 +1251,10 @@ static int esp_finish_select(struct esp *esp)
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, cmd->device->hostdata);
tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
- esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
esp->cmd_bytes_ptr = NULL;
esp->cmd_bytes_left = 0;
} else {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE,
- DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
+ esp_unmap_sense(esp, ent);
}
/* Now that the state is unwound properly, put back onto
@@ -1303,9 +1298,8 @@ static int esp_finish_select(struct esp *esp)
esp_flush_fifo(esp);
}
- /* If we are doing a slow command, negotiation, etc.
- * we'll do the right thing as we transition to the
- * next phase.
+ /* If we are doing a Select And Stop command, negotiation, etc.
+ * we'll do the right thing as we transition to the next phase.
*/
esp_event(esp, ESP_EVENT_CHECK_PHASE);
return 0;
@@ -1338,6 +1332,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
bytes_sent = esp->data_dma_len;
bytes_sent -= ecount;
+ bytes_sent -= esp->send_cmd_residual;
/*
* The am53c974 has a DMA 'peculiarity'. The doc states:
@@ -1358,7 +1353,7 @@ static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
u8 *ptr;
- ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
+ ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
&offset, &count);
if (likely(ptr)) {
*(ptr + offset) = bval;
@@ -2039,11 +2034,8 @@ static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
esp_free_lun_tag(ent, cmd->device->hostdata);
cmd->result = DID_RESET << 16;
- if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
- esp->ops->unmap_single(esp, ent->sense_dma,
- SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
- ent->sense_ptr = NULL;
- }
+ if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
+ esp_unmap_sense(esp, ent);
cmd->scsi_done(cmd);
list_del(&ent->list);
@@ -2382,7 +2374,7 @@ static const char *esp_chip_names[] = {
static struct scsi_transport_template *esp_transport_template;
-int scsi_esp_register(struct esp *esp, struct device *dev)
+int scsi_esp_register(struct esp *esp)
{
static int instance;
int err;
@@ -2402,10 +2394,10 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
esp_bootup_reset(esp);
- dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
+ dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
esp->host->unique_id, esp->regs, esp->dma_regs,
esp->host->irq);
- dev_printk(KERN_INFO, dev,
+ dev_printk(KERN_INFO, esp->dev,
"esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
esp->host->unique_id, esp_chip_names[esp->rev],
esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
@@ -2413,7 +2405,7 @@ int scsi_esp_register(struct esp *esp, struct device *dev)
/* Let the SCSI bus reset settle. */
ssleep(esp_bus_reset_settle);
- err = scsi_add_host(esp->host, dev);
+ err = scsi_add_host(esp->host, esp->dev);
if (err)
return err;
@@ -2790,3 +2782,131 @@ MODULE_PARM_DESC(esp_debug,
module_init(esp_init);
module_exit(esp_exit);
+
+#ifdef CONFIG_SCSI_ESP_PIO
+static inline unsigned int esp_wait_for_fifo(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+ if (fbytes)
+ return fbytes;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
+ esp_read8(ESP_STATUS));
+ return 0;
+}
+
+static inline int esp_wait_for_intr(struct esp *esp)
+{
+ int i = 500000;
+
+ do {
+ esp->sreg = esp_read8(ESP_STATUS);
+ if (esp->sreg & ESP_STAT_INTR)
+ return 0;
+
+ udelay(1);
+ } while (--i);
+
+ shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
+ esp->sreg);
+ return 1;
+}
+
+#define ESP_FIFO_SIZE 16
+
+void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd)
+{
+ u8 phase = esp->sreg & ESP_STAT_PMASK;
+
+ cmd &= ~ESP_CMD_DMA;
+ esp->send_cmd_error = 0;
+
+ if (write) {
+ u8 *dst = (u8 *)addr;
+ u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (1) {
+ if (!esp_wait_for_fifo(esp))
+ break;
+
+ *dst++ = readb(esp->fifo_reg);
+ --esp_count;
+
+ if (!esp_count)
+ break;
+
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & mask) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if (phase == ESP_MIP)
+ esp_write8(ESP_CMD_MOK, ESP_CMD);
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ } else {
+ unsigned int n = ESP_FIFO_SIZE;
+ u8 *src = (u8 *)addr;
+
+ scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ scsi_esp_cmd(esp, cmd);
+
+ while (esp_count) {
+ if (esp_wait_for_intr(esp)) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ if ((esp->sreg & ESP_STAT_PMASK) != phase)
+ break;
+
+ esp->ireg = esp_read8(ESP_INTRPT);
+ if (esp->ireg & ~ESP_INTR_BSERV) {
+ esp->send_cmd_error = 1;
+ break;
+ }
+
+ n = ESP_FIFO_SIZE -
+ (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
+
+ if (n > esp_count)
+ n = esp_count;
+ writesb(esp->fifo_reg, src, n);
+ src += n;
+ esp_count -= n;
+
+ esp_write8(ESP_CMD_TI, ESP_CMD);
+ }
+ }
+
+ esp->send_cmd_residual = esp_count;
+}
+EXPORT_SYMBOL(esp_send_pio_cmd);
+#endif
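
With ESP_FLAG_NO_DMA_MAP set, a PIO/pseudo-DMA front-end bypasses scsi_dma_map() entirely and esp_send_pio_cmd() is handed virtual addresses cast to u32. A hypothetical front-end hook-up follows (the fifo_reg arithmetic is an assumption; real drivers scale register offsets per bus):

    esp->dev = &pdev->dev;                 /* now struct device *, not void * */
    esp->flags |= ESP_FLAG_NO_DMA_MAP;     /* identity-map sglist entries */
    esp->fifo_reg = esp->regs + ESP_FDATA; /* assumed FIFO data register */

    err = scsi_esp_register(esp);          /* separate dev argument is gone */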
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 8163dca2071b..aa87a6b72dcc 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -249,11 +249,7 @@
#define SYNC_DEFP_FAST 0x19 /* 10mb/s */
struct esp_cmd_priv {
- union {
- dma_addr_t dma_addr;
- int num_sg;
- } u;
-
+ int num_sg;
int cur_residue;
struct scatterlist *cur_sg;
int tot_residue;
@@ -363,19 +359,6 @@ struct esp_driver_ops {
void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg);
u8 (*esp_read8)(struct esp *esp, unsigned long reg);
- /* Map and unmap DMA memory. Eventually the driver will be
- * converted to the generic DMA API as soon as SBUS is able to
- * cope with that. At such time we can remove this.
- */
- dma_addr_t (*map_single)(struct esp *esp, void *buf,
- size_t sz, int dir);
- int (*map_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
- void (*unmap_single)(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir);
- void (*unmap_sg)(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir);
-
/* Return non-zero if there is an IRQ pending. Usually this
* status bit lives in the DMA controller sitting in front of
* the ESP. This has to be accurate or else the ESP interrupt
@@ -435,7 +418,7 @@ struct esp {
const struct esp_driver_ops *ops;
struct Scsi_Host *host;
- void *dev;
+ struct device *dev;
struct esp_cmd_entry *active_cmd;
@@ -490,11 +473,11 @@ struct esp {
u32 flags;
#define ESP_FLAG_DIFFERENTIAL 0x00000001
#define ESP_FLAG_RESETTING 0x00000002
-#define ESP_FLAG_DOING_SLOWCMD 0x00000004
#define ESP_FLAG_WIDE_CAPABLE 0x00000008
#define ESP_FLAG_QUICKIRQ_CHECK 0x00000010
#define ESP_FLAG_DISABLE_SYNC 0x00000020
#define ESP_FLAG_USE_FIFO 0x00000040
+#define ESP_FLAG_NO_DMA_MAP 0x00000080
u8 select_state;
#define ESP_SELECT_NONE 0x00 /* Not selecting */
@@ -532,7 +515,7 @@ struct esp {
u32 min_period;
u32 radelay;
- /* Slow command state. */
+ /* ESP_CMD_SELAS command state */
u8 *cmd_bytes_ptr;
int cmd_bytes_left;
@@ -540,6 +523,11 @@ struct esp {
void *dma;
int dmarev;
+
+ /* These are used by esp_send_pio_cmd() */
+ u8 __iomem *fifo_reg;
+ int send_cmd_error;
+ u32 send_cmd_residual;
};
/* A front-end driver for the ESP chip should do the following in
@@ -568,16 +556,18 @@ struct esp {
* example, the DMA engine has to be reset before ESP can
* be programmed.
* 11) If necessary, call dev_set_drvdata() as needed.
- * 12) Call scsi_esp_register() with prepared 'esp' structure
- * and a device pointer if possible.
+ * 12) Call scsi_esp_register() with prepared 'esp' structure.
* 13) Check scsi_esp_register() return value, release all resources
* if an error was returned.
*/
extern struct scsi_host_template scsi_esp_template;
-extern int scsi_esp_register(struct esp *, struct device *);
+extern int scsi_esp_register(struct esp *);
extern void scsi_esp_unregister(struct esp *);
extern irqreturn_t scsi_esp_intr(int, void *);
extern void scsi_esp_cmd(struct esp *, u8);
+extern void esp_send_pio_cmd(struct esp *esp, u32 dma_addr, u32 esp_count,
+ u32 dma_count, int write, u8 cmd);
+
#endif /* !(_ESP_SCSI_H) */
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index c7bf316d8e83..844ef688fa91 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -836,8 +836,8 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
u32 fcp_bytes_written = 0;
unsigned long flags;
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
skb = buf->os_buf;
fp = (struct fc_frame *)skb;
buf->os_buf = NULL;
@@ -977,9 +977,8 @@ int fnic_alloc_rq_frame(struct vnic_rq *rq)
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
skb_put(skb, len);
- pa = pci_map_single(fnic->pdev, skb->data, len, PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
r = -ENOMEM;
printk(KERN_ERR "PCI mapping failed with error %d\n", r);
goto free_skb;
@@ -998,8 +997,8 @@ void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(rq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
@@ -1018,7 +1017,6 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
struct ethhdr *eth_hdr;
struct vlan_ethhdr *vlan_hdr;
unsigned long flags;
- int r;
if (!fnic->vlan_hw_insert) {
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
@@ -1038,11 +1036,10 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
}
- pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
goto free_skb;
}
@@ -1058,7 +1055,7 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
irq_restore:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
- pci_unmap_single(fnic->pdev, pa, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE);
free_skb:
kfree_skb(skb);
}
@@ -1115,9 +1112,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
if (FC_FCOE_VER)
FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);
- pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
-
- if (pci_dma_mapping_error(fnic->pdev, pa)) {
+ pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, pa)) {
ret = -ENOMEM;
printk(KERN_ERR "DMA map failed with error %d\n", ret);
goto free_skb_on_err;
@@ -1131,8 +1127,7 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
spin_lock_irqsave(&fnic->wq_lock[0], flags);
if (!vnic_wq_desc_avail(wq)) {
- pci_unmap_single(fnic->pdev, pa,
- tot_len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE);
ret = -1;
goto irq_restore;
}
@@ -1247,8 +1242,8 @@ static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
struct fc_frame *fp = (struct fc_frame *)skb;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(fp_skb(fp));
buf->os_buf = NULL;
}
@@ -1290,8 +1285,8 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct fc_frame *fp = buf->os_buf;
struct fnic *fnic = vnic_dev_priv(wq->vdev);
- pci_unmap_single(fnic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(fp_skb(fp));
buf->os_buf = NULL;
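
The fnic streaming-mapping conversions above all follow the same shape: dma_map_single() on the struct device, then dma_mapping_error() in place of pci_dma_mapping_error(). A sketch of the checked-mapping idiom:

    pa = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, pa)) {
            /* mapping failed: no address to unmap, just bail out */
            return -ENOMEM;
    }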
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index e52599f44170..cc461fd7bef1 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -611,30 +611,15 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 64-bit first, and
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
shost_printk(KERN_ERR, fnic->lport->host,
"No usable DMA configuration "
"aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 32-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
- } else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- shost_printk(KERN_ERR, fnic->lport->host,
- "Unable to obtain 64-bit DMA "
- "for consistent allocations, aborting.\n");
- goto err_out_release_regions;
- }
}
/* Map vNIC resources from BAR0 */
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 8cbd3c9f0b4c..96acfcecd540 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -126,17 +126,17 @@ static void fnic_release_ioreq_buf(struct fnic *fnic,
struct scsi_cmnd *sc)
{
if (io_req->sgl_list_pa)
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
scsi_dma_unmap(sc);
if (io_req->sgl_cnt)
mempool_free(io_req->sgl_list_alloc,
fnic->io_sgl_pool[io_req->sgl_type]);
if (io_req->sense_buf_pa)
- pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&fnic->pdev->dev, io_req->sense_buf_pa,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
/* Free up Copy Wq descriptors. Called with copy_wq lock held */
@@ -330,7 +330,6 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
int flags;
u8 exch_flags;
struct scsi_lun fc_lun;
- int r;
if (sg_count) {
/* For each SGE, create a device desc entry */
@@ -342,30 +341,25 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
desc++;
}
- io_req->sgl_list_pa = pci_map_single
- (fnic->pdev,
- io_req->sgl_list,
- sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sgl_list_pa);
- if (r) {
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ io_req->sgl_list_pa = dma_map_single(&fnic->pdev->dev,
+ io_req->sgl_list,
+ sizeof(io_req->sgl_list[0]) * sg_count,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sgl_list_pa)) {
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
}
- io_req->sense_buf_pa = pci_map_single(fnic->pdev,
+ io_req->sense_buf_pa = dma_map_single(&fnic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- r = pci_dma_mapping_error(fnic->pdev, io_req->sense_buf_pa);
- if (r) {
- pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&fnic->pdev->dev, io_req->sense_buf_pa)) {
+ dma_unmap_single(&fnic->pdev->dev, io_req->sgl_list_pa,
sizeof(io_req->sgl_list[0]) * sg_count,
- PCI_DMA_TODEVICE);
- printk(KERN_ERR "PCI mapping failed with error %d\n", r);
+ DMA_TO_DEVICE);
+ printk(KERN_ERR "DMA mapping failed\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -2272,33 +2266,17 @@ clean_pending_aborts_end:
static inline int
fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag, ret = SCSI_NO_TAG;
-
- BUG_ON(!bqt);
- if (!bqt) {
- pr_err("Tags are not supported\n");
- goto end;
- }
-
- do {
- tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
- if (tag >= bqt->max_depth) {
- pr_err("Tag allocation failure\n");
- goto end;
- }
- } while (test_and_set_bit(tag, bqt->tag_map));
+ struct request_queue *q = sc->request->q;
+ struct request *dummy;
- bqt->tag_index[tag] = sc->request;
- sc->request->tag = tag;
- sc->tag = tag;
- if (!sc->request->special)
- sc->request->special = sc;
+ dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT);
+ if (IS_ERR(dummy))
+ return SCSI_NO_TAG;
- ret = tag;
+ sc->tag = sc->request->tag = dummy->tag;
+ sc->request->special = sc;
-end:
- return ret;
+ return dummy->tag;
}
/**
@@ -2308,20 +2286,9 @@ end:
static inline void
fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
{
- struct blk_queue_tag *bqt = fnic->lport->host->bqt;
- int tag = sc->request->tag;
+ struct request *dummy = sc->request->special;
- if (tag == SCSI_NO_TAG)
- return;
-
- BUG_ON(!bqt || !bqt->tag_index[tag]);
- if (!bqt)
- return;
-
- bqt->tag_index[tag] = NULL;
- clear_bit(tag, bqt->tag_map);
-
- return;
+ blk_mq_free_request(dummy);
}
/*
@@ -2380,19 +2347,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
tag = sc->request->tag;
if (unlikely(tag < 0)) {
/*
- * XXX(hch): current the midlayer fakes up a struct
- * request for the explicit reset ioctls, and those
- * don't have a tag allocated to them. The below
- * code pokes into midlayer structures to paper over
- * this design issue, but that won't work for blk-mq.
- *
- * Either someone who can actually test the hardware
- * will have to come up with a similar hack for the
- * blk-mq case, or we'll have to bite the bullet and
- * fix the way the EH ioctls work for real, but until
- * that happens we fail these explicit requests here.
+ * Really should fix the midlayer to pass in a proper
+ * request for ioctls...
*/
-
tag = fnic_scsi_host_start_tag(fnic, sc);
if (unlikely(tag == SCSI_NO_TAG))
goto fnic_device_reset_end;
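The streaming-mapping conversions in fnic_scsi.c follow the standard
generic-DMA pattern: the handle returned by dma_map_single() must be
checked with dma_mapping_error() before it is posted to hardware, and
unmapped later with the same size and direction. A hedged sketch, with
placeholder buffer and length names:

	dma_addr_t pa;

	pa = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, pa))
		return -ENOMEM;	/* never hand a failed mapping to the HW */

	/* ... hardware consumes the buffer ... */

	dma_unmap_single(&pdev->dev, pa, len, DMA_TO_DEVICE);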
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index ba69d6112fa1..434447ea24b8 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -195,9 +195,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
ring->size_unaligned,
- &ring->base_addr_unaligned);
+ &ring->base_addr_unaligned, GFP_KERNEL);
if (!ring->descs_unaligned) {
printk(KERN_ERR
@@ -221,7 +221,7 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -298,9 +298,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -361,8 +361,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = 1000;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -523,9 +523,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = 1000;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -647,21 +647,21 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
kfree(vdev);
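pci_alloc_consistent() was a compatibility wrapper that always allocated
with GFP_ATOMIC; the dma_alloc_coherent() calls above make the gfp
explicit, and GFP_KERNEL is appropriate because these paths run in
process context. The basic pairing, sketched with placeholder names:

	void *va;
	dma_addr_t pa;

	va = dma_alloc_coherent(&pdev->dev, size, &pa, GFP_KERNEL);
	if (!va)
		return -ENOMEM;

	/* ... use va, program pa into the device ... */

	dma_free_coherent(&pdev->dev, size, va, pa);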
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c7d2e201abe..0ddb53c8a2e2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -34,6 +34,7 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_RESERVED_IPTT_CNT 96
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -217,7 +218,7 @@ struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
- int (*slot_index_alloc)(struct hisi_hba *hisi_hba, int *slot_idx,
+ int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a4e2e6aa9a6b..b3f01d5b821b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -183,7 +183,14 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
- hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ unsigned long flags;
+
+ if (hisi_hba->hw->slot_index_alloc || (slot_idx >=
+ hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) {
+ spin_lock_irqsave(&hisi_hba->lock, flags);
+ hisi_sas_slot_index_clear(hisi_hba, slot_idx);
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ }
}
static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
@@ -193,24 +200,34 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
set_bit(slot_idx, bitmap);
}
-static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, int *slot_idx)
+static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
+ struct scsi_cmnd *scsi_cmnd)
{
- unsigned int index;
+ int index;
void *bitmap = hisi_hba->slot_index_tags;
+ unsigned long flags;
+
+ if (scsi_cmnd)
+ return scsi_cmnd->request->tag;
+ spin_lock_irqsave(&hisi_hba->lock, flags);
index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- hisi_hba->last_slot_index + 1);
+ hisi_hba->last_slot_index + 1);
if (index >= hisi_hba->slot_index_count) {
- index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
- 0);
- if (index >= hisi_hba->slot_index_count)
+ index = find_next_zero_bit(bitmap,
+ hisi_hba->slot_index_count,
+ hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT);
+ if (index >= hisi_hba->slot_index_count) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
}
hisi_sas_slot_index_set(hisi_hba, index);
- *slot_idx = index;
hisi_hba->last_slot_index = index;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
- return 0;
+ return index;
}
static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
@@ -249,9 +266,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot->idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
@@ -287,13 +302,13 @@ static int hisi_sas_task_prep(struct sas_task *task,
int *pass)
{
struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_hba *hisi_hba;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_port *port;
struct hisi_sas_slot *slot;
struct hisi_sas_cmd_hdr *cmd_hdr_base;
struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
+ struct device *dev;
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
struct hisi_sas_dq *dq;
@@ -314,6 +329,9 @@ static int hisi_sas_task_prep(struct sas_task *task,
return -ECOMM;
}
+ hisi_hba = dev_to_hisi_hba(device);
+ dev = hisi_hba->dev;
+
if (DEV_IS_GONE(sas_dev)) {
if (sas_dev)
dev_info(dev, "task prep: device %d not ready\n",
@@ -381,16 +399,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
goto err_out_dma_unmap;
}
- spin_lock_irqsave(&hisi_hba->lock, flags);
if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
- device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
- if (rc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else {
+ struct scsi_cmnd *scsi_cmnd = NULL;
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
+
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scsi_cmnd = qc->scsicmd;
+ } else {
+ scsi_cmnd = task->uldd_task;
+ }
+ }
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
+ }
+ if (rc < 0)
goto err_out_dma_unmap;
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags);
@@ -451,9 +480,7 @@ static int hisi_sas_task_prep(struct sas_task *task,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out_dma_unmap:
if (!sas_protocol_ata(task->task_proto)) {
if (task->num_scatter) {
@@ -904,6 +931,9 @@ static void hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
_r.maximum_linkrate = max;
_r.minimum_linkrate = min;
+ sas_phy->phy->maximum_linkrate = max;
+ sas_phy->phy->minimum_linkrate = min;
+
hisi_hba->hw->phy_disable(hisi_hba, phy_no);
msleep(100);
hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
@@ -950,8 +980,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
static void hisi_sas_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -960,13 +989,17 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
struct sas_task_slow *slow = from_timer(slow, t, timer);
struct sas_task *task = slow->task;
unsigned long flags;
+ bool is_completed = true;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ is_completed = false;
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
- complete(&task->slow_task->completion);
+ if (!is_completed)
+ complete(&task->slow_task->completion);
}
#define TASK_TIMEOUT 20
@@ -1019,8 +1052,16 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
struct hisi_sas_slot *slot = task->lldd_task;
dev_err(dev, "abort tmf: TMF task timeout and not done\n");
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush the tasklet to avoid freeing the task
+ * before the I/O completion path uses it
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
goto ex_err;
} else
@@ -1396,6 +1437,17 @@ static int hisi_sas_abort_task(struct sas_task *task)
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ struct hisi_sas_slot *slot = task->lldd_task;
+ struct hisi_sas_cq *cq;
+
+ if (slot) {
+ /*
+ * flush the tasklet to avoid freeing the task
+ * before the I/O completion path uses it
+ */
+ cq = &hisi_hba->cq[slot->dlvry_queue];
+ tasklet_kill(&cq->tasklet);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
rc = TMF_RESP_FUNC_COMPLETE;
goto out;
@@ -1451,12 +1503,19 @@ static int hisi_sas_abort_task(struct sas_task *task)
/* SMP */
struct hisi_sas_slot *slot = task->lldd_task;
u32 tag = slot->idx;
+ struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];
rc = hisi_sas_internal_task_abort(hisi_hba, device,
HISI_SAS_INT_ABT_CMD, tag);
if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
- task->lldd_task)
- hisi_sas_do_release_task(hisi_hba, task, slot);
+ task->lldd_task) {
+ /*
+ * flush the tasklet to avoid freeing the task
+ * before the I/O completion path uses it
+ */
+ tasklet_kill(&cq->tasklet);
+ slot->task = NULL;
+ }
}
out:
@@ -1705,14 +1764,11 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
- spin_lock_irqsave(&hisi_hba->lock, flags);
- rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
- if (rc) {
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+ if (rc < 0)
goto err_out;
- }
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
spin_lock_irqsave(&dq->lock, flags_dq);
@@ -1748,7 +1804,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_lock_irqsave(&task->task_state_lock, flags);
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
@@ -1759,9 +1814,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
return 0;
err_out_tag:
- spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
- spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
@@ -1823,8 +1876,16 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
struct hisi_sas_slot *slot = task->lldd_task;
- if (slot)
+ if (slot) {
+ struct hisi_sas_cq *cq =
+ &hisi_hba->cq[slot->dlvry_queue];
+ /*
+ * flush the tasklet to avoid freeing the task
+ * before the I/O completion path uses it
+ */
+ tasklet_kill(&cq->tasklet);
slot->task = NULL;
+ }
dev_err(dev, "internal task abort: timeout and not done.\n");
res = -EIO;
goto exit;
@@ -1861,10 +1922,6 @@ static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
hisi_sas_port_notify_formed(sas_phy);
}
-static void hisi_sas_port_deformed(struct asd_sas_phy *sas_phy)
-{
-}
-
static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
u8 reg_index, u8 reg_count, u8 *write_data)
{
@@ -1954,10 +2011,9 @@ static struct sas_domain_function_template hisi_sas_transport_ops = {
.lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
.lldd_lu_reset = hisi_sas_lu_reset,
.lldd_query_task = hisi_sas_query_task,
- .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
+ .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
.lldd_port_formed = hisi_sas_port_formed,
- .lldd_port_deformed = hisi_sas_port_deformed,
- .lldd_write_gpio = hisi_sas_write_gpio,
+ .lldd_write_gpio = hisi_sas_write_gpio,
};
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
@@ -2120,6 +2176,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
hisi_sas_init_mem(hisi_hba);
hisi_sas_slot_index_init(hisi_hba);
+ hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
if (!hisi_hba->wq) {
@@ -2323,8 +2381,15 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ if (hisi_hba->hw->slot_index_alloc) {
+ shost->can_queue = hisi_hba->hw->max_command_entries;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ } else {
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ }
sha->sas_ha_name = DRV_NAME;
sha->dev = hisi_hba->dev;
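The hisi_sas rework splits the IPTT space in two: commands that carry a
struct scsi_cmnd simply reuse the blk-mq tag, while internal commands
(TMFs and aborts) draw from a window of HISI_SAS_RESERVED_IPTT_CNT slots
at the top of the range, still protected by hisi_hba->lock. Shrinking
shost->can_queue by the same amount keeps the two ranges from colliding.
A condensed sketch of the allocator (reserved_start stands in for
max_command_entries - HISI_SAS_RESERVED_IPTT_CNT):

	if (scsi_cmnd)		/* blk-mq already owns this tag */
		return scsi_cmnd->request->tag;

	spin_lock_irqsave(&hisi_hba->lock, flags);
	index = find_next_zero_bit(bitmap, slot_index_count, reserved_start);
	if (index >= slot_index_count) {
		spin_unlock_irqrestore(&hisi_hba->lock, flags);
		return -SAS_QUEUE_FULL;
	}
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
	return index;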
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 8f60f0e04599..f0e457e6884e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1809,7 +1809,6 @@ static struct scsi_host_template sht_v1_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9c5c5a601332..cc36b6473e98 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -770,7 +770,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
/* This function needs to be protected from pre-emption. */
static int
-slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
+slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba,
struct domain_device *device)
{
int sata_dev = dev_is_sata(device);
@@ -778,6 +778,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
struct hisi_sas_device *sas_dev = device->lldd_dev;
int sata_idx = sas_dev->sata_idx;
int start, end;
+ unsigned long flags;
if (!sata_dev) {
/*
@@ -801,11 +802,14 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
end = 64 * (sata_idx + 2);
}
+ spin_lock_irqsave(&hisi_hba->lock, flags);
while (1) {
start = find_next_zero_bit(bitmap,
hisi_hba->slot_index_count, start);
- if (start >= end)
+ if (start >= end) {
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
return -SAS_QUEUE_FULL;
+ }
/*
* SAS IPTT bit0 should be 1, and SATA IPTT bit0 should be 0.
*/
@@ -815,8 +819,8 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
}
set_bit(start, bitmap);
- *slot_idx = start;
- return 0;
+ spin_unlock_irqrestore(&hisi_hba->lock, flags);
+ return start;
}
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
@@ -2483,7 +2487,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -2493,6 +2496,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -3560,7 +3564,6 @@ static struct scsi_host_template sht_v2_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 08b503e274b8..bd4ce38b98d2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -127,6 +127,7 @@
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
#define SL_CFG (PORT_BASE + 0x84)
+#define AIP_LIMIT (PORT_BASE + 0x90)
#define SL_CONTROL (PORT_BASE + 0x94)
#define SL_CONTROL_NOTIFY_EN_OFF 0
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
@@ -431,6 +432,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
(u32)((1ULL << hisi_hba->queue_count) - 1));
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
+ hisi_sas_write32(hisi_hba, CFG_AGING_TIME, 0x1);
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
@@ -441,7 +443,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xfefefefe);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xfefefefe);
if (pdev->revision >= 0x21)
- hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7fff);
+ hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xffff7aff);
else
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0xfffe20ff);
hisi_sas_write32(hisi_hba, CHNL_PHYUPDOWN_INT_MSK, 0x0);
@@ -495,6 +497,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
+ hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -1751,7 +1754,6 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
}
out:
- hisi_sas_slot_task_free(hisi_hba, task, slot);
sts = ts->stat;
spin_lock_irqsave(&task->task_state_lock, flags);
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
@@ -1761,6 +1763,7 @@ out:
}
task->task_state_flags |= SAS_TASK_STATE_DONE;
spin_unlock_irqrestore(&task->task_state_lock, flags);
+ hisi_sas_slot_task_free(hisi_hba, task, slot);
if (!is_internal && (task->task_proto != SAS_PROTOCOL_SMP)) {
spin_lock_irqsave(&device->done_lock, flags);
@@ -2098,7 +2101,6 @@ static struct scsi_host_template sht_v3_hw = {
.scan_start = hisi_sas_scan_start,
.change_queue_depth = sas_change_queue_depth,
.bios_param = sas_bios_param,
- .can_queue = 1,
.this_id = -1,
.sg_tablesize = SG_ALL,
.max_sectors = SCSI_DEFAULT_MAX_SECTORS,
@@ -2108,6 +2110,7 @@ static struct scsi_host_template sht_v3_hw = {
.target_destroy = sas_target_destroy,
.ioctl = sas_ioctl,
.shost_attrs = host_attrs,
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR,
};
static const struct hisi_sas_hw hisi_sas_v3_hw = {
@@ -2245,8 +2248,10 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_channel = 1;
shost->max_cmd_len = 16;
shost->sg_tablesize = min_t(u16, SG_ALL, HISI_SAS_SGE_PAGE_CNT);
- shost->can_queue = hisi_hba->hw->max_command_entries;
- shost->cmd_per_lun = hisi_hba->hw->max_command_entries;
+ shost->can_queue = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
+ shost->cmd_per_lun = hisi_hba->hw->max_command_entries -
+ HISI_SAS_RESERVED_IPTT_CNT;
sha->sas_ha_name = DRV_NAME;
sha->dev = dev;
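In both the v2 and v3 completion handlers, hisi_sas_slot_task_free()
moves below the task-state update so the slot is released only after
SAS_TASK_STATE_DONE is visible under task_state_lock; together with the
tasklet_kill() calls added to the abort paths, this closes the window in
which an abort could race completion and touch a freed task. The
ordering, in outline:

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_STATE_DONE;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	/* abort paths test DONE under the same lock, so this is now safe */
	hisi_sas_slot_task_free(hisi_hba, task, slot);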
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c120929d4ffe..c9cccf35e9d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2240,8 +2240,8 @@ static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
chain_size = le32_to_cpu(cp->sg[0].length);
- temp64 = pci_map_single(h->pdev, chain_block, chain_size,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_size,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
cp->sg->address = 0;
@@ -2261,7 +2261,7 @@ static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
chain_sg = cp->sg;
temp64 = le64_to_cpu(chain_sg->address);
chain_size = le32_to_cpu(cp->sg[0].length);
- pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, temp64, chain_size, DMA_TO_DEVICE);
}
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
@@ -2277,8 +2277,8 @@ static int hpsa_map_sg_chain_block(struct ctlr_info *h,
chain_len = sizeof(*chain_sg) *
(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
chain_sg->Len = cpu_to_le32(chain_len);
- temp64 = pci_map_single(h->pdev, chain_block, chain_len,
- PCI_DMA_TODEVICE);
+ temp64 = dma_map_single(&h->pdev->dev, chain_block, chain_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&h->pdev->dev, temp64)) {
/* prevent subsequent unmapping */
chain_sg->Addr = cpu_to_le64(0);
@@ -2297,8 +2297,8 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
return;
chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
- pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
- le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
+ dma_unmap_single(&h->pdev->dev, le64_to_cpu(chain_sg->Addr),
+ le32_to_cpu(chain_sg->Len), DMA_TO_DEVICE);
}
@@ -2759,13 +2759,13 @@ static void complete_scsi_command(struct CommandList *cp)
return hpsa_cmd_free_and_done(h, cp, cmd);
}
-static void hpsa_pci_unmap(struct pci_dev *pdev,
- struct CommandList *c, int sg_used, int data_direction)
+static void hpsa_pci_unmap(struct pci_dev *pdev, struct CommandList *c,
+ int sg_used, enum dma_data_direction data_direction)
{
int i;
for (i = 0; i < sg_used; i++)
- pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
+ dma_unmap_single(&pdev->dev, le64_to_cpu(c->SG[i].Addr),
le32_to_cpu(c->SG[i].Len),
data_direction);
}
@@ -2774,17 +2774,17 @@ static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
- int data_direction)
+ enum dma_data_direction data_direction)
{
u64 addr64;
- if (buflen == 0 || data_direction == PCI_DMA_NONE) {
+ if (buflen == 0 || data_direction == DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = cpu_to_le16(0);
return 0;
}
- addr64 = pci_map_single(pdev, buf, buflen, data_direction);
+ addr64 = dma_map_single(&pdev->dev, buf, buflen, data_direction);
if (dma_mapping_error(&pdev->dev, addr64)) {
/* Prevent subsequent unmap of something never mapped */
cp->Header.SGList = 0;
@@ -2845,7 +2845,8 @@ static u32 lockup_detected(struct ctlr_info *h)
#define MAX_DRIVER_CMD_RETRIES 25
static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
- struct CommandList *c, int data_direction, unsigned long timeout_msecs)
+ struct CommandList *c, enum dma_data_direction data_direction,
+ unsigned long timeout_msecs)
{
int backoff_time = 10, retry_count = 0;
int rc;
@@ -2969,8 +2970,8 @@ static int hpsa_do_receive_diagnostic(struct ctlr_info *h, u8 *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3022,8 +3023,8 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
rc = -1;
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3306,8 +3307,8 @@ static int hpsa_get_raid_map(struct ctlr_info *h,
cmd_free(h, c);
return -1;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3349,8 +3350,8 @@ static int hpsa_bmic_sense_subsystem_information(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3377,8 +3378,8 @@ static int hpsa_bmic_id_controller(struct ctlr_info *h,
if (rc)
goto out;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -3408,7 +3409,7 @@ static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
c->Request.CDB[2] = bmic_device_index & 0xff;
c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
- hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
@@ -3484,7 +3485,7 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
else
c->Request.CDB[5] = 0;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
NO_TIMEOUT);
if (rc)
goto out;
@@ -3731,8 +3732,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
if (extended_response)
c->Request.CDB[1] = extended_response;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if (rc)
goto out;
ei = c->err_info;
@@ -6320,8 +6321,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Fill in the scatter gather information */
if (iocommand.buf_size > 0) {
- temp64 = pci_map_single(h->pdev, buff,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff,
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
c->SG[0].Addr = cpu_to_le64(0);
c->SG[0].Len = cpu_to_le32(0);
@@ -6335,7 +6336,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (iocommand.buf_size > 0)
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, 1, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (rc) {
rc = -EIO;
@@ -6381,13 +6382,9 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
return -EINVAL;
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (copy_from_user(ioc, argp, sizeof(*ioc))) {
- status = -EFAULT;
+ ioc = vmemdup_user(argp, sizeof(*ioc));
+ if (IS_ERR(ioc)) {
+ status = PTR_ERR(ioc);
goto cleanup1;
}
if ((ioc->buf_size < 1) &&
@@ -6447,14 +6444,14 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (ioc->buf_size > 0) {
int i;
for (i = 0; i < sg_used; i++) {
- temp64 = pci_map_single(h->pdev, buff[i],
- buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ temp64 = dma_map_single(&h->pdev->dev, buff[i],
+ buff_size[i], DMA_BIDIRECTIONAL);
if (dma_mapping_error(&h->pdev->dev,
(dma_addr_t) temp64)) {
c->SG[i].Addr = cpu_to_le64(0);
c->SG[i].Len = cpu_to_le32(0);
hpsa_pci_unmap(h->pdev, c, i,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
status = -ENOMEM;
goto cleanup0;
}
@@ -6467,7 +6464,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
NO_TIMEOUT);
if (sg_used)
- hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+ hpsa_pci_unmap(h->pdev, c, sg_used, DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
if (status) {
status = -EIO;
@@ -6505,7 +6502,7 @@ cleanup1:
kfree(buff);
}
kfree(buff_size);
- kfree(ioc);
+ kvfree(ioc);
return status;
}
@@ -6579,7 +6576,7 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
int cmd_type)
{
- int pci_dir = XFER_NONE;
+ enum dma_data_direction dir = DMA_NONE;
c->cmd_type = CMD_IOCTL_PEND;
c->scsi_cmd = SCSI_CMD_BUSY;
@@ -6785,18 +6782,18 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
switch (GET_DIR(c->Request.type_attr_dir)) {
case XFER_READ:
- pci_dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
break;
case XFER_WRITE:
- pci_dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
break;
case XFER_NONE:
- pci_dir = PCI_DMA_NONE;
+ dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ dir = DMA_BIDIRECTIONAL;
}
- if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ if (hpsa_map_one(h->pdev, c, buff, size, dir))
return -1;
return 0;
}
@@ -6992,13 +6989,13 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
* CCISS commands, so they must be allocated from the lower 4GiB of
* memory.
*/
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
iounmap(vaddr);
return err;
}
- cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+ cmd = dma_alloc_coherent(&pdev->dev, cmd_sz, &paddr64, GFP_KERNEL);
if (cmd == NULL) {
iounmap(vaddr);
return -ENOMEM;
@@ -7047,7 +7044,7 @@ static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
return -ETIMEDOUT;
}
- pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+ dma_free_coherent(&pdev->dev, cmd_sz, cmd, paddr64);
if (tag & HPSA_ERROR_BIT) {
dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
@@ -7914,7 +7911,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
kfree(h->cmd_pool_bits);
h->cmd_pool_bits = NULL;
if (h->cmd_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct CommandList),
h->cmd_pool,
h->cmd_pool_dhandle);
@@ -7922,7 +7919,7 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)
h->cmd_pool_dhandle = 0;
}
if (h->errinfo_pool) {
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(struct ErrorInfo),
h->errinfo_pool,
h->errinfo_pool_dhandle);
@@ -7936,12 +7933,12 @@ static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
h->cmd_pool_bits = kcalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG),
sizeof(unsigned long),
GFP_KERNEL);
- h->cmd_pool = pci_alloc_consistent(h->pdev,
+ h->cmd_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->cmd_pool),
- &(h->cmd_pool_dhandle));
- h->errinfo_pool = pci_alloc_consistent(h->pdev,
+ &h->cmd_pool_dhandle, GFP_KERNEL);
+ h->errinfo_pool = dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->errinfo_pool),
- &(h->errinfo_pool_dhandle));
+ &h->errinfo_pool_dhandle, GFP_KERNEL);
if ((h->cmd_pool_bits == NULL)
|| (h->cmd_pool == NULL)
|| (h->errinfo_pool == NULL)) {
@@ -8068,7 +8065,7 @@ static void hpsa_free_reply_queues(struct ctlr_info *h)
for (i = 0; i < h->nreply_queues; i++) {
if (!h->reply_queue[i].head)
continue;
- pci_free_consistent(h->pdev,
+ dma_free_coherent(&h->pdev->dev,
h->reply_queue_size,
h->reply_queue[i].head,
h->reply_queue[i].busaddr);
@@ -8594,11 +8591,11 @@ reinit_after_soft_reset:
number_of_controllers++;
/* configure PCI DMA stuff */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (rc == 0) {
dac = 1;
} else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc == 0) {
dac = 0;
} else {
@@ -8797,8 +8794,8 @@ static void hpsa_flush_cache(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD)) {
goto out;
}
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, DEFAULT_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ DEFAULT_TIMEOUT);
if (rc)
goto out;
if (c->err_info->CommandStatus != 0)
@@ -8833,8 +8830,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8845,8 +8842,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_TODEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_TO_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -8855,8 +8852,8 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
RAID_CTLR_LUNID, TYPE_CMD))
goto errout;
- rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
- PCI_DMA_FROMDEVICE, NO_TIMEOUT);
+ rc = hpsa_scsi_do_simple_cmd_with_retry(h, c, DMA_FROM_DEVICE,
+ NO_TIMEOUT);
if ((rc != 0) || (c->err_info->CommandStatus != 0))
goto errout;
@@ -9228,9 +9225,9 @@ static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
IOACCEL1_COMMANDLIST_ALIGNMENT);
h->ioaccel_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- &(h->ioaccel_cmd_pool_dhandle));
+ &h->ioaccel_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel1_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9281,9 +9278,9 @@ static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
IOACCEL2_COMMANDLIST_ALIGNMENT);
h->ioaccel2_cmd_pool =
- pci_alloc_consistent(h->pdev,
+ dma_alloc_coherent(&h->pdev->dev,
h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- &(h->ioaccel2_cmd_pool_dhandle));
+ &h->ioaccel2_cmd_pool_dhandle, GFP_KERNEL);
h->ioaccel2_blockFetchTable =
kmalloc(((h->ioaccel_maxsg + 1) *
@@ -9356,9 +9353,10 @@ static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
h->reply_queue_size = h->max_commands * sizeof(u64);
for (i = 0; i < h->nreply_queues; i++) {
- h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
+ h->reply_queue[i].head = dma_alloc_coherent(&h->pdev->dev,
h->reply_queue_size,
- &(h->reply_queue[i].busaddr));
+ &h->reply_queue[i].busaddr,
+ GFP_KERNEL);
if (!h->reply_queue[i].head) {
rc = -ENOMEM;
goto clean1; /* rq, ioaccel */
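Besides the DMA conversion, the big-passthru ioctl above swaps an
open-coded kmalloc()/copy_from_user() pair for vmemdup_user(), which
duplicates a user buffer into kernel memory (falling back to vmalloc for
large sizes) and returns an ERR_PTR on failure; the matching release is
kvfree(). The idiom, sketched with a placeholder argument type:

	struct my_ioctl_args *ioc;

	ioc = vmemdup_user(argp, sizeof(*ioc));
	if (IS_ERR(ioc))
		return PTR_ERR(ioc);

	/* ... validate and act on ioc ... */

	kvfree(ioc);	/* not kfree(): the copy may be vmalloc'ed */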
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index f42a619198c4..e63aadd10dfd 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -2266,7 +2266,6 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
/*
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
- target_wait_for_sess_cmds(se_sess);
target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index bd6ac6b5980a..ee8a1ecd58fd 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -208,7 +208,7 @@ module_param(ips, charp, 0);
#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
- PCI_DMA_BIDIRECTIONAL : \
+ DMA_BIDIRECTIONAL : \
scb->scsi_cmd->sc_data_direction)
#ifdef IPS_DEBUG
@@ -1529,11 +1529,12 @@ ips_alloc_passthru_buffer(ips_ha_t * ha, int length)
if (ha->ioctl_data && length <= ha->ioctl_len)
return 0;
/* there is no buffer or it's not big enough, allocate a new one */
- bigger_buf = pci_alloc_consistent(ha->pcidev, length, &dma_busaddr);
+ bigger_buf = dma_alloc_coherent(&ha->pcidev->dev, length, &dma_busaddr,
+ GFP_KERNEL);
if (bigger_buf) {
/* free the old memory */
- pci_free_consistent(ha->pcidev, ha->ioctl_len, ha->ioctl_data,
- ha->ioctl_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
+ ha->ioctl_data, ha->ioctl_busaddr);
/* use the new memory */
ha->ioctl_data = (char *) bigger_buf;
ha->ioctl_len = length;
@@ -1678,9 +1679,8 @@ ips_flash_copperhead(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
} else if (!ha->flash_data) {
datasize = pt->CoppCP.cmd.flashfw.total_packets *
pt->CoppCP.cmd.flashfw.count;
- ha->flash_data = pci_alloc_consistent(ha->pcidev,
- datasize,
- &ha->flash_busaddr);
+ ha->flash_data = dma_alloc_coherent(&ha->pcidev->dev,
+ datasize, &ha->flash_busaddr, GFP_KERNEL);
if (!ha->flash_data){
printk(KERN_WARNING "Unable to allocate a flash buffer\n");
return IPS_FAILURE;
@@ -1858,7 +1858,7 @@ ips_flash_firmware(ips_ha_t * ha, ips_passthru_t * pt, ips_scb_t * scb)
scb->data_len = ha->flash_datasize;
scb->data_busaddr =
- pci_map_single(ha->pcidev, ha->flash_data, scb->data_len,
+ dma_map_single(&ha->pcidev->dev, ha->flash_data, scb->data_len,
IPS_DMA_DIR(scb));
scb->flags |= IPS_SCB_MAP_SINGLE;
scb->cmd.flashfw.command_id = IPS_COMMAND_ID(ha, scb);
@@ -1880,8 +1880,8 @@ ips_free_flash_copperhead(ips_ha_t * ha)
if (ha->flash_data == ips_FlashData)
test_and_clear_bit(0, &ips_FlashDataInUse);
else if (ha->flash_data)
- pci_free_consistent(ha->pcidev, ha->flash_len, ha->flash_data,
- ha->flash_busaddr);
+ dma_free_coherent(&ha->pcidev->dev, ha->flash_len,
+ ha->flash_data, ha->flash_busaddr);
ha->flash_data = NULL;
}
@@ -3485,6 +3485,7 @@ ips_send_cmd(ips_ha_t * ha, ips_scb_t * scb)
case START_STOP:
scb->scsi_cmd->result = DID_OK << 16;
+ break;
case TEST_UNIT_READY:
case INQUIRY:
@@ -4212,7 +4213,7 @@ ips_free(ips_ha_t * ha)
if (ha) {
if (ha->enq) {
- pci_free_consistent(ha->pcidev, sizeof(IPS_ENQ),
+ dma_free_coherent(&ha->pcidev->dev, sizeof(IPS_ENQ),
ha->enq, ha->enq_busaddr);
ha->enq = NULL;
}
@@ -4221,7 +4222,7 @@ ips_free(ips_ha_t * ha)
ha->conf = NULL;
if (ha->adapt) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_ADAPTER) +
sizeof (IPS_IO_CMD), ha->adapt,
ha->adapt->hw_status_start);
@@ -4229,7 +4230,7 @@ ips_free(ips_ha_t * ha)
}
if (ha->logical_drive_info) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
sizeof (IPS_LD_INFO),
ha->logical_drive_info,
ha->logical_drive_info_dma_addr);
@@ -4243,7 +4244,7 @@ ips_free(ips_ha_t * ha)
ha->subsys = NULL;
if (ha->ioctl_data) {
- pci_free_consistent(ha->pcidev, ha->ioctl_len,
+ dma_free_coherent(&ha->pcidev->dev, ha->ioctl_len,
ha->ioctl_data, ha->ioctl_busaddr);
ha->ioctl_data = NULL;
ha->ioctl_datasize = 0;
@@ -4276,11 +4277,11 @@ static int
ips_deallocatescbs(ips_ha_t * ha, int cmds)
{
if (ha->scbs) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * cmds,
ha->scbs->sg_list.list,
ha->scbs->sg_busaddr);
- pci_free_consistent(ha->pcidev, sizeof (ips_scb_t) * cmds,
+ dma_free_coherent(&ha->pcidev->dev, sizeof (ips_scb_t) * cmds,
ha->scbs, ha->scbs->scb_busaddr);
ha->scbs = NULL;
} /* end if */
@@ -4307,17 +4308,16 @@ ips_allocatescbs(ips_ha_t * ha)
METHOD_TRACE("ips_allocatescbs", 1);
/* Allocate memory for the SCBs */
- ha->scbs =
- pci_alloc_consistent(ha->pcidev, ha->max_cmds * sizeof (ips_scb_t),
- &command_dma);
+ ha->scbs = dma_alloc_coherent(&ha->pcidev->dev,
+ ha->max_cmds * sizeof (ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (ha->scbs == NULL)
return 0;
- ips_sg.list =
- pci_alloc_consistent(ha->pcidev,
- IPS_SGLIST_SIZE(ha) * IPS_MAX_SG *
- ha->max_cmds, &sg_dma);
+ ips_sg.list = dma_alloc_coherent(&ha->pcidev->dev,
+ IPS_SGLIST_SIZE(ha) * IPS_MAX_SG * ha->max_cmds,
+ &sg_dma, GFP_KERNEL);
if (ips_sg.list == NULL) {
- pci_free_consistent(ha->pcidev,
+ dma_free_coherent(&ha->pcidev->dev,
ha->max_cmds * sizeof (ips_scb_t), ha->scbs,
command_dma);
return 0;
@@ -4446,8 +4446,8 @@ ips_freescb(ips_ha_t * ha, ips_scb_t * scb)
if (scb->flags & IPS_SCB_MAP_SG)
scsi_dma_unmap(scb->scsi_cmd);
else if (scb->flags & IPS_SCB_MAP_SINGLE)
- pci_unmap_single(ha->pcidev, scb->data_busaddr, scb->data_len,
- IPS_DMA_DIR(scb));
+ dma_unmap_single(&ha->pcidev->dev, scb->data_busaddr,
+ scb->data_len, IPS_DMA_DIR(scb));
/* check to make sure this is not our "special" scb */
if (IPS_COMMAND_ID(ha, scb) < (ha->max_cmds - 1)) {
@@ -4559,7 +4559,8 @@ ips_flush_and_reset(ips_ha_t *ha)
dma_addr_t command_dma;
/* Create a usable SCB */
- scb = pci_alloc_consistent(ha->pcidev, sizeof(ips_scb_t), &command_dma);
+ scb = dma_alloc_coherent(&ha->pcidev->dev, sizeof(ips_scb_t),
+ &command_dma, GFP_KERNEL);
if (scb) {
memset(scb, 0, sizeof(ips_scb_t));
ips_init_scb(ha, scb);
@@ -4594,7 +4595,7 @@ ips_flush_and_reset(ips_ha_t *ha)
/* Now RESET and INIT the adapter */
(*ha->func.reset) (ha);
- pci_free_consistent(ha->pcidev, sizeof(ips_scb_t), scb, command_dma);
+ dma_free_coherent(&ha->pcidev->dev, sizeof(ips_scb_t), scb, command_dma);
return;
}
@@ -6926,29 +6927,30 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
* are guaranteed to be < 4G.
*/
if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
- !pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(64))) {
(ha)->flags |= IPS_HA_ENH_SG;
} else {
- if (pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&ha->pcidev->dev, DMA_BIT_MASK(32)) != 0) {
printk(KERN_WARNING "Unable to set DMA Mask\n");
return ips_abort_init(ha, index);
}
}
if(ips_cd_boot && !ips_FlashData){
- ips_FlashData = pci_alloc_consistent(pci_dev, PAGE_SIZE << 7,
- &ips_flashbusaddr);
+ ips_FlashData = dma_alloc_coherent(&pci_dev->dev,
+ PAGE_SIZE << 7, &ips_flashbusaddr, GFP_KERNEL);
}
- ha->enq = pci_alloc_consistent(pci_dev, sizeof (IPS_ENQ),
- &ha->enq_busaddr);
+ ha->enq = dma_alloc_coherent(&pci_dev->dev, sizeof (IPS_ENQ),
+ &ha->enq_busaddr, GFP_KERNEL);
if (!ha->enq) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host inquiry structure\n");
return ips_abort_init(ha, index);
}
- ha->adapt = pci_alloc_consistent(pci_dev, sizeof (IPS_ADAPTER) +
- sizeof (IPS_IO_CMD), &dma_address);
+ ha->adapt = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_ADAPTER) + sizeof (IPS_IO_CMD),
+ &dma_address, GFP_KERNEL);
if (!ha->adapt) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate host adapt & dummy structures\n");
@@ -6959,7 +6961,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
- ha->logical_drive_info = pci_alloc_consistent(pci_dev, sizeof (IPS_LD_INFO), &dma_address);
+ ha->logical_drive_info = dma_alloc_coherent(&pci_dev->dev,
+ sizeof (IPS_LD_INFO), &dma_address, GFP_KERNEL);
if (!ha->logical_drive_info) {
IPS_PRINTK(KERN_WARNING, pci_dev,
"Unable to allocate logical drive info structure\n");
@@ -6997,8 +7000,8 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
if (ips_ioctlsize < PAGE_SIZE)
ips_ioctlsize = PAGE_SIZE;
- ha->ioctl_data = pci_alloc_consistent(pci_dev, ips_ioctlsize,
- &ha->ioctl_busaddr);
+ ha->ioctl_data = dma_alloc_coherent(&pci_dev->dev, ips_ioctlsize,
+ &ha->ioctl_busaddr, GFP_KERNEL);
ha->ioctl_len = ips_ioctlsize;
if (!ha->ioctl_data) {
IPS_PRINTK(KERN_WARNING, pci_dev,
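One non-DMA fix hides in the ips_send_cmd() hunk above: START_STOP
previously fell through into the TEST_UNIT_READY/INQUIRY handling, and
the added break stops that, so a start/stop command now returns DID_OK
without also being rebuilt as an inquiry. The shape of the fix:

	switch (scb->scsi_cmd->cmnd[0]) {
	case START_STOP:
		scb->scsi_cmd->result = DID_OK << 16;
		break;		/* was an implicit fall-through */
	case TEST_UNIT_READY:
	case INQUIRY:
		/* ... build the simulated response ... */
		break;
	}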
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 1ee3868ade07..7b5deae68d33 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2717,9 +2717,9 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq)
* the task management request.
* @task_request: the handle to the task request object to start.
*/
-enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
- struct isci_remote_device *idev,
- struct isci_request *ireq)
+enum sci_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
{
enum sci_status status;
@@ -2728,7 +2728,7 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
"%s: SCIC Controller starting task from invalid "
"state\n",
__func__);
- return SCI_TASK_FAILURE_INVALID_STATE;
+ return SCI_FAILURE_INVALID_STATE;
}
status = sci_remote_device_start_task(ihost, idev, ireq);
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index b3539928073c..6bc3f022630a 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -489,7 +489,7 @@ enum sci_status sci_controller_start_io(
struct isci_remote_device *idev,
struct isci_request *ireq);
-enum sci_task_status sci_controller_start_task(
+enum sci_status sci_controller_start_task(
struct isci_host *ihost,
struct isci_remote_device *idev,
struct isci_request *ireq);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index ed197bc8e801..2f151708b59a 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1626,9 +1626,9 @@ static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq,
if (status == SCI_SUCCESS) {
if (ireq->stp.rsp.status & ATA_ERR)
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
} else {
- status = SCI_IO_FAILURE_RESPONSE_VALID;
+ status = SCI_FAILURE_IO_RESPONSE_VALID;
}
if (status != SCI_SUCCESS) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 6dcaed0c1fc8..fb6eba331ac6 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -258,7 +258,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
struct isci_tmf *tmf, unsigned long timeout_ms)
{
DECLARE_COMPLETION_ONSTACK(completion);
- enum sci_task_status status = SCI_TASK_FAILURE;
+ enum sci_status status = SCI_FAILURE;
struct isci_request *ireq;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
@@ -301,7 +301,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
/* start the TMF io. */
status = sci_controller_start_task(ihost, idev, ireq);
- if (status != SCI_TASK_SUCCESS) {
+ if (status != SCI_SUCCESS) {
dev_dbg(&ihost->pdev->dev,
"%s: start_io failed - status = 0x%x, request = %p\n",
__func__,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index b025a0b74341..23354f206533 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -800,7 +800,8 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
- &addr, param, buf);
+ &addr,
+ (enum iscsi_param)param, buf);
default:
return iscsi_host_get_param(shost, param, buf);
}
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 6eb5ff3e2e61..1ad28262b00a 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -38,30 +38,6 @@ static u8 jazz_esp_read8(struct esp *esp, unsigned long reg)
return *(volatile u8 *)(esp->regs + reg);
}
-static dma_addr_t jazz_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int jazz_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void jazz_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void jazz_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int jazz_esp_irq_pending(struct esp *esp)
{
if (jazz_esp_read8(esp, ESP_STATUS) & ESP_STAT_INTR)
@@ -117,10 +93,6 @@ static int jazz_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops jazz_esp_ops = {
.esp_write8 = jazz_esp_write8,
.esp_read8 = jazz_esp_read8,
- .map_single = jazz_esp_map_single,
- .map_sg = jazz_esp_map_sg,
- .unmap_single = jazz_esp_unmap_single,
- .unmap_sg = jazz_esp_unmap_sg,
.irq_pending = jazz_esp_irq_pending,
.reset_dma = jazz_esp_reset_dma,
.dma_drain = jazz_esp_dma_drain,
@@ -182,7 +154,7 @@ static int esp_jazz_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 4fae253d4f3d..b1bd283be51c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1872,7 +1872,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
struct fc_lport *lport = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
struct fc_fcp_pkt *fsp;
- struct fc_rport_libfc_priv *rpriv;
int rval;
int rc = 0;
struct fc_stats *stats;
@@ -1894,8 +1893,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
goto out;
}
- rpriv = rport->dd_data;
-
if (!fc_fcp_lport_queue_ready(lport)) {
if (lport->qfull) {
if (fc_fcp_can_queue_ramp_down(lport))
@@ -2295,8 +2292,7 @@ int fc_setup_fcp(void)
void fc_destroy_fcp(void)
{
- if (scsi_pkt_cachep)
- kmem_cache_destroy(scsi_pkt_cachep);
+ kmem_cache_destroy(scsi_pkt_cachep);
}
/**
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a450df..1e1c0f1b9e69 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1038,8 +1038,11 @@ static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
struct fc_els_ls_rjt *rjt;
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PLOGI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, -FC_EX_ELS_RJT);
}
out:
@@ -1158,8 +1161,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
op = fc_frame_payload_op(fp);
if (op == ELS_LS_ACC) {
pp = fc_frame_payload_get(fp, sizeof(*pp));
- if (!pp)
+ if (!pp) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
FC_RPORT_DBG(rdata, "PRLI spp_flags = 0x%x spp_type 0x%x\n",
@@ -1172,8 +1177,10 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
}
- if (pp->prli.prli_spp_len < sizeof(pp->spp))
+ if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
+ fc_rport_error_retry(rdata, -FC_EX_SEQ_ERR);
goto out;
+ }
fcp_parm = ntohl(pp->spp.spp_params);
if (fcp_parm & FCP_SPPF_RETRY)
@@ -1211,8 +1218,11 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
} else {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
- FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
- rjt->er_reason, rjt->er_explan);
+ if (!rjt)
+ FC_RPORT_DBG(rdata, "PRLI bad response\n");
+ else
+ FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
+ rjt->er_reason, rjt->er_explan);
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
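Both reject handlers gain NULL checks because fc_frame_payload_get()
returns NULL when the received frame is too short to hold the requested
structure, so a truncated LS_RJT would otherwise be dereferenced. The
guarded pattern:

	struct fc_els_ls_rjt *rjt;

	rjt = fc_frame_payload_get(fp, sizeof(*rjt));
	if (!rjt)
		FC_RPORT_DBG(rdata, "PLOGI bad response\n");
	else
		FC_RPORT_DBG(rdata, "PLOGI ELS rejected, reason %x expl %x\n",
			     rjt->er_reason, rjt->er_explan);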
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 64a958a99f6a..4f6cdf53e913 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -654,7 +654,7 @@ void sas_probe_sata(struct asd_sas_port *port)
/* if libata could not bring the link up, don't surface
* the device
*/
- if (ata_dev_disabled(sas_to_ata_dev(dev)))
+ if (!ata_dev_enabled(sas_to_ata_dev(dev)))
sas_fail_probe(dev, __func__, -ENODEV);
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 0148ae62a52a..dde433aa59c2 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -260,7 +260,7 @@ static void sas_suspend_devices(struct work_struct *work)
* phy_list is not being mutated
*/
list_for_each_entry(phy, &port->phy_list, port_phy_el) {
- if (si->dft->lldd_port_formed)
+ if (si->dft->lldd_port_deformed)
si->dft->lldd_port_deformed(phy);
phy->suspended = 1;
port->suspended = 1;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99cb60df..0d1f72752ca2 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -48,17 +48,16 @@ static void smp_task_timedout(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&task->task_state_lock, flags);
- if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ complete(&task->slow_task->completion);
+ }
spin_unlock_irqrestore(&task->task_state_lock, flags);
-
- complete(&task->slow_task->completion);
}
static void smp_task_done(struct sas_task *task)
{
- if (!del_timer(&task->slow_task->timer))
- return;
+ del_timer(&task->slow_task->timer);
complete(&task->slow_task->completion);
}
@@ -2054,14 +2053,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
return res;
}
- /* delete the old link */
- if (SAS_ADDR(phy->attached_sas_addr) &&
- SAS_ADDR(sas_addr) != SAS_ADDR(phy->attached_sas_addr)) {
- SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
- SAS_ADDR(dev->sas_addr), phy_id,
- SAS_ADDR(phy->attached_sas_addr));
- sas_unregister_devs_sas_addr(dev, phy_id, last);
- }
+ /* we always have to delete the old device when we get here */
+ SAS_DPRINTK("ex %016llx phy 0x%x replace %016llx\n",
+ SAS_ADDR(dev->sas_addr), phy_id,
+ SAS_ADDR(phy->attached_sas_addr));
+ sas_unregister_devs_sas_addr(dev, phy_id, last);
return sas_discover_new(dev, phy_id);
}
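The smp_task_timedout()/smp_task_done() pair is reworked so exactly one
side signals the completion: the timer handler completes only when it
marked the task aborted first, and smp_task_done() always completes
after killing the timer rather than bailing out when del_timer() finds
the timer already inactive. The hisi_sas TMF timeout handler earlier in
this series gets the same treatment. In outline:

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		complete(&task->slow_task->completion);
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);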
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 43732e8d1347..c1eb2b00ca7f 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -52,7 +52,7 @@ struct lpfc_sli2_slim;
downloads using bsg */
#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */
-#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */
+#define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */
#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */
#define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */
#define LPFC_MIN_SG_SEG_CNT 32 /* sg element count per scsi cmnd */
@@ -583,6 +583,25 @@ struct lpfc_mbox_ext_buf_ctx {
struct list_head ext_dmabuf_list;
};
+struct lpfc_ras_fwlog {
+ uint8_t *fwlog_buff;
+ uint32_t fw_buffcount; /* Number of buffers posted to FW */
+#define LPFC_RAS_BUFF_ENTERIES 16 /* Each entry can hold max of 64k */
+#define LPFC_RAS_MAX_ENTRY_SIZE (64 * 1024)
+#define LPFC_RAS_MIN_BUFF_POST_SIZE (256 * 1024)
+#define LPFC_RAS_MAX_BUFF_POST_SIZE (1024 * 1024)
+ uint32_t fw_loglevel; /* Log level set */
+ struct lpfc_dmabuf lwpd;
+ struct list_head fwlog_buff_list;
+
+ /* RAS support status on adapter */
+ bool ras_hwsupport; /* RAS Support available on HW or not */
+ bool ras_enabled; /* RAS enabled for the function */
+#define LPFC_RAS_DISABLE_LOGGING 0x00
+#define LPFC_RAS_ENABLE_LOGGING 0x01
+ bool ras_active; /* RAS logging running state */
+};
+
struct lpfc_hba {
/* SCSI interface function jump table entries */
int (*lpfc_new_scsi_buf)
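The defines in the new lpfc_ras_fwlog structure fix the buffer geometry: each entry posted to the firmware is 64 KiB, the minimum posting is 256 KiB, and at most 16 entries (1 MiB) may be posted. A standalone sketch of the sizing math, mirroring what lpfc_sli4_ras_fwlog_init() later computes from the ras_fwlog_buffsize multiplier (illustrative only, not part of the patch):

#include <stdio.h>

#define LPFC_RAS_MAX_ENTRY_SIZE		(64 * 1024)
#define LPFC_RAS_MIN_BUFF_POST_SIZE	(256 * 1024)

int main(void)
{
	unsigned int mult;

	/* mult is the lpfc_ras_fwlog_buffsize module parameter, range [1..4] */
	for (mult = 1; mult <= 4; mult++) {
		unsigned int total = LPFC_RAS_MIN_BUFF_POST_SIZE * mult;
		unsigned int entries = total / LPFC_RAS_MAX_ENTRY_SIZE;

		printf("mult %u -> %u KiB in %u x 64 KiB buffers\n",
		       mult, total / 1024, entries);
	}
	return 0;
}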
@@ -790,6 +809,7 @@ struct lpfc_hba {
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
uint32_t cfg_nvme_seg_cnt;
+ uint32_t cfg_scsi_seg_cnt;
uint32_t cfg_sg_dma_buf_size;
uint64_t cfg_soft_wwnn;
uint64_t cfg_soft_wwpn;
@@ -833,6 +853,9 @@ struct lpfc_hba {
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
uint32_t cfg_enable_SmartSAN;
uint32_t cfg_enable_mds_diags;
+ uint32_t cfg_ras_fwlog_level;
+ uint32_t cfg_ras_fwlog_buffsize;
+ uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_fc4_type;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
@@ -963,6 +986,7 @@ struct lpfc_hba {
uint32_t intr_mode;
#define LPFC_INTR_ERROR 0xFFFFFFFF
struct list_head port_list;
+ spinlock_t port_list_lock; /* lock for port_list mutations */
struct lpfc_vport *pport; /* physical lpfc_vport pointer */
uint16_t max_vpi; /* Maximum virtual nports */
#define LPFC_MAX_VPI 0xFFFF /* Max number of VPI supported */
@@ -1092,6 +1116,9 @@ struct lpfc_hba {
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
+ /* RAS Support */
+ struct lpfc_ras_fwlog ras_fwlog;
+
uint8_t menlo_flag; /* menlo generic flags */
#define HBA_MENLO_SUPPORT 0x1 /* HBA supports menlo commands */
uint32_t iocb_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 1a6ed9b0a249..dda7f450b96d 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5358,15 +5358,74 @@ LPFC_ATTR(delay_discovery, 0, 0, 1,
/*
* lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count
- * This value can be set to values between 64 and 4096. The default value is
- * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer
- * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE).
+ * This value can be set to values between 64 and 4096. The default value
+ * is 64, but may be increased to allow for larger Max I/O sizes. The scsi
+ * and nvme layers will allow I/O sizes up to (MAX_SEG_COUNT * SEG_SIZE).
* Because of the additional overhead involved in setting up T10-DIF,
* this parameter will be limited to 128 if BlockGuard is enabled under SLI4
* and will be limited to 512 if BlockGuard is enabled under SLI3.
*/
-LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
- LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count");
+static uint lpfc_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+module_param(lpfc_sg_seg_cnt, uint, 0444);
+MODULE_PARM_DESC(lpfc_sg_seg_cnt, "Max Scatter Gather Segment Count");
+
+/**
+ * lpfc_sg_seg_cnt_show - Display the scatter/gather list sizes
+ * configured for the adapter
+ * @dev: class converted to a Scsi_host structure.
+ * @attr: device attribute, not used.
+ * @buf: on return contains a string with the list sizes
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sg_seg_cnt_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int len;
+
+ len = snprintf(buf, PAGE_SIZE, "SGL sz: %d total SGEs: %d\n",
+ phba->cfg_sg_dma_buf_size, phba->cfg_total_seg_cnt);
+
+ len += snprintf(buf + len, PAGE_SIZE - len, "Cfg: %d SCSI: %d NVME: %d\n",
+ phba->cfg_sg_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
+ return len;
+}
+
+static DEVICE_ATTR_RO(lpfc_sg_seg_cnt);
+
+/**
+ * lpfc_sg_seg_cnt_init - Set the hba sg_seg_cnt initial value
+ * @phba: lpfc_hba pointer.
+ * @val: contains the initial value
+ *
+ * Description:
+ * Validates the initial value is within range and assigns it to the
+ * adapter. If not in range, an error message is posted and the
+ * default value is assigned.
+ *
+ * Returns:
+ * zero if value is in range and is set
+ * -EINVAL if value was out of range
+ **/
+static int
+lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= LPFC_MIN_SG_SEG_CNT && val <= LPFC_MAX_SG_SEG_CNT) {
+ phba->cfg_sg_seg_cnt = val;
+ return 0;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0409 "LPFC_DRIVER_NAME"_sg_seg_cnt attribute cannot "
+ "be set to %d, allowed range is [%d, %d]\n",
+ val, LPFC_MIN_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT);
+ phba->cfg_sg_seg_cnt = LPFC_DEFAULT_SG_SEG_CNT;
+ return -EINVAL;
+}
/*
* lpfc_enable_mds_diags: Enable MDS Diagnostics
@@ -5377,6 +5436,31 @@ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MIN_SG_SEG_CNT,
LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics");
/*
+ * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size
+ * 0 = Disable firmware logging (default)
+ * [1-4] = Multiple of 1/4th MB of host memory for FW logging
+ * Value range [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging");
+
+/*
+ * lpfc_ras_fwlog_level: Firmware logging verbosity level
+ * Valid only if firmware logging is enabled
+ * 0 (least verbose) to 4 (most verbose)
+ * Value range is [0..4]. Default value is 0
+ */
+LPFC_ATTR_RW(ras_fwlog_level, 0, 0, 4, "Firmware Logging Level");
+
+/*
+ * lpfc_ras_fwlog_func: PCI function on which firmware logging is enabled
+ * Default function with RAS support: 0
+ * Value range is [0..7].
+ * FW logging is a global action; it is enabled through one specific
+ * port.
+ */
+LPFC_ATTR_RW(ras_fwlog_func, 0, 0, 7, "Firmware Logging Enabled on Function");
+
+/*
* lpfc_enable_bbcr: Enable BB Credit Recovery
* 0 = BB Credit Recovery disabled
* 1 = BB Credit Recovery enabled (default)
@@ -5501,6 +5585,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
&dev_attr_lpfc_enable_mds_diags,
+ &dev_attr_lpfc_ras_fwlog_buffsize,
+ &dev_attr_lpfc_ras_fwlog_level,
+ &dev_attr_lpfc_ras_fwlog_func,
&dev_attr_lpfc_enable_bbcr,
&dev_attr_lpfc_enable_dpp,
NULL,
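With dev_attr_lpfc_sg_seg_cnt and the three RAS attributes now in lpfc_hba_attrs[], they surface as files under the SCSI host device. A hedged userspace sketch of reading the new read-only attribute back; the /sys path and host number are assumptions based on how lpfc host attributes are normally exposed, not taken from the patch:

#include <stdio.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/class/scsi_host/host0/lpfc_sg_seg_cnt", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* "SGL sz: ..." / "Cfg: ..." */
	fclose(f);
	return 0;
}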
@@ -6587,6 +6674,20 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_sli_mode_init(phba, lpfc_sli_mode);
phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
+ lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
+ lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
+ lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func);
+
+
+ /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to
+ * accommodate 512K and 1M IOs in a single nvme buf and supply
+ * enough NVME LS iocb buffers for larger connectivity counts.
+ */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ phba->cfg_iocb_cnt = 5;
+ }
+
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 90745feca808..7bd7ae86bed5 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/bsg-lib.h>
+#include <linux/vmalloc.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -2843,9 +2844,6 @@ diag_cmd_data_alloc(struct lpfc_hba *phba,
if (nocopydata) {
bpl->tus.f.bdeFlags = 0;
- pci_dma_sync_single_for_device(phba->pcidev,
- dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
-
} else {
memset((uint8_t *)dmp->dma.virt, 0, cnt);
bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
@@ -5309,6 +5307,330 @@ job_error:
}
/**
+ * lpfc_check_fwlog_support: Check FW log support on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if FW logging is supported by the adapter
+ **/
+int
+lpfc_check_fwlog_support(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ if (ras_fwlog->ras_hwsupport == false)
+ return -EACCES;
+ else if (ras_fwlog->ras_enabled == false)
+ return -EPERM;
+ else
+ return 0;
+}
+
+/**
+ * lpfc_bsg_get_ras_config: Get RAS configuration settings
+ * @job: fc_bsg_job to handle
+ *
+ * Get RAS configuration values set.
+ **/
+static int
+lpfc_bsg_get_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_get_ras_config_reply *ras_reply;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6181 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_reply = (struct lpfc_bsg_get_ras_config_reply *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ /* Current logging state */
+ if (ras_fwlog->ras_active == true)
+ ras_reply->state = LPFC_RASLOG_STATE_RUNNING;
+ else
+ ras_reply->state = LPFC_RASLOG_STATE_STOPPED;
+
+ ras_reply->log_level = phba->ras_fwlog.fw_loglevel;
+ ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize;
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+ return rc;
+}
+
+/**
+ * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Disable FW logging into host memory on the adapter. This must
+ * be done before reading the logs from host memory.
+ **/
+static void
+lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ ras_fwlog->ras_active = false;
+
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+}
+
+/**
+ * lpfc_bsg_set_ras_config: Set FW logging parameters
+ * @job: fc_bsg_job to handle
+ *
+ * Set log-level parameters for FW-logging in host memory
+ **/
+static int
+lpfc_bsg_set_ras_config(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_bsg_set_ras_config_req *ras_req;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint8_t action = 0, log_level = 0;
+ int rc = 0;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_set_ras_config_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6182 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ /* Check FW log status */
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ ras_req = (struct lpfc_bsg_set_ras_config_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ action = ras_req->action;
+ log_level = ras_req->log_level;
+
+ if (action == LPFC_RASACTION_STOP_LOGGING) {
+ /* Check if already disabled */
+ if (ras_fwlog->ras_active == false) {
+ rc = -ESRCH;
+ goto ras_job_error;
+ }
+
+ /* Disable logging */
+ lpfc_ras_stop_fwlog(phba);
+ } else {
+ /* action = LPFC_RASACTION_START_LOGGING */
+ if (ras_fwlog->ras_active == true) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ /* Enable logging */
+ rc = lpfc_sli4_ras_fwlog_init(phba, log_level,
+ LPFC_RAS_ENABLE_LOGGING);
+ if (rc)
+ rc = -EINVAL;
+ }
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+/**
+ * lpfc_bsg_get_ras_lwpd: Get log write position data
+ * @job: fc_bsg_job to handle
+ *
+ * Get the offset and wrap count of the log messages written
+ * to host memory.
+ **/
+static int
+lpfc_bsg_get_ras_lwpd(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_bsg_get_ras_lwpd *ras_reply;
+ struct lpfc_hba *phba = vport->phba;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ uint32_t lwpd_offset = 0;
+ uint64_t wrap_value = 0;
+ int rc = 0;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_ras_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6183 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_reply = (struct lpfc_bsg_get_ras_lwpd *)
+ bsg_reply->reply_data.vendor_reply.vendor_rsp;
+
+ lwpd_offset = *((uint32_t *)ras_fwlog->lwpd.virt) & 0xffffffff;
+ ras_reply->offset = be32_to_cpu(lwpd_offset);
+
+ wrap_value = *((uint64_t *)ras_fwlog->lwpd.virt);
+ ras_reply->wrap_count = be32_to_cpu((wrap_value >> 32) & 0xffffffff);
+
+ras_job_error:
+ /* make error code available to userspace */
+ bsg_reply->result = rc;
+
+ /* complete the job back to userspace */
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
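The handler above treats the 8-byte LWPD as two big-endian dwords: the first is the current write offset, the second the wrap count. A standalone decode of a sample LWPD image under that assumption (ntohl() standing in for be32_to_cpu()):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

int main(void)
{
	/* Sample LWPD image: dword 0 = write offset, dword 1 = wrap count */
	uint8_t lwpd[8] = { 0x00, 0x01, 0xf0, 0x00,	/* offset 0x0001f000 */
			    0x00, 0x00, 0x00, 0x02 };	/* wrap count 2 */
	uint32_t off_be, wrap_be;

	memcpy(&off_be, &lwpd[0], sizeof(off_be));
	memcpy(&wrap_be, &lwpd[4], sizeof(wrap_be));
	printf("offset 0x%08" PRIx32 " wrap %" PRIu32 "\n",
	       ntohl(off_be), ntohl(wrap_be));
	return 0;
}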
+/**
+ * lpfc_bsg_get_ras_fwlog: Read FW log
+ * @job: fc_bsg_job to handle
+ *
+ * Copy the FW log into the passed buffer.
+ **/
+static int
+lpfc_bsg_get_ras_fwlog(struct bsg_job *job)
+{
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct lpfc_vport *vport = shost_priv(shost);
+ struct lpfc_hba *phba = vport->phba;
+ struct fc_bsg_request *bsg_request = job->request;
+ struct fc_bsg_reply *bsg_reply = job->reply;
+ struct lpfc_bsg_get_fwlog_req *ras_req;
+ uint32_t rd_offset, rd_index, offset, pending_wlen;
+ uint32_t boundary = 0, align_len = 0, write_len = 0;
+ void *dest, *src, *fwlog_buff;
+ struct lpfc_ras_fwlog *ras_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf, *next;
+ int rc = 0;
+
+ ras_fwlog = &phba->ras_fwlog;
+
+ rc = lpfc_check_fwlog_support(phba);
+ if (rc == -EACCES || rc == -EPERM)
+ goto ras_job_error;
+
+ /* Logging must be stopped before reading */
+ if (ras_fwlog->ras_active == true) {
+ rc = -EINPROGRESS;
+ goto ras_job_error;
+ }
+
+ if (job->request_len <
+ sizeof(struct fc_bsg_request) +
+ sizeof(struct lpfc_bsg_get_fwlog_req)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "6184 Received RAS_LOG request "
+ "below minimum size\n");
+ rc = -EINVAL;
+ goto ras_job_error;
+ }
+
+ ras_req = (struct lpfc_bsg_get_fwlog_req *)
+ bsg_request->rqst_data.h_vendor.vendor_cmd;
+ rd_offset = ras_req->read_offset;
+
+ /* Allocate memory to read fw log */
+ fwlog_buff = vmalloc(ras_req->read_size);
+ if (!fwlog_buff) {
+ rc = -ENOMEM;
+ goto ras_job_error;
+ }
+
+ rd_index = (rd_offset / LPFC_RAS_MAX_ENTRY_SIZE);
+ offset = (rd_offset % LPFC_RAS_MAX_ENTRY_SIZE);
+ pending_wlen = ras_req->read_size;
+ dest = fwlog_buff;
+
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list, list) {
+
+ if (dmabuf->buffer_tag < rd_index)
+ continue;
+
+ /* Align read to buffer size */
+ if (offset) {
+ boundary = ((dmabuf->buffer_tag + 1) *
+ LPFC_RAS_MAX_ENTRY_SIZE);
+
+ align_len = (boundary - offset);
+ write_len = min_t(u32, align_len,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ } else {
+ write_len = min_t(u32, pending_wlen,
+ LPFC_RAS_MAX_ENTRY_SIZE);
+ align_len = 0;
+ boundary = 0;
+ }
+ src = dmabuf->virt + offset;
+ memcpy(dest, src, write_len);
+
+ pending_wlen -= write_len;
+ if (!pending_wlen)
+ break;
+
+ dest += write_len;
+ offset = (offset + write_len) % LPFC_RAS_MAX_ENTRY_SIZE;
+ }
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ fwlog_buff, ras_req->read_size);
+
+ vfree(fwlog_buff);
+
+ras_job_error:
+ bsg_reply->result = rc;
+ bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len);
+
+ return rc;
+}
+
+
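The copy loop above walks the chain of 64 KiB buffers, turning the caller's flat read_offset into a starting buffer tag plus an intra-buffer offset. The arithmetic in isolation, with illustrative values:

#include <stdio.h>

#define ENTRY_SZ (64 * 1024)	/* LPFC_RAS_MAX_ENTRY_SIZE */

int main(void)
{
	unsigned int rd_offset = 200 * 1024;		/* arbitrary example */
	unsigned int rd_index = rd_offset / ENTRY_SZ;	/* first buffer tag */
	unsigned int offset = rd_offset % ENTRY_SZ;	/* offset within it */

	printf("read starts in buffer %u at byte %u\n", rd_index, offset);
	return 0;
}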
+/**
* lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
* @job: fc_bsg_job to handle
**/
@@ -5355,6 +5677,18 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
rc = lpfc_forced_link_speed(job);
break;
+ case LPFC_BSG_VENDOR_RAS_GET_LWPD:
+ rc = lpfc_bsg_get_ras_lwpd(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_FWLOG:
+ rc = lpfc_bsg_get_ras_fwlog(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_GET_CONFIG:
+ rc = lpfc_bsg_get_ras_config(job);
+ break;
+ case LPFC_BSG_VENDOR_RAS_SET_CONFIG:
+ rc = lpfc_bsg_set_ras_config(job);
+ break;
default:
rc = -EINVAL;
bsg_reply->reply_payload_rcv_len = 0;
@@ -5368,7 +5702,7 @@ lpfc_bsg_hst_vendor(struct bsg_job *job)
/**
* lpfc_bsg_request - handle a bsg request from the FC transport
- * @job: fc_bsg_job to handle
+ * @job: bsg_job to handle
**/
int
lpfc_bsg_request(struct bsg_job *job)
@@ -5402,7 +5736,7 @@ lpfc_bsg_request(struct bsg_job *job)
/**
* lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
- * @job: fc_bsg_job that has timed out
+ * @job: bsg_job that has timed out
*
* This function just aborts the job's IOCB. The aborted IOCB will return to
* the waiting function which will handle passing the error back to userspace
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 32347c87e3b4..820323f1139b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -38,6 +38,10 @@
#define LPFC_BSG_VENDOR_DIAG_MODE_END 10
#define LPFC_BSG_VENDOR_LINK_DIAG_TEST 11
#define LPFC_BSG_VENDOR_FORCED_LINK_SPEED 14
+#define LPFC_BSG_VENDOR_RAS_GET_LWPD 16
+#define LPFC_BSG_VENDOR_RAS_GET_FWLOG 17
+#define LPFC_BSG_VENDOR_RAS_GET_CONFIG 18
+#define LPFC_BSG_VENDOR_RAS_SET_CONFIG 19
struct set_ct_event {
uint32_t command;
@@ -296,6 +300,38 @@ struct forced_link_speed_support_reply {
uint8_t supported;
};
+struct lpfc_bsg_ras_req {
+ uint32_t command;
+};
+
+struct lpfc_bsg_get_fwlog_req {
+ uint32_t command;
+ uint32_t read_size;
+ uint32_t read_offset;
+};
+
+struct lpfc_bsg_get_ras_lwpd {
+ uint32_t offset;
+ uint32_t wrap_count;
+};
+
+struct lpfc_bsg_set_ras_config_req {
+ uint32_t command;
+ uint8_t action;
+#define LPFC_RASACTION_STOP_LOGGING 0x00
+#define LPFC_RASACTION_START_LOGGING 0x01
+ uint8_t log_level;
+};
+
+struct lpfc_bsg_get_ras_config_reply {
+ uint8_t state;
+#define LPFC_RASLOG_STATE_STOPPED 0x00
+#define LPFC_RASLOG_STATE_RUNNING 0x01
+ uint8_t log_level;
+ uint32_t log_buff_sz;
+};
+
+
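A hedged userspace sketch of filling the new set-config request the way lpfc_bsg_set_ras_config() expects it. The structure and constants are copied from this header; the surrounding FC BSG vendor-command plumbing (sg_io_v4 submission) is omitted:

#include <stdint.h>
#include <string.h>

struct lpfc_bsg_set_ras_config_req {
	uint32_t command;
	uint8_t action;
	uint8_t log_level;
};

#define LPFC_BSG_VENDOR_RAS_SET_CONFIG	19
#define LPFC_RASACTION_START_LOGGING	0x01

int main(void)
{
	struct lpfc_bsg_set_ras_config_req req;

	memset(&req, 0, sizeof(req));
	req.command = LPFC_BSG_VENDOR_RAS_SET_CONFIG;
	req.action = LPFC_RASACTION_START_LOGGING;
	req.log_level = 2;	/* verbosity, range [0..4] */
	/* ... hand req off via the FC BSG vendor interface ... */
	return 0;
}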
/* driver only */
#define SLI_CONFIG_NOT_HANDLED 0
#define SLI_CONFIG_HANDLED 1
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index bea24bc4410a..e01136507780 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -545,6 +545,13 @@ bool lpfc_find_next_oas_lun(struct lpfc_hba *, struct lpfc_name *,
int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
+/* RAS Interface */
+void lpfc_sli4_ras_init(struct lpfc_hba *phba);
+void lpfc_sli4_ras_setup(struct lpfc_hba *phba);
+int lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t fwlog_level,
+ uint32_t fwlog_enable);
+int lpfc_check_fwlog_support(struct lpfc_hba *phba);
+
/* NVME interfaces. */
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 1cbdc892ff95..789ad1502534 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -445,14 +445,14 @@ lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) {
struct lpfc_vport *vport_curr;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport_curr, &phba->port_list, listentry) {
if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport_curr;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
@@ -471,11 +471,6 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
"Parse GID_FTrsp: did:x%x flg:x%x x%x",
Did, ndlp->nlp_flag, vport->fc_flag);
- /* Don't assume the rport is always the previous
- * FC4 type.
- */
- ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
-
/* By default, the driver expects to support FCP FC4 */
if (fc4_type == FC_TYPE_FCP)
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index aec5b10a8c85..0c8005bb0f53 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -550,7 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
struct lpfc_nodelist *ndlp;
unsigned char *statep;
struct nvme_fc_local_port *localport;
- struct lpfc_nvmet_tgtport *tgtp;
struct nvme_fc_remote_port *nrport = NULL;
struct lpfc_nvme_rport *rport;
@@ -654,7 +653,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
"\nOutstanding IO x%x\n", outio);
if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
- tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf + len, size - len,
"\nNVME Targetport Entry ...\n");
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969e947c..f1c1faa74b46 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -7673,8 +7673,11 @@ void
lpfc_els_flush_all_cmd(struct lpfc_hba *phba)
{
struct lpfc_vport *vport;
+
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(vport, &phba->port_list, listentry)
lpfc_els_flush_cmd(vport);
+ spin_unlock_irq(&phba->port_list_lock);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index eb71877f12f8..f4deb862efc6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -4193,7 +4193,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+ if (ndlp->nlp_fc4_type ||
ndlp->nlp_DID == Fabric_DID ||
ndlp->nlp_DID == NameServer_DID ||
ndlp->nlp_DID == FDMI_DID) {
@@ -5428,12 +5428,10 @@ static void
lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
LIST_HEAD(completions);
- struct lpfc_sli *psli;
IOCB_t *icmd;
struct lpfc_iocbq *iocb, *next_iocb;
struct lpfc_sli_ring *pring;
- psli = &phba->sli;
pring = lpfc_phba_elsring(phba);
if (unlikely(!pring))
return;
@@ -5938,14 +5936,14 @@ lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
}
}
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return vport;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return NULL;
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 083f8c8706e5..bbd0a57e953f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -186,6 +186,7 @@ struct lpfc_sli_intf {
#define LPFC_CTL_PDEV_CTL_FRL_ALL 0x00
#define LPFC_CTL_PDEV_CTL_FRL_FC_FCOE 0x10
#define LPFC_CTL_PDEV_CTL_FRL_NIC 0x20
+#define LPFC_CTL_PDEV_CTL_DDL_RAS 0x1000000
#define LPFC_FW_DUMP_REQUEST (LPFC_CTL_PDEV_CTL_DD | LPFC_CTL_PDEV_CTL_FRST)
@@ -964,6 +965,7 @@ struct mbox_header {
/* Subsystem Definitions */
#define LPFC_MBOX_SUBSYSTEM_NA 0x0
#define LPFC_MBOX_SUBSYSTEM_COMMON 0x1
+#define LPFC_MBOX_SUBSYSTEM_LOWLEVEL 0xB
#define LPFC_MBOX_SUBSYSTEM_FCOE 0xC
/* Device Specific Definitions */
@@ -1030,6 +1032,9 @@ struct mbox_header {
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22
#define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23
+/* Low level Opcodes */
+#define LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION 0x37
+
/* Mailbox command structures */
struct eq_context {
uint32_t word0;
@@ -1162,6 +1167,45 @@ struct lpfc_mbx_nop {
uint32_t context[2];
};
+
+
+struct lpfc_mbx_set_ras_fwlog {
+ struct mbox_header header;
+ union {
+ struct {
+ uint32_t word4;
+#define lpfc_fwlog_enable_SHIFT 0
+#define lpfc_fwlog_enable_MASK 0x00000001
+#define lpfc_fwlog_enable_WORD word4
+#define lpfc_fwlog_loglvl_SHIFT 8
+#define lpfc_fwlog_loglvl_MASK 0x0000000F
+#define lpfc_fwlog_loglvl_WORD word4
+#define lpfc_fwlog_ra_SHIFT 15
+#define lpfc_fwlog_ra_MASK 0x00000008
+#define lpfc_fwlog_ra_WORD word4
+#define lpfc_fwlog_buffcnt_SHIFT 16
+#define lpfc_fwlog_buffcnt_MASK 0x000000FF
+#define lpfc_fwlog_buffcnt_WORD word4
+#define lpfc_fwlog_buffsz_SHIFT 24
+#define lpfc_fwlog_buffsz_MASK 0x000000FF
+#define lpfc_fwlog_buffsz_WORD word4
+ uint32_t word5;
+#define lpfc_fwlog_acqe_SHIFT 0
+#define lpfc_fwlog_acqe_MASK 0x0000FFFF
+#define lpfc_fwlog_acqe_WORD word5
+#define lpfc_fwlog_cqid_SHIFT 16
+#define lpfc_fwlog_cqid_MASK 0x0000FFFF
+#define lpfc_fwlog_cqid_WORD word5
+#define LPFC_MAX_FWLOG_PAGE 16
+ struct dma_address lwpd;
+ struct dma_address buff_fwlog[LPFC_MAX_FWLOG_PAGE];
+ } request;
+ struct {
+ uint32_t word0;
+ } response;
+ } u;
+};
+
+
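Word4 of the request packs four fields; the SHIFT/MASK pairs above define the layout. A standalone illustration of the packing with example values (buffsz 16 assumes a 4 KiB SLI4 page, i.e. 64K/4K):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t word4 = 0;
	uint32_t enable = 1, loglvl = 4, buffcnt = 16, buffsz = 16;

	word4 |= (enable  & 0x00000001) << 0;	/* lpfc_fwlog_enable */
	word4 |= (loglvl  & 0x0000000F) << 8;	/* lpfc_fwlog_loglvl */
	word4 |= (buffcnt & 0x000000FF) << 16;	/* lpfc_fwlog_buffcnt */
	word4 |= (buffsz  & 0x000000FF) << 24;	/* lpfc_fwlog_buffsz */

	printf("word4 = 0x%08" PRIx32 "\n", word4);	/* 0x10100401 */
	return 0;
}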
struct cq_context {
uint32_t word0;
#define lpfc_cq_context_event_SHIFT 31
@@ -3868,6 +3912,7 @@ struct lpfc_mqe {
struct lpfc_mbx_memory_dump_type3 mem_dump_type3;
struct lpfc_mbx_set_host_data set_host_data;
struct lpfc_mbx_nop nop;
+ struct lpfc_mbx_set_ras_fwlog ras_fwlog;
} un;
};
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index f3cae733ae2d..20fa6785a0e2 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3956,7 +3956,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (phba->sli_rev == LPFC_SLI_REV4) {
shost->dma_boundary =
phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
- shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+ shost->sg_tablesize = phba->cfg_scsi_seg_cnt;
}
/*
@@ -3988,9 +3988,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
if (error)
goto out_put_shost;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_add_tail(&vport->listentry, &phba->port_list);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vport;
out_put_shost:
@@ -4016,9 +4016,9 @@ destroy_port(struct lpfc_vport *vport)
fc_remove_host(shost);
scsi_remove_host(shost);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_cleanup(vport);
return;
@@ -5621,7 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
/* Initialize ndlp management spinlock */
spin_lock_init(&phba->ndlp_lock);
+ /* Initialize port_list spinlock */
+ spin_lock_init(&phba->port_list_lock);
INIT_LIST_HEAD(&phba->port_list);
+
INIT_LIST_HEAD(&phba->work_list);
init_waitqueue_head(&phba->wait_4_mlo_m_q);
@@ -5919,8 +5922,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
*/
max_buf_size = (2 * SLI4_PAGE_SIZE);
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
/*
* Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5942,9 +5943,16 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
- phba->cfg_sg_seg_cnt =
- LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ /*
+ * If supporting DIF, reduce the seg count for scsi to
+ * allow room for the DIF sges.
+ */
+ if (phba->cfg_enable_bg &&
+ phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
+ phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
+ else
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
+
} else {
/*
* The scsi_buf for a regular I/O holds the FCP cmnd,
@@ -5958,6 +5966,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
+ phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
/*
* NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
@@ -5965,10 +5974,22 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
*/
}
+ /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
+ "6300 Reducing NVME sg segment "
+ "cnt to %d\n",
+ LPFC_MAX_NVME_SEG_CNT);
+ phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
+ } else
+ phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
+ }
+
/* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5977,9 +5998,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
+ "9087 sg_seg_cnt:%d dmabuf_size:%d "
+ "total:%d scsi:%d nvme:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
- phba->cfg_total_seg_cnt);
+ phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
+ phba->cfg_nvme_seg_cnt);
/* Initialize buffer queue management fields */
INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
@@ -6205,6 +6228,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (phba->cfg_fof)
fof_vectors = 1;
+ /* Verify RAS support on adapter */
+ lpfc_sli4_ras_init(phba);
+
/* Verify all the SLI4 queues */
rc = lpfc_sli4_queue_verify(phba);
if (rc)
@@ -7967,7 +7993,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
else
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3028 GET_FUNCTION_CONFIG: failed to find "
- "Resrouce Descriptor:x%x\n",
+ "Resource Descriptor:x%x\n",
LPFC_RSRC_DESC_TYPE_FCFCOE);
read_cfg_out:
@@ -10492,6 +10518,14 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
/* Stop kthread signal shall trigger work_done one more time */
kthread_stop(phba->worker_thread);
+ /* Disable FW logging to host memory */
+ writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
+ phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
+
+ /* Free RAS DMA memory */
+ if (phba->ras_fwlog.ras_enabled == true)
+ lpfc_sli4_ras_dma_free(phba);
+
/* Unset the queues shared with the hardware then release all
* allocated resources.
*/
@@ -10737,6 +10771,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->mds_diags_support = 1;
else
phba->mds_diags_support = 0;
+
return 0;
}
@@ -10965,9 +11000,9 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
kfree(phba->vpi_ids);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_debugfs_terminate(vport);
@@ -11329,10 +11364,6 @@ lpfc_io_resume_s3(struct pci_dev *pdev)
/* Bring device online, it will be no-op for non-fatal error resume */
lpfc_online(phba);
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -11698,6 +11729,10 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
+
+ /* Enable RAS FW log support */
+ lpfc_sli4_ras_setup(phba);
+
return 0;
out_disable_intr:
@@ -11777,9 +11812,9 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
lpfc_sli4_hba_unset(phba);
lpfc_stop_hba_timers(phba);
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
/* Perform scsi free before driver resource_unset since scsi
* buffers are released to their corresponding pools here.
@@ -12144,10 +12179,6 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
/* Bring the device back online */
lpfc_online(phba);
}
-
- /* Clean up Advanced Error Reporting (AER) if needed */
- if (phba->hba_flag & HBA_AER_ENABLED)
- pci_cleanup_aer_uncorrect_error_status(pdev);
}
/**
@@ -12428,6 +12459,30 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_ras_init - Verify RAS-FW log is supported by this adapter
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether RAS is supported by the adapter and
+ * whether this PCI function is the one through which RAS logging is
+ * to be enabled.
+ **/
+void
+lpfc_sli4_ras_init(struct lpfc_hba *phba)
+{
+ switch (phba->pcidev->device) {
+ case PCI_DEVICE_ID_LANCER_G6_FC:
+ case PCI_DEVICE_ID_LANCER_G7_FC:
+ phba->ras_fwlog.ras_hwsupport = true;
+ if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn))
+ phba->ras_fwlog.ras_enabled = true;
+ else
+ phba->ras_fwlog.ras_enabled = false;
+ break;
+ default:
+ phba->ras_fwlog.ras_hwsupport = false;
+ }
+}
+
+/**
* lpfc_fof_queue_setup - Set up all the fof queues
* @phba: pointer to lpfc hba data structure.
*
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9d9974..269808e8480f 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -2318,6 +2318,7 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
@@ -2395,6 +2396,7 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport,
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
lpfc_disc_set_adisc(vport, ndlp);
return ndlp->nlp_state;
@@ -2652,6 +2654,7 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC);
+ ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME);
spin_unlock_irq(shost->host_lock);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18ef8a8..ba831def9301 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -282,7 +282,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
vport = lport->vport;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
- "6001 ENTER. lpfc_pnvme %p, qidx x%xi qhandle %p\n",
+ "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n",
lport, qidx, handle);
kfree(handle);
}
@@ -2235,13 +2235,11 @@ lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
struct sli4_sge *sgl;
dma_addr_t pdma_phys_sgl;
uint16_t iotag, lxri = 0;
- int bcnt, num_posted, sgl_size;
+ int bcnt, num_posted;
LIST_HEAD(prep_nblist);
LIST_HEAD(post_nblist);
LIST_HEAD(nvme_nblist);
- sgl_size = phba->cfg_sg_dma_buf_size;
-
for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
if (!lpfc_ncmd)
@@ -2462,17 +2460,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_INIT,
- "6300 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe10d3d..6245f442d784 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1339,15 +1339,14 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
idx = 0;
}
- infop = phba->sli4_hba.nvmet_ctx_info;
- for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ for (j = 0; j < phba->cfg_nvmet_mrq; j++) {
+ infop = lpfc_get_ctx_list(phba, i, j);
lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
"6408 TOTAL NVMET ctx for CPU %d "
"MRQ %d: cnt %d nextcpu %p\n",
i, j, infop->nvmet_ctx_list_cnt,
infop->nvmet_ctx_next_cpu);
- infop++;
}
}
return 0;
@@ -1373,17 +1372,10 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
pinfo.port_id = vport->fc_myDID;
- /* Limit to LPFC_MAX_NVME_SEG_CNT.
- * For now need + 1 to get around NVME transport logic.
+ /* We need to tell the transport layer + 1 because it takes page
+ * alignment into account. When space for the SGL is allocated we
+ * allocate + 3: one for the cmd, one for the rsp, and one for this alignment.
*/
- if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
- "6400 Reducing sg segment cnt to %d\n",
- LPFC_MAX_NVME_SEG_CNT);
- phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
- } else {
- phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
- }
lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 5c7858e735c9..4fa6703a9ec9 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -202,8 +202,8 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
static void
lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
- struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
- struct lpfc_nodelist *pnode = rdata->pnode;
+ struct lpfc_rport_data *rdata;
+ struct lpfc_nodelist *pnode;
struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
unsigned long flags;
struct Scsi_Host *shost = cmd->device->host;
@@ -211,17 +211,19 @@ lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
unsigned long latency;
int i;
- if (cmd->result)
+ if (!vport->stat_data_enabled ||
+ vport->stat_data_blocked ||
+ (cmd->result))
return;
latency = jiffies_to_msecs((long)jiffies - (long)lpfc_cmd->start_time);
+ rdata = lpfc_cmd->rdata;
+ pnode = rdata->pnode;
spin_lock_irqsave(shost->host_lock, flags);
- if (!vport->stat_data_enabled ||
- vport->stat_data_blocked ||
- !pnode ||
- !pnode->lat_data ||
- (phba->bucket_type == LPFC_NO_BUCKET)) {
+ if (!pnode ||
+ !pnode->lat_data ||
+ (phba->bucket_type == LPFC_NO_BUCKET)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@@ -1050,7 +1052,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
if (!found)
return NULL;
- if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
+ if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
}
@@ -4158,9 +4160,17 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
}
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
- spin_lock_irqsave(&phba->hbalock, flags);
- lpfc_cmd->pCmd = NULL;
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* If pCmd was set to NULL from abort path, do not call scsi_done */
+ if (xchg(&lpfc_cmd->pCmd, NULL) == NULL) {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
+ "0711 FCP cmd already NULL, sid: 0x%06x, "
+ "did: 0x%06x, oxid: 0x%04x\n",
+ vport->fc_myDID,
+ (pnode) ? pnode->nlp_DID : 0,
+ phba->sli_rev == LPFC_SLI_REV4 ?
+ lpfc_cmd->cur_iocbq.sli4_xritag : 0xffff);
+ return;
+ }
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
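The hbalock-protected NULL-ing of pCmd is replaced by an atomic exchange: of the completion and abort paths, only the one that observes the old non-NULL value goes on to call scsi_done(). The idiom in miniature, as a standalone C11 sketch (not lpfc code):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) pcmd;

static int claim(void)
{
	/* atomic_exchange returns the previous value; only the caller
	 * that sees non-NULL may complete the command.
	 */
	return atomic_exchange(&pcmd, NULL) != NULL;
}

int main(void)
{
	int dummy;

	atomic_store(&pcmd, &dummy);
	printf("first claim: %d, second claim: %d\n", claim(), claim());
	return 0;
}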
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 9830bdb6e072..783a1540cfbe 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -392,11 +392,7 @@ lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
struct lpfc_register doorbell;
doorbell.word0 = 0;
- bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
- bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
- bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
- (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
- bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
+ bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}
@@ -3797,6 +3793,7 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
struct hbq_dmabuf *dmabuf;
struct lpfc_cq_event *cq_event;
unsigned long iflag;
+ int count = 0;
spin_lock_irqsave(&phba->hbalock, iflag);
phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
@@ -3818,16 +3815,22 @@ lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
if (irspiocbq)
lpfc_sli_sp_handle_rspiocb(phba, pring,
irspiocbq);
+ count++;
break;
case CQE_CODE_RECEIVE:
case CQE_CODE_RECEIVE_V1:
dmabuf = container_of(cq_event, struct hbq_dmabuf,
cq_event);
lpfc_sli4_handle_received_buffer(phba, dmabuf);
+ count++;
break;
default:
break;
}
+
+ /* Limit the number of events to 64 to avoid soft lockups */
+ if (count == 64)
+ break;
}
}
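Capping the loop at 64 events bounds the work done per invocation, so a flooded slow-path queue cannot pin the CPU into a soft lockup; the remainder is picked up on a later pass. The generic shape of the pattern, as a standalone sketch with hypothetical queue types (not lpfc code):

#include <stddef.h>

struct my_event { struct my_event *next; };
struct my_queue { struct my_event *head; };

static struct my_event *my_dequeue(struct my_queue *q)
{
	struct my_event *ev = q->head;

	if (ev)
		q->head = ev->next;
	return ev;
}

static void handle(struct my_event *ev) { (void)ev; }

#define BATCH 64

static void drain_some(struct my_queue *q)
{
	struct my_event *ev;
	int count = 0;

	while ((ev = my_dequeue(q)) != NULL) {
		handle(ev);
		if (++count == BATCH)
			break;	/* leave the rest for the next pass */
	}
}

int main(void)
{
	struct my_event ev[3] = { { &ev[1] }, { &ev[2] }, { NULL } };
	struct my_queue q = { &ev[0] };

	drain_some(&q);
	return 0;
}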
@@ -6146,6 +6149,271 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
}
/**
+ * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called to free memory allocated for RAS FW logging
+ * support in the driver.
+ **/
+void
+lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf, *next;
+
+ if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
+ list_for_each_entry_safe(dmabuf, next,
+ &ras_fwlog->fwlog_buff_list,
+ list) {
+ list_del(&dmabuf->list);
+ dma_free_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ dmabuf->virt, dmabuf->phys);
+ kfree(dmabuf);
+ }
+ }
+
+ if (ras_fwlog->lwpd.virt) {
+ dma_free_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ ras_fwlog->lwpd.virt,
+ ras_fwlog->lwpd.phys);
+ ras_fwlog->lwpd.virt = NULL;
+ }
+
+ ras_fwlog->ras_active = false;
+}
+
+/**
+ * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
+ * @phba: Pointer to HBA context object.
+ * @fwlog_buff_count: Count of buffers to be created.
+ *
+ * This routine allocates DMA memory for the Log Write Position Data (LWPD)
+ * and for the buffers posted to the adapter for FW log updates.
+ * The buffer count is derived from the module parameter ras_fwlog_buffsize;
+ * each buffer posted to the FW is 64K.
+ **/
+
+static int
+lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
+ uint32_t fwlog_buff_count)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_dmabuf *dmabuf;
+ int rc = 0, i = 0;
+
+ /* Initialize List */
+ INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
+
+ /* Allocate memory for the LWPD */
+ ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
+ sizeof(uint32_t) * 2,
+ &ras_fwlog->lwpd.phys,
+ GFP_KERNEL);
+ if (!ras_fwlog->lwpd.virt) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6185 LWPD Memory Alloc Failed\n");
+
+ return -ENOMEM;
+ }
+
+ ras_fwlog->fw_buffcount = fwlog_buff_count;
+ for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
+ dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
+ GFP_KERNEL);
+ if (!dmabuf) {
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6186 Memory Alloc failed FW logging");
+ goto free_mem;
+ }
+
+ dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+ LPFC_RAS_MAX_ENTRY_SIZE,
+ &dmabuf->phys,
+ GFP_KERNEL);
+ if (!dmabuf->virt) {
+ kfree(dmabuf);
+ rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6187 DMA Alloc Failed FW logging");
+ goto free_mem;
+ }
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+ dmabuf->buffer_tag = i;
+ list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
+ }
+
+free_mem:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
+
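lpfc_sli4_ras_dma_alloc() follows the usual allocate-N-or-roll-back shape: any failure branches to a single exit label that frees whatever was already allocated (here via lpfc_sli4_ras_dma_free()). The pattern in standalone form, illustrated with plain calloc/free:

#include <stdlib.h>

static int alloc_all(void **bufs, int n, size_t sz)
{
	int i, rc = 0;

	for (i = 0; i < n; i++) {
		bufs[i] = calloc(1, sz);
		if (!bufs[i]) {
			rc = -1;
			goto free_mem;	/* roll back partial progress */
		}
	}
	return 0;

free_mem:
	while (i--)
		free(bufs[i]);
	return rc;
}

int main(void)
{
	void *bufs[4];
	int i, rc = alloc_all(bufs, 4, 1024);

	if (!rc)
		for (i = 0; i < 4; i++)
			free(bufs[i]);
	return rc ? 1 : 0;
}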
+/**
+ * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @pmboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * Completion handler for driver's RAS MBX command to the device.
+ **/
+static void
+lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+ MAILBOX_t *mb;
+ union lpfc_sli4_cfg_shdr *shdr;
+ uint32_t shdr_status, shdr_add_status;
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+
+ mb = &pmb->u.mb;
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+ if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+ "6188 FW LOG mailbox "
+ "completed with status x%x add_status x%x,"
+ " mbx status x%x\n",
+ shdr_status, shdr_add_status, mb->mbxStatus);
+ goto disable_ras;
+ }
+
+ ras_fwlog->ras_active = true;
+ mempool_free(pmb, phba->mbox_mem_pool);
+
+ return;
+
+disable_ras:
+ /* Free RAS DMA memory */
+ lpfc_sli4_ras_dma_free(phba);
+ mempool_free(pmb, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
+ * @phba: pointer to lpfc hba data structure.
+ * @fwlog_level: Logging verbosity level.
+ * @fwlog_enable: Enable/Disable logging.
+ *
+ * Initialize memory and post mailbox command to enable FW logging in host
+ * memory.
+ **/
+int
+lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
+ uint32_t fwlog_level,
+ uint32_t fwlog_enable)
+{
+ struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
+ struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
+ struct lpfc_dmabuf *dmabuf;
+ LPFC_MBOXQ_t *mbox;
+ uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
+ int rc = 0;
+
+ fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
+ phba->cfg_ras_fwlog_buffsize);
+ fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
+
+ /*
+ * If re-enabling FW logging support, use the earlier allocated
+ * DMA buffers while posting the MBX command.
+ */
+ if (!ras_fwlog->lwpd.virt) {
+ rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6189 RAS FW Log Support Not Enabled");
+ return rc;
+ }
+ }
+
+ /* Setup Mailbox command */
+ mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mbox) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6190 RAS MBX Alloc Failed");
+ rc = -ENOMEM;
+ goto mem_free;
+ }
+
+ ras_fwlog->fw_loglevel = fwlog_level;
+ len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+
+ lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
+ LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
+ len, LPFC_SLI4_MBX_EMBED);
+
+ mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
+ bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
+ fwlog_enable);
+ bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
+ ras_fwlog->fw_loglevel);
+ bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
+ ras_fwlog->fw_buffcount);
+ bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
+ LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
+
+ /* Update DMA buffer address */
+ list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
+ memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
+ putPaddrLow(dmabuf->phys);
+
+ mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
+ putPaddrHigh(dmabuf->phys);
+ }
+
+ /* Update LPWD address */
+ mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
+ mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
+
+ mbox->vport = phba->pport;
+ mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "6191 RAS Mailbox failed. "
+ "status %d mbxStatus : x%x", rc,
+ bf_get(lpfc_mqe_status, &mbox->u.mqe));
+ mempool_free(mbox, phba->mbox_mem_pool);
+ rc = -EIO;
+ goto mem_free;
+ } else
+ rc = 0;
+mem_free:
+ if (rc)
+ lpfc_sli4_ras_dma_free(phba);
+
+ return rc;
+}
+
+/**
+ * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
+ * @phba: Pointer to HBA context object.
+ *
+ * Check if RAS is supported on the adapter and initialize it.
+ **/
+void
+lpfc_sli4_ras_setup(struct lpfc_hba *phba)
+{
+ /* Check whether RAS FW logging needs to be enabled */
+ if (lpfc_check_fwlog_support(phba))
+ return;
+
+ lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
+ LPFC_RAS_ENABLE_LOGGING);
+}
+
+/**
* lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
* @phba: Pointer to HBA context object.
*
@@ -10266,8 +10534,12 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
LPFC_MBOXQ_t *pmb;
unsigned long iflag;
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
/* Flush all the mailbox commands in the mbox system */
spin_lock_irqsave(&phba->hbalock, iflag);
+
/* The pending mailbox command queue */
list_splice_init(&phba->sli.mboxq, &completions);
/* The outstanding active mailbox command */
@@ -10280,6 +10552,9 @@ lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
list_splice_init(&phba->sli.mboxq_cmpl, &completions);
spin_unlock_irqrestore(&phba->hbalock, iflag);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
while (!list_empty(&completions)) {
list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
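The local_bh_disable()/local_bh_enable() pairs added around these hbalock sections keep softirq context, including the driver's timers that also take hbalock, off this CPU while the mailbox queues are flushed. The shape of the pattern as a kernel-style sketch (hypothetical helper, not lpfc code):

#include <linux/bottom_half.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical helper: splice a lock-protected list while softirqs
 * (and thus timer callbacks that would contend for the same lock)
 * are kept off this CPU.
 */
static void guarded_flush(spinlock_t *lock, struct list_head *src,
			  struct list_head *done)
{
	unsigned long flags;

	local_bh_disable();
	spin_lock_irqsave(lock, flags);
	list_splice_init(src, done);
	spin_unlock_irqrestore(lock, flags);
	local_bh_enable();
}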
@@ -10419,6 +10694,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
lpfc_hba_down_prep(phba);
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
lpfc_fabric_abort_hba(phba);
spin_lock_irqsave(&phba->hbalock, flags);
@@ -10472,6 +10750,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
kfree(buf_ptr);
}
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
/* Return any active mbox cmds */
del_timer_sync(&psli->mbox_tmo);
@@ -11775,6 +12056,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
}
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+ /* Disable softirqs, including timers from obtaining phba->hbalock */
+ local_bh_disable();
+
spin_lock_irq(&phba->hbalock);
psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
@@ -11788,6 +12072,9 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
1000) + jiffies;
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
@@ -11797,9 +12084,13 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
*/
break;
}
- } else
+ } else {
spin_unlock_irq(&phba->hbalock);
+ /* Enable softirqs again, done with phba->hbalock */
+ local_bh_enable();
+ }
+
lpfc_sli_mbox_sys_flush(phba);
}
@@ -13136,7 +13427,6 @@ static bool
lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
{
bool workposted = false;
- struct fc_frame_header *fc_hdr;
struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
struct lpfc_nvmet_tgtport *tgtp;
@@ -13173,9 +13463,6 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
hrq->RQ_buf_posted--;
memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
- /* If a NVME LS event (type 0x28), treat it as Fast path */
- fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
-
/* save off the frame for the word thread to process */
list_add_tail(&dma_buf->cq_event.list,
&phba->sli4_hba.sp_queue_event);
@@ -14558,13 +14845,10 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
int rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
- uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
/* sanity check on queue memory */
if (!cq || !eq)
return -ENODEV;
- if (!phba->sli4_hba.pc_sli4_params.supported)
- hw_page_size = cq->page_size;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 399c0015c546..e76c380e1a84 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -886,3 +886,4 @@ int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 501249509af4..5a0d512ff497 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.0.0.6"
+#define LPFC_DRIVER_VERSION "12.0.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1ff0f7de9105..c340e0e47473 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -207,7 +207,7 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
struct lpfc_vport *vport;
unsigned long flags;
- spin_lock_irqsave(&phba->hbalock, flags);
+ spin_lock_irqsave(&phba->port_list_lock, flags);
list_for_each_entry(vport, &phba->port_list, listentry) {
if (vport == new_vport)
continue;
@@ -215,11 +215,11 @@ lpfc_unique_wwpn(struct lpfc_hba *phba, struct lpfc_vport *new_vport)
if (memcmp(&vport->fc_sparam.portName,
&new_vport->fc_sparam.portName,
sizeof(struct lpfc_name)) == 0) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 0;
}
}
- spin_unlock_irqrestore(&phba->hbalock, flags);
+ spin_unlock_irqrestore(&phba->port_list_lock, flags);
return 1;
}
@@ -825,9 +825,9 @@ skip_logo:
lpfc_free_vpi(phba, vport->vpi);
vport->work_port_events = 0;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_del_init(&vport->listentry);
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1828 Vport Deleted.\n");
scsi_host_put(shost);
@@ -844,7 +844,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
GFP_KERNEL);
if (vports == NULL)
return NULL;
- spin_lock_irq(&phba->hbalock);
+ spin_lock_irq(&phba->port_list_lock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
if (port_iterator->load_flag & FC_UNLOADING)
continue;
@@ -856,7 +856,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
}
vports[index++] = port_iterator;
}
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->port_list_lock);
return vports;
}
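
The lpfc_vport.c hunks above move every walker of phba->port_list off the coarse hbalock onto a dedicated port_list_lock, shrinking the hold time of the heavily contended global lock. A minimal sketch of the pattern, with illustrative names rather than lpfc's own structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct hba {
	spinlock_t port_list_lock;	/* protects port_list only */
	struct list_head port_list;
};

struct vport {
	struct list_head listentry;
};

/* Traversal takes only the list's own lock, so it no longer
 * serializes against unrelated users of the old global lock. */
static bool port_on_list(struct hba *hba, struct vport *target)
{
	struct vport *v;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&hba->port_list_lock, flags);
	list_for_each_entry(v, &hba->port_list, listentry) {
		if (v == target) {
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&hba->port_list_lock, flags);
	return found;
}
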
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index eb551f3cc471..764d320bb2ca 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -52,14 +52,12 @@ struct mac_esp_priv {
struct esp *esp;
void __iomem *pdma_regs;
void __iomem *pdma_io;
- int error;
};
static struct esp *esp_chips[2];
static DEFINE_SPINLOCK(esp_chips_lock);
#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
- platform_get_drvdata((struct platform_device *) \
- (esp->dev)))
+ dev_get_drvdata((esp)->dev))
static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
@@ -71,38 +69,6 @@ static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
return nubus_readb(esp->regs + reg * 16);
}
-/* For pseudo DMA and PIO we need the virtual address
- * so this address mapping is the identity mapping.
- */
-
-static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return (dma_addr_t)buf;
-}
-
-static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- int i;
-
- for (i = 0; i < num_sg; i++)
- sg[i].dma_address = (u32)sg_virt(&sg[i]);
- return num_sg;
-}
-
-static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- /* Nothing to do. */
-}
-
-static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- /* Nothing to do. */
-}
-
static void mac_esp_reset_dma(struct esp *esp)
{
/* Nothing to do. */
@@ -120,12 +86,11 @@ static void mac_esp_dma_invalidate(struct esp *esp)
static int mac_esp_dma_error(struct esp *esp)
{
- return MAC_ESP_GET_PRIV(esp)->error;
+ return esp->send_cmd_error;
}
static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
int i = 500000;
do {
@@ -140,7 +105,7 @@ static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -166,7 +131,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
esp_read8(ESP_STATUS));
- mep->error = 1;
+ esp->send_cmd_error = 1;
return 1;
}
@@ -233,7 +198,7 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
{
struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- mep->error = 0;
+ esp->send_cmd_error = 0;
if (!write)
scsi_esp_cmd(esp, ESP_CMD_FLUSH);
@@ -271,164 +236,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
} while (esp_count);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int mac_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
- esp_read8(ESP_STATUS));
- return 0;
-}
-
-static inline int mac_esp_wait_for_intr(struct esp *esp)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- int i = 500000;
-
- do {
- esp->sreg = esp_read8(ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
- mep->error = 1;
- return 1;
-}
-
-#define MAC_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands " \n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " moveb " operands " \n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo))
-
-#define MAC_ESP_FIFO_SIZE 16
-
-static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
- mep->error = 0;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!mac_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = esp_read8(ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & mask) {
- mep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else {
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= MAC_ESP_FIFO_SIZE)
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- else
- MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (mac_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = esp_read8(ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- mep->error = 1;
- break;
- }
-
- n = MAC_ESP_FIFO_SIZE -
- (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == MAC_ESP_FIFO_SIZE) {
- MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
- } else {
- esp_count -= n;
- MAC_ESP_PIO_LOOP("%0@+,%2@", n);
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
static int mac_esp_irq_pending(struct esp *esp)
{
if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
@@ -470,10 +277,6 @@ static irqreturn_t mac_scsi_esp_intr(int irq, void *dev_id)
static struct esp_driver_ops mac_esp_ops = {
.esp_write8 = mac_esp_write8,
.esp_read8 = mac_esp_read8,
- .map_single = mac_esp_map_single,
- .map_sg = mac_esp_map_sg,
- .unmap_single = mac_esp_unmap_single,
- .unmap_sg = mac_esp_unmap_sg,
.irq_pending = mac_esp_irq_pending,
.dma_length_limit = mac_esp_dma_length_limit,
.reset_dma = mac_esp_reset_dma,
@@ -508,7 +311,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp = shost_priv(host);
esp->host = host;
- esp->dev = dev;
+ esp->dev = &dev->dev;
esp->command_block = kzalloc(16, GFP_KERNEL);
if (!esp->command_block)
@@ -551,14 +354,16 @@ static int esp_mac_probe(struct platform_device *dev)
mep->pdma_regs = NULL;
break;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 16;
esp->ops = &mac_esp_ops;
+ esp->flags = ESP_FLAG_NO_DMA_MAP;
if (mep->pdma_io == NULL) {
printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
esp_write8(0, ESP_TCLOW);
esp_write8(0, ESP_TCMED);
- esp->flags = ESP_FLAG_DISABLE_SYNC;
- mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+ esp->flags |= ESP_FLAG_DISABLE_SYNC;
+ mac_esp_ops.send_dma_cmd = esp_send_pio_cmd;
} else {
printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
}
@@ -577,7 +382,7 @@ static int esp_mac_probe(struct platform_device *dev)
esp_chips[dev->id] = esp;
spin_unlock(&esp_chips_lock);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
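
With esp->dev now a plain struct device * (and the driver-private DMA stubs gone in favor of the core's ESP_FLAG_NO_DMA_MAP handling), the private-data lookup can use the generic accessor instead of casting back to a platform_device. A sketch of the idiom, assuming probe stored the pointer with platform_set_drvdata(), which writes the same per-device field that dev_get_drvdata() reads:

#include <linux/device.h>

struct mac_esp_priv;

struct esp_example {
	struct device *dev;	/* was a loosely typed platform_device pointer */
};

static inline struct mac_esp_priv *esp_to_priv(struct esp_example *esp)
{
	/* platform_set_drvdata(pdev, mep) is dev_set_drvdata(&pdev->dev, mep),
	 * so this retrieves exactly what probe stored. */
	return dev_get_drvdata(esp->dev);
}
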
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 530358cdcb39..3b7abe5ca7f5 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -202,13 +202,6 @@ module_param_named(debug_level, mraid_debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
/*
- * ### global data ###
- */
-static uint8_t megaraid_mbox_version[8] =
- { 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
-
-
-/*
* PCI table for all supported controllers.
*/
static struct pci_device_id pci_id_table_g[] = {
@@ -457,10 +450,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Setup the default DMA mask. This would be changed later on
// depending on hardware capabilities
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
-
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
+ "megaraid: dma_set_mask failed:%d\n", __LINE__));
goto out_free_adapter;
}
@@ -484,7 +476,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
// Start the mailbox based controller
if (megaraid_init_mbox(adapter) != 0) {
con_log(CL_ANN, (KERN_WARNING
- "megaraid: maibox adapter did not initialize\n"));
+ "megaraid: mailbox adapter did not initialize\n"));
goto out_free_adapter;
}
@@ -878,11 +870,12 @@ megaraid_init_mbox(adapter_t *adapter)
adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
- if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&adapter->pdev->dev, DMA_BIT_MASK(64))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: DMA mask for 64-bit failed\n"));
- if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: 32-bit DMA mask failed\n"));
goto out_free_sysfs_res;
@@ -950,7 +943,7 @@ megaraid_fini_mbox(adapter_t *adapter)
* megaraid_alloc_cmd_packets - allocate shared mailbox
* @adapter : soft state of the raid controller
*
- * Allocate and align the shared mailbox. This maibox is used to issue
+ * Allocate and align the shared mailbox. This mailbox is used to issue
* all the commands. For IO based controllers, the mailbox is also registered
* with the FW. Allocate memory for all commands as well.
* This is our big allocator.
@@ -975,9 +968,9 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
* Allocate the common 16-byte aligned memory for the handshake
* mailbox.
*/
- raid_dev->una_mbox64 = pci_zalloc_consistent(adapter->pdev,
- sizeof(mbox64_t),
- &raid_dev->una_mbox64_dma);
+ raid_dev->una_mbox64 = dma_zalloc_coherent(&adapter->pdev->dev,
+ sizeof(mbox64_t), &raid_dev->una_mbox64_dma,
+ GFP_KERNEL);
if (!raid_dev->una_mbox64) {
con_log(CL_ANN, (KERN_WARNING
@@ -1003,8 +996,8 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
align;
// Allocate memory for commands issued internally
- adapter->ibuf = pci_zalloc_consistent(pdev, MBOX_IBUF_SIZE,
- &adapter->ibuf_dma_h);
+ adapter->ibuf = dma_zalloc_coherent(&pdev->dev, MBOX_IBUF_SIZE,
+ &adapter->ibuf_dma_h, GFP_KERNEL);
if (!adapter->ibuf) {
con_log(CL_ANN, (KERN_WARNING
@@ -1082,7 +1075,7 @@ megaraid_alloc_cmd_packets(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -1098,10 +1091,10 @@ out_teardown_dma_pools:
out_free_scb_list:
kfree(adapter->kscb_list);
out_free_ibuf:
- pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
+ dma_free_coherent(&pdev->dev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
adapter->ibuf_dma_h);
out_free_common_mbox:
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return -1;
@@ -1123,10 +1116,10 @@ megaraid_free_cmd_packets(adapter_t *adapter)
kfree(adapter->kscb_list);
- pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, MBOX_IBUF_SIZE,
(void *)adapter->ibuf, adapter->ibuf_dma_h);
- pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mbox64_t),
(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
return;
}
@@ -1428,12 +1421,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
adapter->outstanding_cmds++;
- if (scb->dma_direction == PCI_DMA_TODEVICE)
- pci_dma_sync_sg_for_device(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_TODEVICE);
-
mbox->busy = 1; // Set busy
mbox->poll = 0;
mbox->ack = 0;
@@ -2181,31 +2168,6 @@ megaraid_isr(int irq, void *devp)
/**
- * megaraid_mbox_sync_scb - sync kernel buffers
- * @adapter : controller's soft state
- * @scb : pointer to the resource packet
- *
- * DMA sync if required.
- */
-static void
-megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
-{
- mbox_ccb_t *ccb;
-
- ccb = (mbox_ccb_t *)scb->ccb;
-
- if (scb->dma_direction == PCI_DMA_FROMDEVICE)
- pci_dma_sync_sg_for_cpu(adapter->pdev,
- scsi_sglist(scb->scp),
- scsi_sg_count(scb->scp),
- PCI_DMA_FROMDEVICE);
-
- scsi_dma_unmap(scb->scp);
- return;
-}
-
-
-/**
* megaraid_mbox_dpc - the tasklet to complete the commands from completed list
* @devp : pointer to HBA soft state
*
@@ -2403,9 +2365,7 @@ megaraid_mbox_dpc(unsigned long devp)
megaraid_mbox_display_scb(adapter, scb);
}
- // Free our internal resources and call the mid-layer callback
- // routine
- megaraid_mbox_sync_scb(adapter, scb);
+ scsi_dma_unmap(scp);
// remove from local clist
list_del_init(&scb->list);
@@ -2577,7 +2537,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
int recovery_window;
- int recovering;
int i;
uioc_t *kioc;
@@ -2591,7 +2550,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
return FAILED;
}
-
// Under exceptional conditions, FW can take up to 3 minutes to
// complete command processing. Wait for additional 2 minutes for the
// pending commands counter to go down to 0. If it doesn't, let the
@@ -2640,8 +2598,6 @@ megaraid_reset_handler(struct scsi_cmnd *scp)
recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
- recovering = adapter->outstanding_cmds;
-
for (i = 0; i < recovery_window; i++) {
megaraid_ack_sequence(adapter);
@@ -2725,13 +2681,10 @@ static int
mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
{
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
mbox_t *mbox;
uint8_t status;
int i;
-
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/*
@@ -2948,9 +2901,8 @@ megaraid_mbox_product_info(adapter_t *adapter)
* Issue an ENQUIRY3 command to find out certain adapter parameters,
* e.g., max channels, max commands etc.
*/
- pinfo = pci_zalloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
- &pinfo_dma_h);
-
+ pinfo = dma_zalloc_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
+ &pinfo_dma_h, GFP_KERNEL);
if (pinfo == NULL) {
con_log(CL_ANN, (KERN_WARNING
"megaraid: out of memory, %s %d\n", __func__,
@@ -2971,7 +2923,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3002,7 +2954,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
con_log(CL_ANN, (KERN_WARNING
"megaraid: product info failed\n"));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t),
pinfo, pinfo_dma_h);
return -1;
@@ -3038,7 +2990,7 @@ megaraid_mbox_product_info(adapter_t *adapter)
"megaraid: fw version:[%s] bios version:[%s]\n",
adapter->fw_version, adapter->bios_version));
- pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
+ dma_free_coherent(&adapter->pdev->dev, sizeof(mraid_pinfo_t), pinfo,
pinfo_dma_h);
return 0;
@@ -3135,7 +3087,6 @@ megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
static int
megaraid_mbox_support_random_del(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
int rval;
@@ -3157,8 +3108,6 @@ megaraid_mbox_support_random_del(adapter_t *adapter)
return 0;
}
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FC_DEL_LOGDRV;
@@ -3263,12 +3212,8 @@ megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
static void
megaraid_mbox_flush_cache(adapter_t *adapter)
{
- mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
-
- mbox = (mbox_t *)raw_mbox;
-
memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
raw_mbox[0] = FLUSH_ADAPTER;
@@ -3299,7 +3244,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
mbox_t *mbox;
uint8_t raw_mbox[sizeof(mbox_t)];
mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
- mbox64_t *mbox64;
int status = 0;
int i;
uint32_t dword;
@@ -3310,7 +3254,6 @@ megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
raw_mbox[0] = 0xFF;
- mbox64 = raid_dev->mbox64;
mbox = raid_dev->mbox;
/* Wait until mailbox is free */
@@ -3515,7 +3458,7 @@ megaraid_cmm_register(adapter_t *adapter)
scb->scp = NULL;
scb->state = SCB_FREE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
scb->dma_type = MRAID_DMA_NONE;
scb->dev_channel = -1;
scb->dev_target = -1;
@@ -3653,7 +3596,7 @@ megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
scb->state = SCB_ACTIVE;
scb->dma_type = MRAID_DMA_NONE;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
ccb = (mbox_ccb_t *)scb->ccb;
mbox64 = (mbox64_t *)(unsigned long)kioc->cmdbuf;
@@ -3794,10 +3737,6 @@ megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
static int
gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
{
- uint8_t dmajor;
-
- dmajor = megaraid_mbox_version[0];
-
hinfo->pci_vendor_id = adapter->pdev->vendor;
hinfo->pci_device_id = adapter->pdev->device;
hinfo->subsys_vendor_id = adapter->pdev->subsystem_vendor;
@@ -3843,8 +3782,8 @@ megaraid_sysfs_alloc_resources(adapter_t *adapter)
raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
- raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
- PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
+ raid_dev->sysfs_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ PAGE_SIZE, &raid_dev->sysfs_buffer_dma, GFP_KERNEL);
if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
!raid_dev->sysfs_buffer) {
@@ -3881,7 +3820,7 @@ megaraid_sysfs_free_resources(adapter_t *adapter)
kfree(raid_dev->sysfs_mbox64);
if (raid_dev->sysfs_buffer) {
- pci_free_consistent(adapter->pdev, PAGE_SIZE,
+ dma_free_coherent(&adapter->pdev->dev, PAGE_SIZE,
raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
}
}
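
The megaraid_mbox conversion above is the mechanical swap from the deprecated pci_*_consistent() wrappers to the generic DMA API on &pdev->dev. One point worth noting: the old wrappers hard-coded GFP_ATOMIC, so callers on sleepable paths such as probe can now pass GFP_KERNEL explicitly. A sketch of the mapping (illustrative helper, not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* pci_alloc_consistent(pdev, sz, &h)  -> dma_alloc_coherent(&pdev->dev, sz, &h, gfp)
 * pci_zalloc_consistent(pdev, sz, &h) -> dma_zalloc_coherent(&pdev->dev, sz, &h, gfp)
 * pci_free_consistent(pdev, sz, p, h) -> dma_free_coherent(&pdev->dev, sz, p, h)
 */
static void *mbox_dma_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *handle)
{
	/* GFP_KERNEL is safe here only because probe may sleep. */
	return dma_alloc_coherent(&pdev->dev, sz, handle, GFP_KERNEL);
}
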
diff --git a/drivers/scsi/megaraid/megaraid_mbox.h b/drivers/scsi/megaraid/megaraid_mbox.h
index c1d86d961a92..e075aeb4012f 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.h
+++ b/drivers/scsi/megaraid/megaraid_mbox.h
@@ -117,7 +117,7 @@
* @raw_mbox : raw mailbox pointer
* @mbox : mailbox
* @mbox64 : extended mailbox
- * @mbox_dma_h : maibox dma address
+ * @mbox_dma_h : mailbox dma address
* @sgl64 : 64-bit scatter-gather list
* @sgl32 : 32-bit scatter-gather list
* @sgl_dma_h : dma handle for the scatter-gather list
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 9aa9590c5373..9b90c716f06d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -1330,11 +1330,11 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
pthru = (struct megasas_pthru_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
- else if (scp->sc_data_direction == PCI_DMA_NONE)
+ else if (scp->sc_data_direction == DMA_NONE)
flags = MFI_FRAME_DIR_NONE;
if (instance->flag_ieee == 1) {
@@ -1428,9 +1428,9 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp,
device_id = MEGASAS_DEV_INDEX(scp);
ldio = (struct megasas_io_frame *)cmd->frame;
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
flags = MFI_FRAME_DIR_WRITE;
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
flags = MFI_FRAME_DIR_READ;
if (instance->flag_ieee == 1) {
@@ -2240,9 +2240,9 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION_111));
else {
new_affiliation_111 =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
- &new_affiliation_111_h);
+ &new_affiliation_111_h, GFP_KERNEL);
if (!new_affiliation_111) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2302,7 +2302,7 @@ static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
}
out:
if (new_affiliation_111) {
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
new_affiliation_111,
new_affiliation_111_h);
@@ -2347,10 +2347,10 @@ static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
sizeof(struct MR_LD_VF_AFFILIATION));
else {
new_affiliation =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
- &new_affiliation_h);
+ &new_affiliation_h, GFP_KERNEL);
if (!new_affiliation) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate "
"memory for new affiliation for scsi%d\n",
@@ -2470,7 +2470,7 @@ out:
}
if (new_affiliation)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
(MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
new_affiliation, new_affiliation_h);
@@ -2513,9 +2513,9 @@ int megasas_sriov_start_heartbeat(struct megasas_instance *instance,
if (initial) {
instance->hb_host_mem =
- pci_zalloc_consistent(instance->pdev,
+ dma_zalloc_coherent(&instance->pdev->dev,
sizeof(struct MR_CTRL_HB_HOST_MEM),
- &instance->hb_host_mem_h);
+ &instance->hb_host_mem_h, GFP_KERNEL);
if (!instance->hb_host_mem) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "SR-IOV: Couldn't allocate"
" memory for heartbeat host memory for scsi%d\n",
@@ -4995,9 +4995,8 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
context_sz = sizeof(u32);
reply_q_sz = context_sz * (instance->max_fw_cmds + 1);
- instance->reply_queue = pci_alloc_consistent(instance->pdev,
- reply_q_sz,
- &instance->reply_queue_h);
+ instance->reply_queue = dma_alloc_coherent(&instance->pdev->dev,
+ reply_q_sz, &instance->reply_queue_h, GFP_KERNEL);
if (!instance->reply_queue) {
dev_printk(KERN_DEBUG, &instance->pdev->dev, "Out of DMA mem for reply queue\n");
@@ -5029,7 +5028,7 @@ megasas_init_adapter_mfi(struct megasas_instance *instance)
fail_fw_init:
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
fail_reply_queue:
megasas_free_cmds(instance);
@@ -5533,7 +5532,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
else {
if (instance->crash_dump_buf)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
@@ -5616,7 +5615,7 @@ static void megasas_release_mfi(struct megasas_instance *instance)
u32 reply_q_sz = sizeof(u32) *(instance->max_mfi_cmds + 1);
if (instance->reply_queue)
- pci_free_consistent(instance->pdev, reply_q_sz,
+ dma_free_coherent(&instance->pdev->dev, reply_q_sz,
instance->reply_queue, instance->reply_queue_h);
megasas_free_cmds(instance);
@@ -5655,10 +5654,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
}
dcmd = &cmd->frame->dcmd;
- el_info = pci_zalloc_consistent(instance->pdev,
- sizeof(struct megasas_evt_log_info),
- &el_info_h);
-
+ el_info = dma_zalloc_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info), &el_info_h,
+ GFP_KERNEL);
if (!el_info) {
megasas_return_cmd(instance, cmd);
return -ENOMEM;
@@ -5695,8 +5693,9 @@ megasas_get_seq_num(struct megasas_instance *instance,
eli->boot_seq_num = el_info->boot_seq_num;
dcmd_failed:
- pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
- el_info, el_info_h);
+ dma_free_coherent(&instance->pdev->dev,
+ sizeof(struct megasas_evt_log_info),
+ el_info, el_info_h);
megasas_return_cmd(instance, cmd);
@@ -6134,10 +6133,10 @@ static inline void megasas_set_adapter_type(struct megasas_instance *instance)
static inline int megasas_alloc_mfi_ctrl_mem(struct megasas_instance *instance)
{
- instance->producer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->producer_h);
- instance->consumer = pci_alloc_consistent(instance->pdev, sizeof(u32),
- &instance->consumer_h);
+ instance->producer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->producer_h, GFP_KERNEL);
+ instance->consumer = dma_alloc_coherent(&instance->pdev->dev,
+ sizeof(u32), &instance->consumer_h, GFP_KERNEL);
if (!instance->producer || !instance->consumer) {
dev_err(&instance->pdev->dev,
@@ -6199,11 +6198,11 @@ static inline void megasas_free_ctrl_mem(struct megasas_instance *instance)
kfree(instance->reply_map);
if (instance->adapter_type == MFI_SERIES) {
if (instance->producer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->producer,
instance->producer_h);
if (instance->consumer)
- pci_free_consistent(instance->pdev, sizeof(u32),
+ dma_free_coherent(&instance->pdev->dev, sizeof(u32),
instance->consumer,
instance->consumer_h);
} else {
@@ -6224,10 +6223,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
struct pci_dev *pdev = instance->pdev;
struct fusion_context *fusion = instance->ctrl_context;
- instance->evt_detail =
- pci_alloc_consistent(pdev,
- sizeof(struct megasas_evt_detail),
- &instance->evt_detail_h);
+ instance->evt_detail = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct megasas_evt_detail),
+ &instance->evt_detail_h, GFP_KERNEL);
if (!instance->evt_detail) {
dev_err(&instance->pdev->dev,
@@ -6250,9 +6248,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->pd_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
- &instance->pd_list_buf_h);
+ &instance->pd_list_buf_h, GFP_KERNEL);
if (!instance->pd_list_buf) {
dev_err(&pdev->dev, "Failed to allocate PD list buffer\n");
@@ -6260,9 +6258,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ctrl_info_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct megasas_ctrl_info),
- &instance->ctrl_info_buf_h);
+ &instance->ctrl_info_buf_h, GFP_KERNEL);
if (!instance->ctrl_info_buf) {
dev_err(&pdev->dev,
@@ -6271,9 +6269,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_list_buf =
- pci_alloc_consistent(pdev,
+ dma_alloc_coherent(&pdev->dev,
sizeof(struct MR_LD_LIST),
- &instance->ld_list_buf_h);
+ &instance->ld_list_buf_h, GFP_KERNEL);
if (!instance->ld_list_buf) {
dev_err(&pdev->dev, "Failed to allocate LD list buffer\n");
@@ -6281,9 +6279,9 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
}
instance->ld_targetid_list_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_LD_TARGETID_LIST),
- &instance->ld_targetid_list_buf_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_TARGETID_LIST),
+ &instance->ld_targetid_list_buf_h, GFP_KERNEL);
if (!instance->ld_targetid_list_buf) {
dev_err(&pdev->dev,
@@ -6293,21 +6291,20 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance)
if (!reset_devices) {
instance->system_info_buf =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_DRV_SYSTEM_INFO),
- &instance->system_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_DRV_SYSTEM_INFO),
+ &instance->system_info_h, GFP_KERNEL);
instance->pd_info =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_PD_INFO),
- &instance->pd_info_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_PD_INFO),
+ &instance->pd_info_h, GFP_KERNEL);
instance->tgt_prop =
- pci_alloc_consistent(pdev,
- sizeof(struct MR_TARGET_PROPERTIES),
- &instance->tgt_prop_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_TARGET_PROPERTIES),
+ &instance->tgt_prop_h, GFP_KERNEL);
instance->crash_dump_buf =
- pci_alloc_consistent(pdev,
- CRASH_DMA_BUF_SIZE,
- &instance->crash_dump_h);
+ dma_alloc_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
+ &instance->crash_dump_h, GFP_KERNEL);
if (!instance->system_info_buf)
dev_err(&instance->pdev->dev,
@@ -6343,7 +6340,7 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
struct fusion_context *fusion = instance->ctrl_context;
if (instance->evt_detail)
- pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_evt_detail),
instance->evt_detail,
instance->evt_detail_h);
@@ -6354,41 +6351,41 @@ void megasas_free_ctrl_dma_buffers(struct megasas_instance *instance)
fusion->ioc_init_request_phys);
if (instance->pd_list_buf)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
instance->pd_list_buf,
instance->pd_list_buf_h);
if (instance->ld_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_LIST),
instance->ld_list_buf,
instance->ld_list_buf_h);
if (instance->ld_targetid_list_buf)
- pci_free_consistent(pdev, sizeof(struct MR_LD_TARGETID_LIST),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_LD_TARGETID_LIST),
instance->ld_targetid_list_buf,
instance->ld_targetid_list_buf_h);
if (instance->ctrl_info_buf)
- pci_free_consistent(pdev, sizeof(struct megasas_ctrl_info),
+ dma_free_coherent(&pdev->dev, sizeof(struct megasas_ctrl_info),
instance->ctrl_info_buf,
instance->ctrl_info_buf_h);
if (instance->system_info_buf)
- pci_free_consistent(pdev, sizeof(struct MR_DRV_SYSTEM_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_DRV_SYSTEM_INFO),
instance->system_info_buf,
instance->system_info_h);
if (instance->pd_info)
- pci_free_consistent(pdev, sizeof(struct MR_PD_INFO),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_PD_INFO),
instance->pd_info, instance->pd_info_h);
if (instance->tgt_prop)
- pci_free_consistent(pdev, sizeof(struct MR_TARGET_PROPERTIES),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_TARGET_PROPERTIES),
instance->tgt_prop, instance->tgt_prop_h);
if (instance->crash_dump_buf)
- pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+ dma_free_coherent(&pdev->dev, CRASH_DMA_BUF_SIZE,
instance->crash_dump_buf,
instance->crash_dump_h);
}
@@ -6516,17 +6513,20 @@ static int megasas_probe_one(struct pci_dev *pdev,
if (instance->requestorId) {
if (instance->PlasmaFW111) {
instance->vf_affiliation_111 =
- pci_alloc_consistent(pdev, sizeof(struct MR_LD_VF_AFFILIATION_111),
- &instance->vf_affiliation_111_h);
+ dma_alloc_coherent(&pdev->dev,
+ sizeof(struct MR_LD_VF_AFFILIATION_111),
+ &instance->vf_affiliation_111_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation_111)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
} else {
instance->vf_affiliation =
- pci_alloc_consistent(pdev,
- (MAX_LOGICAL_DRIVES + 1) *
- sizeof(struct MR_LD_VF_AFFILIATION),
- &instance->vf_affiliation_h);
+ dma_alloc_coherent(&pdev->dev,
+ (MAX_LOGICAL_DRIVES + 1) *
+ sizeof(struct MR_LD_VF_AFFILIATION),
+ &instance->vf_affiliation_h,
+ GFP_KERNEL);
if (!instance->vf_affiliation)
dev_warn(&pdev->dev, "Can't allocate "
"memory for VF affiliation buffer\n");
@@ -6994,19 +6994,19 @@ skip_firing_dcmds:
}
if (instance->vf_affiliation)
- pci_free_consistent(pdev, (MAX_LOGICAL_DRIVES + 1) *
+ dma_free_coherent(&pdev->dev, (MAX_LOGICAL_DRIVES + 1) *
sizeof(struct MR_LD_VF_AFFILIATION),
instance->vf_affiliation,
instance->vf_affiliation_h);
if (instance->vf_affiliation_111)
- pci_free_consistent(pdev,
+ dma_free_coherent(&pdev->dev,
sizeof(struct MR_LD_VF_AFFILIATION_111),
instance->vf_affiliation_111,
instance->vf_affiliation_111_h);
if (instance->hb_host_mem)
- pci_free_consistent(pdev, sizeof(struct MR_CTRL_HB_HOST_MEM),
+ dma_free_coherent(&pdev->dev, sizeof(struct MR_CTRL_HB_HOST_MEM),
instance->hb_host_mem,
instance->hb_host_mem_h);
@@ -7254,7 +7254,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
/*
* We don't change the dma_coherent_mask, so
- * pci_alloc_consistent only returns 32bit addresses
+ * dma_alloc_coherent only returns 32bit addresses
*/
if (instance->consistent_mask_64bit) {
kern_sge64[i].phys_addr = cpu_to_le64(buf_handle);
@@ -7523,6 +7523,9 @@ static int megasas_mgmt_compat_ioctl_fw(struct file *file, unsigned long arg)
get_user(user_sense_off, &cioc->sense_off))
return -EFAULT;
+ if (local_sense_off != user_sense_off)
+ return -EINVAL;
+
if (local_sense_len) {
void __user **sense_ioc_ptr =
(void __user **)((u8 *)((unsigned long)&ioc->frame.raw) + local_sense_off);
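
The sc_data_direction comparisons rewritten earlier in this file only ever worked because the legacy PCI_DMA_* macros were defined as aliases of the DMA_* values; scp->sc_data_direction has type enum dma_data_direction, so the generic names are the honest spelling. A sketch of the idiom (the frame-flag values are made up for illustration):

#include <linux/dma-direction.h>
#include <linux/types.h>

static u32 frame_dir_flags(enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:	/* formerly compared as PCI_DMA_TODEVICE */
		return 0x08;	/* hypothetical "write" frame flag */
	case DMA_FROM_DEVICE:	/* formerly PCI_DMA_FROMDEVICE */
		return 0x10;	/* hypothetical "read" frame flag */
	default:		/* DMA_NONE and DMA_BIDIRECTIONAL */
		return 0;
	}
}
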
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index c7f95bace353..f74b5ea24f0f 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -684,8 +684,8 @@ megasas_alloc_rdpq_fusion(struct megasas_instance *instance)
array_size = sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) *
MAX_MSIX_QUEUES_FUSION;
- fusion->rdpq_virt = pci_zalloc_consistent(instance->pdev, array_size,
- &fusion->rdpq_phys);
+ fusion->rdpq_virt = dma_zalloc_coherent(&instance->pdev->dev,
+ array_size, &fusion->rdpq_phys, GFP_KERNEL);
if (!fusion->rdpq_virt) {
dev_err(&instance->pdev->dev,
"Failed from %s %d\n", __func__, __LINE__);
@@ -813,7 +813,7 @@ megasas_free_rdpq_fusion(struct megasas_instance *instance) {
dma_pool_destroy(fusion->reply_frames_desc_pool_align);
if (fusion->rdpq_virt)
- pci_free_consistent(instance->pdev,
+ dma_free_coherent(&instance->pdev->dev,
sizeof(struct MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY) * MAX_MSIX_QUEUES_FUSION,
fusion->rdpq_virt, fusion->rdpq_phys);
}
@@ -2209,7 +2209,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[0] = MEGASAS_SCSI_VARIABLE_LENGTH_CMD;
cdb[7] = MEGASAS_SCSI_ADDL_CDB_LEN;
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_READ32;
else
cdb[9] = MEGASAS_SCSI_SERVICE_ACTION_WRITE32;
@@ -2238,7 +2238,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
cdb[31] = (u8)(num_blocks & 0xff);
/* set SCSI IO EEDPFlags */
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) {
+ if (scp->sc_data_direction == DMA_FROM_DEVICE) {
io_request->EEDPFlags = cpu_to_le16(
MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
@@ -2621,7 +2621,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
scsi_buff_len = scsi_bufflen(scp);
io_request->DataLength = cpu_to_le32(scsi_buff_len);
- if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_info.isRead = 1;
local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
@@ -3088,9 +3088,9 @@ megasas_build_io_fusion(struct megasas_instance *instance,
io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
- if (scp->sc_data_direction == PCI_DMA_TODEVICE)
+ if (scp->sc_data_direction == DMA_TO_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE);
- else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
+ else if (scp->sc_data_direction == DMA_FROM_DEVICE)
io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ);
io_request->SGLOffset0 =
diff --git a/drivers/scsi/mesh.c b/drivers/scsi/mesh.c
index 82e01dbe90af..ec6940f2fcb3 100644
--- a/drivers/scsi/mesh.c
+++ b/drivers/scsi/mesh.c
@@ -1915,8 +1915,8 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
/* We use the PCI APIs for now until the generic one gets fixed
* enough or until we get some macio-specific versions
*/
- dma_cmd_space = pci_zalloc_consistent(macio_get_pci_dev(mdev),
- ms->dma_cmd_size, &dma_cmd_bus);
+ dma_cmd_space = dma_zalloc_coherent(&macio_get_pci_dev(mdev)->dev,
+ ms->dma_cmd_size, &dma_cmd_bus, GFP_KERNEL);
if (dma_cmd_space == NULL) {
printk(KERN_ERR "mesh: can't allocate DMA table\n");
goto out_unmap;
@@ -1974,7 +1974,7 @@ static int mesh_probe(struct macio_dev *mdev, const struct of_device_id *match)
*/
mesh_shutdown(mdev);
set_mesh_power(ms, 0);
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
out_unmap:
iounmap(ms->dma);
@@ -2007,7 +2007,7 @@ static int mesh_remove(struct macio_dev *mdev)
iounmap(ms->dma);
/* Free DMA commands memory */
- pci_free_consistent(macio_get_pci_dev(mdev), ms->dma_cmd_size,
+ dma_free_coherent(&macio_get_pci_dev(mdev)->dev, ms->dma_cmd_size,
ms->dma_cmd_space, ms->dma_cmd_bus);
/* Release memory resources */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 59d7844ee022..2500377d0723 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -122,8 +122,8 @@ mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
if (!(status & MPT3_CMD_RESET))
issue_reset = 1;
- pr_err(MPT3SAS_FMT "Command %s\n", ioc->name,
- ((issue_reset == 0) ? "terminated due to Host Reset" : "Timeout"));
+ ioc_err(ioc, "Command %s\n",
+ issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
_debug_dump_mf(mpi_request, sz);
return issue_reset;
@@ -336,9 +336,7 @@ _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
return ct->chain_buffer;
}
}
- pr_info(MPT3SAS_FMT
- "Provided chain_buffer_dma address is not in the lookup list\n",
- ioc->name);
+ ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
return NULL;
}
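
The pr_err(MPT3SAS_FMT ..., ioc->name, ...) calls throughout this file are being folded into ioc_err()/ioc_warn()/ioc_info() helpers that prepend the adapter name themselves, removing the easy-to-forget ioc->name argument from every call site. The helpers are presumably defined along these lines in mpt3sas_base.h (sketch):

#define ioc_err(ioc, fmt, ...)						\
	pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_warn(ioc, fmt, ...)						\
	pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
#define ioc_info(ioc, fmt, ...)						\
	pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
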
@@ -394,7 +392,7 @@ static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
/* Get scsi_cmd using smid */
scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
if (scmd == NULL) {
- pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
+ ioc_err(ioc, "scmd is NULL\n");
return;
}
@@ -532,11 +530,11 @@ static int mpt3sas_remove_dead_ioc_func(void *arg)
struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
struct pci_dev *pdev;
- if ((ioc == NULL))
+ if (!ioc)
return -1;
pdev = ioc->pdev;
- if ((pdev == NULL))
+ if (!pdev)
return -1;
pci_stop_and_remove_bus_device_locked(pdev);
return 0;
@@ -566,8 +564,7 @@ _base_fault_reset_work(struct work_struct *work)
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
- pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
- ioc->name);
+ ioc_err(ioc, "SAS host is non-operational !!!!\n");
/* It may be possible that EEH recovery can resolve some of
* pci bus failure issues rather than removing the dead ioc function
@@ -600,13 +597,11 @@ _base_fault_reset_work(struct work_struct *work)
p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
"%s_dead_ioc_%d", ioc->driver_name, ioc->id);
if (IS_ERR(p))
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ __func__);
else
- pr_err(MPT3SAS_FMT
- "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ __func__);
return; /* don't rearm timer */
}
@@ -614,8 +609,8 @@ _base_fault_reset_work(struct work_struct *work)
if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
- __func__, (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "%s: hard reset: %s\n",
+ __func__, rc == 0 ? "success" : "failed");
doorbell = mpt3sas_base_get_iocstate(ioc, 0);
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
mpt3sas_base_fault_info(ioc, doorbell &
@@ -657,8 +652,7 @@ mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
ioc->fault_reset_work_q =
create_singlethread_workqueue(ioc->fault_reset_work_q_name);
if (!ioc->fault_reset_work_q) {
- pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
- ioc->name, __func__, __LINE__);
+ ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
return;
}
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
@@ -700,8 +694,7 @@ mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
{
- pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
- ioc->name, fault_code);
+ ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
}
/**
@@ -728,8 +721,7 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_fault_info(ioc , doorbell);
else {
writel(0xC0FFEE00, &ioc->chip->Doorbell);
- pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
- ioc->name);
+ ioc_err(ioc, "Firmware is halted due to command timeout\n");
}
if (ioc->fwfault_debug == 2)
@@ -956,8 +948,8 @@ _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
break;
}
- pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
- ioc->name, desc, ioc_status, request_hdr, func_str);
+ ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ desc, ioc_status, request_hdr, func_str);
_debug_dump_mf(request_hdr, frame_sz/4);
}
@@ -1003,9 +995,9 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi2EventDataSasDiscovery_t *event_data =
(Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "Discovery: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
pr_cont(" discovery_status(0x%08x)",
le32_to_cpu(event_data->DiscoveryStatus));
@@ -1059,14 +1051,13 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
{
Mpi26EventDataPCIeEnumeration_t *event_data =
(Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
- pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
- (event_data->ReasonCode ==
- MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "PCIE Enumeration: (%s)",
+ event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
+ "start" : "stop");
if (event_data->EnumerationStatus)
- pr_info("enumeration_status(0x%08x)",
- le32_to_cpu(event_data->EnumerationStatus));
- pr_info("\n");
+ pr_cont("enumeration_status(0x%08x)",
+ le32_to_cpu(event_data->EnumerationStatus));
+ pr_cont("\n");
return;
}
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
@@ -1077,7 +1068,7 @@ _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+ ioc_info(ioc, "%s\n", desc);
}
/**
@@ -1128,11 +1119,9 @@ _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
break;
}
- pr_warn(MPT3SAS_FMT
- "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
- ioc->name, log_info,
- originator_str, sas_loginfo.dw.code,
- sas_loginfo.dw.subcode);
+ ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ log_info,
+ originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
}
/**
@@ -1152,8 +1141,8 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
@@ -1249,9 +1238,9 @@ _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
delayed_event_ack->EventContext = mpi_reply->EventContext;
list_add_tail(&delayed_event_ack->list,
&ioc->delayed_event_ack_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED: EVENT ACK: event (0x%04x)\n",
- ioc->name, le16_to_cpu(mpi_reply->Event)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
+ le16_to_cpu(mpi_reply->Event)));
goto out;
}
@@ -2270,7 +2259,7 @@ _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
@@ -2418,7 +2407,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
sges_left = scsi_dma_map(scmd);
if (sges_left < 0) {
sdev_printk(KERN_ERR, scmd->device,
- "pci_map_sg failed: request for %d bytes!\n",
+ "scsi_dma_map failed: request for %d bytes!\n",
scsi_bufflen(scmd));
return -ENOMEM;
}
@@ -2563,44 +2552,41 @@ _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
+ u64 required_mask, coherent_mask;
struct sysinfo s;
- u64 consistent_dma_mask;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
+ required_mask = dma_get_required_mask(&pdev->dev);
+ if (sizeof(dma_addr_t) == 4 || required_mask == 32)
+ goto try_32bit;
+
if (ioc->dma_mask)
- consistent_dma_mask = DMA_BIT_MASK(64);
+ coherent_mask = DMA_BIT_MASK(64);
else
- consistent_dma_mask = DMA_BIT_MASK(32);
-
- if (sizeof(dma_addr_t) > 4) {
- const uint64_t required_mask =
- dma_get_required_mask(&pdev->dev);
- if ((required_mask > DMA_BIT_MASK(32)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
- !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
- ioc->base_add_sg_single = &_base_add_sg_single_64;
- ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
- goto out;
- }
- }
+ coherent_mask = DMA_BIT_MASK(32);
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
+ dma_set_coherent_mask(&pdev->dev, coherent_mask))
+ goto try_32bit;
+
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ ioc->dma_mask = 64;
+ goto out;
try_32bit:
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
- ioc->base_add_sg_single = &_base_add_sg_single_32;
- ioc->sge_size = sizeof(Mpi2SGESimple32_t);
- ioc->dma_mask = 32;
- } else
+ if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ ioc->dma_mask = 32;
out:
si_meminfo(&s);
- pr_info(MPT3SAS_FMT
- "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
- ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
+ ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->dma_mask, convert_to_kb(s.totalram));
return 0;
}
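
The rework of _base_config_dma_addressing() above replaces the nested pci_set_dma_mask()/pci_set_consistent_dma_mask() calls with the generic helpers: try 64-bit addressing first, fall back to 32-bit, and fail only if even that is refused. The shape of the pattern on a generic device (a sketch, not the driver's exact policy):

#include <linux/dma-mapping.h>

static int set_widest_dma_mask(struct device *dev)
{
	/* Prefer 64-bit when dma_addr_t can hold it and the platform agrees. */
	if (sizeof(dma_addr_t) > 4 &&
	    !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 64;

	/* Fall back to 32-bit; -ENODEV if no usable mask exists. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -ENODEV;
	return 32;
}
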
@@ -2639,8 +2625,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
if (!base) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
- ioc->name));
+ dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
return -EINVAL;
}
@@ -2658,9 +2643,8 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
pci_read_config_word(ioc->pdev, base + 2, &message_control);
ioc->msix_vector_count = (message_control & 0x3FF) + 1;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "msix is supported, vector_count(%d)\n",
- ioc->name, ioc->msix_vector_count));
+ dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
+ ioc->msix_vector_count));
return 0;
}
@@ -2702,8 +2686,8 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
if (!reply_q) {
- pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
- ioc->name, (int)sizeof(struct adapter_reply_queue));
+ ioc_err(ioc, "unable to allocate memory %zu!\n",
+ sizeof(struct adapter_reply_queue));
return -ENOMEM;
}
reply_q->ioc = ioc;
@@ -2719,7 +2703,7 @@ _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
IRQF_SHARED, reply_q->name, reply_q);
if (r) {
- pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ pr_err("%s: unable to allocate interrupt %d!\n",
reply_q->name, pci_irq_vector(pdev, index));
kfree(reply_q);
return -EBUSY;
@@ -2761,8 +2745,8 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
reply_q->msix_index);
if (!mask) {
- pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
- ioc->name, reply_q->msix_index);
+ ioc_warn(ioc, "no affinity for msi %x\n",
+ reply_q->msix_index);
continue;
}
@@ -2833,9 +2817,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
- printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
- ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
- ioc->cpu_count, max_msix_vectors);
+ ioc_info(ioc, "MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+ ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
local_max_msix_vectors = (reset_devices) ? 1 : 8;
@@ -2857,9 +2840,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
irq_flags);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vectors failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
+ r));
goto try_ioapic;
}
@@ -2882,9 +2865,9 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = 1;
r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
if (r < 0) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
- ioc->name, r));
+ dfailprintk(ioc,
+ ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
+ r));
} else
r = _base_request_irq(ioc, 0);
@@ -2900,8 +2883,7 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
struct pci_dev *pdev = ioc->pdev;
- dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
_base_free_irq(ioc);
_base_disable_msix(ioc);
@@ -2939,13 +2921,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
phys_addr_t chip_phys = 0;
struct adapter_reply_queue *reply_q;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
if (pci_enable_device_mem(pdev)) {
- pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_enable_device_mem: failed\n");
ioc->bars = 0;
return -ENODEV;
}
@@ -2953,8 +2933,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_request_selected_regions(pdev, ioc->bars,
ioc->driver_name)) {
- pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
- ioc->name);
+ ioc_warn(ioc, "pci_request_selected_regions: failed\n");
ioc->bars = 0;
r = -ENODEV;
goto out_fail;
@@ -2967,8 +2946,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (_base_config_dma_addressing(ioc, pdev) != 0) {
- pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
- ioc->name, pci_name(pdev));
+ ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
r = -ENODEV;
goto out_fail;
}
@@ -2991,8 +2969,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->chip == NULL) {
- pr_err(MPT3SAS_FMT "unable to map adapter memory! "
- " or resource not found\n", ioc->name);
+ ioc_err(ioc, "unable to map adapter memory! or resource not found\n");
r = -EINVAL;
goto out_fail;
}
@@ -3026,9 +3003,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
ioc->combined_reply_index_count,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->replyPostRegisterIndex) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "allocation for reply Post Register Index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
r = -ENOMEM;
goto out_fail;
}
@@ -3053,15 +3029,15 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
}
list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
- pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
- reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
- "IO-APIC enabled"),
- pci_irq_vector(ioc->pdev, reply_q->msix_index));
+ pr_info("%s: %s enabled: IRQ %d\n",
+ reply_q->name,
+ ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
+ pci_irq_vector(ioc->pdev, reply_q->msix_index));
- pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
- ioc->name, &chip_phys, ioc->chip, memap_sz);
- pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
- ioc->name, (unsigned long long)pio_chip, pio_sz);
+ ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
+ &chip_phys, ioc->chip, memap_sz);
+ ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
+ (unsigned long long)pio_chip, pio_sz);
/* Save PCI configuration state for recovery from PCI AER/EEH errors */
pci_save_state(pdev);
@@ -3176,8 +3152,7 @@ mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->internal_free_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- pr_err(MPT3SAS_FMT "%s: smid not available\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: smid not available\n", __func__);
return 0;
}
@@ -3545,89 +3520,85 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RMS2LL080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL080_BRANDING);
break;
case MPT2SAS_INTEL_RMS2LL040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS2LL040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
case MPT2SAS_INTEL_SSD910_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_SSD910_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_SSD910_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_INTEL_RS25GB008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RS25GB008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25JB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
break;
case MPT2SAS_INTEL_RMS25KB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB040_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
break;
case MPT2SAS_INTEL_RMS25LB080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_INTEL_RMS3JC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RMS3JC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RMS3JC080_BRANDING);
break;
case MPT3SAS_INTEL_RS3GC008_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3GC008_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3GC008_BRANDING);
break;
case MPT3SAS_INTEL_RS3FC044_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3FC044_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3FC044_BRANDING);
break;
case MPT3SAS_INTEL_RS3UC080_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_INTEL_RS3UC080_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_INTEL_RS3UC080_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Intel(R) Controller: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3636,57 +3607,54 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2008:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
break;
case MPT2SAS_DELL_PERC_H200_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_PERC_H200_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_PERC_H200_BRANDING);
break;
case MPT2SAS_DELL_6GBPS_SAS_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_DELL_6GBPS_SAS_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_DELL_6GBPS_SAS_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_DELL_12G_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_DELL_12G_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_DELL_12G_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
- ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3695,46 +3663,42 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI25_MFGPAGE_DEVID_SAS3008:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3108_1:
switch (ioc->pdev->subsystem_device) {
case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
break;
case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
- );
+ ioc_info(ioc, "%s\n",
+ MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
default:
- pr_info(MPT3SAS_FMT
- "Cisco SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
break;
@@ -3743,43 +3707,40 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
case MPI2_MFGPAGE_DEVID_SAS2004:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
case MPI2_MFGPAGE_DEVID_SAS2308_2:
switch (ioc->pdev->subsystem_device) {
case MPT2SAS_HP_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_INTERNAL_BRANDING);
break;
case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
break;
case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
- pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
+ ioc_info(ioc, "%s\n",
+ MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
break;
default:
- pr_info(MPT3SAS_FMT
- "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
- pr_info(MPT3SAS_FMT
- "HP SAS HBA: Subsystem ID: 0x%X\n",
- ioc->name, ioc->pdev->subsystem_device);
+ ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
+ ioc->pdev->subsystem_device);
break;
}
default:
@@ -3806,28 +3767,25 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
u16 smid, ioc_status;
size_t data_length;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
data_length = sizeof(Mpi2FWImageHeader_t);
- fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
- &fwpkg_data_dma);
+ fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
+ &fwpkg_data_dma, GFP_KERNEL);
if (!fwpkg_data) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENOMEM;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
r = -EAGAIN;
goto out;
}
@@ -3846,11 +3804,9 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
/* Wait for 15 seconds */
wait_for_completion_timeout(&ioc->base_cmds.done,
FW_IMG_HDR_READ_TIMEOUT*HZ);
- pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: complete\n", __func__);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi25FWUploadRequest_t)/4);
r = -ETIME;
@@ -3864,13 +3820,11 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
if (FWImgHdr->PackageVersion.Word) {
- pr_info(MPT3SAS_FMT "FW Package Version"
- "(%02d.%02d.%02d.%02d)\n",
- ioc->name,
- FWImgHdr->PackageVersion.Struct.Major,
- FWImgHdr->PackageVersion.Struct.Minor,
- FWImgHdr->PackageVersion.Struct.Unit,
- FWImgHdr->PackageVersion.Struct.Dev);
+ ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
+ FWImgHdr->PackageVersion.Struct.Major,
+ FWImgHdr->PackageVersion.Struct.Minor,
+ FWImgHdr->PackageVersion.Struct.Unit,
+ FWImgHdr->PackageVersion.Struct.Dev);
}
} else {
_debug_dump_mf(&mpi_reply,
@@ -3881,7 +3835,7 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
if (fwpkg_data)
- pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
+ dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
fwpkg_data_dma);
return r;
}
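
The pci_alloc_consistent()/pci_free_consistent() pairs replaced in this hunk
map directly onto the generic DMA API. A sketch of the historical compat
wrapper this relies on (the legacy PCI helper hard-coded its GFP flag, which
is why the new call can pass GFP_KERNEL explicitly in this sleepable context):

	/* approximate definition of the legacy compat wrapper */
	static inline void *pci_alloc_consistent(struct pci_dev *hwdev,
						 size_t size,
						 dma_addr_t *dma_handle)
	{
		return dma_alloc_coherent(&hwdev->dev, size, dma_handle,
					  GFP_ATOMIC);
	}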
@@ -3900,18 +3854,17 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
strncpy(desc, ioc->manu_pg0.ChipName, 16);
- pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
- "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
- ioc->name, desc,
- (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
- (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
- (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
- ioc->facts.FWVersion.Word & 0x000000FF,
- ioc->pdev->revision,
- (bios_version & 0xFF000000) >> 24,
- (bios_version & 0x00FF0000) >> 16,
- (bios_version & 0x0000FF00) >> 8,
- bios_version & 0x000000FF);
+ ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
_base_display_OEMs_branding(ioc);
@@ -3920,82 +3873,81 @@ _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
i++;
}
- pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+ ioc_info(ioc, "Protocol=(");
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
- pr_info("Initiator");
+ pr_cont("Initiator");
i++;
}
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
- pr_info("%sTarget", i ? "," : "");
+ pr_cont("%sTarget", i ? "," : "");
i++;
}
i = 0;
- pr_info("), ");
- pr_info("Capabilities=(");
+ pr_cont("), Capabilities=(");
if (!ioc->hide_ir_msg) {
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
- pr_info("Raid");
+ pr_cont("Raid");
i++;
}
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
- pr_info("%sTLR", i ? "," : "");
+ pr_cont("%sTLR", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
- pr_info("%sMulticast", i ? "," : "");
+ pr_cont("%sMulticast", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
- pr_info("%sBIDI Target", i ? "," : "");
+ pr_cont("%sBIDI Target", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
- pr_info("%sEEDP", i ? "," : "");
+ pr_cont("%sEEDP", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
- pr_info("%sSnapshot Buffer", i ? "," : "");
+ pr_cont("%sSnapshot Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
- pr_info("%sDiag Trace Buffer", i ? "," : "");
+ pr_cont("%sDiag Trace Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
- pr_info("%sDiag Extended Buffer", i ? "," : "");
+ pr_cont("%sDiag Extended Buffer", i ? "," : "");
i++;
}
if (ioc->facts.IOCCapabilities &
MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
- pr_info("%sTask Set Full", i ? "," : "");
+ pr_cont("%sTask Set Full", i ? "," : "");
i++;
}
iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
- pr_info("%sNCQ", i ? "," : "");
+ pr_cont("%sNCQ", i ? "," : "");
i++;
}
- pr_info(")\n");
+ pr_cont(")\n");
}
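
Note that the pr_info() -> pr_cont() switch in this function is a behavioural
fix, not just churn: each pr_info() call emits a new log record, so the old
code could break the single "Protocol=(...), Capabilities=(...)" line into
many records. pr_cont() appends to the preceding record instead. A condensed
sketch of the pattern, with hypothetical flags standing in for the IOCFacts
tests:

	int i = 0;

	ioc_info(ioc, "Capabilities=(");	/* opens the record */
	if (has_raid) {				/* hypothetical flag */
		pr_cont("Raid");
		i++;
	}
	if (has_tlr)				/* hypothetical flag */
		pr_cont("%sTLR", i ? "," : "");	/* comma after the first */
	pr_cont(")\n");				/* terminates the record */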
/**
@@ -4028,21 +3980,21 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -4074,11 +4026,11 @@ mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
else
dmd_new =
dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
- pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
- ioc->name, dmd_orignal, dmd_new);
- pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
- ioc->name, io_missing_delay_original,
- io_missing_delay);
+ ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
+ dmd_orignal, dmd_new);
+ ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
+ io_missing_delay_original,
+ io_missing_delay);
ioc->device_missing_delay = dmd_new;
ioc->io_missing_delay = io_missing_delay;
}
@@ -4189,33 +4141,32 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
struct chain_tracker *ct;
struct reply_post_struct *rps;
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->request) {
- pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
ioc->request, ioc->request_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "request_pool(0x%p): free\n",
- ioc->name, ioc->request));
+ dexitprintk(ioc,
+ ioc_info(ioc, "request_pool(0x%p): free\n",
+ ioc->request));
ioc->request = NULL;
}
if (ioc->sense) {
dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
dma_pool_destroy(ioc->sense_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense_pool(0x%p): free\n",
- ioc->name, ioc->sense));
+ dexitprintk(ioc,
+ ioc_info(ioc, "sense_pool(0x%p): free\n",
+ ioc->sense));
ioc->sense = NULL;
}
if (ioc->reply) {
dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
dma_pool_destroy(ioc->reply_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_pool(0x%p): free\n",
- ioc->name, ioc->reply));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_pool(0x%p): free\n",
+ ioc->reply));
ioc->reply = NULL;
}
@@ -4223,9 +4174,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
ioc->reply_free_dma);
dma_pool_destroy(ioc->reply_free_dma_pool);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_pool(0x%p): free\n",
- ioc->name, ioc->reply_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_free_pool(0x%p): free\n",
+ ioc->reply_free));
ioc->reply_free = NULL;
}
@@ -4237,9 +4188,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_free_dma_pool,
rps->reply_post_free,
rps->reply_post_free_dma);
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_pool(0x%p): free\n",
- ioc->name, rps->reply_post_free));
+ dexitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
+ rps->reply_post_free));
rps->reply_post_free = NULL;
}
} while (ioc->rdpq_array_enable &&
@@ -4267,10 +4218,10 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (ioc->config_page) {
- dexitprintk(ioc, pr_info(MPT3SAS_FMT
- "config_page(0x%p): free\n", ioc->name,
- ioc->config_page));
- pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ dexitprintk(ioc,
+ ioc_info(ioc, "config_page(0x%p): free\n",
+ ioc->config_page));
+ dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
ioc->config_page, ioc->config_page_dma);
}
@@ -4338,8 +4289,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
int i, j;
struct chain_tracker *ct;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
retry_sz = 0;
@@ -4368,10 +4318,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
sg_tablesize = min_t(unsigned short, sg_tablesize,
SG_MAX_SEGMENTS);
- pr_warn(MPT3SAS_FMT
- "sg_tablesize(%u) is bigger than kernel "
- "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
- sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
+ ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
+ sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
}
ioc->shost->sg_tablesize = sg_tablesize;
}
@@ -4381,9 +4329,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
INTERNAL_SCSIIO_CMDS_COUNT)) {
- pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
- Credits, it has just %d number of credits\n",
- ioc->name, facts->RequestCredit);
+ ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
+ facts->RequestCredit);
return -ENOMEM;
}
ioc->internal_depth = 10;
@@ -4482,11 +4429,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
- "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
- "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
- ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
- ioc->chains_needed_per_io));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
+ ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message,
+ ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
/* reply post queue, 16 byte align */
reply_post_free_sz = ioc->reply_post_queue_depth *
@@ -4501,48 +4449,40 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sizeof(struct reply_post_struct), GFP_KERNEL);
if (!ioc->reply_post) {
- pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
goto out;
}
ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_post_free_dma_pool) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
goto out;
}
i = 0;
do {
ioc->reply_post[i].reply_post_free =
- dma_pool_alloc(ioc->reply_post_free_dma_pool,
+ dma_pool_zalloc(ioc->reply_post_free_dma_pool,
GFP_KERNEL,
&ioc->reply_post[i].reply_post_free_dma);
if (!ioc->reply_post[i].reply_post_free) {
- pr_err(MPT3SAS_FMT
- "reply_post_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_post[i].reply_post_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply post free pool (0x%p): depth(%d),"
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_post[i].reply_post_free,
- ioc->reply_post_queue_depth, 8, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_post_free_dma = (0x%llx)\n", ioc->name,
- (unsigned long long)
- ioc->reply_post[i].reply_post_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_post[i].reply_post_free,
+ ioc->reply_post_queue_depth,
+ 8, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
+ (u64)ioc->reply_post[i].reply_post_free_dma));
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
if (ioc->dma_mask == 64) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
- pr_warn(MPT3SAS_FMT
- "no suitable consistent DMA mask for %s\n",
- ioc->name, pci_name(ioc->pdev));
+ ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
+ pci_name(ioc->pdev));
goto out;
}
}
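
Dropping the explicit memset() in the hunk above is safe: dma_pool_zalloc()
is the zeroing variant of dma_pool_alloc(), so for a pool whose element size
is sz the before/after forms are equivalent:

	/* before (error handling elided) */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	memset(buf, 0, sz);

	/* after */
	buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma);

The same substitution is applied to the reply_free pool further down.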
@@ -4554,9 +4494,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
* with some internal commands that could be outstanding
*/
ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "scsi host: can_queue depth (%d)\n",
- ioc->name, ioc->shost->can_queue));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
+ ioc->shost->can_queue));
/* contiguous pool for request and chains, 16 byte align, one extra "
@@ -4572,12 +4512,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz += (ioc->internal_depth * ioc->request_sz);
ioc->request_dma_sz = sz;
- ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
+ &ioc->request_dma, GFP_KERNEL);
if (!ioc->request) {
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
goto out;
retry_sz = 64;
@@ -4587,10 +4527,9 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
if (retry_sz)
- pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
- "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
- "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
- ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
+ ioc->hba_queue_depth, ioc->chains_needed_per_io,
+ ioc->request_sz, sz / 1024);
/* hi-priority queue */
ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
@@ -4604,24 +4543,26 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
ioc->request_sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
- (ioc->hba_queue_depth * ioc->request_sz)/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->request, ioc->hba_queue_depth,
+ ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz) / 1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
- ioc->name, (unsigned long long) ioc->request_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request pool: dma(0x%llx)\n",
+ (unsigned long long)ioc->request_dma));
total_sz += sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
- ioc->name, ioc->request, ioc->scsiio_depth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
+ ioc->request, ioc->scsiio_depth));
ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup) {
- pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
- "failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
goto out;
}
@@ -4629,8 +4570,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->scsiio_depth; i++) {
ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
if (!ioc->chain_lookup[i].chains_per_smid) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " kzalloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: kzalloc failed\n");
goto out;
}
}
@@ -4639,29 +4579,27 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->hpr_lookup) {
- pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
goto out;
}
ioc->hi_priority_smid = ioc->scsiio_depth + 1;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hi_priority(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->hi_priority,
- ioc->hi_priority_depth, ioc->hi_priority_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
/* initialize internal queue smid's */
ioc->internal_lookup = kcalloc(ioc->internal_depth,
sizeof(struct request_tracker), GFP_KERNEL);
if (!ioc->internal_lookup) {
- pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
- ioc->name);
+ ioc_err(ioc, "internal_lookup: kcalloc failed\n");
goto out;
}
ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "internal(0x%p): depth(%d), start smid(%d)\n",
- ioc->name, ioc->internal,
- ioc->internal_depth, ioc->internal_smid));
+ dinitprintk(ioc,
+ ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
/*
* The number of NVMe page sized blocks needed is:
* (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -4685,17 +4623,14 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
if (!ioc->pcie_sg_lookup) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL lookup: kzalloc failed\n", ioc->name);
+ ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
goto out;
}
sz = nvme_blocks_needed * ioc->page_size;
ioc->pcie_sgl_dma_pool =
dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
if (!ioc->pcie_sgl_dma_pool) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
goto out;
}
@@ -4708,9 +4643,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->pcie_sgl_dma_pool, GFP_KERNEL,
&ioc->pcie_sg_lookup[i].pcie_sgl_dma);
if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
- pr_info(MPT3SAS_FMT
- "PCIe SGL pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
goto out;
}
for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
@@ -4724,20 +4657,20 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
- "element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
- "fit in a PRP page(%d)\n", ioc->name,
- ioc->chains_per_prp_buffer));
+ dinitprintk(ioc,
+ ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->scsiio_depth, sz,
+ (sz * ioc->scsiio_depth) / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
+ ioc->chains_per_prp_buffer));
total_sz += sz * ioc->scsiio_depth;
}
ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
ioc->chain_segment_sz, 16, 0);
if (!ioc->chain_dma_pool) {
- pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
goto out;
}
for (i = 0; i < ioc->scsiio_depth; i++) {
@@ -4748,8 +4681,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->chain_dma_pool, GFP_KERNEL,
&ct->chain_buffer_dma);
if (!ct->chain_buffer) {
- pr_err(MPT3SAS_FMT "chain_lookup: "
- " pci_pool_alloc failed\n", ioc->name);
+ ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
_base_release_memory_pools(ioc);
goto out;
}
@@ -4757,25 +4689,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += ioc->chain_segment_sz;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
- ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->chain_depth, ioc->chain_segment_sz,
+ (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
/* sense buffers, 4 byte align */
sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
goto out;
}
/* sense buffer requires to be in same 4 gb region.
@@ -4797,24 +4727,23 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
dma_pool_create("sense pool", &ioc->pdev->dev, sz,
roundup_pow_of_two(sz), 0);
if (!ioc->sense_dma_pool) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_create failed\n");
goto out;
}
ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
&ioc->sense_dma);
if (!ioc->sense) {
- pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
goto out;
}
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
- "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
- SCSI_SENSE_BUFFERSIZE, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->sense_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "sense_dma(0x%llx)\n",
+ (unsigned long long)ioc->sense_dma));
total_sz += sz;
/* reply pool, 4 byte align */
@@ -4822,25 +4751,24 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
4, 0);
if (!ioc->reply_dma_pool) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_create failed\n");
goto out;
}
ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
&ioc->reply_dma);
if (!ioc->reply) {
- pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
goto out;
}
ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
- ioc->name, ioc->reply,
- ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->reply, ioc->reply_free_queue_depth,
+ ioc->reply_sz, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_dma(0x%llx)\n",
+ (unsigned long long)ioc->reply_dma));
total_sz += sz;
/* reply free queue, 16 byte align */
@@ -4848,24 +4776,22 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
&ioc->pdev->dev, sz, 16, 0);
if (!ioc->reply_free_dma_pool) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
goto out;
}
- ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+ ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
&ioc->reply_free_dma);
if (!ioc->reply_free) {
- pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
goto out;
}
- memset(ioc->reply_free, 0, sz);
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
- "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
- ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "reply_free_dma (0x%llx)\n",
- ioc->name, (unsigned long long)ioc->reply_free_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->reply_free, ioc->reply_free_queue_depth,
+ 4, sz / 1024));
+ dinitprintk(ioc,
+ ioc_info(ioc, "reply_free_dma (0x%llx)\n",
+ (unsigned long long)ioc->reply_free_dma));
total_sz += sz;
if (ioc->rdpq_array_enable) {
@@ -4876,8 +4802,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
&ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
if (!ioc->reply_post_free_array_dma_pool) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_create failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
goto out;
}
ioc->reply_post_free_array =
@@ -4885,34 +4810,31 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
GFP_KERNEL, &ioc->reply_post_free_array_dma);
if (!ioc->reply_post_free_array) {
dinitprintk(ioc,
- pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
- "dma_pool_alloc failed\n", ioc->name));
+ ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
goto out;
}
}
ioc->config_page_sz = 512;
- ioc->config_page = pci_alloc_consistent(ioc->pdev,
- ioc->config_page_sz, &ioc->config_page_dma);
+ ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
+ ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
if (!ioc->config_page) {
- pr_err(MPT3SAS_FMT
- "config page: dma_pool_alloc failed\n",
- ioc->name);
+ ioc_err(ioc, "config page: dma_pool_alloc failed\n");
goto out;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "config page(0x%p): size(%d)\n",
- ioc->name, ioc->config_page, ioc->config_page_sz));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
- ioc->name, (unsigned long long)ioc->config_page_dma));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config page(0x%p): size(%d)\n",
+ ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc,
+ ioc_info(ioc, "config_page_dma(0x%llx)\n",
+ (unsigned long long)ioc->config_page_dma));
total_sz += ioc->config_page_sz;
- pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
- ioc->name, total_sz/1024);
- pr_info(MPT3SAS_FMT
- "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
- ioc->name, ioc->shost->can_queue, facts->RequestCredit);
- pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
- ioc->name, ioc->shost->sg_tablesize);
+ ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
+ total_sz / 1024);
+ ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->shost->can_queue, facts->RequestCredit);
+ ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
+ ioc->shost->sg_tablesize);
return 0;
out:
@@ -4990,9 +4912,9 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5000,9 +4922,8 @@ _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5017,9 +4938,9 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5027,9 +4948,8 @@ _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5056,9 +4976,9 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
int_status = readl(&ioc->chip->HostInterruptStatus);
if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
doorbell = readl(&ioc->chip->Doorbell);
@@ -5075,9 +4995,8 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
} while (--cntdn);
out:
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), int_status(%x)!\n",
- ioc->name, __func__, count, int_status);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ __func__, count, int_status);
return -EFAULT;
}
@@ -5099,9 +5018,9 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
do {
doorbell_reg = readl(&ioc->chip->Doorbell);
if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: successful count(%d), timeout(%d)\n",
- ioc->name, __func__, count, timeout));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
+ __func__, count, timeout));
return 0;
}
@@ -5109,9 +5028,8 @@ _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
count++;
} while (--cntdn);
- pr_err(MPT3SAS_FMT
- "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
- ioc->name, __func__, count, doorbell_reg);
+ ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ __func__, count, doorbell_reg);
return -EFAULT;
}
@@ -5130,8 +5048,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
int r = 0;
if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
- pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: unknown reset_type\n", __func__);
return -EFAULT;
}
@@ -5139,7 +5056,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
return -EFAULT;
- pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+ ioc_info(ioc, "sending message unit reset !!\n");
writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
&ioc->chip->Doorbell);
@@ -5149,15 +5066,14 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
}
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
r = -EFAULT;
goto out;
}
out:
- pr_info(MPT3SAS_FMT "message unit reset: %s\n",
- ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "message unit reset: %s\n",
+ r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5183,9 +5099,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
/* make sure doorbell is not in use */
if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
- pr_err(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
return -EFAULT;
}
@@ -5200,17 +5114,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
&ioc->chip->Doorbell);
if ((_base_spin_on_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_ack(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake ack failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5222,17 +5134,15 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
}
if (failed) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake sending request failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
/* now wait for the reply */
if ((_base_wait_for_doorbell_int(ioc, timeout))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
@@ -5241,9 +5151,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
& MPI2_DOORBELL_DATA_MASK);
writel(0, &ioc->chip->HostInterruptStatus);
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
@@ -5252,9 +5161,8 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
for (i = 2; i < default_reply->MsgLength * 2; i++) {
if ((_base_wait_for_doorbell_int(ioc, 5))) {
- pr_err(MPT3SAS_FMT
- "doorbell handshake int failed (line=%d)\n",
- ioc->name, __LINE__);
+ ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
+ __LINE__);
return -EFAULT;
}
if (i >= reply_bytes/2) /* overflow case */
@@ -5267,8 +5175,9 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
_base_wait_for_doorbell_int(ioc, 5);
if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ dhsprintk(ioc,
+ ioc_info(ioc, "doorbell is in use (line=%d)\n",
+ __LINE__));
}
writel(0, &ioc->chip->HostInterruptStatus);
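
For orientation, the code above implements a word-at-a-time doorbell
protocol; the summary below is drawn from the visible hunks only:

	/*
	 * Doorbell handshake as implemented in
	 * _base_handshake_req_reply_wait():
	 *  1. verify the Doorbell register is not flagged MPI2_DOORBELL_USED;
	 *  2. write the request one 32-bit word at a time, waiting for the
	 *     IOC to ack each word via _base_wait_for_doorbell_ack();
	 *  3. read the reply 16 bits at a time from Doorbell, masking with
	 *     MPI2_DOORBELL_DATA_MASK and clearing HostInterruptStatus
	 *     between words;
	 *  4. drain but discard any reply words past reply_bytes/2
	 *     (the overflow case).
	 */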
@@ -5308,14 +5217,12 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5324,23 +5231,20 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5408,14 +5312,12 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
void *request;
u16 wait_state_count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mutex_lock(&ioc->base_cmds.mutex);
if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: base_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5424,24 +5326,20 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -5495,8 +5393,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
struct mpt3sas_port_facts *pfacts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
@@ -5507,8 +5404,7 @@ _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5536,26 +5432,26 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
u32 ioc_state;
int rc;
- dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: host in pci error recovery\n", ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return -EFAULT;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
(ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, printk(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -5567,9 +5463,9 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
if (ioc_state) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state));
return -EFAULT;
}
@@ -5592,14 +5488,13 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
struct mpt3sas_facts *facts;
int mpi_reply_sz, mpi_request_sz, r;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
r = _base_wait_for_iocstate(ioc, 10);
if (r) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "%s: failed getting to correct state\n",
- ioc->name, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "%s: failed getting to correct state\n",
+ __func__));
return r;
}
mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
@@ -5610,8 +5505,7 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
@@ -5663,20 +5557,20 @@ _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
*/
ioc->page_size = 1 << facts->CurrentHostPageSize;
if (ioc->page_size == 1) {
- pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
- "default host page size to 4k\n", ioc->name);
+ ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
}
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
- ioc->name, facts->CurrentHostPageSize));
-
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "hba queue depth(%d), max chains per io(%d)\n",
- ioc->name, facts->RequestCredit,
- facts->MaxChainDepth));
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "request frame size(%d), reply frame size(%d)\n", ioc->name,
- facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ dinitprintk(ioc,
+ ioc_info(ioc, "CurrentHostPageSize(%d)\n",
+ facts->CurrentHostPageSize));
+
+ dinitprintk(ioc,
+ ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
+ facts->RequestCredit, facts->MaxChainDepth));
+ dinitprintk(ioc,
+ ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
+ facts->IOCRequestFrameSize * 4,
+ facts->ReplyFrameSize * 4));
return 0;
}
@@ -5696,8 +5590,7 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
u16 ioc_status;
u32 reply_post_free_array_sz = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
@@ -5763,15 +5656,14 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
if (r != 0) {
- pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
- ioc->name, __func__, r);
+ ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
return r;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
mpi_reply.IOCLogInfo) {
- pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: failed\n", __func__);
r = -EIO;
}
@@ -5842,18 +5734,16 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
u16 smid;
u16 ioc_status;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -5867,8 +5757,7 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2PortEnableRequest_t)/4);
if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
@@ -5881,16 +5770,15 @@ _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
mpi_reply = ioc->port_enable_cmds.reply;
ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
- ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
+ __func__, ioc_status);
r = -EFAULT;
goto out;
}
out:
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
- "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
return r;
}
@@ -5906,18 +5794,16 @@ mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
Mpi2PortEnableRequest_t *mpi_request;
u16 smid;
- pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+ ioc_info(ioc, "sending port enable !!\n");
if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
@@ -6020,19 +5906,16 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
int r = 0;
int i;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
- pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: internal command already in use\n", __func__);
return -EAGAIN;
}
smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return -EAGAIN;
}
ioc->base_cmds.status = MPT3_CMD_PENDING;
@@ -6049,8 +5932,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2EventNotificationRequest_t)/4);
if (ioc->base_cmds.status & MPT3_CMD_RESET)
@@ -6058,8 +5940,7 @@ _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
else
r = -ETIME;
} else
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
- ioc->name, __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
ioc->base_cmds.status = MPT3_CMD_NOT_USED;
return r;
}
@@ -6115,18 +5996,16 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
u32 count;
u32 hcb_size;
- pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+ ioc_info(ioc, "sending diag reset !!\n");
- drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
count = 0;
do {
/* Write magic sequence to WriteSequence register
* Loop until in diagnostic mode
*/
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "write magic sequence\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
@@ -6142,16 +6021,15 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
goto out;
host_diagnostic = readl(&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
- ioc->name, count, host_diagnostic));
+ drsprintk(ioc,
+ ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ count, host_diagnostic));
} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
hcb_size = readl(&ioc->chip->HCBSize);
- drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
&ioc->chip->HostDiagnostic);
@@ -6174,43 +6052,38 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "restart the adapter assuming the HCB Address points to good F/W\n",
- ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
writel(host_diagnostic, &ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "re-enable the HCDW\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
&ioc->chip->HCBSize);
}
- drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
- ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
&ioc->chip->HostDiagnostic);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "disable writes to the diagnostic register\n", ioc->name));
+ drsprintk(ioc,
+ ioc_info(ioc, "disable writes to the diagnostic register\n"));
writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
- drsprintk(ioc, pr_info(MPT3SAS_FMT
- "Wait for FW to go to the READY state\n", ioc->name));
+ drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
if (ioc_state) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
goto out;
}
- pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "diag reset: SUCCESS\n");
return 0;
out:
- pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ ioc_err(ioc, "diag reset: FAILED\n");
return -EFAULT;
}
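A note on the wrappers seen throughout this hunk: drsprintk(), dinitprintk(), dhsprintk() and friends take the entire ioc_info() call as their second argument and execute it only when the matching bit is set in ioc->logging_level. A minimal sketch of the idea, assuming an MPT_DEBUG_RESET flag bit — the real macros live in mpt3sas_debug.h and may differ in detail:

/* hypothetical sketch, not the verbatim driver macro */
#define drsprintk(IOC, CMD)					\
do {								\
	if ((IOC)->logging_level & MPT_DEBUG_RESET)		\
		CMD;						\
} while (0)

So drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n")) prints only when reset debugging is enabled; the conversion to ioc_info() changes the message plumbing, not this gating.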
@@ -6228,15 +6101,15 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
int rc;
int count;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
if (ioc->pci_error_recovery)
return 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
- dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
- ioc->name, __func__, ioc_state));
+ dhsprintk(ioc,
+ ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
+ __func__, ioc_state));
/* if in RESET state, it should move to READY state shortly */
count = 0;
@@ -6244,9 +6117,8 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
while ((ioc_state & MPI2_IOC_STATE_MASK) !=
MPI2_IOC_STATE_READY) {
if (count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed going to ready state (ioc_state=0x%x)\n",
- ioc->name, __func__, ioc_state);
+ ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
+ __func__, ioc_state);
return -EFAULT;
}
ssleep(1);
@@ -6258,9 +6130,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
return 0;
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n",
- ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
goto issue_diag_reset;
}
@@ -6304,8 +6174,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
struct adapter_reply_queue *reply_q;
Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* clean the delayed target reset list */
list_for_each_entry_safe(delayed_tr, delayed_tr_next,
@@ -6465,8 +6334,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
@@ -6494,8 +6362,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
int r, i;
int cpu_id, last_cpu_id = 0;
- dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
/* setup cpu_msix_table */
ioc->cpu_count = num_online_cpus();
@@ -6505,9 +6372,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
ioc->reply_queue_count = 1;
if (!ioc->cpu_msix_table) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT
- "allocation for cpu_msix_table failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
@@ -6516,9 +6382,8 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
sizeof(resource_size_t *), GFP_KERNEL);
if (!ioc->reply_post_host_index) {
- dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
- "for reply_post_host_index failed!!!\n",
- ioc->name));
+ dfailprintk(ioc,
+ ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
r = -ENOMEM;
goto out_free_resources;
}
@@ -6747,8 +6612,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
- dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
mpt3sas_base_stop_watchdog(ioc);
mpt3sas_base_free_resources(ioc);
@@ -6781,8 +6645,7 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_pre_reset_handler(ioc);
mpt3sas_ctl_pre_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -6793,8 +6656,7 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_after_reset_handler(ioc);
mpt3sas_ctl_after_reset_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
ioc->transport_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
@@ -6835,8 +6697,7 @@ static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
mpt3sas_scsih_reset_done_handler(ioc);
mpt3sas_ctl_reset_done_handler(ioc);
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
/**
@@ -6883,12 +6744,10 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 is_fault = 0, is_trigger = 0;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
if (ioc->pci_error_recovery) {
- pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
r = 0;
goto out_unlocked;
}
@@ -6942,8 +6801,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
_base_reset_done_handler(ioc);
out:
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
- ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: %s\n",
+ __func__, r == 0 ? "SUCCESS" : "FAILED"));
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
ioc->shost_recovery = 0;
@@ -6959,7 +6819,6 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_trigger_master(ioc,
MASTER_TRIGGER_ADAPTER_RESET);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
return r;
}
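Every hunk in mpt3sas_base.c above is the same mechanical substitution, and it closes a real hazard: with the old open-coded form, forgetting the ioc->name argument silently shifted every following vararg. A condensed before/after, using the port-enable message from this file as the example:

/* before: caller must remember to pass ioc->name for the "%s: " prefix */
pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name,
	r == 0 ? "SUCCESS" : "FAILED");

/* after: the macro supplies (ioc)->name, so it cannot be omitted */
ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");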
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 96dc15e90bd8..8f1d6b071b39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -158,7 +158,14 @@ struct mpt3sas_nvme_cmd {
/*
* logging format
*/
-#define MPT3SAS_FMT "%s: "
+#define ioc_err(ioc, fmt, ...) \
+ pr_err("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_notice(ioc, fmt, ...) \
+ pr_notice("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_warn(ioc, fmt, ...) \
+ pr_warn("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define ioc_info(ioc, fmt, ...) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
/*
* WarpDrive Specific Log codes
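These four macros mirror the kernel's pr_<level>() family while folding in the adapter-name prefix that MPT3SAS_FMT used to carry. Extending the pattern is one line per level; for instance, a debug variant would look like the following (not part of this patch, shown only to illustrate the shape):

#define ioc_dbg(ioc, fmt, ...) \
	pr_debug("%s: " fmt, (ioc)->name, ##__VA_ARGS__)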
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index d29a2dcc7d0e..02209447f4ef 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -175,20 +175,18 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT
- "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
- ioc->name, calling_function_name, desc,
- mpi_request->Header.PageNumber, mpi_request->Action,
- le32_to_cpu(mpi_request->PageAddress), smid);
+ ioc_info(ioc, "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
/**
@@ -210,9 +208,8 @@ _config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
&mem->page_dma, GFP_KERNEL);
if (!mem->page) {
- pr_err(MPT3SAS_FMT
- "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
- ioc->name, __func__, mem->sz);
+ ioc_err(ioc, "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ __func__, mem->sz);
r = -ENOMEM;
}
} else { /* use tmp buffer if less than 512 bytes */
@@ -313,8 +310,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mutex_lock(&ioc->config_cmds.mutex);
if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: config_cmd in use\n", __func__);
mutex_unlock(&ioc->config_cmds.mutex);
return -EAGAIN;
}
@@ -362,34 +358,30 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
r = -EFAULT;
goto free_mem;
}
- pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: attempting retry (%d)\n",
+ __func__, retry_count);
}
wait_state_count = 0;
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EFAULT;
goto free_mem;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->config_cmds.status = MPT3_CMD_NOT_USED;
r = -EAGAIN;
goto free_mem;
@@ -429,12 +421,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
(mpi_reply->Header.PageType & 0xF)) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested PageType(0x%02x)" \
- " Reply PageType(0x%02x)\n", \
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (mpi_reply->Header.PageType & 0xF));
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ mpi_reply->Header.PageType & 0xF);
}
if (((mpi_request->Header.PageType & 0xF) ==
@@ -442,19 +432,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
mpi_request->ExtPageType != mpi_reply->ExtPageType) {
_debug_dump_mf(mpi_request, ioc->request_sz/4);
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
- panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \
- " mpi_reply mismatch: Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__, mpi_request->ExtPageType,
- mpi_reply->ExtPageType);
+ panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
}
ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
& MPI2_IOCSTATUS_MASK;
}
if (retry_count)
- pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
- ioc->name, __func__, retry_count);
+ ioc_info(ioc, "%s: retry (%d) completed!!\n",
+ __func__, retry_count);
if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
config_page && mpi_request->Action ==
@@ -469,14 +458,10 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested PageType(0x%02x)"
- " Reply PageType(0x%02x)\n",
- ioc->name, __func__,
- (mpi_request->Header.PageType & 0xF),
- (p[3] & 0xF));
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->Header.PageType & 0xF,
+ p[3] & 0xF);
}
if (((mpi_request->Header.PageType & 0xF) ==
@@ -486,13 +471,9 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
_debug_dump_reply(mpi_reply, ioc->request_sz/4);
_debug_dump_config(p, min_t(u16, mem.sz,
config_page_sz)/4);
- panic(KERN_WARNING MPT3SAS_FMT
- "%s: Firmware BUG:" \
- " config page mismatch:"
- " Requested ExtPageType(0x%02x)"
- " Reply ExtPageType(0x%02x)\n",
- ioc->name, __func__,
- mpi_request->ExtPageType, p[6]);
+ panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
}
}
memcpy(config_page, mem.page, min_t(u16, mem.sz,
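Besides the ioc_<level>() conversion, the three panic() calls in _config_request() drop the KERN_WARNING prefix. panic() does not parse printk log levels, so the old prefix merely embedded the level escape bytes in the panic banner; the reworked strings fold the adapter name in through a plain "%s: ", as in this condensed form of the first call above:

/* panic() ignores printk levels, so no KERN_* prefix belongs here */
panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
      ioc->name, __func__,
      mpi_request->Header.PageType & 0xF, mpi_reply->Header.PageType & 0xF);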
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 5e8c059ce2c9..4afa597cbfba 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -185,17 +185,15 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
if (!desc)
return;
- pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
- ioc->name, calling_function_name, desc, smid);
+ ioc_info(ioc, "%s: %s, smid(%d)\n", calling_function_name, desc, smid);
if (!mpi_reply)
return;
if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
mpi_request->Function ==
@@ -208,38 +206,32 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
sas_device = mpt3sas_get_sdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (sas_device) {
- pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id, sas_device->slot);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address,
+ sas_device->phy);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
sas_device_put(sas_device);
}
if (!sas_device) {
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(scsi_reply->DevHandle));
if (pcie_device) {
- pr_warn(MPT3SAS_FMT
- "\tWWID(0x%016llx), port(%d)\n", ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_warn(ioc, "\tWWID(0x%016llx), port(%d)\n",
+ (unsigned long long)pcie_device->wwid,
+ pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_warn(MPT3SAS_FMT
- "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
- ioc->name, (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_warn(ioc, "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
pcie_device_put(pcie_device);
}
}
if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
- pr_info(MPT3SAS_FMT
- "\tscsi_state(0x%02x), scsi_status"
- "(0x%02x)\n", ioc->name,
- scsi_reply->SCSIState,
- scsi_reply->SCSIStatus);
+ ioc_info(ioc, "\tscsi_state(0x%02x), scsi_status(0x%02x)\n",
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
}
}
@@ -466,8 +458,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
int i;
u8 issue_reset;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_REGISTERED))
@@ -487,8 +478,7 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
ioc->ctl_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
@@ -506,8 +496,7 @@ void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
int i;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
if (!(ioc->diag_buffer_status[i] &
@@ -612,10 +601,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
}
if (!found) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), no active mid!!\n",
- ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ desc, le16_to_cpu(tm_request->DevHandle),
+ lun));
tm_reply = ioc->ctl_cmds.reply;
tm_reply->DevHandle = tm_request->DevHandle;
tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -631,10 +620,10 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
return 1;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
- desc, le16_to_cpu(tm_request->DevHandle), lun,
- le16_to_cpu(tm_request->TaskMID)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), lun(%d), task_mid(%d)\n",
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
return 0;
}
@@ -672,8 +661,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
issue_reset = 0;
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -682,28 +670,23 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
ret = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name,
- __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
if (!mpi_request) {
- pr_err(MPT3SAS_FMT
- "%s: failed obtaining a memory for mpi_request\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a memory for mpi_request\n",
+ __func__);
ret = -ENOMEM;
goto out;
}
@@ -726,8 +709,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ret = -EAGAIN;
goto out;
}
@@ -762,8 +744,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* obtain dma-able memory for data transfer */
if (data_out_sz) /* WRITE */ {
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
- &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -782,8 +764,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
}
if (data_in_sz) /* READ */ {
- data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
- &data_in_dma);
+ data_in = dma_alloc_coherent(&ioc->pdev->dev, data_in_sz,
+ &data_in_dma, GFP_KERNEL);
if (!data_in) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -823,9 +805,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->build_nvme_prp(ioc, smid, nvme_encap_request,
data_out_dma, data_out_sz, data_in_dma, data_in_sz);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "handle(0x%04x) :"
- "ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x): ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -843,9 +825,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpt3sas_base_get_sense_buffer_dma(ioc, smid);
memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -863,10 +845,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementRequest_t *tm_request =
(Mpi2SCSITaskManagementRequest_t *)request;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
- ioc->name,
- le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+ dtmprintk(ioc,
+ ioc_info(ioc, "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ le16_to_cpu(tm_request->DevHandle),
+ tm_request->TaskType));
ioc->got_task_abort_from_ioctl = 1;
if (tm_request->TaskType ==
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
@@ -881,9 +863,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
ioc->got_task_abort_from_ioctl = 0;
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -929,9 +911,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_SATA_PASSTHROUGH:
{
if (test_bit(device_handle, ioc->device_remove_in_progress)) {
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) :ioctl failed due to device removal in progress\n",
- ioc->name, device_handle));
+ dtmprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) :ioctl failed due to device removal in progress\n",
+ device_handle));
mpt3sas_base_free_smid(ioc, smid);
ret = -EINVAL;
goto out;
@@ -1017,12 +999,10 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
Mpi2SCSITaskManagementReply_t *tm_reply =
(Mpi2SCSITaskManagementReply_t *)mpi_reply;
- pr_info(MPT3SAS_FMT "TASK_MGMT: " \
- "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
- "TerminationCount(0x%08x)\n", ioc->name,
- le16_to_cpu(tm_reply->IOCStatus),
- le32_to_cpu(tm_reply->IOCLogInfo),
- le32_to_cpu(tm_reply->TerminationCount));
+ ioc_info(ioc, "TASK_MGMT: IOCStatus(0x%04x), IOCLogInfo(0x%08x), TerminationCount(0x%08x)\n",
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
}
/* copy out xdata to user */
@@ -1054,9 +1034,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || mpi_request->Function ==
MPI2_FUNCTION_NVME_ENCAPSULATED)) {
if (karg.sense_data_ptr == NULL) {
- pr_info(MPT3SAS_FMT "Response buffer provided"
- " by application is NULL; Response data will"
- " not be returned.\n", ioc->name);
+ ioc_info(ioc, "Response buffer provided by application is NULL; Response data will not be returned\n");
goto out;
}
sz_arg = (mpi_request->Function ==
@@ -1079,9 +1057,8 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
mpi_request->Function ==
MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
- pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
- ioc->name,
- le16_to_cpu(mpi_request->FunctionDependent1));
+ ioc_info(ioc, "issue target reset: handle = (0x%04x)\n",
+ le16_to_cpu(mpi_request->FunctionDependent1));
mpt3sas_halt_firmware(ioc);
pcie_device = mpt3sas_get_pdev_by_handle(ioc,
le16_to_cpu(mpi_request->FunctionDependent1));
@@ -1106,11 +1083,11 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
/* free memory associated with sg buffers */
if (data_in)
- pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ dma_free_coherent(&ioc->pdev->dev, data_in_sz, data_in,
data_in_dma);
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out,
data_out_dma);
kfree(mpi_request);
@@ -1128,8 +1105,8 @@ _ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
{
struct mpt3_ioctl_iocinfo karg;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
memset(&karg, 0 , sizeof(karg));
if (ioc->pfacts)
@@ -1188,8 +1165,8 @@ _ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
memcpy(karg.event_types, ioc->event_type,
@@ -1219,8 +1196,8 @@ _ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
memcpy(ioc->event_type, karg.event_types,
MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
@@ -1259,8 +1236,8 @@ _ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
number_bytes = karg.hdr.max_data_size -
sizeof(struct mpt3_ioctl_header);
@@ -1306,12 +1283,11 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
ioc->is_driver_loading)
return -EAGAIN;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
+ __func__));
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_info(MPT3SAS_FMT "host reset: %s\n",
- ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
return 0;
}
@@ -1440,8 +1416,8 @@ _ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
rc = _ctl_btdh_search_sas_device(ioc, &karg);
if (!rc)
@@ -1512,53 +1488,46 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
u32 ioc_state;
u8 issue_reset = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
buffer_type = diag_register->buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) {
- pr_err(MPT3SAS_FMT
- "%s: already has a registered buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (diag_register->requested_buffer_size % 4) {
- pr_err(MPT3SAS_FMT
- "%s: the requested_buffer_size is not 4 byte aligned\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: the requested_buffer_size is not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1580,9 +1549,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data) {
request_data_dma = ioc->diag_buffer_dma[buffer_type];
if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[buffer_type],
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
request_data = NULL;
}
}
@@ -1590,12 +1559,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (request_data == NULL) {
ioc->diag_buffer_sz[buffer_type] = 0;
ioc->diag_buffer_dma[buffer_type] = 0;
- request_data = pci_alloc_consistent(
- ioc->pdev, request_data_sz, &request_data_dma);
+ request_data = dma_alloc_coherent(&ioc->pdev->dev,
+ request_data_sz, &request_data_dma, GFP_KERNEL);
if (request_data == NULL) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
- " for diag buffers, requested size(%d)\n",
- ioc->name, __func__, request_data_sz);
+ ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n",
+ __func__, request_data_sz);
mpt3sas_base_free_smid(ioc, smid);
return -ENOMEM;
}
@@ -1612,11 +1580,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
- ioc->name, __func__, request_data,
- (unsigned long long)request_data_dma,
- le32_to_cpu(mpi_request->BufferLength)));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
mpi_request->ProductSpecific[i] =
@@ -1637,8 +1605,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1649,13 +1616,11 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -1666,7 +1631,7 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
out:
if (rc && request_data)
- pci_free_consistent(ioc->pdev, request_data_sz,
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
request_data, request_data_dma);
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
@@ -1689,8 +1654,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
if (bits_to_register & 1) {
- pr_info(MPT3SAS_FMT "registering trace buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering trace buffer support\n");
ioc->diag_trigger_master.MasterData =
(MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
@@ -1701,8 +1665,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 2) {
- pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering snapshot buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1711,8 +1674,7 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
}
if (bits_to_register & 4) {
- pr_info(MPT3SAS_FMT "registering extended buffer support\n",
- ioc->name);
+ ioc_info(ioc, "registering extended buffer support\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
/* register for 2MB buffers */
diag_register.requested_buffer_size = 2 * (1024 * 1024);
@@ -1768,51 +1730,46 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) has not been released\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) has not been released\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_data_sz = ioc->diag_buffer_sz[buffer_type];
request_data_dma = ioc->diag_buffer_dma[buffer_type];
- pci_free_consistent(ioc->pdev, request_data_sz,
- request_data, request_data_dma);
+ dma_free_coherent(&ioc->pdev->dev, request_data_sz,
+ request_data, request_data_dma);
ioc->diag_buffer[buffer_type] = NULL;
ioc->diag_buffer_status[buffer_type] = 0;
return 0;
@@ -1841,41 +1798,37 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
karg.application_flags = 0;
buffer_type = karg.buffer_type;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id & 0xffffff00) {
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -1897,9 +1850,8 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
- pr_err(MPT3SAS_FMT
- "%s: unable to write mpt3_diag_query data @ %p\n",
- ioc->name, __func__, arg);
+ ioc_err(ioc, "%s: unable to write mpt3_diag_query data @ %p\n",
+ __func__, arg);
return -EFAULT;
}
return 0;
@@ -1923,8 +1875,8 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
u32 ioc_state;
int rc;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
rc = 0;
*issue_reset = 0;
@@ -1935,24 +1887,22 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
MPT3_DIAG_BUFFER_IS_REGISTERED)
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: skipping due to FAULT state\n", ioc->name,
- __func__));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: skipping due to FAULT state\n",
+ __func__));
rc = -EAGAIN;
goto out;
}
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1982,8 +1932,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -1994,13 +1943,11 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_RELEASED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2033,47 +1980,41 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is not registered\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+ __func__, buffer_type);
return -EINVAL;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
if (ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) {
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is already released\n",
- ioc->name, __func__,
- buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n",
+ __func__, buffer_type);
return 0;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
@@ -2084,9 +2025,8 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
MPT3_DIAG_BUFFER_IS_RELEASED;
ioc->diag_buffer_status[buffer_type] &=
~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
- pr_err(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) was released due to host reset\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: buffer_type(0x%02x) was released due to host reset\n",
+ __func__, buffer_type);
return 0;
}
@@ -2124,38 +2064,34 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EFAULT;
}
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
- __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s\n",
+ __func__));
buffer_type = karg.unique_id & 0x000000ff;
if (!_ctl_diag_capability(ioc, buffer_type)) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have capability for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -EPERM;
}
if (karg.unique_id != ioc->unique_id[buffer_type]) {
- pr_err(MPT3SAS_FMT
- "%s: unique_id(0x%08x) is not registered\n",
- ioc->name, __func__, karg.unique_id);
+ ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n",
+ __func__, karg.unique_id);
return -EINVAL;
}
request_data = ioc->diag_buffer[buffer_type];
if (!request_data) {
- pr_err(MPT3SAS_FMT
- "%s: doesn't have buffer for buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type);
+ ioc_err(ioc, "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ __func__, buffer_type);
return -ENOMEM;
}
request_size = ioc->diag_buffer_sz[buffer_type];
if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
- pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
- "or bytes_to_read are not 4 byte aligned\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: either the starting_offset or bytes_to_read are not 4 byte aligned\n",
+ __func__);
return -EINVAL;
}
@@ -2163,10 +2099,10 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
return -EINVAL;
diag_data = (void *)(request_data + karg.starting_offset);
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
- ioc->name, __func__,
- diag_data, karg.starting_offset, karg.bytes_to_read));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ __func__, diag_data, karg.starting_offset,
+ karg.bytes_to_read));
/* Truncate data on requests that are too large */
if ((diag_data + karg.bytes_to_read < diag_data) ||
@@ -2177,39 +2113,36 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (copy_to_user((void __user *)uarg->diagnostic_data,
diag_data, copy_size)) {
- pr_err(MPT3SAS_FMT
- "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
- ioc->name, __func__, diag_data);
+ ioc_err(ioc, "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ __func__, diag_data);
return -EFAULT;
}
if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
return 0;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: Reregister buffer_type(0x%02x)\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: Reregister buffer_type(0x%02x)\n",
+ __func__, buffer_type));
if ((ioc->diag_buffer_status[buffer_type] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: buffer_type(0x%02x) is still registered\n",
- ioc->name, __func__, buffer_type));
+ dctlprintk(ioc,
+ ioc_info(ioc, "%s: buffer_type(0x%02x) is still registered\n",
+ __func__, buffer_type));
return 0;
}
/* Get a free request frame and save the message context.
*/
if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: ctl_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -2247,8 +2180,7 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
/* process the completed Reply Message Frame */
if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
- pr_err(MPT3SAS_FMT "%s: no reply message\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: no reply message\n", __func__);
rc = -EFAULT;
goto out;
}
@@ -2259,13 +2191,11 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
ioc->diag_buffer_status[buffer_type] |=
MPT3_DIAG_BUFFER_IS_REGISTERED;
- dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
- ioc->name, __func__));
+ dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__));
} else {
- pr_info(MPT3SAS_FMT
- "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
- ioc->name, __func__,
- ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply->IOCLogInfo));
rc = -EFAULT;
}
@@ -2450,8 +2380,9 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
ret = _ctl_diag_read_buffer(ioc, arg);
break;
default:
- dctlprintk(ioc, pr_info(MPT3SAS_FMT
- "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ dctlprintk(ioc,
+ ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
+ cmd));
break;
}
@@ -2840,8 +2771,8 @@ _ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->logging_level = val;
- pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
- ioc->logging_level);
+ ioc_info(ioc, "logging_level=%08xh\n",
+ ioc->logging_level);
return strlen(buf);
}
static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
@@ -2877,8 +2808,8 @@ _ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
return -EINVAL;
ioc->fwfault_debug = val;
- pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
- ioc->fwfault_debug);
+ ioc_info(ioc, "fwfault_debug=%d\n",
+ ioc->fwfault_debug);
return strlen(buf);
}
static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
@@ -2958,8 +2889,8 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
ssize_t rc = 0;
if (!ioc->is_warpdrive) {
- pr_err(MPT3SAS_FMT "%s: BRM attribute is only for"
- " warpdrive\n", ioc->name, __func__);
+ ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
+ __func__);
goto out;
}
/* pci_access_mutex lock acquired by sysfs show path */
@@ -2973,30 +2904,28 @@ _ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr,
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
- pr_err(MPT3SAS_FMT "%s: failed allocating memory "
- "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz);
+ ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
+ __func__, sz);
goto out;
}
if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) !=
0) {
- pr_err(MPT3SAS_FMT
- "%s: failed reading iounit_pg3\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: failed reading iounit_pg3\n",
+ __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with "
- "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status);
+ ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
+ __func__, ioc_status);
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
- pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than "
- "25 entries, detected (%d) entries\n", ioc->name, __func__,
- io_unit_pg3->GPIOCount);
+ ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
+ __func__, io_unit_pg3->GPIOCount);
goto out;
}
@@ -3039,17 +2968,15 @@ _ctl_host_trace_buffer_size_show(struct device *cdev,
struct DIAG_BUFFER_START *request_data;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3089,17 +3016,15 @@ _ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
u32 size;
if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
- pr_err(MPT3SAS_FMT
- "%s: host_trace_buffer is not registered\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: host_trace_buffer is not registered\n",
+ __func__);
return 0;
}
@@ -3188,8 +3113,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
goto out;
memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
- pr_info(MPT3SAS_FMT "posting host trace buffers\n",
- ioc->name);
+ ioc_info(ioc, "posting host trace buffers\n");
diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
diag_register.requested_buffer_size = (1024 * 1024);
diag_register.unique_id = 0x7075900;
@@ -3205,8 +3129,7 @@ _ctl_host_trace_buffer_enable_store(struct device *cdev,
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED))
goto out;
- pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
- ioc->name);
+ ioc_info(ioc, "releasing host trace buffer\n");
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
@@ -3658,8 +3581,10 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate)
if ((ioc->diag_buffer_status[i] &
MPT3_DIAG_BUFFER_IS_RELEASED))
continue;
- pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
- ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i],
+ ioc->diag_buffer_dma[i]);
ioc->diag_buffer[i] = NULL;
ioc->diag_buffer_status[i] = 0;
}
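mpt3sas_ctl.c also migrates its ioctl buffers from the legacy PCI DMA wrappers to the generic DMA API. pci_alloc_consistent() was a shim around dma_alloc_coherent() that always allocated with GFP_ATOMIC; the generic call takes the struct device plus an explicit gfp mask, letting these sleepable ioctl paths ask for GFP_KERNEL. The mapping, schematically:

/* legacy wrapper: struct pci_dev, implicit GFP_ATOMIC */
data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, &data_out_dma);

/* generic DMA API: struct device, explicit gfp */
data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz,
			      &data_out_dma, GFP_KERNEL);
dma_free_coherent(&ioc->pdev->dev, data_out_sz, data_out, data_out_dma);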
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cfd420f..03c52847ed07 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -418,8 +418,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
@@ -442,10 +442,8 @@ _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
return -ENXIO;
/* else error case */
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
@@ -508,10 +506,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_boot_device.device = device;
ioc->req_boot_device.channel = channel;
}
@@ -523,10 +520,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.ReqAltBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.RequestedAltBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: req_alt_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: req_alt_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->req_alt_boot_device.device = device;
ioc->req_alt_boot_device.channel = channel;
}
@@ -538,10 +534,9 @@ _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
(ioc->bios_pg2.CurrentBootDeviceForm &
MPI2_BIOSPAGE2_FORM_MASK),
&ioc->bios_pg2.CurrentBootDevice)) {
- dinitprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: current_boot_device(0x%016llx)\n",
- ioc->name, __func__,
- (unsigned long long)sas_address));
+ dinitprintk(ioc,
+ ioc_info(ioc, "%s: current_boot_device(0x%016llx)\n",
+ __func__, (u64)sas_address));
ioc->current_boot_device.device = device;
ioc->current_boot_device.channel = channel;
}
@@ -752,19 +747,16 @@ _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
sas_device->chassis_slot);
} else {
if (sas_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "enclosure logical id(0x%016llx), slot(%d) \n",
- ioc->name, (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot);
+ ioc_info(ioc, "enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot);
if (sas_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name);
+ ioc_info(ioc, "enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name);
if (sas_device->is_chassis_slot_valid)
- pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
- ioc->name, sas_device->chassis_slot);
+ ioc_info(ioc, "chassis slot(0x%04x)\n",
+ sas_device->chassis_slot);
}
}
@@ -784,10 +776,8 @@ _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!sas_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
@@ -872,10 +862,10 @@ _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -923,10 +913,10 @@ _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- __func__, sas_device->handle,
- (unsigned long long)sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__, sas_device->handle,
+ (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -1073,21 +1063,16 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
if (!pcie_device)
return;
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
if (!list_empty(&pcie_device->list)) {
@@ -1146,20 +1131,21 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1191,20 +1177,21 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle (0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure logical id(0x%016llx), slot( %d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enclosure level(0x%04x), connector name( %s)\n",
+ __func__, pcie_device->enclosure_level,
+ pcie_device->connector_name));
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
@@ -1304,9 +1291,10 @@ _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- raid_device->handle, (unsigned long long)raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ raid_device->handle, (u64)raid_device->wwid));
spin_lock_irqsave(&ioc->raid_device_lock, flags);
list_add_tail(&raid_device->list, &ioc->raid_device_list);
@@ -1857,16 +1845,16 @@ _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -1952,8 +1940,8 @@ scsih_get_resync(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
percent_complete = 0;
goto out;
}
@@ -2006,8 +1994,8 @@ scsih_get_state(struct device *dev)
if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
sizeof(Mpi2RaidVolPage0_t))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -2103,9 +2091,9 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2114,17 +2102,17 @@ _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
kfree(vol_pg0);
return 1;
}
@@ -2215,16 +2203,16 @@ scsih_slave_configure(struct scsi_device *sdev)
raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
if (!raid_device) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (_scsih_get_volume_capabilities(ioc, raid_device)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2308,16 +2296,16 @@ scsih_slave_configure(struct scsi_device *sdev)
if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
if (mpt3sas_config_get_volume_handle(ioc, handle,
&volume_handle)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
volume_handle, &volume_wwid)) {
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
}
@@ -2329,9 +2317,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!pcie_device) {
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
- __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2377,9 +2365,9 @@ scsih_slave_configure(struct scsi_device *sdev)
sas_device_priv_data->sas_target->sas_address);
if (!sas_device) {
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
- dfailprintk(ioc, pr_warn(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
- __func__));
+ dfailprintk(ioc,
+ ioc_warn(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
return 1;
}
@@ -2515,8 +2503,7 @@ _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
desc = "unknown";
break;
}
- pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
- ioc->name, response_code, desc);
+ ioc_warn(ioc, "response_code(0x%01x): %s\n", response_code, desc);
}
/**
@@ -2640,22 +2627,19 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
lockdep_assert_held(&ioc->tm_cmds.mutex);
if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
- pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: tm_cmd busy!!!\n", __func__);
return FAILED;
}
if (ioc->shost_recovery || ioc->remove_host ||
ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return FAILED;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
if (ioc_state & MPI2_DOORBELL_USED) {
- dhsprintk(ioc, pr_info(MPT3SAS_FMT
- "unexpected doorbell active!\n", ioc->name));
+ dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
}
@@ -2669,14 +2653,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
return FAILED;
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
- ioc->name, handle, type, smid_task, timeout, tr_method));
+ dtmprintk(ioc,
+ ioc_info(ioc, "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
+ handle, type, smid_task, timeout, tr_method));
ioc->tm_cmds.status = MPT3_CMD_PENDING;
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->tm_cmds.smid = smid;
@@ -2709,11 +2692,11 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
mpi_reply = ioc->tm_cmds.reply;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
- "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dtmprintk(ioc,
+ ioc_info(ioc, "complete tm: ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
if (ioc->logging_level & MPT_DEBUG_TM) {
_scsih_response_code(ioc, mpi_reply->ResponseCode);
if (mpi_reply->IOCStatus)
@@ -3060,13 +3043,11 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
- ioc->name, scmd);
+ ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
- pr_info(MPT3SAS_FMT "Blocking the host reset\n",
- ioc->name);
+ ioc_info(ioc, "Blocking the host reset\n");
r = FAILED;
goto out;
}
@@ -3074,8 +3055,8 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
- ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
}
@@ -3567,18 +3548,16 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
u8 tr_method = 0;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery: handle(0x%04x)\n",
+ __func__, handle));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational: handle(0x%04x)\n",
- __func__, ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational: handle(0x%04x)\n",
+ __func__, handle));
return;
}
@@ -3614,39 +3593,31 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
}
if (sas_target_priv_data) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle,
- (unsigned long long)sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (u64)sas_address));
if (sas_device) {
if (sas_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:enclosure logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- sas_device->enclosure_logical_id,
- sas_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)sas_device->enclosure_logical_id,
+ sas_device->slot));
if (sas_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: enclosure "
- "level(0x%04x), connector name( %s)\n",
- ioc->name, sas_device->enclosure_level,
- sas_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: enclosure level(0x%04x), connector name( %s)\n",
+ sas_device->enclosure_level,
+ sas_device->connector_name));
} else if (pcie_device) {
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: logical "
- "id(0x%016llx), slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag:, enclosure "
- "level(0x%04x), "
- "connector name( %s)\n", ioc->name,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag:, enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
}
_scsih_ublock_io_device(ioc, sas_address);
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
@@ -3660,16 +3631,15 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
goto out;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3717,39 +3687,39 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
struct _sc_list *delayed_sc;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n", __func__,
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return 1;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n", __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
return 0;
}
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
if (!smid_sas_ctrl) {
@@ -3759,16 +3729,15 @@ _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
INIT_LIST_HEAD(&delayed_sc->list);
delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:sc:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:sc:handle(0x%04x), (open)\n",
+ handle));
return _scsih_check_for_pending_tm(ioc, smid);
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid_sas_ctrl,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid_sas_ctrl, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -3803,20 +3772,19 @@ _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (likely(mpi_reply)) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_complete:handle(0x%04x), (open) "
- "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
- le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
if (le16_to_cpu(mpi_reply->IOCStatus) ==
MPI2_IOCSTATUS_SUCCESS) {
clear_bit(le16_to_cpu(mpi_reply->DevHandle),
ioc->device_remove_in_progress);
}
} else {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
}
@@ -3839,9 +3807,9 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _tr_list *delayed_tr;
if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return;
}
@@ -3853,16 +3821,15 @@ _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n",
- ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
return;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_tr_volume_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_tr_volume_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
@@ -3892,33 +3859,32 @@ _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host reset in progress!\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host reset in progress!\n",
+ __func__));
return 1;
}
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
handle = le16_to_cpu(mpi_request_tm->DevHandle);
if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
- dewtprintk(ioc, pr_err(MPT3SAS_FMT
- "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
- ioc->name, handle,
- le16_to_cpu(mpi_reply->DevHandle), smid));
+ dewtprintk(ioc,
+ ioc_err(ioc, "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ handle, le16_to_cpu(mpi_reply->DevHandle),
+ smid));
return 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
- "loginfo(0x%08x), completed(%d)\n", ioc->name,
- handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo),
- le32_to_cpu(mpi_reply->TerminationCount)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), loginfo(0x%08x), completed(%d)\n",
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
return _scsih_check_for_pending_tm(ioc, smid);
}
@@ -3948,10 +3914,9 @@ _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
- ioc->name, le16_to_cpu(event), smid,
- ioc->base_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
+ le16_to_cpu(event), smid, ioc->base_cb_idx));
ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
@@ -3981,21 +3946,21 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
unsigned long flags;
if (ioc->remove_host) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host has been removed\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host has been removed\n",
+ __func__));
return;
} else if (ioc->pci_error_recovery) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host in pci error recovery\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host in pci error recovery\n",
+ __func__));
return;
}
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: host is not operational\n",
- __func__, ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: host is not operational\n",
+ __func__));
return;
}
@@ -4007,10 +3972,9 @@ _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
- ioc->name, handle, smid,
- ioc->tm_sas_control_cb_idx));
+ dewtprintk(ioc,
+ ioc_info(ioc, "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ handle, smid, ioc->tm_sas_control_cb_idx));
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
@@ -4171,8 +4135,8 @@ _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
expander_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag\n"));
fw_event->ignore = 1;
}
}
@@ -4243,9 +4207,8 @@ _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
switch_handle) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting ignoring flag for switch event\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting ignoring flag for switch event\n"));
fw_event->ignore = 1;
}
}
@@ -4274,10 +4237,9 @@ _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data =
raid_device->starget->hostdata;
sas_target_priv_data->deleted = 1;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "setting delete flag: handle(0x%04x), "
- "wwid(0x%016llx)\n", ioc->name, handle,
- (unsigned long long) raid_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "setting delete flag: handle(0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)raid_device->wwid));
}
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -4379,9 +4341,9 @@ _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
INIT_LIST_HEAD(&delayed_tr->list);
delayed_tr->handle = handle;
list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
- handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "DELAYED:tr:handle(0x%04x), (open)\n",
+ handle));
} else
_scsih_tm_tr_send(ioc, handle);
}
@@ -4424,15 +4386,14 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
if (ioc->temp_sensors_count >= event_data->SensorNum) {
- pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
- " exceeded for Sensor: %d !!!\n", ioc->name,
- ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
- ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
- event_data->SensorNum);
- pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
- ioc->name, event_data->CurrentTemperature);
+ ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
+ le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
+ le16_to_cpu(event_data->Status) & 0x2 ? "1 " : " ",
+ le16_to_cpu(event_data->Status) & 0x4 ? "2 " : " ",
+ le16_to_cpu(event_data->Status) & 0x8 ? "3 " : " ",
+ event_data->SensorNum);
+ ioc_err(ioc, "Current Temp In Celsius: %d\n",
+ event_data->CurrentTemperature);
}
}
@@ -4480,8 +4441,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
scmd->result = DID_RESET << 16;
scmd->scsi_done(scmd);
}
- dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
- ioc->name, count));
+ dtmprintk(ioc, ioc_info(ioc, "completing %d cmds\n", count));
}
/**
@@ -4680,8 +4640,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
_scsih_set_satl_pending(scmd, false);
goto out;
}
@@ -4919,37 +4878,28 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
scsi_print_command(scmd);
if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
- pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
- device_str, (unsigned long long)priv_target->sas_address);
+ ioc_warn(ioc, "\t%s wwid(0x%016llx)\n",
+ device_str, (u64)priv_target->sas_address);
} else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
if (pcie_device) {
- pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->wwid,
- pcie_device->port_num);
+ ioc_info(ioc, "\twwid(0x%016llx), port(%d)\n",
+ (u64)pcie_device->wwid, pcie_device->port_num);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "\tenclosure logical id(0x%016llx), "
- "slot(%d)\n", ioc->name,
- (unsigned long long)
- pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "\tenclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0])
- pr_info(MPT3SAS_FMT
- "\tenclosure level(0x%04x),"
- "connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "\tenclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
pcie_device_put(pcie_device);
}
} else {
sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
if (sas_device) {
- pr_warn(MPT3SAS_FMT
- "\tsas_address(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)
- sas_device->sas_address, sas_device->phy);
+ ioc_warn(ioc, "\tsas_address(0x%016llx), phy(%d)\n",
+ (u64)sas_device->sas_address, sas_device->phy);
_scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL);
@@ -4958,30 +4908,23 @@ _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
}
}
- pr_warn(MPT3SAS_FMT
- "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->DevHandle),
- desc_ioc_state, ioc_status, smid);
- pr_warn(MPT3SAS_FMT
- "\trequest_len(%d), underflow(%d), resid(%d)\n",
- ioc->name, scsi_bufflen(scmd), scmd->underflow,
- scsi_get_resid(scmd));
- pr_warn(MPT3SAS_FMT
- "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->TaskTag),
- le32_to_cpu(mpi_reply->TransferCount), scmd->result);
- pr_warn(MPT3SAS_FMT
- "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
- ioc->name, desc_scsi_status,
- scsi_status, desc_scsi_state, scsi_state);
+ ioc_warn(ioc, "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ ioc_warn(ioc, "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ scsi_bufflen(scmd), scmd->underflow, scsi_get_resid(scmd));
+ ioc_warn(ioc, "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ ioc_warn(ioc, "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ desc_scsi_status, scsi_status, desc_scsi_state, scsi_state);
if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
struct sense_info data;
_scsih_normalize_sense(scmd->sense_buffer, &data);
- pr_warn(MPT3SAS_FMT
- "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
- ioc->name, data.skey,
- data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ ioc_warn(ioc, "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ data.skey, data.asc, data.ascq,
+ le32_to_cpu(mpi_reply->SenseCount));
}
if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
response_info = le32_to_cpu(mpi_reply->ResponseInfo);
@@ -5016,17 +4959,17 @@ _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
sas_device->pfa_led_on = 1;
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
goto out;
}
out:
@@ -5056,16 +4999,16 @@ _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
&mpi_request)) != 0) {
- printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
- dewtprintk(ioc, printk(MPT3SAS_FMT
- "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo)));
+ dewtprintk(ioc,
+ ioc_info(ioc, "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
return;
}
}
@@ -5133,8 +5076,8 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
event_reply = kzalloc(sz, GFP_KERNEL);
if (!event_reply) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5424,16 +5367,16 @@ _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
u16 attached_handle;
u8 link_rate;
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "updating handles for sas_host(0x%016llx)\n",
- ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+ dtmprintk(ioc,
+ ioc_info(ioc, "updating handles for sas_host(0x%016llx)\n",
+ (u64)ioc->sas_hba.sas_address));
sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
* sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -5483,15 +5426,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
if (!num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc->sas_hba.phy = kcalloc(num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!ioc->sas_hba.phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.num_phys = num_phys;
@@ -5501,21 +5444,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5524,21 +5467,21 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5557,15 +5500,15 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
i))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
@@ -5579,18 +5522,17 @@ _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out;
}
ioc->sas_hba.enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
- pr_info(MPT3SAS_FMT
- "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
- ioc->name, ioc->sas_hba.handle,
- (unsigned long long) ioc->sas_hba.sas_address,
- ioc->sas_hba.num_phys) ;
+ ioc_info(ioc, "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->sas_hba.handle,
+ (u64)ioc->sas_hba.sas_address,
+ ioc->sas_hba.num_phys);
if (ioc->sas_hba.enclosure_handle) {
if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -5639,16 +5581,16 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5656,8 +5598,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
!= 0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if (sas_address_parent != ioc->sas_hba.sas_address) {
@@ -5684,8 +5626,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander = kzalloc(sizeof(struct _sas_node),
GFP_KERNEL);
if (!sas_expander) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -5694,18 +5636,17 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_expander->sas_address_parent = sas_address_parent;
sas_expander->sas_address = sas_address;
- pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
- " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
- handle, parent_handle, (unsigned long long)
- sas_expander->sas_address, sas_expander->num_phys);
+ ioc_info(ioc, "expander_add: handle(0x%04x), parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ handle, parent_handle,
+ (u64)sas_expander->sas_address, sas_expander->num_phys);
if (!sas_expander->num_phys)
goto out_fail;
sas_expander->phy = kcalloc(sas_expander->num_phys,
sizeof(struct _sas_phy), GFP_KERNEL);
if (!sas_expander->phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5714,8 +5655,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
sas_address_parent);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5724,8 +5665,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5735,8 +5676,8 @@ _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_transport_add_expander_phy(ioc,
&sas_expander->phy[i], expander_pg1,
sas_expander->parent_dev))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -1;
goto out_fail;
}
@@ -5883,9 +5824,8 @@ _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
if (!rc)
return 0;
- pr_err(MPT3SAS_FMT
- "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc, (unsigned long long)sas_address, handle);
+ ioc_err(ioc, "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)sas_address, handle);
return rc;
}
@@ -5979,9 +5919,8 @@ _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
goto out_unlock;
}
@@ -6028,16 +5967,16 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
@@ -6051,8 +5990,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
/* check if device is present */
if (!(le16_to_cpu(sas_device_pg0.Flags) &
MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+ ioc_err(ioc, "device is not present handle(0x04%x)!!!\n",
+ handle);
return -1;
}
@@ -6074,16 +6013,15 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0.EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0.EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0.EnclosureHandle);
}
sas_device = kzalloc(sizeof(struct _sas_device),
GFP_KERNEL);
if (!sas_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6092,8 +6030,8 @@ _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
if (_scsih_get_sas_address(ioc,
le16_to_cpu(sas_device_pg0.ParentDevHandle),
&sas_device->sas_address_parent) != 0)
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_device->enclosure_handle =
le16_to_cpu(sas_device_pg0.EnclosureHandle);
if (sas_device->enclosure_handle != 0)
@@ -6158,11 +6096,10 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->pfa_led_on = 0;
}
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
@@ -6180,18 +6117,15 @@ _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, sas_device->handle,
- (unsigned long long) sas_device->sas_address);
+ ioc_info(ioc, "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ sas_device->handle, (u64)sas_device->sas_address);
_scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, __func__,
- sas_device->handle, (unsigned long long)
- sas_device->sas_address));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ __func__,
+ sas_device->handle, (u64)sas_device->sas_address));
dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
NULL, NULL));
}
@@ -6231,8 +6165,7 @@ _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "sas topology change: (%s)\n", status_str);
pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
"start_phy(%02d), count(%d)\n",
le16_to_cpu(event_data->ExpanderDevHandle),
@@ -6309,8 +6242,7 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
_scsih_sas_host_refresh(ioc);
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
@@ -6339,8 +6271,8 @@ _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring expander event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring expander event\n"));
return 0;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -6464,15 +6396,14 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
reason_str = "unknown reason";
break;
}
- pr_info(MPT3SAS_FMT "device status change: (%s)\n"
- "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "device status change: (%s)\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
- event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
+ event_data->ASC, event_data->ASCQ);
+ pr_cont("\n");
}
/**
@@ -6605,20 +6536,16 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
desc = "nvme failure status";
break;
default:
- pr_err(MPT3SAS_FMT
- " NVMe discovery error(0x%02x): wwid(0x%016llx),"
- "handle(0x%04x)\n", ioc->name, access_status,
- (unsigned long long)wwid, handle);
+ ioc_err(ioc, "NVMe discovery error(0x%02x): wwid(0x%016llx), handle(0x%04x)\n",
+ access_status, (u64)wwid, handle);
return rc;
}
if (!rc)
return rc;
- pr_info(MPT3SAS_FMT
- "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
- ioc->name, desc,
- (unsigned long long)wwid, handle);
+ ioc_info(ioc, "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
+ desc, (u64)wwid, handle);
return rc;
}
@@ -6634,22 +6561,22 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
{
struct MPT3SAS_TARGET *sas_target_priv_data;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__,
- pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: enter: enclosure level(0x%04x), connector name(%s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
if (pcie_device->starget && pcie_device->starget->hostdata) {
sas_target_priv_data = pcie_device->starget->hostdata;
@@ -6658,39 +6585,35 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
}
- pr_info(MPT3SAS_FMT
- "removing handle(0x%04x), wwid (0x%016llx)\n",
- ioc->name, pcie_device->handle,
- (unsigned long long) pcie_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ pcie_device->handle, (u64)pcie_device->wwid);
if (pcie_device->enclosure_handle != 0)
- pr_info(MPT3SAS_FMT
- "removing : enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot);
+ ioc_info(ioc, "removing : enclosure logical id(0x%016llx), slot(%d)\n",
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot);
if (pcie_device->connector_name[0] != '\0')
- pr_info(MPT3SAS_FMT
- "removing: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, pcie_device->enclosure_level,
- pcie_device->connector_name);
+ ioc_info(ioc, "removing: enclosure level(0x%04x), connector name( %s)\n",
+ pcie_device->enclosure_level,
+ pcie_device->connector_name);
if (pcie_device->starget)
scsi_remove_target(&pcie_device->starget->dev);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
- pcie_device->handle, (unsigned long long)
- pcie_device->wwid));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
+ __func__,
+ pcie_device->handle, (u64)pcie_device->wwid));
if (pcie_device->enclosure_handle != 0)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
- ioc->name, __func__,
- (unsigned long long)pcie_device->enclosure_logical_id,
- pcie_device->slot));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
+ __func__,
+ (u64)pcie_device->enclosure_logical_id,
+ pcie_device->slot));
if (pcie_device->connector_name[0] != '\0')
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
- ioc->name, __func__, pcie_device->enclosure_level,
- pcie_device->connector_name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
+ __func__,
+ pcie_device->enclosure_level,
+ pcie_device->connector_name));
kfree(pcie_device->serial_number);
}
@@ -6760,9 +6683,8 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_info(MPT3SAS_FMT
- "device is not present handle(0x%04x), flags!!!\n",
- ioc->name, handle);
+ ioc_info(ioc, "device is not present handle(0x%04x), flags!!!\n",
+ handle);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
pcie_device_put(pcie_device);
return;
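A second mechanical change running through these hunks: the (unsigned long long) casts feeding "%016llx" specifiers are shortened to (u64). That is safe because the kernel's fixed-width types come from asm-generic/int-ll64.h on every architecture, so u64 is always unsigned long long and matches %llx exactly:

	/* pg0 is an illustrative config page; the cast pattern is the point */
	pr_info("wwid(0x%016llx)\n", (u64)le64_to_cpu(pg0.WWID));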
@@ -6806,16 +6728,15 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
&pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6825,9 +6746,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if device is present */
if (!(le32_to_cpu(pcie_device_pg0.Flags) &
MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
- pr_err(MPT3SAS_FMT
- "device is not present handle(0x04%x)!!!\n",
- ioc->name, handle);
+		ioc_err(ioc, "device is not present handle(0x%04x)!!!\n",
+ handle);
return 0;
}
@@ -6848,8 +6768,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
if (!pcie_device) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 0;
}
@@ -6890,16 +6810,16 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* TODO -- Add device name once FW supports it */
if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
&pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
kfree(pcie_device);
return 0;
}
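The "failure at %s:%d/%s()!" banner recurs at nearly every allocation and config-page error path in this file. The patch keeps it open-coded at each site; a hypothetical wrapper (invented here, not part of the patch) would reduce each site to a single line:

/* hypothetical consolidation -- the name is an assumption: */
#define ioc_failure(ioc) \
	ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__)

	if (!pcie_device) {
		ioc_failure(ioc);	/* the patch spells this out each time */
		return 0;
	}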
@@ -6956,8 +6876,7 @@ _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
status_str = "unknown status";
break;
}
- pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
- ioc->name, status_str);
+ ioc_info(ioc, "pcie topology change: (%s)\n", status_str);
pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
"start_port(%02d), count(%d)\n",
le16_to_cpu(event_data->SwitchDevHandle),
@@ -7030,16 +6949,15 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
return;
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
- ioc->name));
+ dewtprintk(ioc, ioc_info(ioc, "ignoring switch event\n"));
return;
}
/* handle siblings events */
for (i = 0; i < event_data->NumEntries; i++) {
if (fw_event->ignore) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "ignoring switch event\n", ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "ignoring switch event\n"));
return;
}
if (ioc->remove_host || ioc->pci_error_recovery)
@@ -7084,9 +7002,9 @@ _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
if (!test_bit(handle, ioc->pend_os_device_add))
break;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "handle(0x%04x) device not found: convert "
- "event to a device add\n", ioc->name, handle));
+ dewtprintk(ioc,
+ ioc_info(ioc, "handle(0x%04x) device not found: convert event to a device add\n",
+ handle));
event_data->PortEntry[i].PortStatus &= 0xF0;
event_data->PortEntry[i].PortStatus |=
MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
@@ -7169,15 +7087,15 @@ _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
- "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
- ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->WWID),
- le16_to_cpu(event_data->TaskTag));
+ ioc_info(ioc, "PCIE device status change: (%s)\n"
+ "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
+ reason_str, le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->WWID),
+ le16_to_cpu(event_data->TaskTag));
if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
- pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ pr_cont(", ASC(0x%x), ASCQ(0x%x)\n",
event_data->ASC, event_data->ASCQ);
- pr_info("\n");
+ pr_cont("\n");
}
/**
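Swapping the follow-up pr_info() calls for pr_cont() in the hunk above is a behavioural fix, not just restyling: a fresh pr_info() opens a new log record with its own prefix and line, whereas pr_cont() appends to a record that was left without a trailing newline. An illustrative contrast (variables are placeholders):

	pr_info("status: (%s)", reason_str);	/* record left open, no '\n' */
	pr_info("extra(0x%x)\n", code);		/* wrong: new prefixed line  */

	pr_info("status: (%s)", reason_str);	/* record left open          */
	pr_cont(" extra(0x%x)\n", code);	/* right: same dmesg line    */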
@@ -7255,12 +7173,12 @@ _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
break;
}
- pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
- "\thandle(0x%04x), enclosure logical id(0x%016llx)"
- " number slots(%d)\n", ioc->name, reason_str,
- le16_to_cpu(event_data->EnclosureHandle),
- (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
- le16_to_cpu(event_data->StartSlot));
+ ioc_info(ioc, "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx) number slots(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (u64)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
}
/**
@@ -7298,9 +7216,8 @@ _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
kzalloc(sizeof(struct _enclosure_node),
GFP_KERNEL);
if (!enclosure_dev) {
- pr_info(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
@@ -7358,10 +7275,8 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
u8 task_abort_retries;
mutex_lock(&ioc->tm_cmds.mutex);
- pr_info(MPT3SAS_FMT
- "%s: enter: phy number(%d), width(%d)\n",
- ioc->name, __func__, event_data->PhyNum,
- event_data->PortWidth);
+ ioc_info(ioc, "%s: enter: phy number(%d), width(%d)\n",
+ __func__, event_data->PhyNum, event_data->PortWidth);
_scsih_block_io_all_device(ioc);
@@ -7371,12 +7286,12 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
/* sanity checks for retrying this loop */
if (max_retries++ == 5) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
- ioc->name, __func__));
+ dewtprintk(ioc, ioc_info(ioc, "%s: giving up\n", __func__));
goto out;
} else if (max_retries > 1)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
- ioc->name, __func__, max_retries - 1));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: %d retry\n",
+ __func__, max_retries - 1));
termination_count = 0;
query_count = 0;
@@ -7443,9 +7358,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
task_abort_retries = 0;
tm_retry:
if (task_abort_retries++ == 60) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: ABORT_TASK: giving up\n", ioc->name,
- __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: ABORT_TASK: giving up\n",
+ __func__));
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
goto broadcast_aen_retry;
}
@@ -7474,9 +7389,10 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
}
if (ioc->broadcast_aen_pending) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: loop back due to pending AEN\n",
- ioc->name, __func__));
+ dewtprintk(ioc,
+ ioc_info(ioc,
+ "%s: loop back due to pending AEN\n",
+ __func__));
ioc->broadcast_aen_pending = 0;
goto broadcast_aen_retry;
}
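dewtprintk() gates its entire second argument on a bit in ioc->logging_level, so the converted ioc_info() call is simply the statement being gated. The definition lives outside this diff, but the shape is roughly the following; the siblings dtmprintk(), dtransportprintk() and dfailprintk(), seen later in the patch, test different MPT_DEBUG_* bits:

#define dewtprintk(IOC, CMD)					\
do {								\
	if ((IOC)->logging_level & MPT_DEBUG_EVENT_WORK_TASK)	\
		CMD;						\
} while (0)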
@@ -7485,9 +7401,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
out_no_lock:
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - exit, query_count = %d termination_count = %d\n",
- ioc->name, __func__, query_count, termination_count));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s - exit, query_count = %d termination_count = %d\n",
+ __func__, query_count, termination_count));
ioc->broadcast_aen_busy = 0;
if (!ioc->shost_recovery)
@@ -7509,13 +7425,13 @@ _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
(Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
- pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
- (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
- "start" : "stop");
+ ioc_info(ioc, "discovery event: (%s)",
+ event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
+ "start" : "stop");
if (event_data->DiscoveryStatus)
- pr_info("discovery_status(0x%08x)",
- le32_to_cpu(event_data->DiscoveryStatus));
- pr_info("\n");
+ pr_cont("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_cont("\n");
}
if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
@@ -7545,20 +7461,16 @@ _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
switch (event_data->ReasonCode) {
case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has failed",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has failed\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
- pr_warn(MPT3SAS_FMT "SMP command sent to the expander"
- "(handle:0x%04x, sas_address:0x%016llx,"
- "physical_port:0x%02x) has timed out",
- ioc->name, le16_to_cpu(event_data->DevHandle),
- (unsigned long long)le64_to_cpu(event_data->SASAddress),
- event_data->PhysicalPort);
+ ioc_warn(ioc, "SMP command sent to the expander (handle:0x%04x, sas_address:0x%016llx, physical_port:0x%02x) has timed out\n",
+ le16_to_cpu(event_data->DevHandle),
+ (u64)le64_to_cpu(event_data->SASAddress),
+ event_data->PhysicalPort);
break;
default:
break;
@@ -7581,11 +7493,10 @@ _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
return;
- pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
- ioc->name,
- (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
- "started" : "completed",
- event_data->Flags);
+ ioc_info(ioc, "pcie enumeration event: (%s) Flag 0x%02x",
+ (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
+ "started" : "completed",
+ event_data->Flags);
if (event_data->EnumerationStatus)
pr_cont("enumeration_status(0x%08x)",
le32_to_cpu(event_data->EnumerationStatus));
@@ -7617,8 +7528,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -7626,8 +7536,7 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
rc = -EAGAIN;
goto out;
@@ -7641,9 +7550,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
mpi_request->PhysDiskNum = phys_disk_num;
- dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
- "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
- handle, phys_disk_num));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: turning fast path on for handle(0x%04x), phys_disk_num (0x%02x)\n",
+ handle, phys_disk_num));
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
@@ -7668,15 +7577,13 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
log_info = 0;
ioc_status &= MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: failed: ioc_status(0x%04x), "
- "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
- log_info));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: failed: ioc_status(0x%04x), loginfo(0x%08x)!!!\n",
+ ioc_status, log_info));
rc = -EFAULT;
} else
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "IR RAID_ACTION: completed successfully\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "IR RAID_ACTION: completed successfully\n"));
}
out:
@@ -7721,9 +7628,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7736,9 +7642,8 @@ _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7781,9 +7686,8 @@ _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_target_priv_data = starget->hostdata;
sas_target_priv_data->deleted = 1;
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
list_del(&raid_device->list);
kfree(raid_device);
}
@@ -7925,16 +7829,16 @@ _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -7964,10 +7868,10 @@ _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
- pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
- ioc->name, (le32_to_cpu(event_data->Flags) &
- MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
- "foreign" : "native", event_data->NumElements);
+ ioc_info(ioc, "raid config change: (%s), elements(%d)\n",
+ le32_to_cpu(event_data->Flags) & MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG ?
+ "foreign" : "native",
+ event_data->NumElements);
for (i = 0; i < event_data->NumElements; i++, element++) {
switch (element->ReasonCode) {
case MPI2_EVENT_IR_CHANGE_RC_ADDED:
@@ -8123,10 +8027,11 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
handle = le16_to_cpu(event_data->VolDevHandle);
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_VOL_STATE_MISSING:
case MPI2_RAID_VOL_STATE_FAILED:
@@ -8146,17 +8051,15 @@ _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
if (!wwid) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
if (!raid_device) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
break;
}
@@ -8207,10 +8110,11 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
state = le32_to_cpu(event_data->NewValue);
if (!ioc->hide_ir_msg)
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
- ioc->name, __func__, handle,
- le32_to_cpu(event_data->PreviousValue), state));
+ dewtprintk(ioc,
+ ioc_info(ioc, "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ __func__, handle,
+ le32_to_cpu(event_data->PreviousValue),
+ state));
switch (state) {
case MPI2_RAID_PD_STATE_ONLINE:
@@ -8231,16 +8135,16 @@ _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
&sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -8294,11 +8198,10 @@ _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
if (!reason_str)
return;
- pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
- "\thandle(0x%04x), percent complete(%d)\n",
- ioc->name, reason_str,
- le16_to_cpu(event_data->VolDevHandle),
- event_data->PercentComplete);
+ ioc_info(ioc, "raid operational status: (%s)\thandle(0x%04x), percent complete(%d)\n",
+ reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
}
/**
@@ -8379,9 +8282,8 @@ Mpi2SasDevicePage0_t *sas_device_pg0)
mpt3sas_scsih_enclosure_find_by_handle(ioc,
le16_to_cpu(sas_device_pg0->EnclosureHandle));
if (enclosure_dev == NULL)
- pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x)"
- "doesn't match with enclosure device!\n",
- ioc->name, sas_device_pg0->EnclosureHandle);
+ ioc_info(ioc, "Enclosure handle(0x%04x) doesn't match with enclosure device!\n",
+ sas_device_pg0->EnclosureHandle);
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
@@ -8475,8 +8377,7 @@ _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
enclosure_dev =
kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
if (!enclosure_dev) {
- pr_err(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n", ioc->name,
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
return;
}
@@ -8513,7 +8414,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+ ioc_info(ioc, "search for end-devices: start\n");
if (list_empty(&ioc->sas_device_list))
goto out;
@@ -8534,8 +8435,7 @@ _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for end-devices: complete\n");
}
/**
@@ -8628,7 +8528,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
u16 handle;
u32 device_info;
- pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+	ioc_info(ioc, "search for PCIe end-devices: start\n");
if (list_empty(&ioc->pcie_device_list))
goto out;
@@ -8640,10 +8540,9 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from %s: "
- "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
- __func__, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from %s: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ __func__, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -8653,8 +8552,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
out:
- pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for PCIe end-devices: complete\n");
}
/**
@@ -8735,8 +8633,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
- ioc->name);
+ ioc_info(ioc, "search for raid volumes: start\n");
if (list_empty(&ioc->raid_device_list))
goto out;
@@ -8779,8 +8676,7 @@ _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
}
}
out:
- pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
- ioc->name);
+ ioc_info(ioc, "search for responding raid volumes: complete\n");
}
/**
@@ -8852,7 +8748,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
u64 sas_address;
u16 handle;
- pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+ ioc_info(ioc, "search for expanders: start\n");
if (list_empty(&ioc->sas_expander_list))
goto out;
@@ -8875,7 +8771,7 @@ _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
}
out:
- pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+ ioc_info(ioc, "search for expanders: complete\n");
}
/**
@@ -8893,12 +8789,10 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
unsigned long flags;
LIST_HEAD(head);
- pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: start\n");
/* removing unresponding end devices */
- pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: end-devices\n");
/*
* Iterate, pulling off devices marked as non-responding. We become the
* owner for the reference the list had on any object we prune.
@@ -8922,9 +8816,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
sas_device_put(sas_device);
}
- pr_info(MPT3SAS_FMT
- " Removing unresponding devices: pcie end-devices\n"
- , ioc->name);
+ ioc_info(ioc, "Removing unresponding devices: pcie end-devices\n");
INIT_LIST_HEAD(&head);
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
list_for_each_entry_safe(pcie_device, pcie_device_next,
@@ -8944,8 +8836,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
/* removing unresponding volumes */
if (ioc->ir_firmware) {
- pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: volumes\n");
list_for_each_entry_safe(raid_device, raid_device_next,
&ioc->raid_device_list, list) {
if (!raid_device->responding)
@@ -8957,8 +8848,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
}
/* removing unresponding expanders */
- pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: expanders\n");
spin_lock_irqsave(&ioc->sas_node_lock, flags);
INIT_LIST_HEAD(&tmp_list);
list_for_each_entry_safe(sas_expander, sas_expander_next,
@@ -8974,8 +8864,7 @@ _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
_scsih_expander_node_remove(ioc, sas_expander);
}
- pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
- ioc->name);
+ ioc_info(ioc, "removing unresponding devices: complete\n");
/* unblock devices */
_scsih_ublock_io_all_device(ioc);
@@ -8992,8 +8881,8 @@ _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
for (i = 0 ; i < sas_expander->num_phys ; i++) {
if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
&expander_pg1, i, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return;
}
@@ -9029,11 +8918,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
u8 retry_count;
unsigned long flags;
- pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+ ioc_info(ioc, "scan devices: start\n");
_scsih_sas_host_refresh(ioc);
- pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders start\n");
/* expanders */
handle = 0xFFFF;
@@ -9042,10 +8931,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from expander scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -9057,25 +8944,22 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
_scsih_refresh_expander_links(ioc, expander_device,
handle);
else {
- pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
_scsih_expander_add(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(expander_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding expander: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(expander_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: expanders complete\n");
if (!ioc->ir_firmware)
goto skip_to_sas;
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk start\n");
/* phys disk */
phys_disk_num = 0xFF;
@@ -9085,10 +8969,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
phys_disk_num = pd_pg0.PhysDiskNum;
@@ -9105,19 +8987,16 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from phys disk scan ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle,
&sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address,
handle, sas_device_pg0.PhyNum,
MPI2_SAS_NEG_LINK_RATE_1_5);
@@ -9131,17 +9010,15 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
1)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
- " handle (0x%04x), sas_addr(0x%016llx)\n",
- ioc->name, handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding phys disk: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: phys disk complete\n");
- pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes start\n");
/* volumes */
handle = 0xFFFF;
@@ -9150,10 +9027,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -9170,10 +9045,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
- "ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from volume scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
@@ -9182,23 +9055,19 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
element.VolDevHandle = volume_pg1.DevHandle;
- pr_info(MPT3SAS_FMT
- "\tBEFORE adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tBEFORE adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
_scsih_sas_volume_add(ioc, &element);
- pr_info(MPT3SAS_FMT
- "\tAFTER adding volume: handle (0x%04x)\n",
- ioc->name, volume_pg1.DevHandle);
+ ioc_info(ioc, "\tAFTER adding volume: handle (0x%04x)\n",
+ volume_pg1.DevHandle);
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: volumes complete\n");
skip_to_sas:
- pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices start\n");
/* sas devices */
handle = 0xFFFF;
@@ -9208,10 +9077,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
- " ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(sas_device_pg0.DevHandle);
@@ -9226,10 +9093,9 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
}
parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
- pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tBEFORE adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
mpt3sas_transport_update_links(ioc, sas_address, handle,
sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
retry_count = 0;
@@ -9241,16 +9107,13 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
0)) {
ssleep(1);
}
- pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
- "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
- handle, (unsigned long long)
- le64_to_cpu(sas_device_pg0.SASAddress));
+ ioc_info(ioc, "\tAFTER adding end device: handle (0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (u64)le64_to_cpu(sas_device_pg0.SASAddress));
}
}
- pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
- ioc->name);
+ ioc_info(ioc, "\tscan devices: end devices complete\n");
+ ioc_info(ioc, "\tscan devices: pcie end devices start\n");
/* pcie devices */
handle = 0xFFFF;
@@ -9260,10 +9123,8 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
& MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
- " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, ioc_status,
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "\tbreak from pcie end device scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc_status, le32_to_cpu(mpi_reply.IOCLogInfo));
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
@@ -9280,14 +9141,11 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
_scsih_pcie_add_device(ioc, handle);
- pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
- "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
- handle,
- (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
+ ioc_info(ioc, "\tAFTER adding pcie end device: handle (0x%04x), wwid(0x%016llx)\n",
+ handle, (u64)le64_to_cpu(pcie_device_pg0.WWID));
}
- pr_info(MPT3SAS_FMT "\tpcie devices: pcie end devices complete\n",
- ioc->name);
- pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+	ioc_info(ioc, "\tscan devices: pcie end devices complete\n");
+ ioc_info(ioc, "scan devices: complete\n");
}
/**
@@ -9298,8 +9156,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
*/
void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
/**
@@ -9311,8 +9168,7 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9340,8 +9196,7 @@ mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, pr_info(MPT3SAS_FMT
- "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
!ioc->sas_hba.num_phys)) {
_scsih_prep_device_scan(ioc);
@@ -9396,9 +9251,8 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
- dewtprintk(ioc, pr_info(MPT3SAS_FMT
- "port enable: complete from worker thread\n",
- ioc->name));
+ dewtprintk(ioc,
+ ioc_info(ioc, "port enable: complete from worker thread\n"));
break;
case MPT3SAS_TURN_ON_PFA_LED:
_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
@@ -9496,8 +9350,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
- pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9564,30 +9418,16 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
switch (le32_to_cpu(*log_code)) {
case MPT2_WARPDRIVE_LC_SSDT:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "IO Throttling has occurred in the WarpDrive "
- "subsystem. Check WarpDrive documentation for "
- "additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: IO Throttling has occurred in the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLW:
- pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
- "Program/Erase Cycles for the WarpDrive subsystem "
- "in degraded range. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_warn(ioc, "WarpDrive Warning: Program/Erase Cycles for the WarpDrive subsystem in degraded range. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_SSDLF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "There are no Program/Erase Cycles for the "
- "WarpDrive subsystem. The storage device will be "
- "in read-only mode. Check WarpDrive documentation "
- "for additional details.\n", ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: There are no Program/Erase Cycles for the WarpDrive subsystem. The storage device will be in read-only mode. Check WarpDrive documentation for additional details.\n");
break;
case MPT2_WARPDRIVE_LC_BRMF:
- pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
- "The Backup Rail Monitor has failed on the "
- "WarpDrive subsystem. Check WarpDrive "
- "documentation for additional details.\n",
- ioc->name);
+ ioc_err(ioc, "WarpDrive Fatal Error: The Backup Rail Monitor has failed on the WarpDrive subsystem. Check WarpDrive documentation for additional details.\n");
break;
}
@@ -9613,9 +9453,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
(Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
switch (ActiveCableEventData->ReasonCode) {
case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
- pr_notice(MPT3SAS_FMT
- "Currently an active cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently an active cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice("cannot be powered and devices connected\n");
pr_notice("to this active cable will not be seen\n");
pr_notice("This active cable requires %d mW of power\n",
@@ -9623,9 +9462,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
break;
case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
- pr_notice(MPT3SAS_FMT
- "Currently a cable with ReceptacleID %d\n",
- ioc->name, ActiveCableEventData->ReceptacleID);
+ ioc_notice(ioc, "Currently a cable with ReceptacleID %d\n",
+ ActiveCableEventData->ReceptacleID);
pr_notice(
"is not running at optimal speed(12 Gb/s rate)\n");
break;
@@ -9640,8 +9478,8 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
fw_event = alloc_fw_event_work(sz);
if (!fw_event) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return 1;
}
@@ -9690,11 +9528,9 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
sas_expander->sas_address_parent);
- pr_info(MPT3SAS_FMT
- "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
- ioc->name,
- sas_expander->handle, (unsigned long long)
- sas_expander->sas_address);
+ ioc_info(ioc, "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+		 sas_expander->handle,
+		 (u64)sas_expander->sas_address);
spin_lock_irqsave(&ioc->sas_node_lock, flags);
list_del(&sas_expander->list);
@@ -9729,16 +9565,14 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
goto out;
}
ioc->scsih_cmds.status = MPT3_CMD_PENDING;
smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
goto out;
}
@@ -9751,24 +9585,22 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ ioc_info(ioc, "IR shutdown (sending)\n");
init_completion(&ioc->scsih_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
goto out;
}
if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->scsih_cmds.reply;
if (!ioc->hide_ir_msg)
- pr_info(MPT3SAS_FMT "IR shutdown "
- "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
- le32_to_cpu(mpi_reply->IOCLogInfo));
+ ioc_info(ioc, "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
}
out:
@@ -9817,9 +9649,8 @@ static void scsih_remove(struct pci_dev *pdev)
sas_target_priv_data->deleted = 1;
scsi_remove_target(&raid_device->starget->dev);
}
- pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
- ioc->name, raid_device->handle,
- (unsigned long long) raid_device->wwid);
+ ioc_info(ioc, "removing handle(0x%04x), wwid(0x%016llx)\n",
+ raid_device->handle, (u64)raid_device->wwid);
_scsih_raid_device_remove(ioc, raid_device);
}
list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
@@ -10230,7 +10061,7 @@ scsih_scan_start(struct Scsi_Host *shost)
rc = mpt3sas_port_enable(ioc);
if (rc != 0)
- pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+ ioc_info(ioc, "port enable: FAILED\n");
}
/**
@@ -10255,9 +10086,7 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (time >= (300 * HZ)) {
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with timeout (timeout=300s)\n",
- ioc->name);
+ ioc_info(ioc, "port enable: FAILED with timeout (timeout=300s)\n");
ioc->is_driver_loading = 0;
return 1;
}
@@ -10266,16 +10095,15 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
return 0;
if (ioc->start_scan_failed) {
- pr_info(MPT3SAS_FMT
- "port enable: FAILED with (ioc_status=0x%08x)\n",
- ioc->name, ioc->start_scan_failed);
+ ioc_info(ioc, "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->start_scan_failed);
ioc->is_driver_loading = 0;
ioc->wait_for_discovery_to_complete = 0;
ioc->remove_host = 1;
return 1;
}
- pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc_info(ioc, "port enable: SUCCESS\n");
ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
if (ioc->wait_for_discovery_to_complete) {
@@ -10586,28 +10414,22 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ioc->is_mcpu_endpoint) {
/* mCPU MPI support 64K max IO */
shost->max_sectors = 128;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
} else {
if (max_sectors != 0xFFFF) {
if (max_sectors < 64) {
shost->max_sectors = 64;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767. " \
- "Assigning value of 64.\n", \
- ioc->name, max_sectors);
+ ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning value of 64.\n",
+ max_sectors);
} else if (max_sectors > 32767) {
shost->max_sectors = 32767;
- pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
- "for max_sectors, range is 64 to 32767." \
- "Assigning default value of 32767.\n", \
- ioc->name, max_sectors);
+				ioc_warn(ioc, "Invalid value %d passed for max_sectors, range is 64 to 32767. Assigning default value of 32767.\n",
+ max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
- pr_info(MPT3SAS_FMT
- "The max_sectors value is set to %d\n",
- ioc->name, shost->max_sectors);
+ ioc_info(ioc, "The max_sectors value is set to %d\n",
+ shost->max_sectors);
}
}
}
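For reference, the sanitising above clamps the max_sectors module parameter to [64, 32767] and forces an even value with & 0xFFFE. A compact alternative using clamp_val() is sketched below; it is not byte-for-byte equivalent, because the open-coded version leaves an over-range value at the odd bound 32767 rather than 32766:

	/* e.g. 1001 -> 1000, 40 -> 64, 60000 -> 32766 */
	shost->max_sectors = clamp_val(max_sectors, 64, 32767) & 0xFFFE;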
@@ -10627,16 +10449,16 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->firmware_event_thread = alloc_ordered_workqueue(
ioc->firmware_event_name, 0);
if (!ioc->firmware_event_thread) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_thread_fail;
}
ioc->is_driver_loading = 1;
if ((mpt3sas_base_attach(ioc))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rv = -ENODEV;
goto out_attach_fail;
}
@@ -10657,8 +10479,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rv = scsi_add_host(shost, &pdev->dev);
if (rv) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_add_shost_fail;
}
@@ -10695,9 +10517,8 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
flush_scheduled_work();
scsi_block_requests(shost);
device_state = pci_choose_state(pdev, state);
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_save_state(pdev);
mpt3sas_base_free_resources(ioc);
@@ -10719,9 +10540,8 @@ scsih_resume(struct pci_dev *pdev)
pci_power_t device_state = pdev->current_state;
int r;
- pr_info(MPT3SAS_FMT
- "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
- ioc->name, pdev, pci_name(pdev), device_state);
+ ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ pdev, pci_name(pdev), device_state);
pci_set_power_state(pdev, PCI_D0);
pci_enable_wake(pdev, PCI_D0, 0);
@@ -10753,8 +10573,7 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
- ioc->name, state);
+ ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
switch (state) {
case pci_channel_io_normal:
@@ -10791,8 +10610,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
int rc;
- pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
ioc->pdev = pdev;
@@ -10803,8 +10621,8 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
- pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
- (rc == 0) ? "success" : "failed");
+ ioc_warn(ioc, "hard reset: %s\n",
+ (rc == 0) ? "success" : "failed");
if (!rc)
return PCI_ERS_RESULT_RECOVERED;
@@ -10826,9 +10644,8 @@ scsih_pci_resume(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+ ioc_info(ioc, "PCI error: resume callback!!\n");
- pci_cleanup_aer_uncorrect_error_status(pdev);
mpt3sas_base_start_watchdog(ioc);
scsi_unblock_requests(ioc->shost);
}
@@ -10843,8 +10660,7 @@ scsih_pci_mmio_enabled(struct pci_dev *pdev)
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
- pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
- ioc->name);
+ ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
/* TODO - dump whatever for debugging purposes */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc2677c1cd..6a8a3c09b4b1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -146,25 +146,22 @@ _transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
u32 ioc_status;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT
- "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
- ioc->name, handle, ioc_status,
- __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "handle(0x%04x), ioc_status(0x%04x) failure at %s:%d/%s()!\n",
+ handle, ioc_status, __FILE__, __LINE__, __func__);
return -EIO;
}
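The hunk above also fixes a latent bug visible in the removed lines: MPT3SAS_FMT already supplies a leading "%s: " for the adapter name, yet the arguments were passed as __func__, ioc->name, printing the function name where the adapter name belongs. The macro form makes that transposition inexpressible:

	/* before: two %s consumers, arguments transposed */
	pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
		__func__, ioc->name);

	/* after: the adapter is named once, via the macro argument */
	ioc_info(ioc, "%s: host reset in progress!\n", __func__);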
@@ -310,16 +307,14 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -329,26 +324,22 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
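The wait-for-operational loop converted here is duplicated almost verbatim in _transport_get_expander_phy_error_log() below. A hypothetical helper (name and placement invented here; the patch does not add it) would fold the copies:

/* hypothetical refactor of the repeated polling loop: */
static int _transport_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc)
{
	int count = 0;
	u32 state = mpt3sas_base_get_iocstate(ioc, 1);

	while (state != MPI2_IOC_STATE_OPERATIONAL) {
		if (count++ == 10) {
			ioc_err(ioc, "%s: failed due to ioc not operational\n",
				__func__);
			return -EFAULT;
		}
		ssleep(1);
		state = mpt3sas_base_get_iocstate(ioc, 1);
		ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
			 __func__, count);
	}
	if (count)
		ioc_info(ioc, "%s: ioc is operational\n", __func__);
	return 0;
}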
@@ -359,9 +350,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
data_out_sz = sizeof(struct rep_manu_request);
data_in_sz = sizeof(struct rep_manu_reply);
- data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
- &data_out_dma);
-
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
+ &data_out_dma, GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -388,16 +378,15 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
data_in_sz);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - send to sas_addr(0x%016llx)\n",
- ioc->name, (unsigned long long)sas_address));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - send to sas_addr(0x%016llx)\n",
+ (u64)sas_address));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -405,17 +394,16 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "report_manufacture - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
u8 *tmp;
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct rep_manu_reply))
@@ -439,8 +427,8 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
manufacture_reply->component_revision_id;
}
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "report_manufacture - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "report_manufacture - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -448,7 +436,7 @@ _transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ dma_free_coherent(&ioc->pdev->dev, data_out_sz + data_in_sz,
data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
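This free pairs with the allocation change in the previous hunk: pci_alloc_consistent()/pci_free_consistent() were thin wrappers over the generic DMA API, with the allocation hard-wired to GFP_ATOMIC, roughly as follows (modulo a NULL-device check in the real wrapper):

static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
					 dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC);
}

Calling dma_alloc_coherent() directly in this sleepable, mutex-protected path lets the driver ask for GFP_KERNEL instead.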
@@ -643,8 +631,8 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
mpt3sas_port = kzalloc(sizeof(struct _sas_port),
GFP_KERNEL);
if (!mpt3sas_port) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return NULL;
}
@@ -655,22 +643,21 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
if (!sas_node) {
- pr_err(MPT3SAS_FMT
- "%s: Could not find parent sas_address(0x%016llx)!\n",
- ioc->name, __func__, (unsigned long long)sas_address);
+ ioc_err(ioc, "%s: Could not find parent sas_address(0x%016llx)!\n",
+ __func__, (u64)sas_address);
goto out_fail;
}
if ((_transport_set_identify(ioc, handle,
&mpt3sas_port->remote_identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -687,20 +674,20 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
}
if (!mpt3sas_port->num_phys) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
if (!sas_node->parent_dev) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
port = sas_port_alloc_num(sas_node->parent_dev);
if ((sas_port_add(port))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
goto out_fail;
}
@@ -729,17 +716,17 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
sas_device = mpt3sas_get_sdev_by_addr(ioc,
mpt3sas_port->remote_identify.sas_address);
if (!sas_device) {
- dfailprintk(ioc, printk(MPT3SAS_FMT
- "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__));
+ dfailprintk(ioc,
+ ioc_info(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__));
goto out_fail;
}
sas_device->pend_sas_rphy_add = 1;
}
if ((sas_rphy_add(rphy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
}
if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) {
@@ -861,14 +848,14 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -890,8 +877,8 @@ mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
phy_pg0.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -929,14 +916,14 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
phy = sas_phy_alloc(parent_dev, phy_index);
if (!phy) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -1;
}
if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
&mpt3sas_phy->identify))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -960,8 +947,8 @@ mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
expander_pg1.ProgrammedLinkRate >> 4);
if ((sas_phy_add(phy))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
sas_phy_free(phy);
return -1;
}
@@ -1098,16 +1085,14 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1117,26 +1102,22 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1146,7 +1127,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_error_log_request) +
sizeof(struct phy_error_log_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1179,17 +1161,16 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_error_log_request),
sizeof(struct phy_error_log_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1197,16 +1178,15 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_error_log - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_error_log_reply))
@@ -1215,9 +1195,9 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
phy_error_log_reply = data_out +
sizeof(struct phy_error_log_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - function_result(%d)\n",
- ioc->name, phy_error_log_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - function_result(%d)\n",
+ phy_error_log_reply->function_result));
phy->invalid_dword_count =
be32_to_cpu(phy_error_log_reply->invalid_dword);
@@ -1229,8 +1209,8 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
be32_to_cpu(phy_error_log_reply->phy_reset_problem);
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_error_log - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_error_log - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1238,7 +1218,7 @@ _transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out, data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
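dtransportprintk() evaluates its second argument only when the transport debug bit is set in ioc->logging_level, which is why a complete ioc_info() statement can be passed as the macro argument in the hunks above. Its definition in mpt3sas_debug.h is along these lines (sketch):

#define dtransportprintk(IOC, CMD) \
{ \
        if (IOC->logging_level & MPT_DEBUG_TRANSPORT) \
                CMD; \
}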
@@ -1273,17 +1253,16 @@ _transport_get_linkerrors(struct sas_phy *phy)
/* get hba phy error logs */
if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
phy->number))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number,
- le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
phy->running_disparity_error_count =
@@ -1411,16 +1390,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
u16 wait_state_count;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
return -EFAULT;
}
mutex_lock(&ioc->transport_cmds.mutex);
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1430,26 +1407,22 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto out;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto out;
}
@@ -1459,7 +1432,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
sz = sizeof(struct phy_control_request) +
sizeof(struct phy_control_reply);
- data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pdev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
pr_err("failure at %s:%d/%s()!\n", __FILE__,
__LINE__, __func__);
@@ -1497,17 +1471,16 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
data_out_dma + sizeof(struct phy_control_request),
sizeof(struct phy_control_reply));
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
- ioc->name, (unsigned long long)phy->identify.sas_address,
- phy->number, phy_operation));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ (u64)phy->identify.sas_address,
+ phy->number, phy_operation));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s: timeout\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
@@ -1515,16 +1488,15 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
goto issue_host_reset;
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - complete\n", ioc->name));
+ dtransportprintk(ioc, ioc_info(ioc, "phy_control - complete\n"));
if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
mpi_reply = ioc->transport_cmds.reply;
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - reply data transfer size(%d)\n",
- ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - reply data transfer size(%d)\n",
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
sizeof(struct phy_control_reply))
@@ -1533,14 +1505,14 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
phy_control_reply = data_out +
sizeof(struct phy_control_request);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - function_result(%d)\n",
- ioc->name, phy_control_reply->function_result));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - function_result(%d)\n",
+ phy_control_reply->function_result));
rc = 0;
} else
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "phy_control - no reply\n", ioc->name));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "phy_control - no reply\n"));
issue_host_reset:
if (issue_reset)
@@ -1548,7 +1520,8 @@ _transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
out:
ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
if (data_out)
- pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pdev->dev, sz, data_out,
+ data_out_dma);
mutex_unlock(&ioc->transport_cmds.mutex);
return rc;
@@ -1591,16 +1564,15 @@ _transport_phy_reset(struct sas_phy *phy, int hard_reset)
mpi_request.PhyNum = phy->number;
if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
return -ENXIO;
}
if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
- pr_info(MPT3SAS_FMT
- "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
- ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
- le32_to_cpu(mpi_reply.IOCLogInfo));
+ ioc_info(ioc, "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
return 0;
}
@@ -1647,23 +1619,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit0PhyData_t));
sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg0) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
sas_iounit_pg0, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1672,10 +1644,8 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
if (sas_iounit_pg0->PhyData[i].PortFlags &
MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
- pr_err(MPT3SAS_FMT "discovery is active on " \
- "port = %d, phy = %d: unable to enable/disable "
- "phys, try again later!\n", ioc->name,
- sas_iounit_pg0->PhyData[i].Port, i);
+ ioc_err(ioc, "discovery is active on port = %d, phy = %d: unable to enable/disable phys, try again later!\n",
+ sas_iounit_pg0->PhyData[i].Port, i);
discovery_active = 1;
}
}
@@ -1690,23 +1660,23 @@ _transport_phy_enable(struct sas_phy *phy, int enable)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1798,23 +1768,23 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
sizeof(Mpi2SasIOUnit1PhyData_t));
sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
if (!sas_iounit_pg1) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENOMEM;
goto out;
}
if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
sas_iounit_pg1, sz))) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -EIO;
goto out;
}
@@ -1833,8 +1803,8 @@ _transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
sz)) {
- pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
- ioc->name, __FILE__, __LINE__, __func__);
+ ioc_err(ioc, "failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
rc = -ENXIO;
goto out;
}
@@ -1922,8 +1892,7 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
unsigned int reslen = 0;
if (ioc->shost_recovery || ioc->pci_error_recovery) {
- pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
- __func__, ioc->name);
+ ioc_info(ioc, "%s: host reset in progress!\n", __func__);
rc = -EFAULT;
goto job_done;
}
@@ -1933,8 +1902,8 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
goto job_done;
if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
- pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
- __func__);
+ ioc_err(ioc, "%s: transport_cmds in use\n",
+ __func__);
rc = -EAGAIN;
goto out;
}
@@ -1959,26 +1928,22 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
if (wait_state_count++ == 10) {
- pr_err(MPT3SAS_FMT
- "%s: failed due to ioc not operational\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed due to ioc not operational\n",
+ __func__);
rc = -EFAULT;
goto unmap_in;
}
ssleep(1);
ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
- pr_info(MPT3SAS_FMT
- "%s: waiting for operational state(count=%d)\n",
- ioc->name, __func__, wait_state_count);
+ ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
+ __func__, wait_state_count);
}
if (wait_state_count)
- pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
- ioc->name, __func__);
+ ioc_info(ioc, "%s: ioc is operational\n", __func__);
smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
if (!smid) {
- pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
- ioc->name, __func__);
+ ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
rc = -EAGAIN;
goto unmap_in;
}
@@ -1999,16 +1964,15 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
ioc->build_sg(ioc, psge, dma_addr_out, dma_len_out - 4, dma_addr_in,
dma_len_in - 4);
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - sending smp request\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: sending smp request\n", __func__));
init_completion(&ioc->transport_cmds.done);
mpt3sas_base_put_smid_default(ioc, smid);
wait_for_completion_timeout(&ioc->transport_cmds.done, 10*HZ);
if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
- pr_err(MPT3SAS_FMT "%s : timeout\n",
- __func__, ioc->name);
+ ioc_err(ioc, "%s: timeout\n", __func__);
_debug_dump_mf(mpi_request,
sizeof(Mpi2SmpPassthroughRequest_t)/4);
if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) {
@@ -2018,12 +1982,11 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
}
}
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - complete\n", ioc->name, __func__));
+ dtransportprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
if (!(ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID)) {
- dtransportprintk(ioc, pr_info(MPT3SAS_FMT
- "%s - no reply\n", ioc->name, __func__));
+ dtransportprintk(ioc,
+ ioc_info(ioc, "%s: no reply\n", __func__));
rc = -ENXIO;
goto unmap_in;
}
@@ -2031,9 +1994,9 @@ _transport_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
mpi_reply = ioc->transport_cmds.reply;
dtransportprintk(ioc,
- pr_info(MPT3SAS_FMT "%s - reply data transfer size(%d)\n",
- ioc->name, __func__,
- le16_to_cpu(mpi_reply->ResponseDataLength)));
+ ioc_info(ioc, "%s: reply data transfer size(%d)\n",
+ __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
memcpy(job->reply, mpi_reply, sizeof(*mpi_reply));
job->reply_len = sizeof(*mpi_reply);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index cae7c1eaef34..6ac453fd5937 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -72,8 +72,7 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
u16 sz, event_data_sz;
unsigned long flags;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
@@ -85,23 +84,23 @@ _mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
memcpy(&mpi_reply->EventData, event_data,
sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: add to driver event log\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: add to driver event log\n",
+ __func__));
mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
kfree(mpi_reply);
out:
/* clearing the diag_trigger_active flag */
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: clearing diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: clearing diag_trigger_active flag\n",
+ __func__));
ioc->diag_trigger_active = 0;
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -115,22 +114,22 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
{
u8 issue_reset = 0;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
/* release the diag buffer trace */
if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: release trace diag buffer\n", ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: release trace diag buffer\n",
+ __func__));
mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
&issue_reset);
}
_mpt3sas_raise_sigio(ioc, event_data);
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -168,9 +167,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
by_pass_checks:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - trigger_bitmask = 0x%08x\n",
- ioc->name, __func__, trigger_bitmask));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - trigger_bitmask = 0x%08x\n",
+ __func__, trigger_bitmask));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -182,9 +181,9 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
@@ -202,8 +201,8 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -239,9 +238,9 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
- ioc->name, __func__, event, log_entry_qualifier));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ __func__, event, log_entry_qualifier));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -263,26 +262,26 @@ mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
}
found_match = 1;
ioc->diag_trigger_active = 1;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
}
spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
event_data.u.event.EventValue = event;
event_data.u.event.LogEntryQualifier = log_entry_qualifier;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -319,9 +318,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
- ioc->name, __func__, sense_key, asc, ascq));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ __func__, sense_key, asc, ascq));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -347,9 +346,9 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
event_data.u.scsi.SenseKey = sense_key;
@@ -357,8 +356,8 @@ mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
event_data.u.scsi.ASCQ = ascq;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
/**
@@ -393,9 +392,9 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
return;
}
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
- ioc->name, __func__, ioc_status, loginfo));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ __func__, ioc_status, loginfo));
/* don't send trigger if a trigger is currently active */
if (ioc->diag_trigger_active) {
@@ -420,15 +419,15 @@ mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
if (!found_match)
goto out;
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
- "%s: setting diag_trigger_active flag\n",
- ioc->name, __func__));
+ dTriggerDiagPrintk(ioc,
+ ioc_info(ioc, "%s: setting diag_trigger_active flag\n",
+ __func__));
memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
event_data.u.mpi.IOCStatus = ioc_status;
event_data.u.mpi.IocLogInfo = loginfo;
mpt3sas_send_trigger_data_event(ioc, &event_data);
out:
- dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
- __func__));
+ dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",
+ __func__));
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
index b4927f2b7677..cc07ba41f507 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_warpdrive.c
@@ -127,20 +127,17 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
return;
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as drives are exposed\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as drives are exposed\n");
return;
}
if (mpt3sas_get_num_volumes(ioc) > 1) {
_warpdrive_disable_ddio(ioc);
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "globally as number of drives > 1\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled globally as number of drives > 1\n");
return;
}
if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
&num_pds)) || !num_pds) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in computing number of drives\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in computing number of drives\n");
return;
}
@@ -148,15 +145,13 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
sizeof(Mpi2RaidVol0PhysDisk_t));
vol_pg0 = kzalloc(sz, GFP_KERNEL);
if (!vol_pg0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Memory allocation failure for RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Memory allocation failure for RVPG0\n");
return;
}
if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "Failure in retrieving RVPG0\n", ioc->name);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled Failure in retrieving RVPG0\n");
kfree(vol_pg0);
return;
}
@@ -166,10 +161,8 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* assumed for WARPDRIVE, disable direct I/O
*/
if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
- pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): num_mem=%d, "
- "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
- num_pds, MPT_MAX_WARPDRIVE_PDS);
+ ioc_warn(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): num_mem=%d, max_mem_allowed=%d\n",
+ raid_device->handle, num_pds, MPT_MAX_WARPDRIVE_PDS);
kfree(vol_pg0);
return;
}
@@ -179,22 +172,18 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum) ||
le16_to_cpu(pd_pg0.DevHandle) ==
MPT3SAS_INVALID_DEVICE_HANDLE) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- "handle retrieval failed for member number=%d\n",
- ioc->name, raid_device->handle,
- vol_pg0->PhysDisk[count].PhysDiskNum);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle retrieval failed for member number=%d\n",
+ raid_device->handle,
+ vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
/* Disable direct I/O if member drive lba exceeds 4 bytes */
dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
if (dev_max_lba >> 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
- "disabled for the drive with handle(0x%04x) member"
- " handle (0x%04x) unsupported max lba 0x%016llx\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(pd_pg0.DevHandle),
- (unsigned long long)dev_max_lba);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) member handle (0x%04x) unsupported max lba 0x%016llx\n",
+ raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (u64)dev_max_lba);
goto out_error;
}
@@ -206,41 +195,36 @@ mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
* not RAID0
*/
if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x): type=%d, "
- "s_sz=%uK, blk_size=%u\n", ioc->name,
- raid_device->handle, raid_device->volume_type,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x): type=%d, s_sz=%uK, blk_size=%u\n",
+ raid_device->handle, raid_device->volume_type,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
stripe_exp = find_first_bit(&stripe_sz, 32);
if (stripe_exp == 32) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
- ioc->name, raid_device->handle,
- (le32_to_cpu(vol_pg0->StripeSize) *
- le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
raid_device->stripe_exponent = stripe_exp;
block_sz = le16_to_cpu(vol_pg0->BlockSize);
block_exp = find_first_bit(&block_sz, 16);
if (block_exp == 16) {
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid block sz %u\n",
- ioc->name, raid_device->handle,
- le16_to_cpu(vol_pg0->BlockSize));
+ ioc_info(ioc, "WarpDrive : Direct IO is disabled for the drive with handle(0x%04x) invalid block sz %u\n",
+ raid_device->handle, le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
- pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
- " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+ ioc_info(ioc, "WarpDrive : Direct IO is Enabled for the drive with handle(0x%04x)\n",
+ raid_device->handle);
/*
* WARPDRIVE: Though the following fields are not used for direct IO,
* stored for future purpose:
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 8c91637cd598..3ac34373746c 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -403,29 +403,14 @@ static int pci_go_64(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
return rc;
}
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
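dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, so the old four-step dance of pci_set_dma_mask()/pci_set_consistent_dma_mask() collapses into a plain 64-bit-then-32-bit fallback. The resulting probe-time pattern, in isolation (sketch):

        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc)
                rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (rc) {
                dev_err(&pdev->dev, "DMA enable failed\n");
                return rc;
        }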
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index cff43bd9f675..3df1428df317 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -336,13 +336,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(mvi->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(mvi->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -416,10 +416,10 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
err_out_2:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
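The legacy PCI_DMA_* constants map one-to-one onto enum dma_data_direction (PCI_DMA_TODEVICE becomes DMA_TO_DEVICE, PCI_DMA_FROMDEVICE becomes DMA_FROM_DEVICE), and dma_unmap_sg() must be called with the same nents and direction as the original mapping, which is why the error paths above mirror the map calls exactly. In sketch form:

        /* map: the device will read the request buffer */
        if (!dma_map_sg(dev, sg_req, 1, DMA_TO_DEVICE))
                return -ENOMEM;
        /* ... hardware consumes the buffer ... */
        /* unmap with matching nents and direction */
        dma_unmap_sg(dev, sg_req, 1, DMA_TO_DEVICE);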
@@ -904,9 +904,9 @@ static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index b3cd9a6b1d30..2458974d1af6 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -143,8 +143,8 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
case RESOURCE_UNCACHED_MEMORY:
size = round_up(size, 8);
- res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
- &res->bus_addr);
+ res->virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size,
+ &res->bus_addr, GFP_KERNEL);
if (!res->virt_addr) {
dev_err(&mhba->pdev->dev,
"unable to allocate consistent mem,"
@@ -175,7 +175,7 @@ static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
switch (res->type) {
case RESOURCE_UNCACHED_MEMORY:
- pci_free_consistent(mhba->pdev, res->size,
+ dma_free_coherent(&mhba->pdev->dev, res->size,
res->virt_addr, res->bus_addr);
break;
case RESOURCE_CACHED_MEMORY:
@@ -211,14 +211,14 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
dma_addr_t busaddr;
sg = scsi_sglist(scmd);
- *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ *sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
if (*sg_count > mhba->max_sge) {
dev_err(&mhba->pdev->dev,
"sg count[0x%x] is bigger than max sg[0x%x].\n",
*sg_count, mhba->max_sge);
- pci_unmap_sg(mhba->pdev, sg, sgnum,
- (int) scmd->sc_data_direction);
+ dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+ scmd->sc_data_direction);
return -1;
}
for (i = 0; i < *sg_count; i++) {
@@ -246,7 +246,8 @@ static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
if (size == 0)
return 0;
- virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
+ virt_addr = dma_zalloc_coherent(&mhba->pdev->dev, size, &phy_addr,
+ GFP_KERNEL);
if (!virt_addr)
return -1;
@@ -274,8 +275,8 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
}
INIT_LIST_HEAD(&cmd->queue_pointer);
- cmd->frame = pci_alloc_consistent(mhba->pdev,
- mhba->ib_max_size, &cmd->frame_phys);
+ cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
+ &cmd->frame_phys, GFP_KERNEL);
if (!cmd->frame) {
dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
" frame,size = %d.\n", mhba->ib_max_size);
@@ -287,7 +288,7 @@ static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
dev_err(&mhba->pdev->dev, "failed to allocate memory"
" for internal frame\n");
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
return NULL;
@@ -313,10 +314,10 @@ static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
phy_addr = (dma_addr_t) m_sg->baseaddr_l |
(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
- pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
phy_addr);
}
- pci_free_consistent(mhba->pdev, mhba->ib_max_size,
+ dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
cmd->frame, cmd->frame_phys);
kfree(cmd);
}
@@ -663,16 +664,17 @@ static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
}
}
-static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
+static int mvumi_pci_set_master(struct pci_dev *pdev)
{
- unsigned int ret = 0;
+ int ret = 0;
+
pci_set_master(pdev);
if (IS_DMA64) {
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
} else
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
return ret;
}
@@ -771,7 +773,7 @@ static void mvumi_release_fw(struct mvumi_hba *mhba)
mvumi_free_cmds(mhba);
mvumi_release_mem_resource(mhba);
mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
kfree(mhba->regs);
pci_release_regions(mhba->pdev);
@@ -1339,9 +1341,9 @@ static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
}
if (scsi_bufflen(scmd))
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int) scmd->sc_data_direction);
+ scmd->sc_data_direction);
cmd->scmd->scsi_done(scmd);
mvumi_return_cmd(mhba, cmd);
}
@@ -2148,9 +2150,9 @@ static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
scmd->SCp.ptr = NULL;
if (scsi_bufflen(scmd)) {
- pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
+ dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
scsi_sg_count(scmd),
- (int)scmd->sc_data_direction);
+ scmd->sc_data_direction);
}
mvumi_return_cmd(mhba, cmd);
spin_unlock_irqrestore(mhba->shost->host_lock, flags);
@@ -2362,8 +2364,8 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
ret = -ENOMEM;
goto fail_alloc_mem;
}
- mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
- &mhba->handshake_page_phys);
+ mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
+ HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
if (!mhba->handshake_page) {
dev_err(&mhba->pdev->dev,
"failed to allocate memory for handshake\n");
@@ -2383,7 +2385,7 @@ static int mvumi_init_fw(struct mvumi_hba *mhba)
fail_ready_state:
mvumi_release_mem_resource(mhba);
- pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
+ dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
mhba->handshake_page, mhba->handshake_page_phys);
fail_alloc_page:
kfree(mhba->regs);
@@ -2480,20 +2482,9 @@ static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
return ret;
- pci_set_master(pdev);
-
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail_set_dma_mask;
- }
+ ret = mvumi_pci_set_master(pdev);
+ if (ret)
+ goto fail_set_dma_mask;
host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
if (!host) {
@@ -2627,19 +2618,11 @@ static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
dev_err(&pdev->dev, "enable device failed\n");
return ret;
}
- pci_set_master(pdev);
- if (IS_DMA64) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
- } else {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret)
- goto fail;
- }
+
+ ret = mvumi_pci_set_master(pdev);
+ if (ret)
+ goto fail;
ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
if (ret)
goto fail;
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
new file mode 100644
index 000000000000..aeb282f617c5
--- /dev/null
+++ b/drivers/scsi/myrb.c
@@ -0,0 +1,3656 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrb.h"
+
+static struct raid_template *myrb_raid_template;
+
+static void myrb_monitor(struct work_struct *work);
+static inline void myrb_translate_devstate(void *DeviceState);
+
+static inline int myrb_logical_channel(struct Scsi_Host *shost)
+{
+ return shost->max_channel - 1;
+}
+
+static struct myrb_devstate_name_entry {
+ enum myrb_devstate state;
+ const char *name;
+} myrb_devstate_name_list[] = {
+ { MYRB_DEVICE_DEAD, "Dead" },
+ { MYRB_DEVICE_WO, "WriteOnly" },
+ { MYRB_DEVICE_ONLINE, "Online" },
+ { MYRB_DEVICE_CRITICAL, "Critical" },
+ { MYRB_DEVICE_STANDBY, "Standby" },
+ { MYRB_DEVICE_OFFLINE, "Offline" },
+};
+
+static const char *myrb_devstate_name(enum myrb_devstate state)
+{
+ struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return "Unknown";
+}
+
+static struct myrb_raidlevel_name_entry {
+ enum myrb_raidlevel level;
+ const char *name;
+} myrb_raidlevel_name_list[] = {
+ { MYRB_RAID_LEVEL0, "RAID0" },
+ { MYRB_RAID_LEVEL1, "RAID1" },
+ { MYRB_RAID_LEVEL3, "RAID3" },
+ { MYRB_RAID_LEVEL5, "RAID5" },
+ { MYRB_RAID_LEVEL6, "RAID6" },
+ { MYRB_RAID_JBOD, "JBOD" },
+};
+
+static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
+{
+ struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrb_create_mempools - allocates auxiliary data structures
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
+{
+ size_t elem_size, elem_align;
+
+ elem_align = sizeof(struct myrb_sge);
+ elem_size = cb->host->sg_tablesize * elem_align;
+ cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cb->sg_pool == NULL) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
+ sizeof(struct myrb_dcdb),
+ sizeof(unsigned int), 0);
+ if (!cb->dcdb_pool) {
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cb->work_q_name, sizeof(cb->work_q_name),
+ "myrb_wq_%d", cb->host->host_no);
+ cb->work_q = create_singlethread_workqueue(cb->work_q_name);
+ if (!cb->work_q) {
+ dma_pool_destroy(cb->dcdb_pool);
+ cb->dcdb_pool = NULL;
+ dma_pool_destroy(cb->sg_pool);
+ cb->sg_pool = NULL;
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+ /*
+ * Initialize the Monitoring Timer.
+ */
+ INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
+ queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
+
+ return true;
+}
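Each pool element created above is sized for one command's worth of state (a full scatter-gather table, or one DCDB), so per-command buffers can later be carved out with the standard dma_pool alloc/free pairing. A sketch with illustrative names (sgl/sgl_dma are not from this file):

        struct myrb_sge *sgl;
        dma_addr_t sgl_dma;

        sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &sgl_dma);
        if (!sgl)
                return SCSI_MLQUEUE_HOST_BUSY;
        /* ... fill the SG entries, point the mailbox at sgl_dma ... */
        dma_pool_free(cb->sg_pool, sgl, sgl_dma);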
+
+/**
+ * myrb_destroy_mempools - tears down the memory pools for the controller
+ */
+static void myrb_destroy_mempools(struct myrb_hba *cb)
+{
+ cancel_delayed_work_sync(&cb->monitor_work);
+ destroy_workqueue(cb->work_q);
+
+ dma_pool_destroy(cb->sg_pool);
+ dma_pool_destroy(cb->dcdb_pool);
+}
+
+/**
+ * myrb_reset_cmd - reset command block
+ */
+static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrb_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrb_qcmd - queues command block for execution
+ */
+static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
+
+ cb->write_cmd_mbox(next_mbox, mbox);
+ if (cb->prev_cmd_mbox1->words[0] == 0 ||
+ cb->prev_cmd_mbox2->words[0] == 0)
+ cb->get_cmd_mbox(base);
+ cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
+ cb->prev_cmd_mbox1 = next_mbox;
+ if (++next_mbox > cb->last_cmd_mbox)
+ next_mbox = cb->first_cmd_mbox;
+ cb->next_cmd_mbox = next_mbox;
+}
+
+/**
+ * myrb_exec_cmd - executes command block and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
+ struct myrb_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(cmpl);
+ unsigned long flags;
+
+ cmd_blk->completion = &cmpl;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ WARN_ON(in_interrupt());
+ wait_for_completion(&cmpl);
+ return cmd_blk->status;
+}
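myrb_exec_cmd() parks on an on-stack completion and relies on the interrupt path to post the status and wake it; the consuming side of that handshake has to look roughly like this (a sketch of what the driver's IRQ handler must do, not a quote of it):

        /* in the IRQ handler, after cmd_blk->status has been filled in */
        if (cmd_blk->completion) {
                complete(cmd_blk->completion);
                cmd_blk->completion = NULL;
        }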
+
+/**
+ * myrb_exec_type3 - executes a type 3 command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, dma_addr_t addr)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_DCMD_TAG;
+ mbox->type3.opcode = op;
+ mbox->type3.addr = addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ return status;
+}
+
+/**
+ * myrb_exec_type3D - executes a type 3D command and waits for completion.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
+ enum myrb_cmd_opcode op, struct scsi_device *sdev,
+ struct myrb_pdev_state *pdev_info)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ dma_addr_t pdev_info_addr;
+
+ pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
+ sizeof(struct myrb_pdev_state),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
+ return MYRB_STATUS_SUBSYS_FAILED;
+
+ mutex_lock(&cb->dcmd_mutex);
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.opcode = op;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.addr = pdev_info_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
+ sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
+ if (status == MYRB_STATUS_SUCCESS &&
+ mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
+ myrb_translate_devstate(pdev_info);
+
+ return status;
+}
+
+static char *myrb_event_msg[] = {
+ "killed because write recovery failed",
+ "killed because of SCSI bus reset failure",
+ "killed because of double check condition",
+ "killed because it was removed",
+ "killed because of gross error on SCSI chip",
+ "killed because of bad tag returned from drive",
+ "killed because of timeout on SCSI command",
+ "killed because of reset SCSI command issued from system",
+ "killed because busy or parity error count exceeded limit",
+ "killed because of 'kill drive' command from system",
+ "killed because of selection timeout",
+ "killed due to SCSI phase sequence error",
+ "killed due to unknown status",
+};
+
+/**
+ * myrb_get_event - get event log from HBA
+ * @cb: pointer to the hba structure
+ * @event: number of the event
+ *
+ * Executes a type 3E command and logs the event message.
+ */
+static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_log_entry *ev_buf;
+ dma_addr_t ev_addr;
+ unsigned short status;
+
+ ev_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_log_entry),
+ &ev_addr, GFP_KERNEL);
+ if (!ev_buf)
+ return;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3E.id = MYRB_MCMD_TAG;
+ mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
+ mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
+ mbox->type3E.opqual = 1;
+ mbox->type3E.ev_seq = event;
+ mbox->type3E.addr = ev_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status != MYRB_STATUS_SUCCESS)
+ shost_printk(KERN_INFO, cb->host,
+ "Failed to get event log %d, status %04x\n",
+ event, status);
+
+ else if (ev_buf->seq_num == event) {
+ struct scsi_sense_hdr sshdr;
+
+ memset(&sshdr, 0, sizeof(sshdr));
+ scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
+
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ sshdr.asc == 0x80 &&
+ sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: %s\n",
+ ev_buf->channel, ev_buf->target,
+ myrb_event_msg[sshdr.ascq]);
+ else
+ shost_printk(KERN_CRIT, cb->host,
+ "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
+ ev_buf->channel, ev_buf->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ }
+
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
+ ev_buf, ev_addr);
+}
+
+/**
+ * myrb_get_errtable - retrieves the error table from the controller
+ *
+ * Executes a type 3 command and logs the error table from the controller.
+ */
+static void myrb_get_errtable(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+ struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
+
+ memcpy(&old_table, cb->err_table, sizeof(old_table));
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
+ mbox->type3.addr = cb->err_table_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ struct myrb_error_entry *table = cb->err_table;
+ struct myrb_error_entry *new, *old;
+ size_t err_table_offset;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, cb->host) {
+ if (sdev->channel >= myrb_logical_channel(cb->host))
+ continue;
+ err_table_offset = sdev->channel * MYRB_MAX_TARGETS
+ + sdev->id;
+ new = table + err_table_offset;
+ old = &old_table[err_table_offset];
+ if (new->parity_err == old->parity_err &&
+ new->soft_err == old->soft_err &&
+ new->hard_err == old->hard_err &&
+ new->misc_err == old->misc_err)
+ continue;
+ sdev_printk(KERN_CRIT, sdev,
+ "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
+ new->parity_err, new->soft_err,
+ new->hard_err, new->misc_err);
+ }
+ }
+}
+
+/**
+ * myrb_get_ldev_info - retrieves the logical device table from the controller
+ *
+ * Executes a type 3 command and updates the logical device table.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
+{
+ unsigned short status;
+ int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
+ struct Scsi_Host *shost = cb->host;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
+ cb->ldev_info_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
+ struct myrb_ldev_info *old = NULL;
+ struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ if (!sdev) {
+ if (new->state == MYRB_DEVICE_OFFLINE)
+ continue;
+ shost_printk(KERN_INFO, shost,
+ "Adding Logical Drive %d in state %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ scsi_add_device(shost, myrb_logical_channel(shost),
+ ldev_num, 0);
+ continue;
+ }
+ old = sdev->hostdata;
+ if (new->state != old->state)
+ shost_printk(KERN_INFO, shost,
+ "Logical Drive %d is now %s\n",
+ ldev_num, myrb_devstate_name(new->state));
+ if (new->wb_enabled != old->wb_enabled)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical Drive is now WRITE %s\n",
+ (new->wb_enabled ? "BACK" : "THRU"));
+ memcpy(old, new, sizeof(*new));
+ scsi_device_put(sdev);
+ }
+ return status;
+}
+
+/**
+ * myrb_get_rbld_progress - get rebuild progress information
+ *
+ * Executes a type 3 command and returns the rebuild progress
+ * information.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
+ struct myrb_rbld_progress *rbld)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf)
+ return MYRB_STATUS_RBLD_NOT_CHECKED;
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (rbld)
+ memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+ return status;
+}
+
+/**
+ * myrb_update_rbld_progress - updates the rebuild status
+ *
+ * Updates the rebuild status for the attached logical devices.
+ *
+ */
+static void myrb_update_rbld_progress(struct myrb_hba *cb)
+{
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
+ cb->last_rbld_status == MYRB_STATUS_SUCCESS)
+ status = MYRB_STATUS_RBLD_SUCCESS;
+ if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
+ unsigned int blocks_done =
+ rbld_buf.ldev_size - rbld_buf.blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ rbld_buf.ldev_num, 0);
+ if (!sdev)
+ return;
+
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
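+ /* shift both operands down to avoid 32-bit overflow in 100 * blocks_done */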
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild in Progress, %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (rbld_buf.ldev_size >> 7));
+ break;
+ case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Logical Drive Failure\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Bad Blocks on Other Drives\n");
+ break;
+ case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Completed Successfully\n");
+ break;
+ case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Successfully Terminated\n");
+ break;
+ default:
+ break;
+ }
+ scsi_device_put(sdev);
+ }
+ cb->last_rbld_status = status;
+}
+
+/**
+ * myrb_get_cc_progress - retrieve the consistency check progress
+ *
+ * Executes a type 3 command and fetches the rebuild / consistency check
+ * status.
+ */
+static void myrb_get_cc_progress(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_rbld_progress *rbld_buf;
+ dma_addr_t rbld_addr;
+ unsigned short status;
+
+ rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
+ sizeof(struct myrb_rbld_progress),
+ &rbld_addr, GFP_KERNEL);
+ if (!rbld_buf) {
+ cb->need_cc_status = true;
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3.id = MYRB_MCMD_TAG;
+ mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
+ mbox->type3.addr = rbld_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ if (status == MYRB_STATUS_SUCCESS) {
+ unsigned int ldev_num = rbld_buf->ldev_num;
+ unsigned int ldev_size = rbld_buf->ldev_size;
+ unsigned int blocks_done =
+ ldev_size - rbld_buf->blocks_left;
+ struct scsi_device *sdev;
+
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ ldev_num, 0);
+ if (sdev) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check in Progress: %d%% completed\n",
+ (100 * (blocks_done >> 7))
+ / (ldev_size >> 7));
+ scsi_device_put(sdev);
+ }
+ }
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
+ rbld_buf, rbld_addr);
+}
+
+/**
+ * myrb_bgi_control - updates background initialisation status
+ *
+ * Executes a type 3B command and updates the background initialisation status
+ */
+static void myrb_bgi_control(struct myrb_hba *cb)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_bgi_status *bgi, *last_bgi;
+ dma_addr_t bgi_addr;
+ struct scsi_device *sdev = NULL;
+ unsigned short status;
+
+ bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ &bgi_addr, GFP_KERNEL);
+ if (!bgi) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate bgi memory\n");
+ return;
+ }
+ myrb_reset_cmd(cmd_blk);
+ mbox->type3B.id = MYRB_DCMD_TAG;
+ mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
+ mbox->type3B.optype = 0x20;
+ mbox->type3B.addr = bgi_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ last_bgi = &cb->bgi_status;
+ sdev = scsi_device_lookup(cb->host,
+ myrb_logical_channel(cb->host),
+ bgi->ldev_num, 0);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ switch (bgi->status) {
+ case MYRB_BGI_INVALID:
+ break;
+ case MYRB_BGI_STARTED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Started\n");
+ break;
+ case MYRB_BGI_INPROGRESS:
+ if (!sdev)
+ break;
+ if (bgi->blocks_done == last_bgi->blocks_done &&
+ bgi->ldev_num == last_bgi->ldev_num)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization in Progress: %d%% completed\n",
+ (100 * (bgi->blocks_done >> 7))
+ / (bgi->ldev_size >> 7));
+ break;
+ case MYRB_BGI_SUSPENDED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Suspended\n");
+ break;
+ case MYRB_BGI_CANCELLED:
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Cancelled\n");
+ break;
+ }
+ memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
+ break;
+ case MYRB_STATUS_BGI_SUCCESS:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Completed Successfully\n");
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ case MYRB_STATUS_BGI_ABORTED:
+ if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Background Initialization Aborted\n");
+ /* Fallthrough */
+ case MYRB_STATUS_NO_BGI_INPROGRESS:
+ cb->bgi_status.status = MYRB_BGI_INVALID;
+ break;
+ }
+ if (sdev)
+ scsi_device_put(sdev);
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
+ bgi, bgi_addr);
+}
+
+/**
+ * myrb_hba_enquiry - updates the controller status
+ *
+ * Executes a DAC_V1_Enquiry command and updates the controller status.
+ *
+ * Return: command status
+ */
+static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
+{
+ struct myrb_enquiry old, *new;
+ unsigned short status;
+
+ memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
+ if (status != MYRB_STATUS_SUCCESS)
+ return status;
+
+ new = cb->enquiry;
+ if (new->ldev_count > old.ldev_count) {
+ int ldev_num = old.ldev_count - 1;
+
+ while (++ldev_num < new->ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d Now Exists\n",
+ ldev_num);
+ }
+ if (new->ldev_count < old.ldev_count) {
+ int ldev_num = new->ldev_count - 1;
+
+ while (++ldev_num < old.ldev_count)
+ shost_printk(KERN_CRIT, cb->host,
+ "Logical Drive %d No Longer Exists\n",
+ ldev_num);
+ }
+ if (new->status.deferred != old.status.deferred)
+ shost_printk(KERN_CRIT, cb->host,
+ "Deferred Write Error Flag is now %s\n",
+ (new->status.deferred ? "TRUE" : "FALSE"));
+ if (new->ev_seq != old.ev_seq) {
+ cb->new_ev_seq = new->ev_seq;
+ cb->need_err_info = true;
+ shost_printk(KERN_INFO, cb->host,
+ "Event log %d/%d (%d/%d) available\n",
+ cb->old_ev_seq, cb->new_ev_seq,
+ old.ev_seq, new->ev_seq);
+ }
+ if ((new->ldev_critical > 0 &&
+ new->ldev_critical != old.ldev_critical) ||
+ (new->ldev_offline > 0 &&
+ new->ldev_offline != old.ldev_offline) ||
+ (new->ldev_count != old.ldev_count)) {
+ shost_printk(KERN_INFO, cb->host,
+ "Logical drive count changed (%d/%d/%d)\n",
+ new->ldev_critical,
+ new->ldev_offline,
+ new->ldev_count);
+ cb->need_ldev_info = true;
+ }
+ if (new->pdev_dead > 0 ||
+ new->pdev_dead != old.pdev_dead ||
+ time_after_eq(jiffies, cb->secondary_monitor_time
+ + MYRB_SECONDARY_MONITOR_INTERVAL)) {
+ cb->need_bgi_status = cb->bgi_status_supported;
+ cb->secondary_monitor_time = jiffies;
+ }
+ if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
+ old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
+ cb->need_rbld = true;
+ cb->rbld_first = (new->ldev_critical < old.ldev_critical);
+ }
+ if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ switch (new->rbld) {
+ case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed Successfully\n");
+ break;
+ case MYRB_STDBY_RBLD_IN_PROGRESS:
+ case MYRB_BG_RBLD_IN_PROGRESS:
+ break;
+ case MYRB_BG_CHECK_IN_PROGRESS:
+ cb->need_cc_status = true;
+ break;
+ case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Completed with Error\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Physical Device Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Logical Drive Failed\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Failed - Other Causes\n");
+ break;
+ case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
+ shost_printk(KERN_INFO, cb->host,
+ "Consistency Check Successfully Terminated\n");
+ break;
+ }
+ else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
+ cb->need_cc_status = true;
+
+ return MYRB_STATUS_SUCCESS;
+}
+
+/**
+ * myrb_set_pdev_state - sets the device state for a physical device
+ *
+ * Return: command status
+ */
+static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
+ struct scsi_device *sdev, enum myrb_devstate state)
+{
+ struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned short status;
+
+ mutex_lock(&cb->dcmd_mutex);
+ mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ mbox->type3D.state = state & 0x1F;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+
+ return status;
+}
+
+/**
+ * myrb_enable_mmio - enables the Memory Mailbox Interface
+ *
+ * PD and P controller types have no memory mailbox, but still need the
+ * other dma mapped memory.
+ *
+ * Return: true on success, false otherwise.
+ */
+static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
+{
+ void __iomem *base = cb->io_base;
+ struct pci_dev *pdev = cb->pdev;
+ size_t err_table_size;
+ size_t ldev_info_size;
+ union myrb_cmd_mbox *cmd_mbox_mem;
+ struct myrb_stat_mbox *stat_mbox_mem;
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+
+ memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ cb->enquiry = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrb_enquiry),
+ &cb->enquiry_addr, GFP_KERNEL);
+ if (!cb->enquiry)
+ return false;
+
+ err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
+ &cb->err_table_addr, GFP_KERNEL);
+ if (!cb->err_table)
+ return false;
+
+ ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
+ cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
+ &cb->ldev_info_addr, GFP_KERNEL);
+ if (!cb->ldev_info_buf)
+ return false;
+
+ /*
+ * Skip mailbox initialisation for PD and P Controllers
+ */
+ if (!mmio_init_fn)
+ return true;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cb->cmd_mbox_size = MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
+ cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->cmd_mbox_size,
+ &cb->cmd_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_cmd_mbox)
+ return false;
+
+ cmd_mbox_mem = cb->first_cmd_mbox;
+ cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
+ cb->last_cmd_mbox = cmd_mbox_mem;
+ cb->next_cmd_mbox = cb->first_cmd_mbox;
+ cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
+ cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
+ sizeof(struct myrb_stat_mbox);
+ cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
+ cb->stat_mbox_size,
+ &cb->stat_mbox_addr,
+ GFP_KERNEL);
+ if (!cb->first_stat_mbox)
+ return false;
+
+ stat_mbox_mem = cb->first_stat_mbox;
+ stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
+ cb->last_stat_mbox = stat_mbox_mem;
+ cb->next_stat_mbox = cb->first_stat_mbox;
+
+ /* Enable the Memory Mailbox Interface. */
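+ /*
+ * opcode2 0x14 requests the dual mode interface; if the controller
+ * rejects it we retry below with 0x10 to fall back to single mode.
+ */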
+ cb->dual_mode_interface = true;
+ mbox.typeX.opcode = 0x2B;
+ mbox.typeX.id = 0;
+ mbox.typeX.opcode2 = 0x14;
+ mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
+ mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
+
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ cb->dual_mode_interface = false;
+ mbox.typeX.opcode2 = 0x10;
+ status = mmio_init_fn(pdev, base, &mbox);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_err(&pdev->dev,
+ "Failed to enable mailbox, statux %02X\n",
+ status);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * myrb_get_hba_config - reads the configuration information
+ *
+ * Reads the configuration information from the controller and
+ * initializes the controller structure.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+static int myrb_get_hba_config(struct myrb_hba *cb)
+{
+ struct myrb_enquiry2 *enquiry2;
+ dma_addr_t enquiry2_addr;
+ struct myrb_config2 *config2;
+ dma_addr_t config2_addr;
+ struct Scsi_Host *shost = cb->host;
+ struct pci_dev *pdev = cb->pdev;
+ int pchan_max = 0, pchan_cur = 0;
+ unsigned short status;
+ int ret = -ENODEV, memsize = 0;
+
+ enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ &enquiry2_addr, GFP_KERNEL);
+ if (!enquiry2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 enquiry2 memory\n");
+ return -ENOMEM;
+ }
+ config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ &config2_addr, GFP_KERNEL);
+ if (!config2) {
+ shost_printk(KERN_ERR, cb->host,
+ "Failed to allocate V1 config2 memory\n");
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dma_mutex);
+ status = myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed it issue V1 Enquiry\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue V1 Enquiry2\n");
+ goto out_free;
+ }
+
+ status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to issue ReadConfig2\n");
+ goto out_free;
+ }
+
+ status = myrb_get_ldev_info(cb);
+ if (status != MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Failed to get logical drive information\n");
+ goto out_free;
+ }
+
+ /*
+ * Initialize the Controller Model Name and Full Model Name fields.
+ */
+ switch (enquiry2->hw.sub_model) {
+ case DAC960_V1_P_PD_PU:
+ if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
+ strcpy(cb->model_name, "DAC960PU");
+ else
+ strcpy(cb->model_name, "DAC960PD");
+ break;
+ case DAC960_V1_PL:
+ strcpy(cb->model_name, "DAC960PL");
+ break;
+ case DAC960_V1_PG:
+ strcpy(cb->model_name, "DAC960PG");
+ break;
+ case DAC960_V1_PJ:
+ strcpy(cb->model_name, "DAC960PJ");
+ break;
+ case DAC960_V1_PR:
+ strcpy(cb->model_name, "DAC960PR");
+ break;
+ case DAC960_V1_PT:
+ strcpy(cb->model_name, "DAC960PT");
+ break;
+ case DAC960_V1_PTL0:
+ strcpy(cb->model_name, "DAC960PTL0");
+ break;
+ case DAC960_V1_PRL:
+ strcpy(cb->model_name, "DAC960PRL");
+ break;
+ case DAC960_V1_PTL1:
+ strcpy(cb->model_name, "DAC960PTL1");
+ break;
+ case DAC960_V1_1164P:
+ strcpy(cb->model_name, "eXtremeRAID 1100");
+ break;
+ default:
+ shost_printk(KERN_WARNING, cb->host,
+ "Unknown Model %X\n",
+ enquiry2->hw.sub_model);
+ goto out;
+ }
+ /*
+ * Initialize the Controller Firmware Version field and verify that it
+ * is a supported firmware version.
+ * The supported firmware versions are:
+ *
+ * DAC1164P 5.06 and above
+ * DAC960PTL/PRL/PJ/PG 4.06 and above
+ * DAC960PU/PD/PL 3.51 and above
+ * DAC960PU/PD/PL/P 2.73 and above
+ */
+#if defined(CONFIG_ALPHA)
+ /*
+ * DEC Alpha machines were often equipped with DAC960 cards that were
+ * OEMed from Mylex, and had their own custom firmware. Version 2.70,
+ * the last custom FW revision to be released by DEC for these older
+ * controllers, appears to work quite well with this driver.
+ *
+ * Cards tested successfully were several versions each of the PD and
+ * PU, called by DEC the KZPSC and KZPAC, respectively, and having
+ * the Manufacturer Numbers (from Mylex), usually on a sticker on the
+ * back of the board, of:
+ *
+ * KZPSC: D040347 (1-channel) or D040348 (2-channel)
+ * or D040349 (3-channel)
+ * KZPAC: D040395 (1-channel) or D040396 (2-channel)
+ * or D040397 (3-channel)
+ */
+# define FIRMWARE_27X "2.70"
+#else
+# define FIRMWARE_27X "2.73"
+#endif
+
+ if (enquiry2->fw.major_version == 0) {
+ enquiry2->fw.major_version = cb->enquiry->fw_major_version;
+ enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
+ enquiry2->fw.firmware_type = '0';
+ enquiry2->fw.turn_id = 0;
+ }
+ sprintf(cb->fw_version, "%d.%02d-%c-%02d",
+ enquiry2->fw.major_version,
+ enquiry2->fw.minor_version,
+ enquiry2->fw.firmware_type,
+ enquiry2->fw.turn_id);
+ if (!((enquiry2->fw.major_version == 5 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 4 &&
+ enquiry2->fw.minor_version >= 6) ||
+ (enquiry2->fw.major_version == 3 &&
+ enquiry2->fw.minor_version >= 51) ||
+ (enquiry2->fw.major_version == 2 &&
+ strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
+ shost_printk(KERN_WARNING, cb->host,
+ "Firmware Version '%s' unsupported\n",
+ cb->fw_version);
+ goto out;
+ }
+ /*
+ * Initialize the Channels, Targets, Memory Size, and SAF-TE
+ * Enclosure Management Enabled fields.
+ */
+ switch (enquiry2->hw.model) {
+ case MYRB_5_CHANNEL_BOARD:
+ pchan_max = 5;
+ break;
+ case MYRB_3_CHANNEL_BOARD:
+ case MYRB_3_CHANNEL_ASIC_DAC:
+ pchan_max = 3;
+ break;
+ case MYRB_2_CHANNEL_BOARD:
+ pchan_max = 2;
+ break;
+ default:
+ pchan_max = enquiry2->cfg_chan;
+ break;
+ }
+ pchan_cur = enquiry2->cur_chan;
+ if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
+ cb->bus_width = 32;
+ else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
+ cb->bus_width = 16;
+ else
+ cb->bus_width = 8;
+ cb->ldev_block_size = enquiry2->ldev_block_size;
+ shost->max_channel = pchan_cur;
+ shost->max_id = enquiry2->max_targets;
+ memsize = enquiry2->mem_size >> 20;
+ cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at most one less than the
+ * Controller Queue Depth to allow for an automatic drive
+ * rebuild operation.
+ */
+ shost->can_queue = cb->enquiry->max_tcq;
+ if (shost->can_queue < 3)
+ shost->can_queue = enquiry2->max_cmds;
+ if (shost->can_queue < 3)
+ /* Play safe and disable TCQ */
+ shost->can_queue = 1;
+
+ if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
+ shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
+ shost->max_sectors = enquiry2->max_sectors;
+ shost->sg_tablesize = enquiry2->max_sge;
+ if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
+ shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
+ /*
+ * Initialize the Stripe Size, Segment Size, and Geometry Translation.
+ */
+ cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
+ >> (10 - MYRB_BLKSIZE_BITS);
+ /* Assume 255/63 translation */
+ cb->ldev_geom_heads = 255;
+ cb->ldev_geom_sectors = 63;
+ if (config2->drive_geometry) {
+ cb->ldev_geom_heads = 128;
+ cb->ldev_geom_sectors = 32;
+ }
+
+ /*
+ * Initialize the Background Initialization Status.
+ */
+ if ((cb->fw_version[0] == '4' &&
+ strcmp(cb->fw_version, "4.08") >= 0) ||
+ (cb->fw_version[0] == '5' &&
+ strcmp(cb->fw_version, "5.08") >= 0)) {
+ cb->bgi_status_supported = true;
+ myrb_bgi_control(cb);
+ }
+ cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
+ ret = 0;
+
+out:
+ shost_printk(KERN_INFO, cb->host,
+ "Configuring %s PCI RAID Controller\n", cb->model_name);
+ shost_printk(KERN_INFO, cb->host,
+ " Firmware Version: %s, Memory Size: %dMB\n",
+ cb->fw_version, memsize);
+ if (cb->io_addr == 0)
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->pci_addr, cb->irq);
+ else
+ shost_printk(KERN_INFO, cb->host,
+ " I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
+ (unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
+ cb->irq);
+ shost_printk(KERN_INFO, cb->host,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ cb->host->can_queue, cb->host->max_sectors);
+ shost_printk(KERN_INFO, cb->host,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ cb->host->can_queue, cb->host->sg_tablesize,
+ MYRB_SCATTER_GATHER_LIMIT);
+ shost_printk(KERN_INFO, cb->host,
+ " Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
+ cb->stripe_size, cb->segment_size,
+ cb->ldev_geom_heads, cb->ldev_geom_sectors,
+ cb->safte_enabled ?
+ " SAF-TE Enclosure Management Enabled" : "");
+ shost_printk(KERN_INFO, cb->host,
+ " Physical: %d/%d channels %d/%d/%d devices\n",
+ pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
+ cb->host->max_id);
+
+ shost_printk(KERN_INFO, cb->host,
+ " Logical: 1/1 channels, %d/%d disks\n",
+ cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
+ enquiry2, enquiry2_addr);
+ dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
+ config2, config2_addr);
+
+ return ret;
+}
+
+/**
+ * myrb_unmap - unmaps controller structures
+ */
+static void myrb_unmap(struct myrb_hba *cb)
+{
+ if (cb->ldev_info_buf) {
+ size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
+ MYRB_MAX_LDEVS;
+ dma_free_coherent(&cb->pdev->dev, ldev_info_size,
+ cb->ldev_info_buf, cb->ldev_info_addr);
+ cb->ldev_info_buf = NULL;
+ }
+ if (cb->err_table) {
+ size_t err_table_size = sizeof(struct myrb_error_entry) *
+ MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
+ dma_free_coherent(&cb->pdev->dev, err_table_size,
+ cb->err_table, cb->err_table_addr);
+ cb->err_table = NULL;
+ }
+ if (cb->enquiry) {
+ dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
+ cb->enquiry, cb->enquiry_addr);
+ cb->enquiry = NULL;
+ }
+ if (cb->first_stat_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
+ cb->first_stat_mbox, cb->stat_mbox_addr);
+ cb->first_stat_mbox = NULL;
+ }
+ if (cb->first_cmd_mbox) {
+ dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
+ cb->first_cmd_mbox, cb->cmd_mbox_addr);
+ cb->first_cmd_mbox = NULL;
+ }
+}
+
+/**
+ * myrb_cleanup - cleanup controller structures
+ */
+static void myrb_cleanup(struct myrb_hba *cb)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrb_unmap(cb);
+
+ if (cb->mmio_base) {
+ cb->disable_intr(cb->io_base);
+ iounmap(cb->mmio_base);
+ }
+ if (cb->irq)
+ free_irq(cb->irq, cb);
+ if (cb->io_addr)
+ release_region(cb->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cb->host);
+}
+
+static int myrb_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrb_hba *cb = shost_priv(shost);
+
+ cb->reset(cb->io_base);
+ return SUCCESS;
+}
+
+static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ int nsge;
+
+ myrb_reset_cmd(cmd_blk);
+ dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
+ if (!dcdb)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ nsge = scsi_dma_map(scmd);
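+ /* A DCDB passthrough command can only address a single data segment */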
+ if (nsge > 1) {
+ dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
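+ /*
+ * Low command ids are reserved for the driver's internal
+ * (MYRB_DCMD_TAG/MYRB_MCMD_TAG) commands, so hardware ids for
+ * SCSI commands start at tag + 3.
+ */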
+ mbox->type3.opcode = MYRB_CMD_DCDB;
+ mbox->type3.id = scmd->request->tag + 3;
+ mbox->type3.addr = dcdb_addr;
+ dcdb->channel = sdev->channel;
+ dcdb->target = sdev->id;
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
+ break;
+ case DMA_TO_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
+ break;
+ case DMA_FROM_DEVICE:
+ dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
+ break;
+ default:
+ dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
+ break;
+ }
+ dcdb->early_status = false;
+ if (scmd->request->timeout <= 10)
+ dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
+ else if (scmd->request->timeout <= 60)
+ dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
+ else if (scmd->request->timeout <= 600)
+ dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
+ else
+ dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
+ dcdb->no_autosense = false;
+ dcdb->allow_disconnect = true;
+ sgl = scsi_sglist(scmd);
+ dcdb->dma_addr = sg_dma_address(sgl);
+ if (sg_dma_len(sgl) > USHRT_MAX) {
+ dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
+ dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
+ } else {
+ dcdb->xfer_len_lo = sg_dma_len(sgl);
+ dcdb->xfer_len_hi4 = 0;
+ }
+ dcdb->cdb_len = scmd->cmd_len;
+ dcdb->sense_len = sizeof(dcdb->sense);
+ memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return 0;
+}
+
+static void myrb_inquiry(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char inq[36] = {
+ 0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
+ 0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20,
+ };
+
+ if (cb->bus_width > 16)
+ inq[7] |= 1 << 6;
+ if (cb->bus_width > 8)
+ inq[7] |= 1 << 5;
+ memcpy(&inq[16], cb->model_name, 16);
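+ /* condense the "major.minor-type-turn" firmware string into the 4-byte revision field */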
+ memcpy(&inq[32], cb->fw_version, 1);
+ memcpy(&inq[33], &cb->fw_version[2], 2);
+ memcpy(&inq[35], &cb->fw_version[7], 1);
+
+ scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
+}
+
+static void
+myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
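+ /* The DBD bit in CDB byte 1 suppresses the 8-byte block descriptor */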
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->size, &block_desc[0]);
+ /* the block length is a 3-byte field at bytes 5-7 */
+ block_desc[5] = (cb->ldev_block_size >> 16) & 0xff;
+ put_unaligned_be16(cb->ldev_block_size & 0xffff, &block_desc[6]);
+ }
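+ /*
+ * Caching mode page (0x08): set WCE when write-back caching is
+ * enabled, and SIZE plus the cache segment size when known.
+ */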
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->wb_enabled)
+ mode_pg[2] |= 0x04;
+ if (cb->segment_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(cb->segment_size, &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
+
+static void myrb_request_sense(struct myrb_hba *cb,
+ struct scsi_cmnd *scmd)
+{
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NO_SENSE, 0, 0);
+ scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
+ SCSI_SENSE_BUFFERSIZE);
+}
+
+static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
+ struct myrb_ldev_info *ldev_info)
+{
+ unsigned char data[8];
+
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Capacity %u, blocksize %u\n",
+ ldev_info->size, cb->ldev_block_size);
+ put_unaligned_be32(ldev_info->size - 1, &data[0]);
+ put_unaligned_be32(cb->ldev_block_size, &data[4]);
+ scsi_sg_copy_from_buffer(scmd, data, 8);
+}
+
+static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrb_hba *cb = shost_priv(shost);
+ struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct myrb_ldev_info *ldev_info;
+ struct scsi_device *sdev = scmd->device;
+ struct scatterlist *sgl;
+ unsigned long flags;
+ u64 lba;
+ u32 block_cnt;
+ int nsge;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info ||
+ (ldev_info->state != MYRB_DEVICE_ONLINE &&
+ ldev_info->state != MYRB_DEVICE_WO)) {
+ dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
+ sdev->id, ldev_info ? ldev_info->state : 0xff);
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ switch (scmd->cmnd[0]) {
+ case TEST_UNIT_READY:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case INQUIRY:
+ if (scmd->cmnd[1] & 1) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_inquiry(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case SYNCHRONIZE_CACHE:
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrb_mode_sense(cb, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_CAPACITY:
+ if ((scmd->cmnd[1] & 1) ||
+ (scmd->cmnd[8] & 1)) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ if (lba) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ myrb_read_capacity(cb, scmd, ldev_info);
+ scmd->scsi_done(scmd);
+ return 0;
+ case REQUEST_SENSE:
+ myrb_request_sense(cb, scmd);
+ scmd->result = (DID_OK << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ case SEND_DIAGNOSTIC:
+ if (scmd->cmnd[1] != 0x04) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ /* Assume good status */
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ case READ_6:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
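+ /* Fallthrough */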
+ case WRITE_6:
+ lba = (((scmd->cmnd[1] & 0x1F) << 16) |
+ (scmd->cmnd[2] << 8) |
+ scmd->cmnd[3]);
+ block_cnt = scmd->cmnd[4];
+ break;
+ case READ_10:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
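+ /* Fallthrough */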
+ case WRITE_10:
+ case VERIFY: /* 0x2F */
+ case WRITE_VERIFY: /* 0x2E */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
+ break;
+ case READ_12:
+ if (ldev_info->state == MYRB_DEVICE_WO) {
+ /* Data protect, attempt to read invalid data */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ DATA_PROTECT, 0x21, 0x06);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
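+ /* Fallthrough */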
+ case WRITE_12:
+ case VERIFY_12: /* 0xAF */
+ case WRITE_VERIFY_12: /* 0xAE */
+ lba = get_unaligned_be32(&scmd->cmnd[2]);
+ block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
+ break;
+ default:
+ /* Illegal request, invalid opcode */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x20, 0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ myrb_reset_cmd(cmd_blk);
+ mbox->type5.id = scmd->request->tag + 3;
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = (u32)sg_dma_address(sgl);
+ } else {
+ struct myrb_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
+ if (!hw_sgl) {
+ scsi_dma_unmap(scmd);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mbox->type5.opcode = MYRB_CMD_READ_SG;
+ else
+ mbox->type5.opcode = MYRB_CMD_WRITE_SG;
+
+ mbox->type5.ld.xfer_len = block_cnt;
+ mbox->type5.ld.ldev_num = sdev->id;
+ mbox->type5.lba = lba;
+ mbox->type5.addr = hw_sgl_addr;
+ mbox->type5.sg_count = nsge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u32)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ cb->qcmd(cb, cmd_blk);
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+
+ return 0;
+}
+
+static int myrb_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct scsi_device *sdev = scmd->device;
+
+ if (sdev->channel > myrb_logical_channel(shost)) {
+ scmd->result = (DID_BAD_TARGET << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ if (sdev->channel == myrb_logical_channel(shost))
+ return myrb_ldev_queuecommand(shost, scmd);
+
+ return myrb_pthru_queuecommand(shost, scmd);
+}
+
+static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info;
+ unsigned short ldev_num = sdev->id;
+ enum raid_level level;
+
+ if (ldev_num >= MYRB_MAX_LDEVS)
+ return -ENXIO;
+
+ ldev_info = cb->ldev_info_buf + ldev_num;
+
+ sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
+ if (!sdev->hostdata)
+ return -ENOMEM;
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc ldev %d state %x\n",
+ ldev_num, ldev_info->state);
+ memcpy(sdev->hostdata, ldev_info,
+ sizeof(*ldev_info));
+ switch (ldev_info->raid_level) {
+ case MYRB_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRB_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRB_RAID_LEVEL3:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRB_RAID_LEVEL5:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRB_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRB_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
+ return 0;
+}
+
+static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ unsigned short status;
+
+ if (sdev->id >= MYRB_MAX_TARGETS)
+ return -ENXIO;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS) {
+ dev_dbg(&sdev->sdev_gendev,
+ "Failed to get device state, status %x\n",
+ status);
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ dev_dbg(&sdev->sdev_gendev,
+ "device not present, skip\n");
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ dev_dbg(&sdev->sdev_gendev,
+ "slave alloc pdev %d:%d state %x\n",
+ sdev->channel, sdev->id, pdev_info->state);
+ sdev->hostdata = pdev_info;
+
+ return 0;
+}
+
+static int myrb_slave_alloc(struct scsi_device *sdev)
+{
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ if (sdev->channel == myrb_logical_channel(sdev->host))
+ return myrb_ldev_slave_alloc(sdev);
+
+ return myrb_pdev_slave_alloc(sdev);
+}
+
+static int myrb_slave_configure(struct scsi_device *sdev)
+{
+ struct myrb_ldev_info *ldev_info;
+
+ if (sdev->channel > myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host)) {
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->state != MYRB_DEVICE_ONLINE)
+ sdev_printk(KERN_INFO, sdev,
+ "Logical drive is %s\n",
+ myrb_devstate_name(ldev_info->state));
+
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrb_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ struct myrb_hba *cb = shost_priv(sdev->host);
+
+ geom[0] = cb->ldev_geom_heads;
+ geom[1] = cb->ldev_geom_sectors;
+ geom[2] = sector_div(capacity, geom[0] * geom[1]);
+
+ return 0;
+}
+
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrb_devstate_name(ldev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->state);
+ } else {
+ struct myrb_pdev_state *pdev_info = sdev->hostdata;
+ unsigned short status;
+ const char *name;
+
+ status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
+ sdev, pdev_info);
+ if (status != MYRB_STATUS_SUCCESS)
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device state, status %x\n",
+ status);
+
+ if (!pdev_info->present)
+ name = "Removed";
+ else
+ name = myrb_devstate_name(pdev_info->state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->state);
+ }
+ return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_pdev_state *pdev_info;
+ enum myrb_devstate new_state;
+ unsigned short status;
+
+ if (!strncmp(buf, "kill", 4) ||
+ !strncmp(buf, "offline", 7))
+ new_state = MYRB_DEVICE_DEAD;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRB_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRB_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ pdev_info = sdev->hostdata;
+ if (!pdev_info) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - no physical device information\n");
+ return -ENXIO;
+ }
+ if (!pdev_info->present) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - device not present\n");
+ return -ENXIO;
+ }
+
+ if (pdev_info->state == new_state)
+ return count;
+
+ status = myrb_set_pdev_state(cb, sdev, new_state);
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ break;
+ case MYRB_STATUS_START_DEVICE_FAILED:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unable to Start Device\n");
+ count = -EAGAIN;
+ break;
+ case MYRB_STATUS_NO_DEVICE:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - No Device at Address\n");
+ count = -ENODEV;
+ break;
+ case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Invalid Channel or Target or Modifier\n");
+ count = -EINVAL;
+ break;
+ case MYRB_STATUS_CHANNEL_BUSY:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Channel Busy\n");
+ count = -EBUSY;
+ break;
+ default:
+ sdev_printk(KERN_INFO, sdev,
+ "Failed - Unexpected Status %04X\n", status);
+ count = -EIO;
+ break;
+ }
+ return count;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ if (sdev->channel == myrb_logical_channel(sdev->host)) {
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ if (!ldev_info)
+ return -ENXIO;
+
+ name = myrb_raidlevel_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->state);
+ return snprintf(buf, 32, "%s\n", name);
+ }
+ return snprintf(buf, 32, "Physical Drive\n");
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+
+ if (status != MYRB_STATUS_SUCCESS ||
+ rbld_buf.ldev_num != sdev->id)
+ return snprintf(buf, 32, "not rebuilding\n");
+
+ return snprintf(buf, 32, "rebuilding block %u of %u\n",
+ rbld_buf.ldev_size - rbld_buf.blocks_left,
+ rbld_buf.ldev_size);
+}
+
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel >= myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
+ mbox->type3D.id = MYRB_DCMD_TAG;
+ mbox->type3D.channel = sdev->channel;
+ mbox->type3D.target = sdev->id;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (status != MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; not in progress\n");
+ return 0;
+ }
+
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Rebuild Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Attempt to Rebuild Online or Unresponsive Drive";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid Device Address";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ struct myrb_cmdblk *cmd_blk;
+ union myrb_cmd_mbox *mbox;
+ unsigned short status;
+ int rc, start;
+ const char *msg;
+
+ rc = kstrtoint(buf, 0, &start);
+ if (rc)
+ return rc;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return -ENXIO;
+
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (start) {
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
+ mbox->type3C.id = MYRB_DCMD_TAG;
+ mbox->type3C.ldev_num = sdev->id;
+ mbox->type3C.auto_restore = true;
+
+ status = myrb_exec_cmd(cb, cmd_blk);
+ mutex_unlock(&cb->dcmd_mutex);
+ } else {
+ struct pci_dev *pdev = cb->pdev;
+ unsigned char *rate;
+ dma_addr_t rate_addr;
+
+ if (status != MYRB_STATUS_SUCCESS ||
+ rbld_buf.ldev_num != sdev->id) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled; not in progress\n");
+ return 0;
+ }
+ rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
+ &rate_addr, GFP_KERNEL);
+ if (rate == NULL) {
+ sdev_printk(KERN_INFO, sdev,
+ "Cancellation of Check Consistency Failed - Out of Memory\n");
+ return -ENOMEM;
+ }
+ mutex_lock(&cb->dcmd_mutex);
+ cmd_blk = &cb->dcmd_blk;
+ myrb_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
+ mbox->type3R.id = MYRB_DCMD_TAG;
+ mbox->type3R.rbld_rate = 0xFF;
+ mbox->type3R.addr = rate_addr;
+ status = myrb_exec_cmd(cb, cmd_blk);
+ dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
+ mutex_unlock(&cb->dcmd_mutex);
+ }
+ if (status == MYRB_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
+ start ? "Initiated" : "Cancelled");
+ return count;
+ }
+ if (!start) {
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Not Cancelled, status 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ switch (status) {
+ case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
+ msg = "Dependent Physical Device is DEAD";
+ break;
+ case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
+ msg = "New Disk Failed During Rebuild";
+ break;
+ case MYRB_STATUS_INVALID_ADDRESS:
+ msg = "Invalid or Nonredundant Logical Drive";
+ break;
+ case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
+ msg = "Already in Progress";
+ break;
+ default:
+ msg = NULL;
+ break;
+ }
+ if (msg)
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed - %s\n", msg);
+ else
+ sdev_printk(KERN_INFO, sdev,
+ "Check Consistency Failed, status 0x%x\n", status);
+
+ return -EIO;
+}
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return rebuild_show(dev, attr, buf);
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cb->ctlr_num);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->fw_version);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", cb->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrb_hba *cb = shost_priv(shost);
+ unsigned short status;
+
+ status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ if (status == MYRB_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush Failed, status %x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static struct device_attribute *myrb_sdev_attrs[] = {
+ &dev_attr_rebuild,
+ &dev_attr_consistency_check,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
+
+static struct device_attribute *myrb_shost_attrs[] = {
+ &dev_attr_ctlr_num,
+ &dev_attr_model,
+ &dev_attr_firmware,
+ &dev_attr_flush_cache,
+ NULL,
+};
+
+struct scsi_host_template myrb_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrb",
+ .queuecommand = myrb_queuecommand,
+ .eh_host_reset_handler = myrb_host_reset,
+ .slave_alloc = myrb_slave_alloc,
+ .slave_configure = myrb_slave_configure,
+ .slave_destroy = myrb_slave_destroy,
+ .bios_param = myrb_biosparam,
+ .cmd_size = sizeof(struct myrb_cmdblk),
+ .shost_attrs = myrb_shost_attrs,
+ .sdev_attrs = myrb_sdev_attrs,
+ .this_id = -1,
+};
+
+/**
+ * myrb_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int myrb_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return sdev->channel == myrb_logical_channel(sdev->host);
+}
+
+/**
+ * myrb_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void myrb_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_rbld_progress rbld_buf;
+ unsigned int percent_complete = 0;
+ unsigned short status;
+ unsigned int ldev_size = 0, remaining = 0;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host))
+ return;
+ status = myrb_get_rbld_progress(cb, &rbld_buf);
+ if (status == MYRB_STATUS_SUCCESS) {
+ if (rbld_buf.ldev_num == sdev->id) {
+ ldev_size = rbld_buf.ldev_size;
+ remaining = rbld_buf.blocks_left;
+ }
+ }
+ if (remaining && ldev_size)
+ percent_complete = (ldev_size - remaining) * 100 / ldev_size;
+ raid_set_resync(myrb_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrb_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void myrb_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrb_hba *cb = shost_priv(sdev->host);
+ struct myrb_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ unsigned short status;
+
+ if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ status = myrb_get_rbld_progress(cb, NULL);
+ if (status == MYRB_STATUS_SUCCESS)
+ state = RAID_STATE_RESYNCING;
+ else {
+ switch (ldev_info->state) {
+ case MYRB_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRB_DEVICE_WO:
+ case MYRB_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ }
+ raid_set_state(myrb_raid_template, dev, state);
+}
+
+struct raid_function_template myrb_raid_functions = {
+ .cookie = &myrb_template,
+ .is_raid = myrb_is_raid,
+ .get_resync = myrb_get_resync,
+ .get_state = myrb_get_state,
+};
+
+static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned short status;
+
+ if (!cmd_blk)
+ return;
+
+ scsi_dma_unmap(scmd);
+
+ if (cmd_blk->dcdb) {
+ memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
+ dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_addr);
+ cmd_blk->dcdb = NULL;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ status = cmd_blk->status;
+ switch (status) {
+ case MYRB_STATUS_SUCCESS:
+ case MYRB_STATUS_DEVICE_BUSY:
+ scmd->result = (DID_OK << 16) | status;
+ break;
+ case MYRB_STATUS_BAD_DATA:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Bad Data Encountered\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0);
+ else
+ /* Write error */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
+ scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ /* Unrecovered read error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x11, 0x04);
+ else
+ /* Write error, auto-reallocation failed */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ MEDIUM_ERROR, 0x0C, 0x02);
+ scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
+ break;
+ case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Logical Drive Nonexistent or Offline");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
+ dev_dbg(&scmd->device->sdev_gendev,
+ "Attempt to Access Beyond End of Logical Drive");
+ /* Logical block address out of range */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ NOT_READY, 0x21, 0);
+ break;
+ case MYRB_STATUS_DEVICE_NONRESPONSIVE:
+ dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
+ scmd->result = (DID_BAD_TARGET << 16);
+ break;
+ default:
+ scmd_printk(KERN_ERR, scmd,
+ "Unexpected Error Status %04X", status);
+ scmd->result = (DID_ERROR << 16);
+ break;
+ }
+ scmd->scsi_done(scmd);
+}
+
+static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->completion) {
+ complete(cmd_blk->completion);
+ cmd_blk->completion = NULL;
+ }
+}
+
+static void myrb_monitor(struct work_struct *work)
+{
+ struct myrb_hba *cb = container_of(work,
+ struct myrb_hba, monitor_work.work);
+ struct Scsi_Host *shost = cb->host;
+ unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
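+ /* delays are in jiffies; drop to a 10-jiffy re-poll while updates are pending */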
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+ if (cb->new_ev_seq > cb->old_ev_seq) {
+ int event = cb->old_ev_seq;
+
+ dev_dbg(&shost->shost_gendev,
+ "get event log no %d/%d\n",
+ cb->new_ev_seq, event);
+ myrb_get_event(cb, event);
+ cb->old_ev_seq = event + 1;
+ interval = 10;
+ } else if (cb->need_err_info) {
+ cb->need_err_info = false;
+ dev_dbg(&shost->shost_gendev, "get error table\n");
+ myrb_get_errtable(cb);
+ interval = 10;
+ } else if (cb->need_rbld && cb->rbld_first) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_ldev_info) {
+ cb->need_ldev_info = false;
+ dev_dbg(&shost->shost_gendev,
+ "get logical drive info\n");
+ myrb_get_ldev_info(cb);
+ interval = 10;
+ } else if (cb->need_rbld) {
+ cb->need_rbld = false;
+ dev_dbg(&shost->shost_gendev,
+ "get rebuild progress\n");
+ myrb_update_rbld_progress(cb);
+ interval = 10;
+ } else if (cb->need_cc_status) {
+ cb->need_cc_status = false;
+ dev_dbg(&shost->shost_gendev,
+ "get consistency check progress\n");
+ myrb_get_cc_progress(cb);
+ interval = 10;
+ } else if (cb->need_bgi_status) {
+ cb->need_bgi_status = false;
+ dev_dbg(&shost->shost_gendev, "get background init status\n");
+ myrb_bgi_control(cb);
+ interval = 10;
+ } else {
+ dev_dbg(&shost->shost_gendev, "new enquiry\n");
+ mutex_lock(&cb->dma_mutex);
+ myrb_hba_enquiry(cb);
+ mutex_unlock(&cb->dma_mutex);
+ if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
+ cb->need_err_info || cb->need_rbld ||
+ cb->need_ldev_info || cb->need_cc_status ||
+ cb->need_bgi_status) {
+ dev_dbg(&shost->shost_gendev,
+ "reschedule monitor\n");
+ interval = 0;
+ }
+ }
+ if (interval > 1)
+ cb->primary_monitor_time = jiffies;
+ queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
+}
+
+/**
+ * myrb_err_status - reports controller BIOS messages
+ *
+ * Controller BIOS messages are passed through the Error Status Register
+ * when the driver performs the BIOS handshaking.
+ *
+ * Return: true for fatal errors and false otherwise.
+ */
+bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cb->pdev;
+
+ switch (error) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ error);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 LA Series Controllers
+ */
+
+static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_LA_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
+
+ return !(idb & DAC960_LA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline void DAC960_LA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LA_ODB_OFFSET);
+}
+
+static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
+
+ return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LA_enable_intr(void __iomem *base)
+{
+ unsigned char imask = 0xFF;
+
+ imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LA_disable_intr(void __iomem *base)
+{
+ unsigned char imask = 0xFF;
+
+ imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
+ writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LA_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
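+ /* Word 0 holds the opcode; writing it last publishes the command. */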
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
+}
+
+static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_LA_STSID_OFFSET);
+}
+
+static inline unsigned short DAC960_LA_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_LA_STS_OFFSET);
+}
+
+static inline bool
+DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_LA_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_LA_ERRSTS_PENDING;
+
+ *error = errsts;
+ *param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_LA_CMDID_OFFSET);
+ writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
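+ /* Poll in 10us steps for the hardware mailbox to drain. */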
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_LA_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_LA_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_LA_write_hw_mbox(base, mbox);
+ DAC960_LA_hw_mbox_new_cmd(base);
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_LA_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_LA_read_status(base);
+ DAC960_LA_ack_hw_mbox_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LA_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_LA_disable_intr(base);
+ DAC960_LA_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_LA_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_LA_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -ENODEV;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LA_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LA_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_LA_disable_intr;
+ cb->reset = DAC960_LA_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_LA_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
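+ /* Drain every valid entry from the status mailbox ring. */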
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
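+ /*
+ * Tags 1 (DCMD) and 2 (MCMD) are reserved for driver-internal
+ * commands; SCSI command tags start at 3, hence the 'id - 3'
+ * lookup below.
+ */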
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_LA_privdata = {
+ .hw_init = DAC960_LA_hw_init,
+ .irq_handler = DAC960_LA_intr_handler,
+ .mmio_size = DAC960_LA_mmio_size,
+};
+
+/*
+ * DAC960 PG Series Controllers
+ */
+static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_gen_intr(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_reset_ctrl(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
+{
+ writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PG_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
+
+ return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline void DAC960_PG_ack_intr(void __iomem *base)
+{
+ writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_PG_ODB_OFFSET);
+}
+
+static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
+
+ return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PG_enable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_PG_disable_intr(void __iomem *base)
+{
+ unsigned int imask = (unsigned int)-1;
+
+ writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_PG_intr_enabled(void __iomem *base)
+{
+ unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
+
+ return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
+ union myrb_cmd_mbox *mbox)
+{
+ mem_mbox->words[1] = mbox->words[1];
+ mem_mbox->words[2] = mbox->words[2];
+ mem_mbox->words[3] = mbox->words[3];
+ /* Memory barrier to prevent reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Memory barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PG_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PG_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PG_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PG_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PG_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PG_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PG_CMDID_OFFSET);
+ writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned short
+DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ unsigned short status;
+ int timeout = 0;
+
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (!DAC960_PG_hw_mbox_is_full(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (DAC960_PG_hw_mbox_is_full(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for empty mailbox\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ DAC960_PG_write_hw_mbox(base, mbox);
+ DAC960_PG_hw_mbox_new_cmd(base);
+
+ timeout = 0;
+ while (timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_hw_mbox_status_available(base))
+ break;
+ udelay(10);
+ timeout++;
+ }
+ if (!DAC960_PG_hw_mbox_status_available(base)) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for mailbox status\n");
+ return MYRB_STATUS_SUBSYS_TIMEOUT;
+ }
+ status = DAC960_PG_read_status(base);
+ DAC960_PG_ack_hw_mbox_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_PG_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ DAC960_PG_disable_intr(base);
+ DAC960_PG_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PG_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PG_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PG_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PG_enable_intr(base);
+ cb->qcmd = myrb_qcmd;
+ cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
+ if (cb->dual_mode_interface)
+ cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
+ else
+ cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
+ cb->disable_intr = DAC960_PG_disable_intr;
+ cb->reset = DAC960_PG_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ struct myrb_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ DAC960_PG_ack_intr(base);
+ next_stat_mbox = cb->next_stat_mbox;
+ while (next_stat_mbox->valid) {
+ unsigned char id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = next_stat_mbox->status;
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
+ if (++next_stat_mbox > cb->last_stat_mbox)
+ next_stat_mbox = cb->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ }
+ cb->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PG_privdata = {
+ .hw_init = DAC960_PG_hw_init,
+ .irq_handler = DAC960_PG_intr_handler,
+ .mmio_size = DAC960_PG_mmio_size,
+};
+
+/*
+ * DAC960 PD Series Controllers
+ */
+
+static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline void DAC960_PD_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_PD_init_in_progress(void __iomem *base)
+{
+ unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
+
+ return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_PD_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
+}
+
+static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
+{
+ unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
+
+ return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_PD_enable_intr(void __iomem *base)
+{
+ writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline void DAC960_PD_disable_intr(void __iomem *base)
+{
+ writeb(0, base + DAC960_PD_IRQEN_OFFSET);
+}
+
+static inline bool DAC960_PD_intr_enabled(void __iomem *base)
+{
+ unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
+
+ return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
+}
+
+static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
+ union myrb_cmd_mbox *mbox)
+{
+ writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
+ writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
+ writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
+ writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
+}
+
+static inline unsigned char
+DAC960_PD_read_status_cmd_ident(void __iomem *base)
+{
+ return readb(base + DAC960_PD_STSID_OFFSET);
+}
+
+static inline unsigned short
+DAC960_PD_read_status(void __iomem *base)
+{
+ return readw(base + DAC960_PD_STS_OFFSET);
+}
+
+static inline bool
+DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
+
+ if (!(errsts & DAC960_PD_ERRSTS_PENDING))
+ return false;
+ errsts &= ~DAC960_PD_ERRSTS_PENDING;
+ *error = errsts;
+ *param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
+ *param1 = readb(base + DAC960_PD_CMDID_OFFSET);
+ writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
+ return true;
+}
+
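+/* No memory mailbox on PD Series; spin until the hardware mailbox is free. */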
+static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_PD_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_PD_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_PD_privdata = {
+ .hw_init = DAC960_PD_hw_init,
+ .irq_handler = DAC960_PD_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
+/*
+ * DAC960 P Series Controllers
+ *
+ * Similar to the DAC960 PD Series Controllers, but some commands have
+ * to be translated.
+ */
+
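+/*
+ * Old-style ENQUIRY replies carry their tail at offset 36; move
+ * those 64 bytes up to offset 132, where struct myrb_enquiry
+ * expects them, and clear the vacated region.
+ */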
+static inline void myrb_translate_enquiry(void *enq)
+{
+ memcpy(enq + 132, enq + 36, 64);
+ memset(enq + 36, 0, 96);
+}
+
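+/*
+ * Repack an old-style GET DEVICE STATE reply into the layout of
+ * struct myrb_pdev_state.
+ */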
+static inline void myrb_translate_devstate(void *state)
+{
+ memcpy(state + 2, state + 3, 1);
+ memmove(state + 4, state + 5, 2);
+ memmove(state + 6, state + 8, 4);
+}
+
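+/*
+ * Old-style read/write mailboxes keep the logical drive number in
+ * byte 7; its previous contents move into the high bits of byte 3.
+ */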
+static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->type5.ld.ldev_num;
+
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= mbox->bytes[7] << 6;
+ mbox->bytes[7] = ldev_num;
+}
+
+static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
+{
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+ int ldev_num = mbox->bytes[7];
+
+ mbox->bytes[7] = mbox->bytes[3] >> 6;
+ mbox->bytes[3] &= 0x7;
+ mbox->bytes[3] |= ldev_num << 3;
+}
+
+static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
+{
+ void __iomem *base = cb->io_base;
+ union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
+
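+ /*
+ * P Series firmware only understands the old-style opcodes, so
+ * translate them (and repack read/write mailboxes) on the way out.
+ */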
+ switch (mbox->common.opcode) {
+ case MYRB_CMD_ENQUIRY:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
+ break;
+ case MYRB_CMD_GET_DEVICE_STATE:
+ mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
+ break;
+ case MYRB_CMD_READ:
+ mbox->common.opcode = MYRB_CMD_READ_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE:
+ mbox->common.opcode = MYRB_CMD_WRITE_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG:
+ mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
+ myrb_translate_to_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ while (DAC960_PD_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_PD_write_cmd_mbox(base, mbox);
+ DAC960_PD_hw_mbox_new_cmd(base);
+}
+
+static int DAC960_P_hw_init(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char error, parm0, parm1;
+
+ if (!request_region(cb->io_addr, 0x80, "myrb")) {
+ dev_err(&pdev->dev, "IO port 0x%lx busy\n",
+ (unsigned long)cb->io_addr);
+ return -EBUSY;
+ }
+ DAC960_PD_disable_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_PD_init_in_progress(base) &&
+ timeout < MYRB_MAILBOX_TIMEOUT) {
+ if (DAC960_PD_read_error_status(base, &error,
+ &parm0, &parm1) &&
+ myrb_err_status(cb, error, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRB_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrb_enable_mmio(cb, NULL)) {
+ dev_err(&pdev->dev,
+ "Unable to allocate DMA mapped memory\n");
+ DAC960_PD_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_PD_enable_intr(base);
+ cb->qcmd = DAC960_P_qcmd;
+ cb->disable_intr = DAC960_PD_disable_intr;
+ cb->reset = DAC960_PD_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
+{
+ struct myrb_hba *cb = arg;
+ void __iomem *base = cb->io_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cb->queue_lock, flags);
+ while (DAC960_PD_hw_mbox_status_available(base)) {
+ unsigned char id = DAC960_PD_read_status_cmd_ident(base);
+ struct scsi_cmnd *scmd = NULL;
+ struct myrb_cmdblk *cmd_blk = NULL;
+ union myrb_cmd_mbox *mbox;
+ enum myrb_cmd_opcode op;
+
+ if (id == MYRB_DCMD_TAG)
+ cmd_blk = &cb->dcmd_blk;
+ else if (id == MYRB_MCMD_TAG)
+ cmd_blk = &cb->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cb->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk)
+ cmd_blk->status = DAC960_PD_read_status(base);
+ else
+ dev_err(&cb->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ DAC960_PD_ack_intr(base);
+ DAC960_PD_ack_hw_mbox_status(base);
+
+ if (!cmd_blk)
+ continue;
+
+ mbox = &cmd_blk->mbox;
+ op = mbox->common.opcode;
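+ /* Undo the translation applied by DAC960_P_qcmd at submission. */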
+ switch (op) {
+ case MYRB_CMD_ENQUIRY_OLD:
+ mbox->common.opcode = MYRB_CMD_ENQUIRY;
+ myrb_translate_enquiry(cb->enquiry);
+ break;
+ case MYRB_CMD_READ_OLD:
+ mbox->common.opcode = MYRB_CMD_READ;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_READ_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_READ_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ case MYRB_CMD_WRITE_SG_OLD:
+ mbox->common.opcode = MYRB_CMD_WRITE_SG;
+ myrb_translate_from_rw_command(cmd_blk);
+ break;
+ default:
+ break;
+ }
+ if (id < 3)
+ myrb_handle_cmdblk(cb, cmd_blk);
+ else
+ myrb_handle_scsi(cb, cmd_blk, scmd);
+ }
+ spin_unlock_irqrestore(&cb->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrb_privdata DAC960_P_privdata = {
+ .hw_init = DAC960_P_hw_init,
+ .irq_handler = DAC960_P_intr_handler,
+ .mmio_size = DAC960_PD_mmio_size,
+};
+
+static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrb_privdata *privdata =
+ (struct myrb_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct Scsi_Host *shost;
+ struct myrb_hba *cb = NULL;
+
+ shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
+ if (!shost) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ shost->max_cmd_len = 12;
+ shost->max_lun = 256;
+ cb = shost_priv(shost);
+ mutex_init(&cb->dcmd_mutex);
+ mutex_init(&cb->dma_mutex);
+ cb->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto failure;
+
+ if (privdata->hw_init == DAC960_PD_hw_init ||
+ privdata->hw_init == DAC960_P_hw_init) {
+ cb->io_addr = pci_resource_start(pdev, 0);
+ cb->pci_addr = pci_resource_start(pdev, 1);
+ } else {
+ cb->pci_addr = pci_resource_start(pdev, 0);
+ }
+
+ pci_set_drvdata(pdev, cb);
+ spin_lock_init(&cb->queue_lock);
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
+ if (cb->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto failure;
+ }
+
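+ /* The register window may start mid-page; offset into the mapping. */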
+ cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cb, cb->io_base))
+ goto failure;
+
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto failure;
+ }
+ cb->irq = pdev->irq;
+ return cb;
+
+failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrb_cleanup(cb);
+ return NULL;
+}
+
+static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrb_hba *cb;
+ int ret;
+
+ cb = myrb_detect(dev, entry);
+ if (!cb)
+ return -ENODEV;
+
+ ret = myrb_get_hba_config(cb);
+ if (ret < 0) {
+ myrb_cleanup(cb);
+ return ret;
+ }
+
+ if (!myrb_create_mempools(dev, cb)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cb->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrb_destroy_mempools(cb);
+ goto failed;
+ }
+ scsi_scan_host(cb->host);
+ return 0;
+failed:
+ myrb_cleanup(cb);
+ return ret;
+}
+
+static void myrb_remove(struct pci_dev *pdev)
+{
+ struct myrb_hba *cb = pci_get_drvdata(pdev);
+
+ shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
+ myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
+ myrb_cleanup(cb);
+ myrb_destroy_mempools(cb);
+}
+
+static const struct pci_device_id myrb_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
+ PCI_DEVICE_ID_DEC_21285,
+ PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_LA),
+ .driver_data = (unsigned long) &DAC960_LA_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrb_id_table);
+
+static struct pci_driver myrb_pci_driver = {
+ .name = "myrb",
+ .id_table = myrb_id_table,
+ .probe = myrb_probe,
+ .remove = myrb_remove,
+};
+
+static int __init myrb_init_module(void)
+{
+ int ret;
+
+ myrb_raid_template = raid_class_attach(&myrb_raid_functions);
+ if (!myrb_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrb_pci_driver);
+ if (ret)
+ raid_class_release(myrb_raid_template);
+
+ return ret;
+}
+
+static void __exit myrb_cleanup_module(void)
+{
+ pci_unregister_driver(&myrb_pci_driver);
+ raid_class_release(myrb_raid_template);
+}
+
+module_init(myrb_init_module);
+module_exit(myrb_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h
new file mode 100644
index 000000000000..9289c19fcb2f
--- /dev/null
+++ b/drivers/scsi/myrb.h
@@ -0,0 +1,958 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver,
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ *
+ */
+
+#ifndef MYRB_H
+#define MYRB_H
+
+#define MYRB_MAX_LDEVS 32
+#define MYRB_MAX_CHANNELS 3
+#define MYRB_MAX_TARGETS 16
+#define MYRB_MAX_PHYSICAL_DEVICES 45
+#define MYRB_SCATTER_GATHER_LIMIT 32
+#define MYRB_CMD_MBOX_COUNT 256
+#define MYRB_STAT_MBOX_COUNT 1024
+
+#define MYRB_BLKSIZE_BITS 9
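+/* Polled in 10us steps, allowing roughly ten seconds. */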
+#define MYRB_MAILBOX_TIMEOUT 1000000
+
+#define MYRB_DCMD_TAG 1
+#define MYRB_MCMD_TAG 2
+
+#define MYRB_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRB_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/*
+ * DAC960 V1 Firmware Command Opcodes.
+ */
+enum myrb_cmd_opcode {
+ /* I/O Commands */
+ MYRB_CMD_READ_EXTENDED = 0x33,
+ MYRB_CMD_WRITE_EXTENDED = 0x34,
+ MYRB_CMD_READAHEAD_EXTENDED = 0x35,
+ MYRB_CMD_READ_EXTENDED_SG = 0xB3,
+ MYRB_CMD_WRITE_EXTENDED_SG = 0xB4,
+ MYRB_CMD_READ = 0x36,
+ MYRB_CMD_READ_SG = 0xB6,
+ MYRB_CMD_WRITE = 0x37,
+ MYRB_CMD_WRITE_SG = 0xB7,
+ MYRB_CMD_DCDB = 0x04,
+ MYRB_CMD_DCDB_SG = 0x84,
+ MYRB_CMD_FLUSH = 0x0A,
+ /* Controller Status Related Commands */
+ MYRB_CMD_ENQUIRY = 0x53,
+ MYRB_CMD_ENQUIRY2 = 0x1C,
+ MYRB_CMD_GET_LDRV_ELEMENT = 0x55,
+ MYRB_CMD_GET_LDEV_INFO = 0x19,
+ MYRB_CMD_IOPORTREAD = 0x39,
+ MYRB_CMD_IOPORTWRITE = 0x3A,
+ MYRB_CMD_GET_SD_STATS = 0x3E,
+ MYRB_CMD_GET_PD_STATS = 0x3F,
+ MYRB_CMD_EVENT_LOG_OPERATION = 0x72,
+ /* Device Related Commands */
+ MYRB_CMD_START_DEVICE = 0x10,
+ MYRB_CMD_GET_DEVICE_STATE = 0x50,
+ MYRB_CMD_STOP_CHANNEL = 0x13,
+ MYRB_CMD_START_CHANNEL = 0x12,
+ MYRB_CMD_RESET_CHANNEL = 0x1A,
+ /* Commands Associated with Data Consistency and Errors */
+ MYRB_CMD_REBUILD = 0x09,
+ MYRB_CMD_REBUILD_ASYNC = 0x16,
+ MYRB_CMD_CHECK_CONSISTENCY = 0x0F,
+ MYRB_CMD_CHECK_CONSISTENCY_ASYNC = 0x1E,
+ MYRB_CMD_REBUILD_STAT = 0x0C,
+ MYRB_CMD_GET_REBUILD_PROGRESS = 0x27,
+ MYRB_CMD_REBUILD_CONTROL = 0x1F,
+ MYRB_CMD_READ_BADBLOCK_TABLE = 0x0B,
+ MYRB_CMD_READ_BADDATA_TABLE = 0x25,
+ MYRB_CMD_CLEAR_BADDATA_TABLE = 0x26,
+ MYRB_CMD_GET_ERROR_TABLE = 0x17,
+ MYRB_CMD_ADD_CAPACITY_ASYNC = 0x2A,
+ MYRB_CMD_BGI_CONTROL = 0x2B,
+ /* Configuration Related Commands */
+ MYRB_CMD_READ_CONFIG2 = 0x3D,
+ MYRB_CMD_WRITE_CONFIG2 = 0x3C,
+ MYRB_CMD_READ_CONFIG_ONDISK = 0x4A,
+ MYRB_CMD_WRITE_CONFIG_ONDISK = 0x4B,
+ MYRB_CMD_READ_CONFIG = 0x4E,
+ MYRB_CMD_READ_BACKUP_CONFIG = 0x4D,
+ MYRB_CMD_WRITE_CONFIG = 0x4F,
+ MYRB_CMD_ADD_CONFIG = 0x4C,
+ MYRB_CMD_READ_CONFIG_LABEL = 0x48,
+ MYRB_CMD_WRITE_CONFIG_LABEL = 0x49,
+ /* Firmware Upgrade Related Commands */
+ MYRB_CMD_LOAD_IMAGE = 0x20,
+ MYRB_CMD_STORE_IMAGE = 0x21,
+ MYRB_CMD_PROGRAM_IMAGE = 0x22,
+ /* Diagnostic Commands */
+ MYRB_CMD_SET_DIAGNOSTIC_MODE = 0x31,
+ MYRB_CMD_RUN_DIAGNOSTIC = 0x32,
+ /* Subsystem Service Commands */
+ MYRB_CMD_GET_SUBSYS_DATA = 0x70,
+ MYRB_CMD_SET_SUBSYS_PARAM = 0x71,
+ /* Version 2.xx Firmware Commands */
+ MYRB_CMD_ENQUIRY_OLD = 0x05,
+ MYRB_CMD_GET_DEVICE_STATE_OLD = 0x14,
+ MYRB_CMD_READ_OLD = 0x02,
+ MYRB_CMD_WRITE_OLD = 0x03,
+ MYRB_CMD_READ_SG_OLD = 0x82,
+ MYRB_CMD_WRITE_SG_OLD = 0x83
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Command Status Codes.
+ */
+#define MYRB_STATUS_SUCCESS 0x0000 /* Common */
+#define MYRB_STATUS_CHECK_CONDITION 0x0002 /* Common */
+#define MYRB_STATUS_NO_DEVICE 0x0102 /* Common */
+#define MYRB_STATUS_INVALID_ADDRESS 0x0105 /* Common */
+#define MYRB_STATUS_INVALID_PARAM 0x0105 /* Common */
+#define MYRB_STATUS_IRRECOVERABLE_DATA_ERROR 0x0001 /* I/O */
+#define MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE 0x0002 /* I/O */
+#define MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV 0x0105 /* I/O */
+#define MYRB_STATUS_BAD_DATA 0x010C /* I/O */
+#define MYRB_STATUS_DEVICE_BUSY 0x0008 /* DCDB */
+#define MYRB_STATUS_DEVICE_NONRESPONSIVE 0x000E /* DCDB */
+#define MYRB_STATUS_COMMAND_TERMINATED 0x000F /* DCDB */
+#define MYRB_STATUS_START_DEVICE_FAILED 0x0002 /* Device */
+#define MYRB_STATUS_INVALID_CHANNEL_OR_TARGET 0x0105 /* Device */
+#define MYRB_STATUS_CHANNEL_BUSY 0x0106 /* Device */
+#define MYRB_STATUS_OUT_OF_MEMORY 0x0107 /* Device */
+#define MYRB_STATUS_CHANNEL_NOT_STOPPED 0x0002 /* Device */
+#define MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_NEW_DISK_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS 0x0106 /* Consistency */
+#define MYRB_STATUS_DEPENDENT_DISK_DEAD 0x0002 /* Consistency */
+#define MYRB_STATUS_INCONSISTENT_BLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_INVALID_OR_NONREDUNDANT_LDRV 0x0105 /* Consistency */
+#define MYRB_STATUS_NO_RBLD_OR_CHECK_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_RBLD_IN_PROGRESS_DATA_VALID 0x0000 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE 0x0002 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_BADBLOCKS 0x0003 /* Consistency */
+#define MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED 0x0004 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_RBLD_SUCCESS_TERMINATED 0x0107 /* Consistency */
+#define MYRB_STATUS_RBLD_NOT_CHECKED 0x0108 /* Consistency */
+#define MYRB_STATUS_BGI_SUCCESS 0x0100 /* Consistency */
+#define MYRB_STATUS_BGI_ABORTED 0x0005 /* Consistency */
+#define MYRB_STATUS_NO_BGI_INPROGRESS 0x0105 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_INPROGRESS 0x0004 /* Consistency */
+#define MYRB_STATUS_ADD_CAPACITY_FAILED_OR_SUSPENDED 0x00F4 /* Consistency */
+#define MYRB_STATUS_CONFIG2_CSUM_ERROR 0x0002 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_SUSPENDED 0x0106 /* Configuration */
+#define MYRB_STATUS_FAILED_TO_CONFIGURE_NVRAM 0x0105 /* Configuration */
+#define MYRB_STATUS_CONFIGURATION_NOT_SAVED 0x0106 /* Configuration */
+#define MYRB_STATUS_SUBSYS_NOTINSTALLED 0x0001 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_FAILED 0x0002 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_BUSY 0x0106 /* Subsystem */
+#define MYRB_STATUS_SUBSYS_TIMEOUT 0x0108 /* Subsystem */
+
+/*
+ * DAC960 V1 Firmware Enquiry Command reply structure.
+ */
+struct myrb_enquiry {
+ unsigned char ldev_count; /* Byte 0 */
+ unsigned int rsvd1:24; /* Bytes 1-3 */
+ unsigned int ldev_sizes[32]; /* Bytes 4-131 */
+ unsigned short flash_age; /* Bytes 132-133 */
+ struct {
+ unsigned char deferred:1; /* Byte 134 Bit 0 */
+ unsigned char low_bat:1; /* Byte 134 Bit 1 */
+ unsigned char rsvd2:6; /* Byte 134 Bits 2-7 */
+ } status;
+ unsigned char rsvd3:8; /* Byte 135 */
+ unsigned char fw_minor_version; /* Byte 136 */
+ unsigned char fw_major_version; /* Byte 137 */
+ enum {
+ MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS = 0x00,
+ MYRB_STDBY_RBLD_IN_PROGRESS = 0x01,
+ MYRB_BG_RBLD_IN_PROGRESS = 0x02,
+ MYRB_BG_CHECK_IN_PROGRESS = 0x03,
+ MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR = 0xFF,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED = 0xF0,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED = 0xF1,
+ MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER = 0xF2,
+ MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED = 0xF3
+ } __packed rbld; /* Byte 138 */
+ unsigned char max_tcq; /* Byte 139 */
+ unsigned char ldev_offline; /* Byte 140 */
+ unsigned char rsvd4:8; /* Byte 141 */
+ unsigned short ev_seq; /* Bytes 142-143 */
+ unsigned char ldev_critical; /* Byte 144 */
+ unsigned int rsvd5:24; /* Bytes 145-147 */
+ unsigned char pdev_dead; /* Byte 148 */
+ unsigned char rsvd6:8; /* Byte 149 */
+ unsigned char rbld_count; /* Byte 150 */
+ struct {
+ unsigned char rsvd7:3; /* Byte 151 Bits 0-2 */
+ unsigned char bbu_present:1; /* Byte 151 Bit 3 */
+ unsigned char rsvd8:4; /* Byte 151 Bits 4-7 */
+ } misc;
+ struct {
+ unsigned char target;
+ unsigned char channel;
+ } dead_drives[21]; /* Bytes 152-193 */
+ unsigned char rsvd9[62]; /* Bytes 194-255 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Enquiry2 Command reply structure.
+ */
+struct myrb_enquiry2 {
+ struct {
+ enum {
+ DAC960_V1_P_PD_PU = 0x01,
+ DAC960_V1_PL = 0x02,
+ DAC960_V1_PG = 0x10,
+ DAC960_V1_PJ = 0x11,
+ DAC960_V1_PR = 0x12,
+ DAC960_V1_PT = 0x13,
+ DAC960_V1_PTL0 = 0x14,
+ DAC960_V1_PRL = 0x15,
+ DAC960_V1_PTL1 = 0x16,
+ DAC960_V1_1164P = 0x20
+ } __packed sub_model; /* Byte 0 */
+ unsigned char actual_channels; /* Byte 1 */
+ enum {
+ MYRB_5_CHANNEL_BOARD = 0x01,
+ MYRB_3_CHANNEL_BOARD = 0x02,
+ MYRB_2_CHANNEL_BOARD = 0x03,
+ MYRB_3_CHANNEL_ASIC_DAC = 0x04
+ } __packed model; /* Byte 2 */
+ enum {
+ MYRB_EISA_CONTROLLER = 0x01,
+ MYRB_MCA_CONTROLLER = 0x02,
+ MYRB_PCI_CONTROLLER = 0x03,
+ MYRB_SCSI_TO_SCSI = 0x08
+ } __packed controller; /* Byte 3 */
+ } hw; /* Bytes 0-3 */
+ /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+ struct {
+ unsigned char major_version; /* Byte 4 */
+ unsigned char minor_version; /* Byte 5 */
+ unsigned char turn_id; /* Byte 6 */
+ char firmware_type; /* Byte 7 */
+ } fw; /* Bytes 4-7 */
+ unsigned int rsvd1; /* Bytes 8-11 */
+ unsigned char cfg_chan; /* Byte 12 */
+ unsigned char cur_chan; /* Byte 13 */
+ unsigned char max_targets; /* Byte 14 */
+ unsigned char max_tcq; /* Byte 15 */
+ unsigned char max_ldev; /* Byte 16 */
+ unsigned char max_arms; /* Byte 17 */
+ unsigned char max_spans; /* Byte 18 */
+ unsigned char rsvd2; /* Byte 19 */
+ unsigned int rsvd3; /* Bytes 20-23 */
+ unsigned int mem_size; /* Bytes 24-27 */
+ unsigned int cache_size; /* Bytes 28-31 */
+ unsigned int flash_size; /* Bytes 32-35 */
+ unsigned int nvram_size; /* Bytes 36-39 */
+ struct {
+ enum {
+ MYRB_RAM_TYPE_DRAM = 0x0,
+ MYRB_RAM_TYPE_EDO = 0x1,
+ MYRB_RAM_TYPE_SDRAM = 0x2,
+ MYRB_RAM_TYPE_Last = 0x7
+ } __packed ram:3; /* Byte 40 Bits 0-2 */
+ enum {
+ MYRB_ERR_CORR_None = 0x0,
+ MYRB_ERR_CORR_Parity = 0x1,
+ MYRB_ERR_CORR_ECC = 0x2,
+ MYRB_ERR_CORR_Last = 0x7
+ } __packed ec:3; /* Byte 40 Bits 3-5 */
+ unsigned char fast_page:1; /* Byte 40 Bit 6 */
+ unsigned char low_power:1; /* Byte 40 Bit 7 */
+ unsigned char rsvd4; /* Byte 41 */
+ } mem_type;
+ unsigned short clock_speed; /* Bytes 42-43 */
+ unsigned short mem_speed; /* Bytes 44-45 */
+ unsigned short hw_speed; /* Bytes 46-47 */
+ unsigned char rsvd5[12]; /* Bytes 48-59 */
+ unsigned short max_cmds; /* Bytes 60-61 */
+ unsigned short max_sge; /* Bytes 62-63 */
+ unsigned short max_drv_cmds; /* Bytes 64-65 */
+ unsigned short max_io_desc; /* Bytes 66-67 */
+ unsigned short max_sectors; /* Bytes 68-69 */
+ unsigned char latency; /* Byte 70 */
+ unsigned char rsvd6; /* Byte 71 */
+ unsigned char scsi_tmo; /* Byte 72 */
+ unsigned char rsvd7; /* Byte 73 */
+ unsigned short min_freelines; /* Bytes 74-75 */
+ unsigned char rsvd8[8]; /* Bytes 76-83 */
+ unsigned char rbld_rate_const; /* Byte 84 */
+ unsigned char rsvd9[11]; /* Bytes 85-95 */
+ unsigned short pdrv_block_size; /* Bytes 96-97 */
+ unsigned short ldev_block_size; /* Bytes 98-99 */
+ unsigned short max_blocks_per_cmd; /* Bytes 100-101 */
+ unsigned short block_factor; /* Bytes 102-103 */
+ unsigned short cacheline_size; /* Bytes 104-105 */
+ struct {
+ enum {
+ MYRB_WIDTH_NARROW_8BIT = 0x0,
+ MYRB_WIDTH_WIDE_16BIT = 0x1,
+ MYRB_WIDTH_WIDE_32BIT = 0x2
+ } __packed bus_width:2; /* Byte 106 Bits 0-1 */
+ enum {
+ MYRB_SCSI_SPEED_FAST = 0x0,
+ MYRB_SCSI_SPEED_ULTRA = 0x1,
+ MYRB_SCSI_SPEED_ULTRA2 = 0x2
+ } __packed bus_speed:2; /* Byte 106 Bits 2-3 */
+ unsigned char differential:1; /* Byte 106 Bit 4 */
+ unsigned char rsvd10:3; /* Byte 106 Bits 5-7 */
+ } scsi_cap;
+ unsigned char rsvd11[5]; /* Bytes 107-111 */
+ unsigned short fw_build; /* Bytes 112-113 */
+ enum {
+ MYRB_FAULT_AEMI = 0x01,
+ MYRB_FAULT_OEM1 = 0x02,
+ MYRB_FAULT_OEM2 = 0x04,
+ MYRB_FAULT_OEM3 = 0x08,
+ MYRB_FAULT_CONNER = 0x10,
+ MYRB_FAULT_SAFTE = 0x20
+ } __packed fault_mgmt; /* Byte 114 */
+ unsigned char rsvd12; /* Byte 115 */
+ struct {
+ unsigned int clustering:1; /* Byte 116 Bit 0 */
+ unsigned int online_RAID_expansion:1; /* Byte 116 Bit 1 */
+ unsigned int readahead:1; /* Byte 116 Bit 2 */
+ unsigned int bgi:1; /* Byte 116 Bit 3 */
+ unsigned int rsvd13:28; /* Bytes 116-119 */
+ } fw_features;
+ unsigned char rsvd14[8]; /* Bytes 120-127 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive State type.
+ */
+enum myrb_devstate {
+ MYRB_DEVICE_DEAD = 0x00,
+ MYRB_DEVICE_WO = 0x02,
+ MYRB_DEVICE_ONLINE = 0x03,
+ MYRB_DEVICE_CRITICAL = 0x04,
+ MYRB_DEVICE_STANDBY = 0x10,
+ MYRB_DEVICE_OFFLINE = 0xFF
+} __packed;
+
+/*
+ * DAC960 V1 RAID Levels
+ */
+enum myrb_raidlevel {
+ MYRB_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRB_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRB_RAID_LEVEL3 = 0x3, /* RAID 3 */
+ MYRB_RAID_LEVEL5 = 0x5, /* RAID 5 */
+ MYRB_RAID_LEVEL6 = 0x6, /* RAID 6 */
+ MYRB_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Logical Drive Information structure.
+ */
+struct myrb_ldev_info {
+ unsigned int size; /* Bytes 0-3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned int raid_level:7; /* Byte 5 Bits 0-6 */
+ unsigned int wb_enabled:1; /* Byte 5 Bit 7 */
+ unsigned int rsvd:16; /* Bytes 6-7 */
+};
+
+/*
+ * DAC960 V1 Firmware Perform Event Log Operation Types.
+ */
+#define DAC960_V1_GetEventLogEntry 0x00
+
+/*
+ * DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+ */
+struct myrb_log_entry {
+ unsigned char msg_type; /* Byte 0 */
+ unsigned char msg_len; /* Byte 1 */
+ unsigned char target:5; /* Byte 2 Bits 0-4 */
+ unsigned char channel:3; /* Byte 2 Bits 5-7 */
+ unsigned char lun:6; /* Byte 3 Bits 0-5 */
+ unsigned char rsvd1:2; /* Byte 3 Bits 6-7 */
+ unsigned short seq_num; /* Bytes 4-5 */
+ unsigned char sense[26]; /* Bytes 6-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Get Device State Command reply structure.
+ * The structure is padded by 2 bytes for compatibility with Version 2.xx
+ * Firmware.
+ */
+struct myrb_pdev_state {
+ unsigned int present:1; /* Byte 0 Bit 0 */
+ unsigned int :7; /* Byte 0 Bits 1-7 */
+ enum {
+ MYRB_TYPE_OTHER = 0x0,
+ MYRB_TYPE_DISK = 0x1,
+ MYRB_TYPE_TAPE = 0x2,
+ MYRB_TYPE_CDROM_OR_WORM = 0x3
+ } __packed devtype:2; /* Byte 1 Bits 0-1 */
+ unsigned int rsvd1:1; /* Byte 1 Bit 2 */
+ unsigned int fast20:1; /* Byte 1 Bit 3 */
+ unsigned int sync:1; /* Byte 1 Bit 4 */
+ unsigned int fast:1; /* Byte 1 Bit 5 */
+ unsigned int wide:1; /* Byte 1 Bit 6 */
+ unsigned int tcq_supported:1; /* Byte 1 Bit 7 */
+ enum myrb_devstate state; /* Byte 2 */
+ unsigned int rsvd2:8; /* Byte 3 */
+ unsigned int sync_multiplier:8; /* Byte 4 */
+ unsigned int sync_offset:5; /* Byte 5 Bits 0-4 */
+ unsigned int rsvd3:3; /* Byte 5 Bits 5-7 */
+ unsigned int size; /* Bytes 6-9 */
+ unsigned int rsvd4:16; /* Bytes 10-11 */
+} __packed;
+
+/*
+ * DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+ */
+struct myrb_rbld_progress {
+ unsigned int ldev_num; /* Bytes 0-3 */
+ unsigned int ldev_size; /* Bytes 4-7 */
+ unsigned int blocks_left; /* Bytes 8-11 */
+};
+
+/*
+ * DAC960 V1 Firmware Background Initialization Status Command reply structure.
+ */
+struct myrb_bgi_status {
+ unsigned int ldev_size; /* Bytes 0-3 */
+ unsigned int blocks_done; /* Bytes 4-7 */
+ unsigned char rsvd1[12]; /* Bytes 8-19 */
+ unsigned int ldev_num; /* Bytes 20-23 */
+ unsigned char raid_level; /* Byte 24 */
+ enum {
+ MYRB_BGI_INVALID = 0x00,
+ MYRB_BGI_STARTED = 0x02,
+ MYRB_BGI_INPROGRESS = 0x04,
+ MYRB_BGI_SUSPENDED = 0x05,
+ MYRB_BGI_CANCELLED = 0x06
+ } __packed status; /* Byte 25 */
+ unsigned char rsvd2[6]; /* Bytes 26-31 */
+};
+
+/*
+ * DAC960 V1 Firmware Error Table Entry structure.
+ */
+struct myrb_error_entry {
+ unsigned char parity_err; /* Byte 0 */
+ unsigned char soft_err; /* Byte 1 */
+ unsigned char hard_err; /* Byte 2 */
+ unsigned char misc_err; /* Byte 3 */
+};
+
+/*
+ * DAC960 V1 Firmware Read Config2 Command reply structure.
+ */
+struct myrb_config2 {
+ unsigned rsvd1:1; /* Byte 0 Bit 0 */
+ unsigned active_negation:1; /* Byte 0 Bit 1 */
+ unsigned rsvd2:5; /* Byte 0 Bits 2-6 */
+ unsigned no_rescan_on_reset_during_scan:1; /* Byte 0 Bit 7 */
+ unsigned StorageWorks_support:1; /* Byte 1 Bit 0 */
+ unsigned HewlettPackard_support:1; /* Byte 1 Bit 1 */
+ unsigned no_disconnect_on_first_command:1; /* Byte 1 Bit 2 */
+ unsigned rsvd3:2; /* Byte 1 Bits 3-4 */
+ unsigned AEMI_ARM:1; /* Byte 1 Bit 5 */
+ unsigned AEMI_OFM:1; /* Byte 1 Bit 6 */
+ unsigned rsvd4:1; /* Byte 1 Bit 7 */
+ enum {
+ MYRB_OEMID_MYLEX = 0x00,
+ MYRB_OEMID_IBM = 0x08,
+ MYRB_OEMID_HP = 0x0A,
+ MYRB_OEMID_DEC = 0x0C,
+ MYRB_OEMID_SIEMENS = 0x10,
+ MYRB_OEMID_INTEL = 0x12
+ } __packed OEMID; /* Byte 2 */
+ unsigned char oem_model_number; /* Byte 3 */
+ unsigned char physical_sector; /* Byte 4 */
+ unsigned char logical_sector; /* Byte 5 */
+ unsigned char block_factor; /* Byte 6 */
+ unsigned readahead_enabled:1; /* Byte 7 Bit 0 */
+ unsigned low_BIOS_delay:1; /* Byte 7 Bit 1 */
+ unsigned rsvd5:2; /* Byte 7 Bits 2-3 */
+ unsigned restrict_reassign_to_one_sector:1; /* Byte 7 Bit 4 */
+ unsigned rsvd6:1; /* Byte 7 Bit 5 */
+ unsigned FUA_during_write_recovery:1; /* Byte 7 Bit 6 */
+ unsigned enable_LeftSymmetricRAID5Algorithm:1; /* Byte 7 Bit 7 */
+ unsigned char default_rebuild_rate; /* Byte 8 */
+ unsigned char rsvd7; /* Byte 9 */
+ unsigned char blocks_per_cacheline; /* Byte 10 */
+ unsigned char blocks_per_stripe; /* Byte 11 */
+ struct {
+ enum {
+ MYRB_SPEED_ASYNC = 0x0,
+ MYRB_SPEED_SYNC_8MHz = 0x1,
+ MYRB_SPEED_SYNC_5MHz = 0x2,
+ MYRB_SPEED_SYNC_10_OR_20MHz = 0x3
+ } __packed speed:2; /* Byte 11 Bits 0-1 */
+ unsigned force_8bit:1; /* Byte 11 Bit 2 */
+ unsigned disable_fast20:1; /* Byte 11 Bit 3 */
+ unsigned rsvd8:3; /* Byte 11 Bits 4-6 */
+ unsigned enable_tcq:1; /* Byte 11 Bit 7 */
+ } __packed channelparam[6]; /* Bytes 12-17 */
+ unsigned char SCSIInitiatorID; /* Byte 18 */
+ unsigned char rsvd9; /* Byte 19 */
+ enum {
+ MYRB_STARTUP_CONTROLLER_SPINUP = 0x00,
+ MYRB_STARTUP_POWERON_SPINUP = 0x01
+ } __packed startup; /* Byte 20 */
+ unsigned char simultaneous_device_spinup_count; /* Byte 21 */
+ unsigned char seconds_delay_between_spinups; /* Byte 22 */
+ unsigned char rsvd10[29]; /* Bytes 23-51 */
+ unsigned BIOS_disabled:1; /* Byte 52 Bit 0 */
+ unsigned CDROM_boot_enabled:1; /* Byte 52 Bit 1 */
+ unsigned rsvd11:3; /* Byte 52 Bits 2-4 */
+ enum {
+ MYRB_GEOM_128_32 = 0x0,
+ MYRB_GEOM_255_63 = 0x1,
+ MYRB_GEOM_RESERVED1 = 0x2,
+ MYRB_GEOM_RESERVED2 = 0x3
+ } __packed drive_geometry:2; /* Byte 52 Bits 5-6 */
+ unsigned rsvd12:1; /* Byte 52 Bit 7 */
+ unsigned char rsvd13[9]; /* Bytes 53-61 */
+ unsigned short csum; /* Bytes 62-63 */
+};
+
+/*
+ * DAC960 V1 Firmware DCDB request structure.
+ */
+struct myrb_dcdb {
+ unsigned target:4; /* Byte 0 Bits 0-3 */
+ unsigned channel:4; /* Byte 0 Bits 4-7 */
+ enum {
+ MYRB_DCDB_XFER_NONE = 0,
+ MYRB_DCDB_XFER_DEVICE_TO_SYSTEM = 1,
+ MYRB_DCDB_XFER_SYSTEM_TO_DEVICE = 2,
+ MYRB_DCDB_XFER_ILLEGAL = 3
+ } __packed data_xfer:2; /* Byte 1 Bits 0-1 */
+ unsigned early_status:1; /* Byte 1 Bit 2 */
+ unsigned rsvd1:1; /* Byte 1 Bit 3 */
+ enum {
+ MYRB_DCDB_TMO_24_HRS = 0,
+ MYRB_DCDB_TMO_10_SECS = 1,
+ MYRB_DCDB_TMO_60_SECS = 2,
+ MYRB_DCDB_TMO_10_MINS = 3
+ } __packed timeout:2; /* Byte 1 Bits 4-5 */
+ unsigned no_autosense:1; /* Byte 1 Bit 6 */
+ unsigned allow_disconnect:1; /* Byte 1 Bit 7 */
+ unsigned short xfer_len_lo; /* Bytes 2-3 */
+ u32 dma_addr; /* Bytes 4-7 */
+ unsigned char cdb_len:4; /* Byte 8 Bits 0-3 */
+ unsigned char xfer_len_hi4:4; /* Byte 8 Bits 4-7 */
+ unsigned char sense_len; /* Byte 9 */
+ unsigned char cdb[12]; /* Bytes 10-21 */
+ unsigned char sense[64]; /* Bytes 22-85 */
+ unsigned char status; /* Byte 86 */
+ unsigned char rsvd2; /* Byte 87 */
+};
+
+/*
+ * DAC960 V1 Firmware Scatter/Gather List Type 1: 32 Bit Address /
+ * 32 Bit Byte Count structure.
+ */
+struct myrb_sge {
+ u32 sge_addr; /* Bytes 0-3 */
+ u32 sge_count; /* Bytes 4-7 */
+};
+
+/*
+ * 13 Byte DAC960 V1 Firmware Command Mailbox structure.
+ * Bytes 13-15 are not used. The structure is padded to 16 bytes for
+ * efficient access.
+ */
+union myrb_cmd_mbox {
+ unsigned int words[4]; /* Words 0-3 */
+ unsigned char bytes[16]; /* Bytes 0-15 */
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd[14]; /* Bytes 2-15 */
+ } __packed common;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[6]; /* Bytes 2-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char rsvd1[5]; /* Bytes 3-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3B;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[5]; /* Bytes 2-6 */
+ unsigned char ldev_num:6; /* Byte 7 Bits 0-6 */
+ unsigned char auto_restore:1; /* Byte 7 Bit 7 */
+ unsigned char rsvd2[8]; /* Bytes 8-15 */
+ } __packed type3C;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char channel; /* Byte 2 */
+ unsigned char target; /* Byte 3 */
+ enum myrb_devstate state; /* Byte 4 */
+ unsigned char rsvd1[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3D;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char optype; /* Byte 2 */
+ unsigned char opqual; /* Byte 3 */
+ unsigned short ev_seq; /* Bytes 4-5 */
+ unsigned char rsvd1[2]; /* Bytes 6-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed type3E;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char rsvd1[2]; /* Bytes 2-3 */
+ unsigned char rbld_rate; /* Byte 4 */
+ unsigned char rsvd2[3]; /* Bytes 5-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char rsvd3[4]; /* Bytes 12-15 */
+ } __packed type3R;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned short xfer_len; /* Bytes 2-3 */
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char ldev_num; /* Byte 12 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type4;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ struct {
+ unsigned short xfer_len:11; /* Bytes 2-3 */
+ unsigned char ldev_num:5; /* Byte 3 Bits 3-7 */
+ } __packed ld;
+ unsigned int lba; /* Bytes 4-7 */
+ u32 addr; /* Bytes 8-11 */
+ unsigned char sg_count:6; /* Byte 12 Bits 0-5 */
+ enum {
+ MYRB_SGL_ADDR32_COUNT32 = 0x0,
+ MYRB_SGL_ADDR32_COUNT16 = 0x1,
+ MYRB_SGL_COUNT32_ADDR32 = 0x2,
+ MYRB_SGL_COUNT16_ADDR32 = 0x3
+ } __packed sg_type:2; /* Byte 12 Bits 6-7 */
+ unsigned char rsvd[3]; /* Bytes 13-15 */
+ } __packed type5;
+ struct {
+ enum myrb_cmd_opcode opcode; /* Byte 0 */
+ unsigned char id; /* Byte 1 */
+ unsigned char opcode2; /* Byte 2 */
+ unsigned char rsvd1:8; /* Byte 3 */
+ u32 cmd_mbox_addr; /* Bytes 4-7 */
+ u32 stat_mbox_addr; /* Bytes 8-11 */
+ unsigned char rsvd2[4]; /* Bytes 12-15 */
+ } __packed typeX;
+};
+
+/*
+ * DAC960 V1 Firmware Controller Status Mailbox structure.
+ */
+struct myrb_stat_mbox {
+ unsigned char id; /* Byte 0 */
+ unsigned char rsvd:7; /* Byte 1 Bits 0-6 */
+ unsigned char valid:1; /* Byte 1 Bit 7 */
+ unsigned short status; /* Bytes 2-3 */
+};
+
+struct myrb_cmdblk {
+ union myrb_cmd_mbox mbox;
+ unsigned short status;
+ struct completion *completion;
+ struct myrb_dcdb *dcdb;
+ dma_addr_t dcdb_addr;
+ struct myrb_sge *sgl;
+ dma_addr_t sgl_addr;
+};
+
+struct myrb_hba {
+ unsigned int ldev_block_size;
+ unsigned char ldev_geom_heads;
+ unsigned char ldev_geom_sectors;
+ unsigned char bus_width;
+ unsigned short stripe_size;
+ unsigned short segment_size;
+ unsigned short new_ev_seq;
+ unsigned short old_ev_seq;
+ bool dual_mode_interface;
+ bool bgi_status_supported;
+ bool safte_enabled;
+ bool need_ldev_info;
+ bool need_err_info;
+ bool need_rbld;
+ bool need_cc_status;
+ bool need_bgi_status;
+ bool rbld_first;
+
+ struct pci_dev *pdev;
+ struct Scsi_Host *host;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *dcdb_pool;
+
+ spinlock_t queue_lock;
+
+ void (*qcmd)(struct myrb_hba *cs, struct myrb_cmdblk *cmd_blk);
+ void (*write_cmd_mbox)(union myrb_cmd_mbox *next_mbox,
+ union myrb_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ unsigned int ctlr_num;
+ unsigned char model_name[20];
+ unsigned char fw_version[12];
+
+ unsigned int irq;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+
+ size_t cmd_mbox_size;
+ dma_addr_t cmd_mbox_addr;
+ union myrb_cmd_mbox *first_cmd_mbox;
+ union myrb_cmd_mbox *last_cmd_mbox;
+ union myrb_cmd_mbox *next_cmd_mbox;
+ union myrb_cmd_mbox *prev_cmd_mbox1;
+ union myrb_cmd_mbox *prev_cmd_mbox2;
+
+ size_t stat_mbox_size;
+ dma_addr_t stat_mbox_addr;
+ struct myrb_stat_mbox *first_stat_mbox;
+ struct myrb_stat_mbox *last_stat_mbox;
+ struct myrb_stat_mbox *next_stat_mbox;
+
+ struct myrb_cmdblk dcmd_blk;
+ struct myrb_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrb_enquiry *enquiry;
+ dma_addr_t enquiry_addr;
+
+ struct myrb_error_entry *err_table;
+ dma_addr_t err_table_addr;
+
+ unsigned short last_rbld_status;
+
+ struct myrb_ldev_info *ldev_info_buf;
+ dma_addr_t ldev_info_addr;
+
+ struct myrb_bgi_status bgi_status;
+
+ struct mutex dma_mutex;
+};
+
+/*
+ * DAC960 LA Series Controller Interface Register Offsets.
+ */
+#define DAC960_LA_mmio_size 0x80
+
+enum DAC960_LA_reg_offset {
+ DAC960_LA_IRQMASK_OFFSET = 0x34,
+ DAC960_LA_CMDOP_OFFSET = 0x50,
+ DAC960_LA_CMDID_OFFSET = 0x51,
+ DAC960_LA_MBOX2_OFFSET = 0x52,
+ DAC960_LA_MBOX3_OFFSET = 0x53,
+ DAC960_LA_MBOX4_OFFSET = 0x54,
+ DAC960_LA_MBOX5_OFFSET = 0x55,
+ DAC960_LA_MBOX6_OFFSET = 0x56,
+ DAC960_LA_MBOX7_OFFSET = 0x57,
+ DAC960_LA_MBOX8_OFFSET = 0x58,
+ DAC960_LA_MBOX9_OFFSET = 0x59,
+ DAC960_LA_MBOX10_OFFSET = 0x5A,
+ DAC960_LA_MBOX11_OFFSET = 0x5B,
+ DAC960_LA_MBOX12_OFFSET = 0x5C,
+ DAC960_LA_STSID_OFFSET = 0x5D,
+ DAC960_LA_STS_OFFSET = 0x5E,
+ DAC960_LA_IDB_OFFSET = 0x60,
+ DAC960_LA_ODB_OFFSET = 0x61,
+ DAC960_LA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 LA Series Inbound Door Bell Register.
+ */
+#define DAC960_LA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LA_IDB_GEN_IRQ 0x04
+#define DAC960_LA_IDB_CTRL_RESET 0x08
+#define DAC960_LA_IDB_MMBOX_NEW_CMD 0x10
+
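+/*
+ * Reading the inbound doorbell back returns a status view at the same
+ * bit positions; the two definitions below name those read-side bits
+ * (the PG and PD variants further down follow the same write/read split).
+ */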
+#define DAC960_LA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_LA_IDB_INIT_DONE 0x02
+
+/*
+ * DAC960 LA Series Outbound Door Bell Register.
+ */
+#define DAC960_LA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LA_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_LA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LA Series Interrupt Mask Register.
+ */
+#define DAC960_LA_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LA Series Error Status Register.
+ */
+#define DAC960_LA_ERRSTS_PENDING 0x02
+
+/*
+ * DAC960 PG Series Controller Interface Register Offsets.
+ */
+#define DAC960_PG_mmio_size 0x2000
+
+enum DAC960_PG_reg_offset {
+ DAC960_PG_IDB_OFFSET = 0x0020,
+ DAC960_PG_ODB_OFFSET = 0x002C,
+ DAC960_PG_IRQMASK_OFFSET = 0x0034,
+ DAC960_PG_CMDOP_OFFSET = 0x1000,
+ DAC960_PG_CMDID_OFFSET = 0x1001,
+ DAC960_PG_MBOX2_OFFSET = 0x1002,
+ DAC960_PG_MBOX3_OFFSET = 0x1003,
+ DAC960_PG_MBOX4_OFFSET = 0x1004,
+ DAC960_PG_MBOX5_OFFSET = 0x1005,
+ DAC960_PG_MBOX6_OFFSET = 0x1006,
+ DAC960_PG_MBOX7_OFFSET = 0x1007,
+ DAC960_PG_MBOX8_OFFSET = 0x1008,
+ DAC960_PG_MBOX9_OFFSET = 0x1009,
+ DAC960_PG_MBOX10_OFFSET = 0x100A,
+ DAC960_PG_MBOX11_OFFSET = 0x100B,
+ DAC960_PG_MBOX12_OFFSET = 0x100C,
+ DAC960_PG_STSID_OFFSET = 0x1018,
+ DAC960_PG_STS_OFFSET = 0x101A,
+ DAC960_PG_ERRSTS_OFFSET = 0x103F,
+};
+
+/*
+ * DAC960 PG Series Inbound Door Bell Register.
+ */
+#define DAC960_PG_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PG_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PG_IDB_GEN_IRQ 0x04
+#define DAC960_PG_IDB_CTRL_RESET 0x08
+#define DAC960_PG_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_PG_IDB_HWMBOX_FULL 0x01
+#define DAC960_PG_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PG Series Outbound Door Bell Register.
+ */
+#define DAC960_PG_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PG_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_PG_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_PG_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 PG Series Interrupt Mask Register.
+ */
+#define DAC960_PG_IRQMASK_MSI_MASK1 0x03
+#define DAC960_PG_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_PG_IRQMASK_MSI_MASK2 0xF8
+
+/*
+ * DAC960 PG Series Error Status Register.
+ */
+#define DAC960_PG_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 PD Series Controller Interface Register Offsets.
+ */
+#define DAC960_PD_mmio_size 0x80
+
+enum DAC960_PD_reg_offset {
+ DAC960_PD_CMDOP_OFFSET = 0x00,
+ DAC960_PD_CMDID_OFFSET = 0x01,
+ DAC960_PD_MBOX2_OFFSET = 0x02,
+ DAC960_PD_MBOX3_OFFSET = 0x03,
+ DAC960_PD_MBOX4_OFFSET = 0x04,
+ DAC960_PD_MBOX5_OFFSET = 0x05,
+ DAC960_PD_MBOX6_OFFSET = 0x06,
+ DAC960_PD_MBOX7_OFFSET = 0x07,
+ DAC960_PD_MBOX8_OFFSET = 0x08,
+ DAC960_PD_MBOX9_OFFSET = 0x09,
+ DAC960_PD_MBOX10_OFFSET = 0x0A,
+ DAC960_PD_MBOX11_OFFSET = 0x0B,
+ DAC960_PD_MBOX12_OFFSET = 0x0C,
+ DAC960_PD_STSID_OFFSET = 0x0D,
+ DAC960_PD_STS_OFFSET = 0x0E,
+ DAC960_PD_ERRSTS_OFFSET = 0x3F,
+ DAC960_PD_IDB_OFFSET = 0x40,
+ DAC960_PD_ODB_OFFSET = 0x41,
+ DAC960_PD_IRQEN_OFFSET = 0x43,
+};
+
+/*
+ * DAC960 PD Series Inbound Door Bell Register.
+ */
+#define DAC960_PD_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_PD_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_PD_IDB_GEN_IRQ 0x04
+#define DAC960_PD_IDB_CTRL_RESET 0x08
+
+#define DAC960_PD_IDB_HWMBOX_FULL 0x01
+#define DAC960_PD_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 PD Series Outbound Door Bell Register.
+ */
+#define DAC960_PD_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_PD_ODB_HWMBOX_STS_AVAIL 0x01
+
+/*
+ * DAC960 PD Series Interrupt Enable Register.
+ */
+#define DAC960_PD_IRQMASK_ENABLE_IRQ 0x01
+
+/*
+ * DAC960 PD Series Error Status Register.
+ */
+#define DAC960_PD_ERRSTS_PENDING 0x04
+
+typedef int (*myrb_hw_init_t)(struct pci_dev *pdev,
+ struct myrb_hba *cb, void __iomem *base);
+typedef unsigned short (*mbox_mmio_init_t)(struct pci_dev *pdev,
+ void __iomem *base,
+ union myrb_cmd_mbox *mbox);
+
+struct myrb_privdata {
+ myrb_hw_init_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
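+
+/*
+ * Illustrative sketch (not part of this header): each controller family
+ * is expected to supply one myrb_privdata instance tying its hardware
+ * init routine, IRQ handler, and register window size together, e.g.
+ * for the LA series something like:
+ *
+ *	static struct myrb_privdata DAC960_LA_privdata = {
+ *		.hw_init	= DAC960_LA_hw_init,
+ *		.irq_handler	= DAC960_LA_intr_handler,
+ *		.mmio_size	= DAC960_LA_mmio_size,
+ *	};
+ *
+ * The two function names above are assumptions; the actual definitions
+ * live in myrb.c.
+ */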
+
+#endif /* MYRB_H */
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
new file mode 100644
index 000000000000..0264a2e2bc19
--- /dev/null
+++ b/drivers/scsi/myrs.c
@@ -0,0 +1,3268 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/raid_class.h>
+#include <asm/unaligned.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_tcq.h>
+#include "myrs.h"
+
+static struct raid_template *myrs_raid_template;
+
+static struct myrs_devstate_name_entry {
+ enum myrs_devstate state;
+ char *name;
+} myrs_devstate_name_list[] = {
+ { MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
+ { MYRS_DEVICE_ONLINE, "Online" },
+ { MYRS_DEVICE_REBUILD, "Rebuild" },
+ { MYRS_DEVICE_MISSING, "Missing" },
+ { MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
+ { MYRS_DEVICE_OFFLINE, "Offline" },
+ { MYRS_DEVICE_CRITICAL, "Critical" },
+ { MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
+ { MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
+ { MYRS_DEVICE_STANDBY, "Standby" },
+ { MYRS_DEVICE_INVALID_STATE, "Invalid" },
+};
+
+static char *myrs_devstate_name(enum myrs_devstate state)
+{
+ struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
+ if (entry[i].state == state)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+static struct myrs_raid_level_name_entry {
+ enum myrs_raid_level level;
+ char *name;
+} myrs_raid_level_name_list[] = {
+ { MYRS_RAID_LEVEL0, "RAID0" },
+ { MYRS_RAID_LEVEL1, "RAID1" },
+ { MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
+ { MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
+ { MYRS_RAID_LEVEL6, "RAID6" },
+ { MYRS_RAID_JBOD, "JBOD" },
+ { MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
+ { MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
+ { MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
+ { MYRS_RAID_SPAN, "Mylex SPAN" },
+ { MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
+ { MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
+ { MYRS_RAID_PHYSICAL, "Physical device" },
+};
+
+static char *myrs_raid_level_name(enum myrs_raid_level level)
+{
+ struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
+ if (entry[i].level == level)
+ return entry[i].name;
+ }
+ return NULL;
+}
+
+/**
+ * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
+ */
+static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
+{
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ cmd_blk->status = 0;
+}
+
+/**
+ * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
+ */
+static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ void __iomem *base = cs->io_base;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;
+
+ cs->write_cmd_mbox(next_mbox, mbox);
+
+ if (cs->prev_cmd_mbox1->words[0] == 0 ||
+ cs->prev_cmd_mbox2->words[0] == 0)
+ cs->get_cmd_mbox(base);
+
+ cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
+ cs->prev_cmd_mbox1 = next_mbox;
+
+ if (++next_mbox > cs->last_cmd_mbox)
+ next_mbox = cs->first_cmd_mbox;
+
+ cs->next_cmd_mbox = next_mbox;
+}
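+
+/*
+ * Note on the mailbox ring above: next_cmd_mbox walks the array from
+ * first_cmd_mbox to last_cmd_mbox and then wraps. prev_cmd_mbox1/2
+ * remember the two most recently written slots; when either has been
+ * consumed (words[0] cleared), get_cmd_mbox() is called to nudge a
+ * controller that may have stopped polling the memory mailbox.
+ */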
+
+/**
+ * myrs_exec_cmd - executes V2 Command and waits for completion.
+ */
+static void myrs_exec_cmd(struct myrs_hba *cs,
+ struct myrs_cmdblk *cmd_blk)
+{
+ DECLARE_COMPLETION_ONSTACK(complete);
+ unsigned long flags;
+
+ cmd_blk->complete = &complete;
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+ WARN_ON(in_interrupt());
+ wait_for_completion(&complete);
+}
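+
+/*
+ * myrs_exec_cmd() itself does no locking beyond the queue submission,
+ * so the shared dcmd_blk/mcmd_blk command blocks are only safe when the
+ * caller serializes access; the helpers below hold dcmd_mutex around
+ * every use of dcmd_blk for exactly that reason.
+ */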
+
+/**
+ * myrs_report_progress - prints progress message
+ */
+static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
+ unsigned char *msg, unsigned long blocks,
+ unsigned long size)
+{
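+	/*
+	 * Both operands are pre-shifted by 7 so that the 100x scaling
+	 * below cannot overflow a 32-bit int on very large drives.
+	 */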
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: %s in Progress: %d%% completed\n",
+ ldev_num, msg,
+ (100 * (int)(blocks >> 7)) / (int)(size >> 7));
+}
+
+/**
+ * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
+ */
+static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ctlr_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+ struct myrs_ctlr_info old;
+
+ memcpy(&old, cs->ctlr_info, sizeof(struct myrs_ctlr_info));
+ ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
+ sizeof(struct myrs_ctlr_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ctlr_info.id = MYRS_DCMD_TAG;
+ mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ctlr_info.control.dma_ctrl_to_host = true;
+ mbox->ctlr_info.control.no_autosense = true;
+ mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
+ mbox->ctlr_info.ctlr_num = 0;
+ mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
+ sgl = &mbox->ctlr_info.dma_addr;
+ sgl->sge[0].sge_addr = ctlr_info_addr;
+ sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
+ sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (cs->ctlr_info->bg_init_active +
+ cs->ctlr_info->ldev_init_active +
+ cs->ctlr_info->pdev_init_active +
+ cs->ctlr_info->cc_active +
+ cs->ctlr_info->rbld_active +
+ cs->ctlr_info->exp_active != 0)
+ cs->needs_update = true;
+ if (cs->ctlr_info->ldev_present != old.ldev_present ||
+ cs->ctlr_info->ldev_critical != old.ldev_critical ||
+ cs->ctlr_info->ldev_offline != old.ldev_offline)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical drive count changed (critical %d, offline %d, present %d)\n",
+ cs->ctlr_info->ldev_critical,
+ cs->ctlr_info->ldev_offline,
+ cs->ctlr_info->ldev_present);
+ }
+
+ return status;
+}
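+
+/*
+ * The info helpers below all follow the pattern above: stream-map the
+ * caller's buffer, build an IOCTL mailbox under dcmd_mutex with a single
+ * scatter/gather entry pointing at it, execute synchronously, unmap, and
+ * then diff the new data against the previous copy to log state changes.
+ */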
+
+/**
+ * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
+ */
+static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
+ unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t ldev_info_addr;
+ struct myrs_ldev_info ldev_info_orig;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
+ ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
+ sizeof(struct myrs_ldev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->ldev_info.id = MYRS_DCMD_TAG;
+ mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->ldev_info.control.dma_ctrl_to_host = true;
+ mbox->ldev_info.control.no_autosense = true;
+ mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
+ sgl = &mbox->ldev_info.dma_addr;
+ sgl->sge[0].sge_addr = ldev_info_addr;
+ sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
+ sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
+ if (status == MYRS_STATUS_SUCCESS) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+ struct myrs_ldev_info *new = ldev_info;
+ struct myrs_ldev_info *old = &ldev_info_orig;
+ unsigned long ldev_size = new->cfg_devsize;
+
+ if (new->dev_state != old->dev_state) {
+ const char *name;
+
+ name = myrs_devstate_name(new->dev_state);
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d is now %s\n",
+ ldev_num, name ? name : "Invalid");
+ }
+ if ((new->soft_errs != old->soft_errs) ||
+ (new->cmds_failed != old->cmds_failed) ||
+ (new->deferred_write_errs != old->deferred_write_errs))
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
+ ldev_num, new->soft_errs,
+ new->cmds_failed,
+ new->deferred_write_errs);
+ if (new->bg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Background Initialization",
+ new->bg_init_lba, ldev_size);
+ else if (new->fg_init_active)
+ myrs_report_progress(cs, ldev_num,
+ "Foreground Initialization",
+ new->fg_init_lba, ldev_size);
+ else if (new->migration_active)
+ myrs_report_progress(cs, ldev_num,
+ "Data Migration",
+ new->migration_lba, ldev_size);
+ else if (new->patrol_active)
+ myrs_report_progress(cs, ldev_num,
+ "Patrol Operation",
+ new->patrol_lba, ldev_size);
+ if (old->bg_init_active && !new->bg_init_active)
+ shost_printk(KERN_INFO, cs->host,
+ "Logical Drive %d: Background Initialization %s\n",
+ ldev_num,
+ (new->ldev_control.ldev_init_done ?
+ "Completed" : "Failed"));
+ }
+ return status;
+}
+
+/**
+ * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
+ */
+static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_pdev_info *pdev_info)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ dma_addr_t pdev_info_addr;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
+ sizeof(struct myrs_pdev_info),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.id = MYRS_DCMD_TAG;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = pdev_info_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+ dev_dbg(&cs->host->shost_gendev,
+ "Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
+ channel, target, lun);
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
+ sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_dev_op - executes a "Device Operation" Command
+ */
+static unsigned char myrs_dev_op(struct myrs_hba *cs,
+ enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ myrs_reset_cmd(cmd_blk);
+ mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->dev_op.id = MYRS_DCMD_TAG;
+ mbox->dev_op.control.dma_ctrl_to_host = true;
+ mbox->dev_op.control.no_autosense = true;
+ mbox->dev_op.ioctl_opcode = opcode;
+ mbox->dev_op.opdev = opdev;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ return status;
+}
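+
+/*
+ * Usage sketch: flush_cache_store() below issues
+ * myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER)
+ * to flush the controller cache.
+ */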
+
+/**
+ * myrs_translate_pdev - translates a Physical Device Channel and
+ * TargetID into a Logical Device.
+ */
+static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
+ unsigned char channel, unsigned char target, unsigned char lun,
+ struct myrs_devmap *devmap)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t devmap_addr;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ memset(devmap, 0x0, sizeof(struct myrs_devmap));
+ devmap_addr = dma_map_single(&pdev->dev, devmap,
+ sizeof(struct myrs_devmap),
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, devmap_addr))
+ return MYRS_STATUS_FAILED;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ mbox = &cmd_blk->mbox;
+ mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->pdev_info.control.dma_ctrl_to_host = true;
+ mbox->pdev_info.control.no_autosense = true;
+ mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
+ mbox->pdev_info.pdev.target = target;
+ mbox->pdev_info.pdev.channel = channel;
+ mbox->pdev_info.pdev.lun = lun;
+ mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
+ sgl = &mbox->pdev_info.dma_addr;
+ sgl->sge[0].sge_addr = devmap_addr;
+ sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
+
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ dma_unmap_single(&pdev->dev, devmap_addr,
+ sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
+ return status;
+}
+
+/**
+ * myrs_get_event - executes a Get Event Command
+ */
+static unsigned char myrs_get_event(struct myrs_hba *cs,
+ unsigned int event_num, struct myrs_event *event_buf)
+{
+ struct pci_dev *pdev = cs->pdev;
+ dma_addr_t event_addr;
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ event_addr = dma_map_single(&pdev->dev, event_buf,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, event_addr))
+ return MYRS_STATUS_FAILED;
+
+ mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->get_event.dma_size = sizeof(struct myrs_event);
+ mbox->get_event.evnum_upper = event_num >> 16;
+ mbox->get_event.ctlr_num = 0;
+ mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
+ mbox->get_event.evnum_lower = event_num & 0xFFFF;
+ sgl = &mbox->get_event.dma_addr;
+ sgl->sge[0].sge_addr = event_addr;
+ sgl->sge[0].sge_count = mbox->get_event.dma_size;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ dma_unmap_single(&pdev->dev, event_addr,
+ sizeof(struct myrs_event), DMA_FROM_DEVICE);
+
+ return status;
+}
+
+/**
+ * myrs_get_fwstatus - executes a Get Health Status Command
+ */
+static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
+{
+ struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ union myrs_sgl *sgl;
+ unsigned char status;
+
+ myrs_reset_cmd(cmd_blk);
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_MCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.dma_size = sizeof(struct myrs_fwstat);
+ mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
+ sgl = &mbox->common.dma_addr;
+ sgl->sge[0].sge_addr = cs->fwstat_addr;
+ sgl->sge[0].sge_count = mbox->common.dma_size;
+ dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+
+ return status;
+}
+
+/**
+ * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
+ */
+static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
+ enable_mbox_t enable_mbox_fn)
+{
+ void __iomem *base = cs->io_base;
+ struct pci_dev *pdev = cs->pdev;
+ union myrs_cmd_mbox *cmd_mbox;
+ struct myrs_stat_mbox *stat_mbox;
+ union myrs_cmd_mbox *mbox;
+ dma_addr_t mbox_addr;
+ unsigned char status = MYRS_STATUS_FAILED;
+
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dev_err(&pdev->dev, "DMA mask out of range\n");
+ return false;
+ }
+
+ /* Temporary DMA buffer, used only to issue the SET_MEM_MBOX command */
+ mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ &mbox_addr, GFP_KERNEL);
+ /* dma_alloc_coherent() returns NULL on failure (it is not a mapping) */
+ if (!mbox)
+ return false;
+
+ /* These are the base addresses for the command memory mailbox array */
+ cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
+ cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
+ &cs->cmd_mbox_addr, GFP_KERNEL);
+ if (!cmd_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
+ goto out_free;
+ }
+ cs->first_cmd_mbox = cmd_mbox;
+ cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
+ cs->last_cmd_mbox = cmd_mbox;
+ cs->next_cmd_mbox = cs->first_cmd_mbox;
+ cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
+ cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
+
+ /* These are the base addresses for the status memory mailbox array */
+ cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
+ stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
+ &cs->stat_mbox_addr, GFP_KERNEL);
+ if (!stat_mbox) {
+ dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
+ goto out_free;
+ }
+
+ cs->first_stat_mbox = stat_mbox;
+ stat_mbox += MYRS_MAX_STAT_MBOX - 1;
+ cs->last_stat_mbox = stat_mbox;
+ cs->next_stat_mbox = cs->first_stat_mbox;
+
+ cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct myrs_fwstat),
+ &cs->fwstat_addr, GFP_KERNEL);
+ if (!cs->fwstat_buf) {
+ dev_err(&pdev->dev, "Failed to allocate firmware health buffer\n");
+ goto out_free;
+ }
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->ctlr_info)
+ goto out_free;
+
+ cs->event_buf = kzalloc(sizeof(struct myrs_event),
+ GFP_KERNEL | GFP_DMA);
+ if (!cs->event_buf)
+ goto out_free;
+
+ /* Enable the Memory Mailbox Interface. */
+ memset(mbox, 0, sizeof(union myrs_cmd_mbox));
+ mbox->set_mbox.id = 1;
+ mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->set_mbox.control.no_autosense = true;
+ mbox->set_mbox.first_cmd_mbox_size_kb =
+ (MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
+ mbox->set_mbox.first_stat_mbox_size_kb =
+ (MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
+ mbox->set_mbox.second_cmd_mbox_size_kb = 0;
+ mbox->set_mbox.second_stat_mbox_size_kb = 0;
+ mbox->set_mbox.sense_len = 0;
+ mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
+ mbox->set_mbox.fwstat_buf_size_kb = 1;
+ mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
+ mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
+ mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
+ status = enable_mbox_fn(base, mbox_addr);
+
+out_free:
+ dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
+ mbox, mbox_addr);
+ if (status != MYRS_STATUS_SUCCESS)
+ dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
+ status);
+ return (status == MYRS_STATUS_SUCCESS);
+}
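+
+/*
+ * Worked example for the SET_MEM_MBOX sizing above: the command ring
+ * occupies MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox) bytes, and
+ * first_cmd_mbox_size_kb is that total shifted right by 10, i.e. the
+ * ring size expressed in whole kilobytes; the status ring is sized the
+ * same way from MYRS_MAX_STAT_MBOX.
+ */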
+
+/**
+ * myrs_get_config - reads the Configuration Information
+ */
+static int myrs_get_config(struct myrs_hba *cs)
+{
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ struct Scsi_Host *shost = cs->host;
+ unsigned char status;
+ unsigned char model[20];
+ unsigned char fw_version[12];
+ int i, model_len;
+
+ /* Get data into dma-able area, then copy into permanent location */
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to get controller information\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the Controller Model Name and Full Model Name fields. */
+ model_len = sizeof(info->ctlr_name);
+ if (model_len > sizeof(model)-1)
+ model_len = sizeof(model)-1;
+ memcpy(model, info->ctlr_name, model_len);
+ model_len--;
+ while (model_len > 0 &&
+ (model[model_len] == ' ' || model[model_len] == '\0'))
+ model_len--;
+ model[++model_len] = '\0';
+ strcpy(cs->model_name, "DAC960 ");
+ strcat(cs->model_name, model);
+ /* Initialize the Controller Firmware Version field. */
+ sprintf(fw_version, "%d.%02d-%02d",
+ info->fw_major_version, info->fw_minor_version,
+ info->fw_turn_number);
+ if (info->fw_major_version == 6 &&
+ info->fw_minor_version == 0 &&
+ info->fw_turn_number < 1) {
+ shost_printk(KERN_WARNING, shost,
+ "FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
+ "STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
+ "PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+ fw_version);
+ return -ENODEV;
+ }
+ /* Initialize the Controller Channels and Targets. */
+ shost->max_channel = info->physchan_present + info->virtchan_present;
+ shost->max_id = info->max_targets[0];
+ for (i = 1; i < 16; i++) {
+ if (!info->max_targets[i])
+ continue;
+ if (shost->max_id < info->max_targets[i])
+ shost->max_id = info->max_targets[i];
+ }
+
+ /*
+ * Initialize the Controller Queue Depth, Driver Queue Depth,
+ * Logical Drive Count, Maximum Blocks per Command, Controller
+ * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
+ * The Driver Queue Depth must be at most three less than
+ * the Controller Queue Depth; tag '1' is reserved for
+ * direct commands, and tag '2' for monitoring commands.
+ */
+ shost->can_queue = info->max_tcq - 3;
+ if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
+ shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
+ shost->max_sectors = info->max_transfer_size;
+ shost->sg_tablesize = info->max_sge;
+ if (shost->sg_tablesize > MYRS_SG_LIMIT)
+ shost->sg_tablesize = MYRS_SG_LIMIT;
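+	/*
+	 * Illustrative numbers (hypothetical): a controller reporting
+	 * max_tcq = 512 yields can_queue = 509, which is then capped to
+	 * MYRS_MAX_CMD_MBOX - 3 so that every outstanding command always
+	 * has a free slot in the memory mailbox ring.
+	 */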
+
+ shost_printk(KERN_INFO, shost,
+ "Configuring %s PCI RAID Controller\n", model);
+ shost_printk(KERN_INFO, shost,
+ " Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+ fw_version, info->physchan_present, info->mem_size_mb);
+
+ shost_printk(KERN_INFO, shost,
+ " Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
+ shost->can_queue, shost->max_sectors);
+
+ shost_printk(KERN_INFO, shost,
+ " Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
+ shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
+ for (i = 0; i < info->physchan_max; i++) {
+ if (!info->max_targets[i])
+ continue;
+ shost_printk(KERN_INFO, shost,
+ " Device Channel %d: max %d devices\n",
+ i, info->max_targets[i]);
+ }
+ shost_printk(KERN_INFO, shost,
+ " Physical: %d/%d channels, %d disks, %d devices\n",
+ info->physchan_present, info->physchan_max,
+ info->pdisk_present, info->pdev_present);
+
+ shost_printk(KERN_INFO, shost,
+ " Logical: %d/%d channels, %d disks\n",
+ info->virtchan_present, info->virtchan_max,
+ info->ldev_present);
+ return 0;
+}
+
+/*
+ * Controller event table. The first character of each message encodes
+ * the event class (P/S physical device, L logical drive state, M logical
+ * drive management, E enclosure, C controller); myrs_log_event() below
+ * switches on it.
+ */
+static struct {
+ int ev_code;
+ unsigned char *ev_msg;
+} myrs_ev_list[] = {
+ /* Physical Device Events (0x0000 - 0x007F) */
+ { 0x0001, "P Online" },
+ { 0x0002, "P Standby" },
+ { 0x0005, "P Automatic Rebuild Started" },
+ { 0x0006, "P Manual Rebuild Started" },
+ { 0x0007, "P Rebuild Completed" },
+ { 0x0008, "P Rebuild Cancelled" },
+ { 0x0009, "P Rebuild Failed for Unknown Reasons" },
+ { 0x000A, "P Rebuild Failed due to New Physical Device" },
+ { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+ { 0x000C, "S Offline" },
+ { 0x000D, "P Found" },
+ { 0x000E, "P Removed" },
+ { 0x000F, "P Unconfigured" },
+ { 0x0010, "P Expand Capacity Started" },
+ { 0x0011, "P Expand Capacity Completed" },
+ { 0x0012, "P Expand Capacity Failed" },
+ { 0x0013, "P Command Timed Out" },
+ { 0x0014, "P Command Aborted" },
+ { 0x0015, "P Command Retried" },
+ { 0x0016, "P Parity Error" },
+ { 0x0017, "P Soft Error" },
+ { 0x0018, "P Miscellaneous Error" },
+ { 0x0019, "P Reset" },
+ { 0x001A, "P Active Spare Found" },
+ { 0x001B, "P Warm Spare Found" },
+ { 0x001C, "S Sense Data Received" },
+ { 0x001D, "P Initialization Started" },
+ { 0x001E, "P Initialization Completed" },
+ { 0x001F, "P Initialization Failed" },
+ { 0x0020, "P Initialization Cancelled" },
+ { 0x0021, "P Failed because Write Recovery Failed" },
+ { 0x0022, "P Failed because SCSI Bus Reset Failed" },
+ { 0x0023, "P Failed because of Double Check Condition" },
+ { 0x0024, "P Failed because Device Cannot Be Accessed" },
+ { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+ { 0x0026, "P Failed because of Bad Tag from Device" },
+ { 0x0027, "P Failed because of Command Timeout" },
+ { 0x0028, "P Failed because of System Reset" },
+ { 0x0029, "P Failed because of Busy Status or Parity Error" },
+ { 0x002A, "P Failed because Host Set Device to Failed State" },
+ { 0x002B, "P Failed because of Selection Timeout" },
+ { 0x002C, "P Failed because of SCSI Bus Phase Error" },
+ { 0x002D, "P Failed because Device Returned Unknown Status" },
+ { 0x002E, "P Failed because Device Not Ready" },
+ { 0x002F, "P Failed because Device Not Found at Startup" },
+ { 0x0030, "P Failed because COD Write Operation Failed" },
+ { 0x0031, "P Failed because BDT Write Operation Failed" },
+ { 0x0039, "P Missing at Startup" },
+ { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+ { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+ { 0x003D, "P Standby Rebuild Started" },
+ /* Logical Device Events (0x0080 - 0x00FF) */
+ { 0x0080, "M Consistency Check Started" },
+ { 0x0081, "M Consistency Check Completed" },
+ { 0x0082, "M Consistency Check Cancelled" },
+ { 0x0083, "M Consistency Check Completed With Errors" },
+ { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+ { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+ { 0x0086, "L Offline" },
+ { 0x0087, "L Critical" },
+ { 0x0088, "L Online" },
+ { 0x0089, "M Automatic Rebuild Started" },
+ { 0x008A, "M Manual Rebuild Started" },
+ { 0x008B, "M Rebuild Completed" },
+ { 0x008C, "M Rebuild Cancelled" },
+ { 0x008D, "M Rebuild Failed for Unknown Reasons" },
+ { 0x008E, "M Rebuild Failed due to New Physical Device" },
+ { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+ { 0x0090, "M Initialization Started" },
+ { 0x0091, "M Initialization Completed" },
+ { 0x0092, "M Initialization Cancelled" },
+ { 0x0093, "M Initialization Failed" },
+ { 0x0094, "L Found" },
+ { 0x0095, "L Deleted" },
+ { 0x0096, "M Expand Capacity Started" },
+ { 0x0097, "M Expand Capacity Completed" },
+ { 0x0098, "M Expand Capacity Failed" },
+ { 0x0099, "L Bad Block Found" },
+ { 0x009A, "L Size Changed" },
+ { 0x009B, "L Type Changed" },
+ { 0x009C, "L Bad Data Block Found" },
+ { 0x009E, "L Read of Data Block in BDT" },
+ { 0x009F, "L Write Back Data for Disk Block Lost" },
+ { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+ { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+ { 0x00A2, "L Standby Rebuild Started" },
+ /* Fault Management Events (0x0100 - 0x017F) */
+ { 0x0140, "E Fan %d Failed" },
+ { 0x0141, "E Fan %d OK" },
+ { 0x0142, "E Fan %d Not Present" },
+ { 0x0143, "E Power Supply %d Failed" },
+ { 0x0144, "E Power Supply %d OK" },
+ { 0x0145, "E Power Supply %d Not Present" },
+ { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+ { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+ { 0x0148, "E Temperature Sensor %d Temperature Normal" },
+ { 0x0149, "E Temperature Sensor %d Not Present" },
+ { 0x014A, "E Enclosure Management Unit %d Access Critical" },
+ { 0x014B, "E Enclosure Management Unit %d Access OK" },
+ { 0x014C, "E Enclosure Management Unit %d Access Offline" },
+ /* Controller Events (0x0180 - 0x01FF) */
+ { 0x0181, "C Cache Write Back Error" },
+ { 0x0188, "C Battery Backup Unit Found" },
+ { 0x0189, "C Battery Backup Unit Charge Level Low" },
+ { 0x018A, "C Battery Backup Unit Charge Level OK" },
+ { 0x0193, "C Installation Aborted" },
+ { 0x0195, "C Battery Backup Unit Physically Removed" },
+ { 0x0196, "C Memory Error During Warm Boot" },
+ { 0x019E, "C Memory Soft ECC Error Corrected" },
+ { 0x019F, "C Memory Hard ECC Error Corrected" },
+ { 0x01A2, "C Battery Backup Unit Failed" },
+ { 0x01AB, "C Mirror Race Recovery Failed" },
+ { 0x01AC, "C Mirror Race on Critical Drive" },
+ /* Controller Internal Processor Events */
+ { 0x0380, "C Internal Controller Hung" },
+ { 0x0381, "C Internal Controller Firmware Breakpoint" },
+ { 0x0390, "C Internal Controller i960 Processor Specific Error" },
+ { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+ { 0, "" }
+};
+
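+/**
+ * myrs_log_event - decodes a controller event and logs it
+ */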
+static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
+{
+ unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
+ int ev_idx = 0, ev_code;
+ unsigned char ev_type, *ev_msg;
+ struct Scsi_Host *shost = cs->host;
+ struct scsi_device *sdev;
+ struct scsi_sense_hdr sshdr = {0}; /* checked below even for non-sense events */
+ unsigned char sense_info[4];
+ unsigned char cmd_specific[4];
+
+ if (ev->ev_code == 0x1C) {
+ if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
+ memset(&sshdr, 0x0, sizeof(sshdr));
+ memset(sense_info, 0x0, sizeof(sense_info));
+ memset(cmd_specific, 0x0, sizeof(cmd_specific));
+ } else {
+ memcpy(sense_info, &ev->sense_data[3], 4);
+ memcpy(cmd_specific, &ev->sense_data[7], 4);
+ }
+ }
+ if (sshdr.sense_key == VENDOR_SPECIFIC &&
+ (sshdr.asc == 0x80 || sshdr.asc == 0x81))
+ ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
+ while (true) {
+ ev_code = myrs_ev_list[ev_idx].ev_code;
+ if (ev_code == ev->ev_code || ev_code == 0)
+ break;
+ ev_idx++;
+ }
+ ev_type = myrs_ev_list[ev_idx].ev_msg[0];
+ ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
+ if (ev_code == 0) {
+ shost_printk(KERN_WARNING, shost,
+ "Unknown Controller Event Code %04X\n",
+ ev->ev_code);
+ return;
+ }
+ switch (ev_type) {
+ case 'P':
+ sdev = scsi_device_lookup(shost, ev->channel,
+ ev->target, 0);
+ if (!sdev)
+ break;
+ sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
+ ev->ev_seq, ev_msg);
+ if (sdev->hostdata &&
+ sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ switch (ev->ev_code) {
+ case 0x0001:
+ case 0x0007:
+ pdev_info->dev_state = MYRS_DEVICE_ONLINE;
+ break;
+ case 0x0002:
+ pdev_info->dev_state = MYRS_DEVICE_STANDBY;
+ break;
+ case 0x000C:
+ pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
+ break;
+ case 0x000E:
+ pdev_info->dev_state = MYRS_DEVICE_MISSING;
+ break;
+ case 0x000F:
+ pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
+ break;
+ }
+ }
+ scsi_device_put(sdev);
+ break;
+ case 'L':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'M':
+ shost_printk(KERN_INFO, shost,
+ "event %d: Logical Drive %d %s\n",
+ ev->ev_seq, ev->lun, ev_msg);
+ cs->needs_update = true;
+ break;
+ case 'S':
+ if (sshdr.sense_key == NO_SENSE ||
+ (sshdr.sense_key == NOT_READY &&
+ sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
+ sshdr.ascq == 0x02)))
+ break;
+ shost_printk(KERN_INFO, shost,
+ "event %d: Physical Device %d:%d %s\n",
+ ev->ev_seq, ev->channel, ev->target, ev_msg);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+ ev->channel, ev->target,
+ sshdr.sense_key, sshdr.asc, sshdr.ascq);
+ shost_printk(KERN_INFO, shost,
+ "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
+ ev->channel, ev->target,
+ sense_info[0], sense_info[1],
+ sense_info[2], sense_info[3],
+ cmd_specific[0], cmd_specific[1],
+ cmd_specific[2], cmd_specific[3]);
+ break;
+ case 'E':
+ if (cs->disable_enc_msg)
+ break;
+ sprintf(msg_buf, ev_msg, ev->lun);
+ shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
+ ev->ev_seq, ev->target, msg_buf);
+ break;
+ case 'C':
+ shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
+ ev->ev_seq, ev_msg);
+ break;
+ default:
+ shost_printk(KERN_INFO, shost,
+ "event %d: Unknown Event Code %04X\n",
+ ev->ev_seq, ev->ev_code);
+ break;
+ }
+}
+
+/*
+ * SCSI sysfs interface functions
+ */
+static ssize_t raid_state_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int ret;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->dev_state);
+ } else {
+ struct myrs_pdev_info *pdev_info;
+ const char *name;
+
+ pdev_info = sdev->hostdata;
+ name = myrs_devstate_name(pdev_info->dev_state);
+ if (name)
+ ret = snprintf(buf, 32, "%s\n", name);
+ else
+ ret = snprintf(buf, 32, "Invalid (%02X)\n",
+ pdev_info->dev_state);
+ }
+ return ret;
+}
+
+static ssize_t raid_state_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ enum myrs_devstate new_state;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (!strncmp(buf, "offline", 7) ||
+ !strncmp(buf, "kill", 4))
+ new_state = MYRS_DEVICE_OFFLINE;
+ else if (!strncmp(buf, "online", 6))
+ new_state = MYRS_DEVICE_ONLINE;
+ else if (!strncmp(buf, "standby", 7))
+ new_state = MYRS_DEVICE_STANDBY;
+ else
+ return -EINVAL;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+ struct myrs_devmap *pdev_devmap =
+ (struct myrs_devmap *)&pdev_info->rsvd13;
+
+ if (pdev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
+ sdev->lun, pdev_devmap);
+ if (status != MYRS_STATUS_SUCCESS)
+ return -ENXIO;
+ ldev_num = pdev_devmap->ldev_num;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if (ldev_info->dev_state == new_state) {
+ sdev_printk(KERN_INFO, sdev,
+ "Device already in %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ ldev_num = ldev_info->ldev_num;
+ }
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
+ mbox->set_devstate.state = new_state;
+ mbox->set_devstate.ldev.ldev_num = ldev_num;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status == MYRS_STATUS_SUCCESS) {
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ struct myrs_pdev_info *pdev_info = sdev->hostdata;
+
+ pdev_info->dev_state = new_state;
+ } else {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ ldev_info->dev_state = new_state;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Set device state to %s\n",
+ myrs_devstate_name(new_state));
+ return count;
+ }
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to set device state to %s, status 0x%02x\n",
+ myrs_devstate_name(new_state), status);
+ return -EINVAL;
+}
+static DEVICE_ATTR_RW(raid_state);
+
+static ssize_t raid_level_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ const char *name = NULL;
+
+ if (!sdev->hostdata)
+ return snprintf(buf, 16, "Unknown\n");
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+
+ ldev_info = sdev->hostdata;
+ name = myrs_raid_level_name(ldev_info->raid_level);
+ if (!name)
+ return snprintf(buf, 32, "Invalid (%02X)\n",
+ ldev_info->raid_level);
+
+ } else
+ name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);
+
+ return snprintf(buf, 32, "%s\n", name);
+}
+static DEVICE_ATTR_RO(raid_level);
+
+static ssize_t rebuild_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not rebuilding\n");
+
+ ldev_info = sdev->hostdata;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->rbld_active) {
+ return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
+ (size_t)ldev_info->rbld_lba,
+ (size_t)ldev_info->cfg_devsize);
+ } else
+ return snprintf(buf, 32, "not rebuilding\n");
+}
+
+static ssize_t rebuild_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int rebuild, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &rebuild);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+
+ if (rebuild && ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Initiated; already in progress\n");
+ return -EALREADY;
+ }
+ if (!rebuild && !ldev_info->rbld_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not Cancelled; no rebuild in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (rebuild) {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
+ } else {
+ mbox->ldev_info.ldev.ldev_num = ldev_num;
+ mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status) {
+ sdev_printk(KERN_INFO, sdev,
+ "Rebuild Not %s, status 0x%02x\n",
+ rebuild ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
+ rebuild ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(rebuild);
+
+static ssize_t consistency_check_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+ unsigned char status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return snprintf(buf, 32, "physical device - not checking\n");
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (ldev_info->cc_active)
+ return snprintf(buf, 32, "checking block %zu of %zu\n",
+ (size_t)ldev_info->cc_lba,
+ (size_t)ldev_info->cfg_devsize);
+ else
+ return snprintf(buf, 32, "not checking\n");
+}
+
+static ssize_t consistency_check_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned short ldev_num;
+ unsigned char status;
+ int check, ret;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present)
+ return -EINVAL;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ ldev_num = ldev_info->ldev_num;
+
+ ret = kstrtoint(buf, 0, &check);
+ if (ret)
+ return ret;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Failed to get device information, status 0x%02x\n",
+ status);
+ return -EIO;
+ }
+ if (check && ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Initiated; "
+ "already in progress\n");
+ return -EALREADY;
+ }
+ if (!check && !ldev_info->cc_active) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not Cancelled; "
+ "check not in progress\n");
+ return count;
+ }
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ if (check) {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
+ mbox->cc.restore_consistency = true;
+ mbox->cc.initialized_area_only = false;
+ } else {
+ mbox->cc.ldev.ldev_num = ldev_num;
+ mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
+ }
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev_printk(KERN_INFO, sdev,
+ "Consistency Check Not %s, status 0x%02x\n",
+ check ? "Initiated" : "Cancelled", status);
+ ret = -EIO;
+ } else {
+ sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
+ check ? "Initiated" : "Cancelled");
+ ret = count;
+ }
+
+ return ret;
+}
+static DEVICE_ATTR_RW(consistency_check);
+
+static struct device_attribute *myrs_sdev_attrs[] = {
+ &dev_attr_consistency_check,
+ &dev_attr_rebuild,
+ &dev_attr_raid_state,
+ &dev_attr_raid_level,
+ NULL,
+};
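+/*
+ * The attributes above appear under each SCSI device's sysfs directory;
+ * a typical (illustrative) session, with H:C:T:L standing for the device
+ * address:
+ *
+ *	cat /sys/class/scsi_device/H:C:T:L/device/raid_state
+ *	echo 1 > /sys/class/scsi_device/H:C:T:L/device/rebuild
+ *	echo 1 > /sys/class/scsi_device/H:C:T:L/device/consistency_check
+ */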
+
+static ssize_t serial_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ char serial[17];
+
+ memcpy(serial, cs->ctlr_info->serial_number, 16);
+ serial[16] = '\0';
+ return snprintf(buf, 16, "%s\n", serial);
+}
+static DEVICE_ATTR_RO(serial);
+
+static ssize_t ctlr_num_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 20, "%d\n", cs->host->host_no);
+}
+static DEVICE_ATTR_RO(ctlr_num);
+
+static struct myrs_cpu_type_tbl {
+ enum myrs_cpu_type type;
+ char *name;
+} myrs_cpu_type_names[] = {
+ { MYRS_CPUTYPE_i960CA, "i960CA" },
+ { MYRS_CPUTYPE_i960RD, "i960RD" },
+ { MYRS_CPUTYPE_i960RN, "i960RN" },
+ { MYRS_CPUTYPE_i960RP, "i960RP" },
+ { MYRS_CPUTYPE_NorthBay, "NorthBay" },
+ { MYRS_CPUTYPE_StrongArm, "StrongARM" },
+ { MYRS_CPUTYPE_i960RM, "i960RM" },
+};
+
+static ssize_t processor_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cpu_type_tbl *tbl;
+ const char *first_processor = NULL;
+ const char *second_processor = NULL;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ ssize_t ret;
+ int i;
+
+ if (info->cpu[0].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[0].cpu_type) {
+ first_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (info->cpu[1].cpu_count) {
+ tbl = myrs_cpu_type_names;
+ for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
+ if (tbl[i].type == info->cpu[1].cpu_type) {
+ second_processor = tbl[i].name;
+ break;
+ }
+ }
+ }
+ if (first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
+ "2: %s (%s, %d cpus)\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count,
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else if (first_processor && !second_processor)
+ ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
+ info->cpu[0].cpu_name,
+ first_processor, info->cpu[0].cpu_count);
+ else if (!first_processor && second_processor)
+ ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
+ info->cpu[1].cpu_name,
+ second_processor, info->cpu[1].cpu_count);
+ else
+ ret = snprintf(buf, 64, "1: absent\n2: absent\n");
+
+ return ret;
+}
+static DEVICE_ATTR_RO(processor);
+
+static ssize_t model_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 28, "%s\n", cs->model_name);
+}
+static DEVICE_ATTR_RO(model);
+
+static ssize_t ctlr_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
+}
+static DEVICE_ATTR_RO(ctlr_type);
+
+static ssize_t cache_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
+}
+static DEVICE_ATTR_RO(cache_size);
+
+static ssize_t firmware_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 16, "%d.%02d-%02d\n",
+ cs->ctlr_info->fw_major_version,
+ cs->ctlr_info->fw_minor_version,
+ cs->ctlr_info->fw_turn_number);
+}
+static DEVICE_ATTR_RO(firmware);
+
+static ssize_t discovery_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk;
+ union myrs_cmd_mbox *mbox;
+ unsigned char status;
+
+ mutex_lock(&cs->dcmd_mutex);
+ cmd_blk = &cs->dcmd_blk;
+ myrs_reset_cmd(cmd_blk);
+ mbox = &cmd_blk->mbox;
+ mbox->common.opcode = MYRS_CMD_OP_IOCTL;
+ mbox->common.id = MYRS_DCMD_TAG;
+ mbox->common.control.dma_ctrl_to_host = true;
+ mbox->common.control.no_autosense = true;
+ mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
+ myrs_exec_cmd(cs, cmd_blk);
+ status = cmd_blk->status;
+ mutex_unlock(&cs->dcmd_mutex);
+ if (status != MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost,
+ "Discovery Not Initiated, status %02X\n",
+ status);
+ return -EINVAL;
+ }
+ shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
+ cs->next_evseq = 0;
+ cs->needs_update = true;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+ flush_delayed_work(&cs->monitor_work);
+ shost_printk(KERN_INFO, shost, "Discovery Completed\n");
+
+ return count;
+}
+static DEVICE_ATTR_WO(discovery);
+
+static ssize_t flush_cache_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+ unsigned char status;
+
+ status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
+ MYRS_RAID_CONTROLLER);
+ if (status == MYRS_STATUS_SUCCESS) {
+ shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
+ return count;
+ }
+ shost_printk(KERN_INFO, shost,
+ "Cache Flush failed, status 0x%02x\n", status);
+ return -EIO;
+}
+static DEVICE_ATTR_WO(flush_cache);
+
+static ssize_t disable_enclosure_messages_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct myrs_hba *cs = shost_priv(shost);
+
+ return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
+}
+
+static ssize_t disable_enclosure_messages_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ int value, ret;
+
+ ret = kstrtoint(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ if (value < 0 || value > 2)
+ return -EINVAL;
+
+ cs->disable_enc_msg = value;
+ return count;
+}
+static DEVICE_ATTR_RW(disable_enclosure_messages);
+
+static struct device_attribute *myrs_shost_attrs[] = {
+ &dev_attr_serial,
+ &dev_attr_ctlr_num,
+ &dev_attr_processor,
+ &dev_attr_model,
+ &dev_attr_ctlr_type,
+ &dev_attr_cache_size,
+ &dev_attr_firmware,
+ &dev_attr_discovery,
+ &dev_attr_flush_cache,
+ &dev_attr_disable_enclosure_messages,
+ NULL,
+};
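+/*
+ * Host-level attributes live under the Scsi_Host object, e.g.
+ * (illustrative) /sys/class/scsi_host/hostN/discovery to kick off
+ * device discovery, or .../flush_cache to flush the controller cache.
+ */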
+
+/*
+ * SCSI midlayer interface
+ */
+static int myrs_host_reset(struct scsi_cmnd *scmd)
+{
+ struct Scsi_Host *shost = scmd->device->host;
+ struct myrs_hba *cs = shost_priv(shost);
+
+ cs->reset(cs->io_base);
+ return SUCCESS;
+}
+
+static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
+ struct myrs_ldev_info *ldev_info)
+{
+ unsigned char modes[32], *mode_pg;
+ bool dbd;
+ size_t mode_len;
+
+ dbd = (scmd->cmnd[1] & 0x08) == 0x08;
+ if (dbd) {
+ mode_len = 24;
+ mode_pg = &modes[4];
+ } else {
+ mode_len = 32;
+ mode_pg = &modes[12];
+ }
+ memset(modes, 0, sizeof(modes));
+ modes[0] = mode_len - 1;
+ modes[2] = 0x10; /* Enable FUA */
+ if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
+ modes[2] |= 0x80;
+ if (!dbd) {
+ unsigned char *block_desc = &modes[4];
+
+ modes[3] = 8;
+ put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
+ put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
+ }
+ mode_pg[0] = 0x08;
+ mode_pg[1] = 0x12;
+ if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
+ mode_pg[2] |= 0x01;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ mode_pg[2] |= 0x04;
+ if (ldev_info->cacheline_size) {
+ mode_pg[2] |= 0x08;
+ put_unaligned_be16(1 << ldev_info->cacheline_size,
+ &mode_pg[14]);
+ }
+
+ scsi_sg_copy_from_buffer(scmd, modes, mode_len);
+}
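+
+/*
+ * The hand-built reply above emulates MODE SENSE page 0x08 (the SCSI
+ * caching mode page): the RCD and WCE bits are derived from the logical
+ * device's read/write cache settings, and the block descriptor (when
+ * DBD is clear) carries the device size and block length.
+ */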
+
+static int myrs_queuecommand(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct myrs_hba *cs = shost_priv(shost);
+ struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
+ union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
+ struct scsi_device *sdev = scmd->device;
+ union myrs_sgl *hw_sge;
+ dma_addr_t sense_addr;
+ struct scatterlist *sgl;
+ unsigned long flags, timeout;
+ int nsge;
+
+ if (!scmd->device->hostdata) {
+ scmd->result = (DID_NO_CONNECT << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ switch (scmd->cmnd[0]) {
+ case REPORT_LUNS:
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST,
+ 0x20, 0x0);
+ scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+ scmd->scsi_done(scmd);
+ return 0;
+ case MODE_SENSE:
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
+ (scmd->cmnd[2] & 0x3F) != 0x08) {
+ /* Illegal request, invalid field in CDB */
+ scsi_build_sense_buffer(0, scmd->sense_buffer,
+ ILLEGAL_REQUEST, 0x24, 0);
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ } else {
+ myrs_mode_sense(cs, scmd, ldev_info);
+ scmd->result = (DID_OK << 16);
+ }
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ break;
+ }
+
+ myrs_reset_cmd(cmd_blk);
+ cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
+ &sense_addr);
+ if (!cmd_blk->sense)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ cmd_blk->sense_addr = sense_addr;
+
+ timeout = scmd->request->timeout;
+ if (scmd->cmd_len <= 10) {
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
+ mbox->SCSI_10.pdev.lun = ldev_info->lun;
+ mbox->SCSI_10.pdev.target = ldev_info->target;
+ mbox->SCSI_10.pdev.channel = ldev_info->channel;
+ mbox->SCSI_10.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
+ mbox->SCSI_10.pdev.lun = sdev->lun;
+ mbox->SCSI_10.pdev.target = sdev->id;
+ mbox->SCSI_10.pdev.channel = sdev->channel;
+ }
+ mbox->SCSI_10.id = scmd->request->tag + 3;
+ mbox->SCSI_10.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_10.control.fua = true;
+ mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_10.cdb_len = scmd->cmd_len;
+ if (timeout > 60) {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_10.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_10.tmo.tmo_val = timeout;
+ }
+ memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_10.dma_addr;
+ cmd_blk->dcdb = NULL;
+ } else {
+ dma_addr_t dcdb_dma;
+
+ cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
+ &dcdb_dma);
+ if (!cmd_blk->dcdb) {
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->dcdb_dma = dcdb_dma;
+ if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
+ mbox->SCSI_255.pdev.lun = ldev_info->lun;
+ mbox->SCSI_255.pdev.target = ldev_info->target;
+ mbox->SCSI_255.pdev.channel = ldev_info->channel;
+ mbox->SCSI_255.pdev.ctlr = 0;
+ } else {
+ mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
+ mbox->SCSI_255.pdev.lun = sdev->lun;
+ mbox->SCSI_255.pdev.target = sdev->id;
+ mbox->SCSI_255.pdev.channel = sdev->channel;
+ }
+ mbox->SCSI_255.id = scmd->request->tag + 3;
+ mbox->SCSI_255.control.dma_ctrl_to_host =
+ (scmd->sc_data_direction == DMA_FROM_DEVICE);
+ if (scmd->request->cmd_flags & REQ_FUA)
+ mbox->SCSI_255.control.fua = true;
+ mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
+ mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
+ mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
+ mbox->SCSI_255.cdb_len = scmd->cmd_len;
+ mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
+ if (timeout > 60) {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
+ mbox->SCSI_255.tmo.tmo_val = timeout / 60;
+ } else {
+ mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
+ mbox->SCSI_255.tmo.tmo_val = timeout;
+ }
+ memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
+ hw_sge = &mbox->SCSI_255.dma_addr;
+ }
+ if (scmd->sc_data_direction == DMA_NONE)
+ goto submit;
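+	/*
+	 * Map the data buffer: up to two scatter/gather segments fit
+	 * directly into the mailbox; longer lists are allocated from the
+	 * SG pool and linked in via an extended SGE descriptor.
+	 */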
+ nsge = scsi_dma_map(scmd);
+ if (nsge == 1) {
+ sgl = scsi_sglist(scmd);
+ hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
+ hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
+ } else {
+ struct myrs_sge *hw_sgl;
+ dma_addr_t hw_sgl_addr;
+ int i;
+
+ if (nsge > 2) {
+ hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
+ &hw_sgl_addr);
+ if (WARN_ON(!hw_sgl)) {
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool,
+ cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ dma_pool_free(cs->sense_pool,
+ cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+ cmd_blk->sgl = hw_sgl;
+ cmd_blk->sgl_addr = hw_sgl_addr;
+ if (scmd->cmd_len <= 10)
+ mbox->SCSI_10.control.add_sge_mem = true;
+ else
+ mbox->SCSI_255.control.add_sge_mem = true;
+ hw_sge->ext.sge0_len = nsge;
+ hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
+ } else
+ hw_sgl = hw_sge->sge;
+
+ scsi_for_each_sg(scmd, sgl, nsge, i) {
+ if (WARN_ON(!hw_sgl)) {
+ scsi_dma_unmap(scmd);
+ scmd->result = (DID_ERROR << 16);
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
+ hw_sgl->sge_count = (u64)sg_dma_len(sgl);
+ hw_sgl++;
+ }
+ }
+submit:
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ myrs_qcmd(cs, cmd_blk);
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+
+ return 0;
+}
+
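+/*
+ * Logical devices are presented on the channels above the physical
+ * channels; translate a (channel, target) address back into the
+ * firmware's flat logical device number.
+ */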
+static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
+ struct scsi_device *sdev)
+{
+ unsigned short ldev_num;
+ unsigned int chan_offset =
+ sdev->channel - cs->ctlr_info->physchan_present;
+
+ ldev_num = sdev->id + chan_offset * sdev->host->max_id;
+
+ return ldev_num;
+}
+
+static int myrs_slave_alloc(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ unsigned char status;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return 0;
+
+ if (sdev->channel >= cs->ctlr_info->physchan_present) {
+ struct myrs_ldev_info *ldev_info;
+ unsigned short ldev_num;
+
+ if (sdev->lun > 0)
+ return -ENXIO;
+
+ ldev_num = myrs_translate_ldev(cs, sdev);
+
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ if (!ldev_info)
+ return -ENOMEM;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(ldev_info);
+ } else {
+ enum raid_level level;
+
+ dev_dbg(&sdev->sdev_gendev,
+ "Logical device mapping %d:%d:%d -> %d\n",
+ ldev_info->channel, ldev_info->target,
+ ldev_info->lun, ldev_info->ldev_num);
+
+ sdev->hostdata = ldev_info;
+ switch (ldev_info->raid_level) {
+ case MYRS_RAID_LEVEL0:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_LEVEL1:
+ level = RAID_LEVEL_1;
+ break;
+ case MYRS_RAID_LEVEL3:
+ case MYRS_RAID_LEVEL3F:
+ case MYRS_RAID_LEVEL3L:
+ level = RAID_LEVEL_3;
+ break;
+ case MYRS_RAID_LEVEL5:
+ case MYRS_RAID_LEVEL5L:
+ level = RAID_LEVEL_5;
+ break;
+ case MYRS_RAID_LEVEL6:
+ level = RAID_LEVEL_6;
+ break;
+ case MYRS_RAID_LEVELE:
+ case MYRS_RAID_NEWSPAN:
+ case MYRS_RAID_SPAN:
+ level = RAID_LEVEL_LINEAR;
+ break;
+ case MYRS_RAID_JBOD:
+ level = RAID_LEVEL_JBOD;
+ break;
+ default:
+ level = RAID_LEVEL_UNKNOWN;
+ break;
+ }
+ raid_set_level(myrs_raid_template,
+ &sdev->sdev_gendev, level);
+ if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
+ const char *name;
+
+ name = myrs_devstate_name(ldev_info->dev_state);
+ sdev_printk(KERN_DEBUG, sdev,
+ "logical device in state %s\n",
+ name ? name : "Invalid");
+ }
+ }
+ } else {
+ struct myrs_pdev_info *pdev_info;
+
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ if (!pdev_info)
+ return -ENOMEM;
+
+ status = myrs_get_pdev_info(cs, sdev->channel,
+ sdev->id, sdev->lun,
+ pdev_info);
+ if (status != MYRS_STATUS_SUCCESS) {
+ sdev->hostdata = NULL;
+ kfree(pdev_info);
+ return -ENXIO;
+ }
+ sdev->hostdata = pdev_info;
+ }
+ return 0;
+}
+
+static int myrs_slave_configure(struct scsi_device *sdev)
+{
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info;
+
+ if (sdev->channel > sdev->host->max_channel)
+ return -ENXIO;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present) {
+ /* Skip HBA device */
+ if (sdev->type == TYPE_RAID)
+ return -ENXIO;
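+		/*
+		 * Physical devices behind the RAID controller must not be
+		 * claimed by upper-level drivers such as sd.
+		 */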
+ sdev->no_uld_attach = 1;
+ return 0;
+ }
+ if (sdev->lun != 0)
+ return -ENXIO;
+
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ return -ENXIO;
+ if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
+ ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
+ sdev->wce_default_on = 1;
+ sdev->tagged_supported = 1;
+ return 0;
+}
+
+static void myrs_slave_destroy(struct scsi_device *sdev)
+{
+ kfree(sdev->hostdata);
+}
+
+struct scsi_host_template myrs_template = {
+ .module = THIS_MODULE,
+ .name = "DAC960",
+ .proc_name = "myrs",
+ .queuecommand = myrs_queuecommand,
+ .eh_host_reset_handler = myrs_host_reset,
+ .slave_alloc = myrs_slave_alloc,
+ .slave_configure = myrs_slave_configure,
+ .slave_destroy = myrs_slave_destroy,
+ .cmd_size = sizeof(struct myrs_cmdblk),
+ .shost_attrs = myrs_shost_attrs,
+ .sdev_attrs = myrs_sdev_attrs,
+ .this_id = -1,
+};
+
+static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct Scsi_Host *shost;
+ struct myrs_hba *cs;
+
+ shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
+ if (!shost)
+ return NULL;
+
+ shost->max_cmd_len = 16;
+ shost->max_lun = 256;
+ cs = shost_priv(shost);
+ mutex_init(&cs->dcmd_mutex);
+ mutex_init(&cs->cinfo_mutex);
+ cs->host = shost;
+
+ return cs;
+}
+
+/*
+ * RAID template functions
+ */
+
+/**
+ * myrs_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+myrs_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+
+ return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
+}
+
+/**
+ * myrs_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+myrs_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ u64 percent_complete = 0;
+ u8 status;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ return;
+ if (ldev_info->rbld_active) {
+ unsigned short ldev_num = ldev_info->ldev_num;
+
+ status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ percent_complete = ldev_info->rbld_lba * 100;
+ do_div(percent_complete, ldev_info->cfg_devsize);
+ }
+ raid_set_resync(myrs_raid_template, dev, percent_complete);
+}
+
+/**
+ * myrs_get_state - get raid volume status
+ * @dev: the device struct object
+ */
+static void
+myrs_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct myrs_hba *cs = shost_priv(sdev->host);
+ struct myrs_ldev_info *ldev_info = sdev->hostdata;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+
+ if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
+ state = RAID_STATE_UNKNOWN;
+ else {
+ switch (ldev_info->dev_state) {
+ case MYRS_DEVICE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MYRS_DEVICE_SUSPECTED_CRITICAL:
+ case MYRS_DEVICE_CRITICAL:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MYRS_DEVICE_REBUILD:
+ state = RAID_STATE_RESYNCING;
+ break;
+ case MYRS_DEVICE_UNCONFIGURED:
+ case MYRS_DEVICE_INVALID_STATE:
+ state = RAID_STATE_UNKNOWN;
+ break;
+ default:
+ state = RAID_STATE_OFFLINE;
+ }
+ }
+ raid_set_state(myrs_raid_template, dev, state);
+}
+
+struct raid_function_template myrs_raid_functions = {
+ .cookie = &myrs_template,
+ .is_raid = myrs_is_raid,
+ .get_resync = myrs_get_resync,
+ .get_state = myrs_get_state,
+};
+
+/*
+ * PCI interface functions
+ */
+void myrs_flush_cache(struct myrs_hba *cs)
+{
+ myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
+}
+
+static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
+ struct scsi_cmnd *scmd)
+{
+ unsigned char status;
+
+ if (!cmd_blk)
+ return;
+
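+	/*
+	 * Release the per-command sense, DCDB and scatter/gather pool
+	 * allocations before completing the command.
+	 */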
+ scsi_dma_unmap(scmd);
+ status = cmd_blk->status;
+ if (cmd_blk->sense) {
+ if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
+ unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ if (sense_len > cmd_blk->sense_len)
+ sense_len = cmd_blk->sense_len;
+ memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
+ }
+ dma_pool_free(cs->sense_pool, cmd_blk->sense,
+ cmd_blk->sense_addr);
+ cmd_blk->sense = NULL;
+ cmd_blk->sense_addr = 0;
+ }
+ if (cmd_blk->dcdb) {
+ dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
+ cmd_blk->dcdb_dma);
+ cmd_blk->dcdb = NULL;
+ cmd_blk->dcdb_dma = 0;
+ }
+ if (cmd_blk->sgl) {
+ dma_pool_free(cs->sg_pool, cmd_blk->sgl,
+ cmd_blk->sgl_addr);
+ cmd_blk->sgl = NULL;
+ cmd_blk->sgl_addr = 0;
+ }
+ if (cmd_blk->residual)
+ scsi_set_resid(scmd, cmd_blk->residual);
+ if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
+ status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
+ scmd->result = (DID_BAD_TARGET << 16);
+ else
+ scmd->result = (DID_OK << 16) | status;
+ scmd->scsi_done(scmd);
+}
+
+static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
+{
+ if (!cmd_blk)
+ return;
+
+ if (cmd_blk->complete) {
+ complete(cmd_blk->complete);
+ cmd_blk->complete = NULL;
+ }
+}
+
+static void myrs_monitor(struct work_struct *work)
+{
+ struct myrs_hba *cs = container_of(work, struct myrs_hba,
+ monitor_work.work);
+ struct Scsi_Host *shost = cs->host;
+ struct myrs_ctlr_info *info = cs->ctlr_info;
+ unsigned int epoch = cs->fwstat_buf->epoch;
+ unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
+ unsigned char status;
+
+ dev_dbg(&shost->shost_gendev, "monitor tick\n");
+
+ status = myrs_get_fwstatus(cs);
+
+ if (cs->needs_update) {
+ cs->needs_update = false;
+ mutex_lock(&cs->cinfo_mutex);
+ status = myrs_get_ctlr_info(cs);
+ mutex_unlock(&cs->cinfo_mutex);
+ }
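+	/*
+	 * Fetch pending controller events one per pass; re-arm the
+	 * monitor quickly (one jiffy) while more events are outstanding.
+	 */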
+ if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
+ status = myrs_get_event(cs, cs->next_evseq,
+ cs->event_buf);
+ if (status == MYRS_STATUS_SUCCESS) {
+ myrs_log_event(cs, cs->event_buf);
+ cs->next_evseq++;
+ interval = 1;
+ }
+ }
+
+ if (time_after(jiffies, cs->secondary_monitor_time
+ + MYRS_SECONDARY_MONITOR_INTERVAL))
+ cs->secondary_monitor_time = jiffies;
+
+ if (info->bg_init_active +
+ info->ldev_init_active +
+ info->pdev_init_active +
+ info->cc_active +
+ info->rbld_active +
+ info->exp_active != 0) {
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, shost) {
+ struct myrs_ldev_info *ldev_info;
+ int ldev_num;
+
+ if (sdev->channel < info->physchan_present)
+ continue;
+ ldev_info = sdev->hostdata;
+ if (!ldev_info)
+ continue;
+ ldev_num = ldev_info->ldev_num;
+ myrs_get_ldev_info(cs, ldev_num, ldev_info);
+ }
+ cs->needs_update = true;
+ }
+ if (epoch == cs->epoch &&
+ cs->fwstat_buf->next_evseq == cs->next_evseq &&
+ (cs->needs_update == false ||
+ time_before(jiffies, cs->primary_monitor_time
+ + MYRS_PRIMARY_MONITOR_INTERVAL))) {
+ interval = MYRS_SECONDARY_MONITOR_INTERVAL;
+ }
+
+ if (interval > 1)
+ cs->primary_monitor_time = jiffies;
+ queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
+}
+
+static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
+{
+ struct Scsi_Host *shost = cs->host;
+ size_t elem_size, elem_align;
+
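+	/*
+	 * One element of the SG pool holds a full hardware SG table of
+	 * sg_tablesize entries, aligned to the SGE size.
+	 */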
+ elem_align = sizeof(struct myrs_sge);
+ elem_size = shost->sg_tablesize * elem_align;
+ cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
+ elem_size, elem_align, 0);
+ if (cs->sg_pool == NULL) {
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate SG pool\n");
+ return false;
+ }
+
+ cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
+ MYRS_SENSE_SIZE, sizeof(int), 0);
+ if (cs->sense_pool == NULL) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate sense data pool\n");
+ return false;
+ }
+
+ cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
+ MYRS_DCDB_SIZE,
+ sizeof(unsigned char), 0);
+ if (!cs->dcdb_pool) {
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to allocate DCDB pool\n");
+ return false;
+ }
+
+ snprintf(cs->work_q_name, sizeof(cs->work_q_name),
+ "myrs_wq_%d", shost->host_no);
+ cs->work_q = create_singlethread_workqueue(cs->work_q_name);
+ if (!cs->work_q) {
+ dma_pool_destroy(cs->dcdb_pool);
+ cs->dcdb_pool = NULL;
+ dma_pool_destroy(cs->sg_pool);
+ cs->sg_pool = NULL;
+ dma_pool_destroy(cs->sense_pool);
+ cs->sense_pool = NULL;
+ shost_printk(KERN_ERR, shost,
+ "Failed to create workqueue\n");
+ return false;
+ }
+
+	/* Initialize and arm the background monitoring work. */
+ INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
+ queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
+
+ return true;
+}
+
+static void myrs_destroy_mempools(struct myrs_hba *cs)
+{
+ cancel_delayed_work_sync(&cs->monitor_work);
+ destroy_workqueue(cs->work_q);
+
+ dma_pool_destroy(cs->sg_pool);
+ dma_pool_destroy(cs->dcdb_pool);
+ dma_pool_destroy(cs->sense_pool);
+}
+
+static void myrs_unmap(struct myrs_hba *cs)
+{
+ kfree(cs->event_buf);
+ kfree(cs->ctlr_info);
+ if (cs->fwstat_buf) {
+ dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
+ cs->fwstat_buf, cs->fwstat_addr);
+ cs->fwstat_buf = NULL;
+ }
+ if (cs->first_stat_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
+ cs->first_stat_mbox, cs->stat_mbox_addr);
+ cs->first_stat_mbox = NULL;
+ }
+ if (cs->first_cmd_mbox) {
+ dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
+ cs->first_cmd_mbox, cs->cmd_mbox_addr);
+ cs->first_cmd_mbox = NULL;
+ }
+}
+
+static void myrs_cleanup(struct myrs_hba *cs)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ /* Free the memory mailbox, status, and related structures */
+ myrs_unmap(cs);
+
+ if (cs->mmio_base) {
+		cs->disable_intr(cs->io_base);
+ iounmap(cs->mmio_base);
+ }
+ if (cs->irq)
+ free_irq(cs->irq, cs);
+ if (cs->io_addr)
+ release_region(cs->io_addr, 0x80);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ scsi_host_put(cs->host);
+}
+
+static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
+ const struct pci_device_id *entry)
+{
+ struct myrs_privdata *privdata =
+ (struct myrs_privdata *)entry->driver_data;
+ irq_handler_t irq_handler = privdata->irq_handler;
+ unsigned int mmio_size = privdata->mmio_size;
+ struct myrs_hba *cs = NULL;
+
+ cs = myrs_alloc_host(pdev, entry);
+ if (!cs) {
+ dev_err(&pdev->dev, "Unable to allocate Controller\n");
+ return NULL;
+ }
+ cs->pdev = pdev;
+
+ if (pci_enable_device(pdev))
+ goto Failure;
+
+ cs->pci_addr = pci_resource_start(pdev, 0);
+
+ pci_set_drvdata(pdev, cs);
+ spin_lock_init(&cs->queue_lock);
+ /* Map the Controller Register Window. */
+ if (mmio_size < PAGE_SIZE)
+ mmio_size = PAGE_SIZE;
+ cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size);
+ if (cs->mmio_base == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to map Controller Register Window\n");
+ goto Failure;
+ }
+
+ cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
+ if (privdata->hw_init(pdev, cs, cs->io_base))
+ goto Failure;
+
+ /* Acquire shared access to the IRQ Channel. */
+ if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
+ dev_err(&pdev->dev,
+ "Unable to acquire IRQ Channel %d\n", pdev->irq);
+ goto Failure;
+ }
+ cs->irq = pdev->irq;
+ return cs;
+
+Failure:
+ dev_err(&pdev->dev,
+ "Failed to initialize Controller\n");
+ myrs_cleanup(cs);
+ return NULL;
+}
+
+/*
+ * myrs_err_status reports Controller BIOS Messages passed through
+ * the Error Status Register when the driver performs the BIOS handshaking.
+ * It returns true for fatal errors and false otherwise.
+ */
+
+static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
+ unsigned char parm0, unsigned char parm1)
+{
+ struct pci_dev *pdev = cs->pdev;
+
+ switch (status) {
+ case 0x00:
+ dev_info(&pdev->dev,
+ "Physical Device %d:%d Not Responding\n",
+ parm1, parm0);
+ break;
+ case 0x08:
+ dev_notice(&pdev->dev, "Spinning Up Drives\n");
+ break;
+ case 0x30:
+ dev_notice(&pdev->dev, "Configuration Checksum Error\n");
+ break;
+ case 0x60:
+ dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
+ break;
+ case 0x70:
+ dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
+ break;
+ case 0x90:
+ dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
+ parm1, parm0);
+ break;
+ case 0xA0:
+ dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
+ break;
+ case 0xB0:
+ dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
+ break;
+ case 0xD0:
+ dev_notice(&pdev->dev, "New Controller Configuration Found\n");
+ break;
+ case 0xF0:
+ dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
+ return true;
+ default:
+ dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
+ status);
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Hardware-specific functions
+ */
+
+/*
+ * DAC960 GEM Series Controllers.
+ */
+
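+/*
+ * The GEM doorbell and mask registers occupy the top byte of a 32-bit
+ * little-endian word, hence the << 24 shifts on all register accesses.
+ */
+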
+static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
+
+ writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_gen_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_GEN_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
+
+ writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_mem_mbox_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32(DAC960_GEM_ODB_MMBOX_ACK_IRQ << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_ack_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
+ DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
+
+ writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
+}
+
+static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_GEM_mem_mbox_status_available(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
+ return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_GEM_enable_intr(void __iomem *base)
+{
+ __le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
+ writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
+}
+
+static inline void DAC960_GEM_disable_intr(void __iomem *base)
+{
+ __le32 val = 0;
+
+ writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
+}
+
+static inline bool DAC960_GEM_intr_enabled(void __iomem *base)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_IRQMASK_READ_OFFSET);
+ return !((le32_to_cpu(val) >> 24) &
+ (DAC960_GEM_IRQMASK_HWMBOX_IRQ |
+ DAC960_GEM_IRQMASK_MMBOX_IRQ));
+}
+
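+/*
+ * The first mailbox word carries the command identifier; writing it last,
+ * behind a write barrier, publishes the completed mailbox to the firmware.
+ */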
+static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_GEM_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ __le32 val;
+
+ val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
+ if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
+ return false;
+ *error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
+ *param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
+ writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
+ return true;
+}
+
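+/*
+ * Hand a single command mailbox to the controller through the hardware
+ * mailbox and busy-wait for the completion status; only used during
+ * initialization to set up the memory mailbox interface.
+ */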
+static inline unsigned char
+DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_GEM_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_GEM_write_hw_mbox(base, mbox_addr);
+ DAC960_GEM_hw_mbox_new_cmd(base);
+ while (!DAC960_GEM_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_GEM_read_cmd_status(base);
+ DAC960_GEM_ack_hw_mbox_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_GEM_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_GEM_disable_intr(base);
+ DAC960_GEM_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_GEM_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_GEM_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_GEM_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_GEM_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_GEM_disable_intr;
+ cs->reset = DAC960_GEM_reset_ctrl;
+ return 0;
+}
+
+static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_GEM_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
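+	/*
+	 * Walk the status mailbox ring until an empty slot (id 0) is
+	 * found, wrapping around at the end of the ring.
+	 */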
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_GEM_privdata = {
+ .hw_init = DAC960_GEM_hw_init,
+ .irq_handler = DAC960_GEM_intr_handler,
+ .mmio_size = DAC960_GEM_mmio_size,
+};
+
+/*
+ * DAC960 BA Series Controllers.
+ */
+
+static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_GEN_IRQ, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
+}
+
+static inline bool DAC960_BA_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IDB_OFFSET);
+ return !(val & DAC960_BA_IDB_INIT_DONE);
+}
+
+static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_MMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline void DAC960_BA_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_BA_ODB_OFFSET);
+}
+
+static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_BA_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ODB_OFFSET);
+ return val & DAC960_BA_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_BA_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_BA_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_BA_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_IRQMASK_OFFSET);
+ return !(val & DAC960_BA_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_BA_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_BA_ERRSTS_OFFSET);
+ if (!(val & DAC960_BA_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_BA_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_BA_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_BA_write_hw_mbox(base, mbox_addr);
+ DAC960_BA_hw_mbox_new_cmd(base);
+ while (!DAC960_BA_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_BA_read_cmd_status(base);
+ DAC960_BA_ack_hw_mbox_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_BA_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_BA_disable_intr(base);
+ DAC960_BA_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_BA_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_BA_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_BA_reset_ctrl(base);
+ return -EAGAIN;
+ }
+ DAC960_BA_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_BA_disable_intr;
+ cs->reset = DAC960_BA_reset_ctrl;
+ return 0;
+}
+
+static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_BA_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_BA_privdata = {
+ .hw_init = DAC960_BA_hw_init,
+ .irq_handler = DAC960_BA_intr_handler,
+ .mmio_size = DAC960_BA_mmio_size,
+};
+
+/*
+ * DAC960 LP Series Controllers.
+ */
+
+static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_gen_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_GEN_IRQ, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_reset_ctrl(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
+{
+ writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_HWMBOX_FULL;
+}
+
+static inline bool DAC960_LP_init_in_progress(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IDB_OFFSET);
+ return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
+}
+
+static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_mem_mbox_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_MMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline void DAC960_LP_ack_intr(void __iomem *base)
+{
+ writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
+ base + DAC960_LP_ODB_OFFSET);
+}
+
+static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
+}
+
+static inline bool DAC960_LP_mem_mbox_status_available(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ODB_OFFSET);
+ return val & DAC960_LP_ODB_MMBOX_STS_AVAIL;
+}
+
+static inline void DAC960_LP_enable_intr(void __iomem *base)
+{
+ writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline void DAC960_LP_disable_intr(void __iomem *base)
+{
+ writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
+}
+
+static inline bool DAC960_LP_intr_enabled(void __iomem *base)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_IRQMASK_OFFSET);
+ return !(val & DAC960_LP_IRQMASK_DISABLE_IRQ);
+}
+
+static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
+ union myrs_cmd_mbox *mbox)
+{
+ memcpy(&mem_mbox->words[1], &mbox->words[1],
+ sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
+ /* Barrier to avoid reordering */
+ wmb();
+ mem_mbox->words[0] = mbox->words[0];
+ /* Barrier to force PCI access */
+ mb();
+}
+
+static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
+ dma_addr_t cmd_mbox_addr)
+{
+ dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
+}
+
+static inline unsigned short DAC960_LP_read_cmd_ident(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET);
+}
+
+static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
+{
+ return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
+}
+
+static inline bool
+DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
+ unsigned char *param0, unsigned char *param1)
+{
+ u8 val;
+
+ val = readb(base + DAC960_LP_ERRSTS_OFFSET);
+ if (!(val & DAC960_LP_ERRSTS_PENDING))
+ return false;
+ val &= ~DAC960_LP_ERRSTS_PENDING;
+ *error = val;
+ *param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
+ *param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
+ writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
+ return true;
+}
+
+static inline unsigned char
+DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
+{
+ unsigned char status;
+
+ while (DAC960_LP_hw_mbox_is_full(base))
+ udelay(1);
+ DAC960_LP_write_hw_mbox(base, mbox_addr);
+ DAC960_LP_hw_mbox_new_cmd(base);
+ while (!DAC960_LP_hw_mbox_status_available(base))
+ udelay(1);
+ status = DAC960_LP_read_cmd_status(base);
+ DAC960_LP_ack_hw_mbox_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+
+ return status;
+}
+
+static int DAC960_LP_hw_init(struct pci_dev *pdev,
+ struct myrs_hba *cs, void __iomem *base)
+{
+ int timeout = 0;
+ unsigned char status, parm0, parm1;
+
+ DAC960_LP_disable_intr(base);
+ DAC960_LP_ack_hw_mbox_status(base);
+ udelay(1000);
+ while (DAC960_LP_init_in_progress(base) &&
+ timeout < MYRS_MAILBOX_TIMEOUT) {
+ if (DAC960_LP_read_error_status(base, &status,
+ &parm0, &parm1) &&
+ myrs_err_status(cs, status, parm0, parm1))
+ return -EIO;
+ udelay(10);
+ timeout++;
+ }
+ if (timeout == MYRS_MAILBOX_TIMEOUT) {
+ dev_err(&pdev->dev,
+ "Timeout waiting for Controller Initialisation\n");
+ return -ETIMEDOUT;
+ }
+ if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
+ dev_err(&pdev->dev,
+ "Unable to Enable Memory Mailbox Interface\n");
+ DAC960_LP_reset_ctrl(base);
+ return -ENODEV;
+ }
+ DAC960_LP_enable_intr(base);
+ cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
+ cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
+ cs->disable_intr = DAC960_LP_disable_intr;
+ cs->reset = DAC960_LP_reset_ctrl;
+
+ return 0;
+}
+
+static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
+{
+ struct myrs_hba *cs = arg;
+ void __iomem *base = cs->io_base;
+ struct myrs_stat_mbox *next_stat_mbox;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cs->queue_lock, flags);
+ DAC960_LP_ack_intr(base);
+ next_stat_mbox = cs->next_stat_mbox;
+ while (next_stat_mbox->id > 0) {
+ unsigned short id = next_stat_mbox->id;
+ struct scsi_cmnd *scmd = NULL;
+ struct myrs_cmdblk *cmd_blk = NULL;
+
+ if (id == MYRS_DCMD_TAG)
+ cmd_blk = &cs->dcmd_blk;
+ else if (id == MYRS_MCMD_TAG)
+ cmd_blk = &cs->mcmd_blk;
+ else {
+ scmd = scsi_host_find_tag(cs->host, id - 3);
+ if (scmd)
+ cmd_blk = scsi_cmd_priv(scmd);
+ }
+ if (cmd_blk) {
+ cmd_blk->status = next_stat_mbox->status;
+ cmd_blk->sense_len = next_stat_mbox->sense_len;
+ cmd_blk->residual = next_stat_mbox->residual;
+ } else
+ dev_err(&cs->pdev->dev,
+ "Unhandled command completion %d\n", id);
+
+ memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
+ if (++next_stat_mbox > cs->last_stat_mbox)
+ next_stat_mbox = cs->first_stat_mbox;
+
+ if (cmd_blk) {
+ if (id < 3)
+ myrs_handle_cmdblk(cs, cmd_blk);
+ else
+ myrs_handle_scsi(cs, cmd_blk, scmd);
+ }
+ }
+ cs->next_stat_mbox = next_stat_mbox;
+ spin_unlock_irqrestore(&cs->queue_lock, flags);
+ return IRQ_HANDLED;
+}
+
+struct myrs_privdata DAC960_LP_privdata = {
+ .hw_init = DAC960_LP_hw_init,
+ .irq_handler = DAC960_LP_intr_handler,
+ .mmio_size = DAC960_LP_mmio_size,
+};
+
+/*
+ * Module functions
+ */
+static int
+myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+ struct myrs_hba *cs;
+ int ret;
+
+ cs = myrs_detect(dev, entry);
+ if (!cs)
+ return -ENODEV;
+
+ ret = myrs_get_config(cs);
+ if (ret < 0) {
+ myrs_cleanup(cs);
+ return ret;
+ }
+
+ if (!myrs_create_mempools(dev, cs)) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ ret = scsi_add_host(cs->host, &dev->dev);
+ if (ret) {
+ dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
+ myrs_destroy_mempools(cs);
+ goto failed;
+ }
+ scsi_scan_host(cs->host);
+ return 0;
+failed:
+ myrs_cleanup(cs);
+ return ret;
+}
+
+static void myrs_remove(struct pci_dev *pdev)
+{
+ struct myrs_hba *cs = pci_get_drvdata(pdev);
+
+ if (cs == NULL)
+ return;
+
+	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...\n");
+ myrs_flush_cache(cs);
+ myrs_destroy_mempools(cs);
+ myrs_cleanup(cs);
+}
+
+static const struct pci_device_id myrs_id_table[] = {
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
+ PCI_DEVICE_ID_MYLEX_DAC960_GEM,
+ PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
+ .driver_data = (unsigned long) &DAC960_GEM_privdata,
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
+ },
+ {
+ PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
+ },
+ {0, },
+};
+
+MODULE_DEVICE_TABLE(pci, myrs_id_table);
+
+static struct pci_driver myrs_pci_driver = {
+ .name = "myrs",
+ .id_table = myrs_id_table,
+ .probe = myrs_probe,
+ .remove = myrs_remove,
+};
+
+static int __init myrs_init_module(void)
+{
+ int ret;
+
+ myrs_raid_template = raid_class_attach(&myrs_raid_functions);
+ if (!myrs_raid_template)
+ return -ENODEV;
+
+ ret = pci_register_driver(&myrs_pci_driver);
+ if (ret)
+ raid_class_release(myrs_raid_template);
+
+ return ret;
+}
+
+static void __exit myrs_cleanup_module(void)
+{
+ pci_unregister_driver(&myrs_pci_driver);
+ raid_class_release(myrs_raid_template);
+}
+
+module_init(myrs_init_module);
+module_exit(myrs_cleanup_module);
+
+MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
+MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h
new file mode 100644
index 000000000000..e6702ee85e9f
--- /dev/null
+++ b/drivers/scsi/myrs.h
@@ -0,0 +1,1134 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+ *
+ * This driver supports the newer, SCSI-based firmware interface only.
+ *
+ * Copyright 2018 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
+ *
+ * Based on the original DAC960 driver, which has
+ * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+ * Portions Copyright 2002 by Mylex (An IBM Business Unit)
+ */
+
+#ifndef _MYRS_H
+#define _MYRS_H
+
+#define MYRS_MAILBOX_TIMEOUT 1000000
+
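+/*
+ * Command identifiers 1 and 2 are reserved for driver-internal commands;
+ * SCSI commands are sent to the hardware with block layer tag + 3.
+ */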
+#define MYRS_DCMD_TAG 1
+#define MYRS_MCMD_TAG 2
+
+#define MYRS_LINE_BUFFER_SIZE 128
+
+#define MYRS_PRIMARY_MONITOR_INTERVAL (10 * HZ)
+#define MYRS_SECONDARY_MONITOR_INTERVAL (60 * HZ)
+
+/* Maximum number of Scatter/Gather Segments supported */
+#define MYRS_SG_LIMIT 128
+
+/*
+ * Number of Command and Status Mailboxes used by the
+ * DAC960 V2 Firmware Memory Mailbox Interface.
+ */
+#define MYRS_MAX_CMD_MBOX 512
+#define MYRS_MAX_STAT_MBOX 512
+
+#define MYRS_DCDB_SIZE 16
+#define MYRS_SENSE_SIZE 14
+
+/*
+ * DAC960 V2 Firmware Command Opcodes.
+ */
+enum myrs_cmd_opcode {
+ MYRS_CMD_OP_MEMCOPY = 0x01,
+ MYRS_CMD_OP_SCSI_10_PASSTHRU = 0x02,
+ MYRS_CMD_OP_SCSI_255_PASSTHRU = 0x03,
+ MYRS_CMD_OP_SCSI_10 = 0x04,
+ MYRS_CMD_OP_SCSI_256 = 0x05,
+ MYRS_CMD_OP_IOCTL = 0x20,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware IOCTL Opcodes.
+ */
+enum myrs_ioctl_opcode {
+ MYRS_IOCTL_GET_CTLR_INFO = 0x01,
+ MYRS_IOCTL_GET_LDEV_INFO_VALID = 0x03,
+ MYRS_IOCTL_GET_PDEV_INFO_VALID = 0x05,
+ MYRS_IOCTL_GET_HEALTH_STATUS = 0x11,
+ MYRS_IOCTL_GET_EVENT = 0x15,
+ MYRS_IOCTL_START_DISCOVERY = 0x81,
+ MYRS_IOCTL_SET_DEVICE_STATE = 0x82,
+ MYRS_IOCTL_INIT_PDEV_START = 0x84,
+ MYRS_IOCTL_INIT_PDEV_STOP = 0x85,
+ MYRS_IOCTL_INIT_LDEV_START = 0x86,
+ MYRS_IOCTL_INIT_LDEV_STOP = 0x87,
+ MYRS_IOCTL_RBLD_DEVICE_START = 0x88,
+ MYRS_IOCTL_RBLD_DEVICE_STOP = 0x89,
+ MYRS_IOCTL_MAKE_CONSISTENT_START = 0x8A,
+ MYRS_IOCTL_MAKE_CONSISTENT_STOP = 0x8B,
+ MYRS_IOCTL_CC_START = 0x8C,
+ MYRS_IOCTL_CC_STOP = 0x8D,
+ MYRS_IOCTL_SET_MEM_MBOX = 0x8E,
+ MYRS_IOCTL_RESET_DEVICE = 0x90,
+ MYRS_IOCTL_FLUSH_DEVICE_DATA = 0x91,
+ MYRS_IOCTL_PAUSE_DEVICE = 0x92,
+	MYRS_IOCTL_UNPAUSE_DEVICE =		0x93,
+ MYRS_IOCTL_LOCATE_DEVICE = 0x94,
+ MYRS_IOCTL_CREATE_CONFIGURATION = 0xC0,
+ MYRS_IOCTL_DELETE_LDEV = 0xC1,
+ MYRS_IOCTL_REPLACE_INTERNALDEVICE = 0xC2,
+ MYRS_IOCTL_RENAME_LDEV = 0xC3,
+ MYRS_IOCTL_ADD_CONFIGURATION = 0xC4,
+ MYRS_IOCTL_XLATE_PDEV_TO_LDEV = 0xC5,
+ MYRS_IOCTL_CLEAR_CONFIGURATION = 0xCA,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Command Status Codes.
+ */
+#define MYRS_STATUS_SUCCESS 0x00
+#define MYRS_STATUS_FAILED 0x02
+#define MYRS_STATUS_DEVICE_BUSY 0x08
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE 0x0E
+#define MYRS_STATUS_DEVICE_NON_RESPONSIVE2 0x0F
+#define MYRS_STATUS_RESERVATION_CONFLICT 0x18
+
+/*
+ * DAC960 V2 Firmware Memory Type structure.
+ */
+struct myrs_mem_type {
+ enum {
+ MYRS_MEMTYPE_RESERVED = 0x00,
+ MYRS_MEMTYPE_DRAM = 0x01,
+ MYRS_MEMTYPE_EDRAM = 0x02,
+ MYRS_MEMTYPE_EDO = 0x03,
+ MYRS_MEMTYPE_SDRAM = 0x04,
+ MYRS_MEMTYPE_LAST = 0x1F,
+ } __packed mem_type:5; /* Byte 0 Bits 0-4 */
+ unsigned rsvd:1; /* Byte 0 Bit 5 */
+ unsigned mem_parity:1; /* Byte 0 Bit 6 */
+ unsigned mem_ecc:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Processor Type structure.
+ */
+enum myrs_cpu_type {
+ MYRS_CPUTYPE_i960CA = 0x01,
+ MYRS_CPUTYPE_i960RD = 0x02,
+ MYRS_CPUTYPE_i960RN = 0x03,
+ MYRS_CPUTYPE_i960RP = 0x04,
+ MYRS_CPUTYPE_NorthBay = 0x05,
+ MYRS_CPUTYPE_StrongArm = 0x06,
+ MYRS_CPUTYPE_i960RM = 0x07,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Controller Info reply structure.
+ */
+struct myrs_ctlr_info {
+ unsigned char rsvd1; /* Byte 0 */
+ enum {
+ MYRS_SCSI_BUS = 0x00,
+ MYRS_Fibre_BUS = 0x01,
+ MYRS_PCI_BUS = 0x03
+ } __packed bus; /* Byte 1 */
+ enum {
+ MYRS_CTLR_DAC960E = 0x01,
+ MYRS_CTLR_DAC960M = 0x08,
+ MYRS_CTLR_DAC960PD = 0x10,
+ MYRS_CTLR_DAC960PL = 0x11,
+ MYRS_CTLR_DAC960PU = 0x12,
+ MYRS_CTLR_DAC960PE = 0x13,
+ MYRS_CTLR_DAC960PG = 0x14,
+ MYRS_CTLR_DAC960PJ = 0x15,
+ MYRS_CTLR_DAC960PTL0 = 0x16,
+ MYRS_CTLR_DAC960PR = 0x17,
+ MYRS_CTLR_DAC960PRL = 0x18,
+ MYRS_CTLR_DAC960PT = 0x19,
+ MYRS_CTLR_DAC1164P = 0x1A,
+ MYRS_CTLR_DAC960PTL1 = 0x1B,
+ MYRS_CTLR_EXR2000P = 0x1C,
+ MYRS_CTLR_EXR3000P = 0x1D,
+ MYRS_CTLR_ACCELERAID352 = 0x1E,
+ MYRS_CTLR_ACCELERAID170 = 0x1F,
+ MYRS_CTLR_ACCELERAID160 = 0x20,
+ MYRS_CTLR_DAC960S = 0x60,
+ MYRS_CTLR_DAC960SU = 0x61,
+ MYRS_CTLR_DAC960SX = 0x62,
+ MYRS_CTLR_DAC960SF = 0x63,
+ MYRS_CTLR_DAC960SS = 0x64,
+ MYRS_CTLR_DAC960FL = 0x65,
+ MYRS_CTLR_DAC960LL = 0x66,
+ MYRS_CTLR_DAC960FF = 0x67,
+ MYRS_CTLR_DAC960HP = 0x68,
+ MYRS_CTLR_RAIDBRICK = 0x69,
+ MYRS_CTLR_METEOR_FL = 0x6A,
+ MYRS_CTLR_METEOR_FF = 0x6B
+ } __packed ctlr_type; /* Byte 2 */
+ unsigned char rsvd2; /* Byte 3 */
+ unsigned short bus_speed_mhz; /* Bytes 4-5 */
+ unsigned char bus_width; /* Byte 6 */
+ unsigned char flash_code; /* Byte 7 */
+ unsigned char ports_present; /* Byte 8 */
+ unsigned char rsvd3[7]; /* Bytes 9-15 */
+ unsigned char bus_name[16]; /* Bytes 16-31 */
+ unsigned char ctlr_name[16]; /* Bytes 32-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ /* Firmware Release Information */
+ unsigned char fw_major_version; /* Byte 64 */
+ unsigned char fw_minor_version; /* Byte 65 */
+ unsigned char fw_turn_number; /* Byte 66 */
+ unsigned char fw_build_number; /* Byte 67 */
+ unsigned char fw_release_day; /* Byte 68 */
+ unsigned char fw_release_month; /* Byte 69 */
+ unsigned char fw_release_year_hi; /* Byte 70 */
+ unsigned char fw_release_year_lo; /* Byte 71 */
+ /* Hardware Release Information */
+ unsigned char hw_rev; /* Byte 72 */
+ unsigned char rsvd5[3]; /* Bytes 73-75 */
+ unsigned char hw_release_day; /* Byte 76 */
+ unsigned char hw_release_month; /* Byte 77 */
+ unsigned char hw_release_year_hi; /* Byte 78 */
+ unsigned char hw_release_year_lo; /* Byte 79 */
+ /* Hardware Manufacturing Information */
+ unsigned char manuf_batch_num; /* Byte 80 */
+ unsigned char rsvd6; /* Byte 81 */
+ unsigned char manuf_plant_num; /* Byte 82 */
+ unsigned char rsvd7; /* Byte 83 */
+ unsigned char hw_manuf_day; /* Byte 84 */
+ unsigned char hw_manuf_month; /* Byte 85 */
+ unsigned char hw_manuf_year_hi; /* Byte 86 */
+ unsigned char hw_manuf_year_lo; /* Byte 87 */
+ unsigned char max_pd_per_xld; /* Byte 88 */
+ unsigned char max_ild_per_xld; /* Byte 89 */
+ unsigned short nvram_size_kb; /* Bytes 90-91 */
+ unsigned char max_xld; /* Byte 92 */
+ unsigned char rsvd8[3]; /* Bytes 93-95 */
+ /* Unique Information per Controller */
+ unsigned char serial_number[16]; /* Bytes 96-111 */
+ unsigned char rsvd9[16]; /* Bytes 112-127 */
+ /* Vendor Information */
+ unsigned char rsvd10[3]; /* Bytes 128-130 */
+ unsigned char oem_code; /* Byte 131 */
+ unsigned char vendor[16]; /* Bytes 132-147 */
+ /* Other Physical/Controller/Operation Information */
+ unsigned char bbu_present:1; /* Byte 148 Bit 0 */
+ unsigned char cluster_mode:1; /* Byte 148 Bit 1 */
+ unsigned char rsvd11:6; /* Byte 148 Bits 2-7 */
+ unsigned char rsvd12[3]; /* Bytes 149-151 */
+ /* Physical Device Scan Information */
+ unsigned char pscan_active:1; /* Byte 152 Bit 0 */
+ unsigned char rsvd13:7; /* Byte 152 Bits 1-7 */
+ unsigned char pscan_chan; /* Byte 153 */
+ unsigned char pscan_target; /* Byte 154 */
+ unsigned char pscan_lun; /* Byte 155 */
+ /* Maximum Command Data Transfer Sizes */
+ unsigned short max_transfer_size; /* Bytes 156-157 */
+ unsigned short max_sge; /* Bytes 158-159 */
+ /* Logical/Physical Device Counts */
+ unsigned short ldev_present; /* Bytes 160-161 */
+ unsigned short ldev_critical; /* Bytes 162-163 */
+ unsigned short ldev_offline; /* Bytes 164-165 */
+ unsigned short pdev_present; /* Bytes 166-167 */
+ unsigned short pdisk_present; /* Bytes 168-169 */
+ unsigned short pdisk_critical; /* Bytes 170-171 */
+ unsigned short pdisk_offline; /* Bytes 172-173 */
+ unsigned short max_tcq; /* Bytes 174-175 */
+ /* Channel and Target ID Information */
+ unsigned char physchan_present; /* Byte 176 */
+ unsigned char virtchan_present; /* Byte 177 */
+ unsigned char physchan_max; /* Byte 178 */
+ unsigned char virtchan_max; /* Byte 179 */
+ unsigned char max_targets[16]; /* Bytes 180-195 */
+ unsigned char rsvd14[12]; /* Bytes 196-207 */
+ /* Memory/Cache Information */
+ unsigned short mem_size_mb; /* Bytes 208-209 */
+ unsigned short cache_size_mb; /* Bytes 210-211 */
+ unsigned int valid_cache_bytes; /* Bytes 212-215 */
+ unsigned int dirty_cache_bytes; /* Bytes 216-219 */
+ unsigned short mem_speed_mhz; /* Bytes 220-221 */
+ unsigned char mem_data_width; /* Byte 222 */
+ struct myrs_mem_type mem_type; /* Byte 223 */
+ unsigned char cache_mem_type_name[16]; /* Bytes 224-239 */
+ /* Execution Memory Information */
+ unsigned short exec_mem_size_mb; /* Bytes 240-241 */
+ unsigned short exec_l2_cache_size_mb; /* Bytes 242-243 */
+ unsigned char rsvd15[8]; /* Bytes 244-251 */
+ unsigned short exec_mem_speed_mhz; /* Bytes 252-253 */
+ unsigned char exec_mem_data_width; /* Byte 254 */
+ struct myrs_mem_type exec_mem_type; /* Byte 255 */
+ unsigned char exec_mem_type_name[16]; /* Bytes 256-271 */
+ /* CPU Type Information */
+ struct { /* Bytes 272-335 */
+ unsigned short cpu_speed_mhz;
+ enum myrs_cpu_type cpu_type;
+ unsigned char cpu_count;
+ unsigned char rsvd16[12];
+ unsigned char cpu_name[16];
+ } __packed cpu[2];
+ /* Debugging/Profiling/Command Time Tracing Information */
+ unsigned short cur_prof_page_num; /* Bytes 336-337 */
+ unsigned short num_prof_waiters; /* Bytes 338-339 */
+ unsigned short cur_trace_page_num; /* Bytes 340-341 */
+ unsigned short num_trace_waiters; /* Bytes 342-343 */
+ unsigned char rsvd18[8]; /* Bytes 344-351 */
+ /* Error Counters on Physical Devices */
+ unsigned short pdev_bus_resets; /* Bytes 352-353 */
+	unsigned short pdev_parity_errors;	/* Bytes 354-355 */
+ unsigned short pdev_soft_errors; /* Bytes 356-357 */
+ unsigned short pdev_cmds_failed; /* Bytes 358-359 */
+ unsigned short pdev_misc_errors; /* Bytes 360-361 */
+ unsigned short pdev_cmd_timeouts; /* Bytes 362-363 */
+ unsigned short pdev_sel_timeouts; /* Bytes 364-365 */
+ unsigned short pdev_retries_done; /* Bytes 366-367 */
+ unsigned short pdev_aborts_done; /* Bytes 368-369 */
+ unsigned short pdev_host_aborts_done; /* Bytes 370-371 */
+ unsigned short pdev_predicted_failures; /* Bytes 372-373 */
+ unsigned short pdev_host_cmds_failed; /* Bytes 374-375 */
+ unsigned short pdev_hard_errors; /* Bytes 376-377 */
+ unsigned char rsvd19[6]; /* Bytes 378-383 */
+ /* Error Counters on Logical Devices */
+ unsigned short ldev_soft_errors; /* Bytes 384-385 */
+ unsigned short ldev_cmds_failed; /* Bytes 386-387 */
+ unsigned short ldev_host_aborts_done; /* Bytes 388-389 */
+ unsigned char rsvd20[2]; /* Bytes 390-391 */
+ /* Error Counters on Controller */
+ unsigned short ctlr_mem_errors; /* Bytes 392-393 */
+ unsigned short ctlr_host_aborts_done; /* Bytes 394-395 */
+ unsigned char rsvd21[4]; /* Bytes 396-399 */
+ /* Long Duration Activity Information */
+ unsigned short bg_init_active; /* Bytes 400-401 */
+ unsigned short ldev_init_active; /* Bytes 402-403 */
+ unsigned short pdev_init_active; /* Bytes 404-405 */
+ unsigned short cc_active; /* Bytes 406-407 */
+ unsigned short rbld_active; /* Bytes 408-409 */
+ unsigned short exp_active; /* Bytes 410-411 */
+ unsigned short patrol_active; /* Bytes 412-413 */
+ unsigned char rsvd22[2]; /* Bytes 414-415 */
+ /* Flash ROM Information */
+ unsigned char flash_type; /* Byte 416 */
+ unsigned char rsvd23; /* Byte 417 */
+ unsigned short flash_size_MB; /* Bytes 418-419 */
+ unsigned int flash_limit; /* Bytes 420-423 */
+ unsigned int flash_count; /* Bytes 424-427 */
+ unsigned char rsvd24[4]; /* Bytes 428-431 */
+ unsigned char flash_type_name[16]; /* Bytes 432-447 */
+ /* Firmware Run Time Information */
+ unsigned char rbld_rate; /* Byte 448 */
+ unsigned char bg_init_rate; /* Byte 449 */
+ unsigned char fg_init_rate; /* Byte 450 */
+ unsigned char cc_rate; /* Byte 451 */
+ unsigned char rsvd25[4]; /* Bytes 452-455 */
+ unsigned int max_dp; /* Bytes 456-459 */
+ unsigned int free_dp; /* Bytes 460-463 */
+ unsigned int max_iop; /* Bytes 464-467 */
+ unsigned int free_iop; /* Bytes 468-471 */
+ unsigned short max_combined_len; /* Bytes 472-473 */
+ unsigned short num_cfg_groups; /* Bytes 474-475 */
+ unsigned installation_abort_status:1; /* Byte 476 Bit 0 */
+ unsigned maint_mode_status:1; /* Byte 476 Bit 1 */
+ unsigned rsvd26:6; /* Byte 476 Bits 2-7 */
+	unsigned char rsvd27[35];		/* Bytes 477-511 */
+ unsigned char rsvd28[512]; /* Bytes 512-1023 */
+};
+
+/*
+ * DAC960 V2 Firmware Device State type.
+ */
+enum myrs_devstate {
+ MYRS_DEVICE_UNCONFIGURED = 0x00,
+ MYRS_DEVICE_ONLINE = 0x01,
+ MYRS_DEVICE_REBUILD = 0x03,
+ MYRS_DEVICE_MISSING = 0x04,
+ MYRS_DEVICE_SUSPECTED_CRITICAL = 0x05,
+ MYRS_DEVICE_OFFLINE = 0x08,
+ MYRS_DEVICE_CRITICAL = 0x09,
+ MYRS_DEVICE_SUSPECTED_DEAD = 0x0C,
+ MYRS_DEVICE_COMMANDED_OFFLINE = 0x10,
+ MYRS_DEVICE_STANDBY = 0x21,
+ MYRS_DEVICE_INVALID_STATE = 0xFF,
+} __packed;
+
+/*
+ * DAC960 V2 RAID Levels
+ */
+enum myrs_raid_level {
+ MYRS_RAID_LEVEL0 = 0x0, /* RAID 0 */
+ MYRS_RAID_LEVEL1 = 0x1, /* RAID 1 */
+ MYRS_RAID_LEVEL3 = 0x3, /* RAID 3 right asymmetric parity */
+ MYRS_RAID_LEVEL5 = 0x5, /* RAID 5 right asymmetric parity */
+ MYRS_RAID_LEVEL6 = 0x6, /* RAID 6 (Mylex RAID 6) */
+ MYRS_RAID_JBOD = 0x7, /* RAID 7 (JBOD) */
+ MYRS_RAID_NEWSPAN = 0x8, /* New Mylex SPAN */
+ MYRS_RAID_LEVEL3F = 0x9, /* RAID 3 fixed parity */
+ MYRS_RAID_LEVEL3L = 0xb, /* RAID 3 left symmetric parity */
+ MYRS_RAID_SPAN = 0xc, /* current spanning implementation */
+ MYRS_RAID_LEVEL5L = 0xd, /* RAID 5 left symmetric parity */
+ MYRS_RAID_LEVELE = 0xe, /* RAID E (concatenation) */
+ MYRS_RAID_PHYSICAL = 0xf, /* physical device */
+} __packed;
+
+enum myrs_stripe_size {
+ MYRS_STRIPE_SIZE_0 = 0x0, /* no stripe (RAID 1, RAID 7, etc) */
+ MYRS_STRIPE_SIZE_512B = 0x1,
+ MYRS_STRIPE_SIZE_1K = 0x2,
+ MYRS_STRIPE_SIZE_2K = 0x3,
+ MYRS_STRIPE_SIZE_4K = 0x4,
+ MYRS_STRIPE_SIZE_8K = 0x5,
+ MYRS_STRIPE_SIZE_16K = 0x6,
+ MYRS_STRIPE_SIZE_32K = 0x7,
+ MYRS_STRIPE_SIZE_64K = 0x8,
+ MYRS_STRIPE_SIZE_128K = 0x9,
+ MYRS_STRIPE_SIZE_256K = 0xa,
+ MYRS_STRIPE_SIZE_512K = 0xb,
+ MYRS_STRIPE_SIZE_1M = 0xc,
+} __packed;
+
+enum myrs_cacheline_size {
+ MYRS_CACHELINE_ZERO = 0x0, /* caching cannot be enabled */
+ MYRS_CACHELINE_512B = 0x1,
+ MYRS_CACHELINE_1K = 0x2,
+ MYRS_CACHELINE_2K = 0x3,
+ MYRS_CACHELINE_4K = 0x4,
+ MYRS_CACHELINE_8K = 0x5,
+ MYRS_CACHELINE_16K = 0x6,
+ MYRS_CACHELINE_32K = 0x7,
+ MYRS_CACHELINE_64K = 0x8,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Get Logical Device Info reply structure.
+ */
+struct myrs_ldev_info {
+ unsigned char ctlr; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ enum myrs_devstate dev_state; /* Byte 4 */
+ unsigned char raid_level; /* Byte 5 */
+ enum myrs_stripe_size stripe_size; /* Byte 6 */
+ enum myrs_cacheline_size cacheline_size; /* Byte 7 */
+ struct {
+ enum {
+ MYRS_READCACHE_DISABLED = 0x0,
+ MYRS_READCACHE_ENABLED = 0x1,
+ MYRS_READAHEAD_ENABLED = 0x2,
+ MYRS_INTELLIGENT_READAHEAD_ENABLED = 0x3,
+ MYRS_READCACHE_LAST = 0x7,
+ } __packed rce:3; /* Byte 8 Bits 0-2 */
+ enum {
+ MYRS_WRITECACHE_DISABLED = 0x0,
+ MYRS_LOGICALDEVICE_RO = 0x1,
+ MYRS_WRITECACHE_ENABLED = 0x2,
+ MYRS_INTELLIGENT_WRITECACHE_ENABLED = 0x3,
+ MYRS_WRITECACHE_LAST = 0x7,
+ } __packed wce:3; /* Byte 8 Bits 3-5 */
+ unsigned rsvd1:1; /* Byte 8 Bit 6 */
+ unsigned ldev_init_done:1; /* Byte 8 Bit 7 */
+ } ldev_control; /* Byte 8 */
+ /* Logical Device Operations Status */
+ unsigned char cc_active:1; /* Byte 9 Bit 0 */
+ unsigned char rbld_active:1; /* Byte 9 Bit 1 */
+ unsigned char bg_init_active:1; /* Byte 9 Bit 2 */
+ unsigned char fg_init_active:1; /* Byte 9 Bit 3 */
+ unsigned char migration_active:1; /* Byte 9 Bit 4 */
+ unsigned char patrol_active:1; /* Byte 9 Bit 5 */
+ unsigned char rsvd2:2; /* Byte 9 Bits 6-7 */
+ unsigned char raid5_writeupdate; /* Byte 10 */
+ unsigned char raid5_algo; /* Byte 11 */
+ unsigned short ldev_num; /* Bytes 12-13 */
+ /* BIOS Info */
+ unsigned char bios_disabled:1; /* Byte 14 Bit 0 */
+ unsigned char cdrom_boot:1; /* Byte 14 Bit 1 */
+ unsigned char drv_coercion:1; /* Byte 14 Bit 2 */
+ unsigned char write_same_disabled:1; /* Byte 14 Bit 3 */
+ unsigned char hba_mode:1; /* Byte 14 Bit 4 */
+ enum {
+ MYRS_GEOMETRY_128_32 = 0x0,
+ MYRS_GEOMETRY_255_63 = 0x1,
+ MYRS_GEOMETRY_RSVD1 = 0x2,
+ MYRS_GEOMETRY_RSVD2 = 0x3
+ } __packed drv_geom:2; /* Byte 14 Bits 5-6 */
+ unsigned char super_ra_enabled:1; /* Byte 14 Bit 7 */
+ unsigned char rsvd3; /* Byte 15 */
+ /* Error Counters */
+ unsigned short soft_errs; /* Bytes 16-17 */
+ unsigned short cmds_failed; /* Bytes 18-19 */
+ unsigned short cmds_aborted; /* Bytes 20-21 */
+ unsigned short deferred_write_errs; /* Bytes 22-23 */
+ unsigned int rsvd4; /* Bytes 24-27 */
+ unsigned int rsvd5; /* Bytes 28-31 */
+ /* Device Size Information */
+ unsigned short rsvd6; /* Bytes 32-33 */
+ unsigned short devsize_bytes; /* Bytes 34-35 */
+ unsigned int orig_devsize; /* Bytes 36-39 */
+ unsigned int cfg_devsize; /* Bytes 40-43 */
+ unsigned int rsvd7; /* Bytes 44-47 */
+ unsigned char ldev_name[32]; /* Bytes 48-79 */
+ unsigned char inquiry[36]; /* Bytes 80-115 */
+ unsigned char rsvd8[12]; /* Bytes 116-127 */
+ u64 last_read_lba; /* Bytes 128-135 */
+ u64 last_write_lba; /* Bytes 136-143 */
+ u64 cc_lba; /* Bytes 144-151 */
+ u64 rbld_lba; /* Bytes 152-159 */
+ u64 bg_init_lba; /* Bytes 160-167 */
+ u64 fg_init_lba; /* Bytes 168-175 */
+ u64 migration_lba; /* Bytes 176-183 */
+ u64 patrol_lba; /* Bytes 184-191 */
+ unsigned char rsvd9[64]; /* Bytes 192-255 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Physical Device Info reply structure.
+ */
+struct myrs_pdev_info {
+ unsigned char rsvd1; /* Byte 0 */
+ unsigned char channel; /* Byte 1 */
+ unsigned char target; /* Byte 2 */
+ unsigned char lun; /* Byte 3 */
+ /* Configuration Status Bits */
+ unsigned char pdev_fault_tolerant:1; /* Byte 4 Bit 0 */
+ unsigned char pdev_connected:1; /* Byte 4 Bit 1 */
+ unsigned char pdev_local_to_ctlr:1; /* Byte 4 Bit 2 */
+ unsigned char rsvd2:5; /* Byte 4 Bits 3-7 */
+ /* Multiple Host/Controller Status Bits */
+ unsigned char remote_host_dead:1; /* Byte 5 Bit 0 */
+ unsigned char remote_ctlr_dead:1; /* Byte 5 Bit 1 */
+ unsigned char rsvd3:6; /* Byte 5 Bits 2-7 */
+ enum myrs_devstate dev_state; /* Byte 6 */
+ unsigned char nego_data_width; /* Byte 7 */
+ unsigned short nego_sync_rate; /* Bytes 8-9 */
+ /* Multiported Physical Device Information */
+ unsigned char num_ports; /* Byte 10 */
+ unsigned char drv_access_bitmap; /* Byte 11 */
+ unsigned int rsvd4; /* Bytes 12-15 */
+ unsigned char ip_address[16]; /* Bytes 16-31 */
+ unsigned short max_tags; /* Bytes 32-33 */
+ /* Physical Device Operations Status */
+ unsigned char cc_in_progress:1; /* Byte 34 Bit 0 */
+ unsigned char rbld_in_progress:1; /* Byte 34 Bit 1 */
+ unsigned char makecc_in_progress:1; /* Byte 34 Bit 2 */
+ unsigned char pdevinit_in_progress:1; /* Byte 34 Bit 3 */
+ unsigned char migration_in_progress:1; /* Byte 34 Bit 4 */
+ unsigned char patrol_in_progress:1; /* Byte 34 Bit 5 */
+ unsigned char rsvd5:2; /* Byte 34 Bits 6-7 */
+ unsigned char long_op_status; /* Byte 35 */
+ unsigned char parity_errs; /* Byte 36 */
+ unsigned char soft_errs; /* Byte 37 */
+ unsigned char hard_errs; /* Byte 38 */
+ unsigned char misc_errs; /* Byte 39 */
+ unsigned char cmd_timeouts; /* Byte 40 */
+ unsigned char retries; /* Byte 41 */
+ unsigned char aborts; /* Byte 42 */
+ unsigned char pred_failures; /* Byte 43 */
+ unsigned int rsvd6; /* Bytes 44-47 */
+ unsigned short rsvd7; /* Bytes 48-49 */
+ unsigned short devsize_bytes; /* Bytes 50-51 */
+ unsigned int orig_devsize; /* Bytes 52-55 */
+ unsigned int cfg_devsize; /* Bytes 56-59 */
+ unsigned int rsvd8; /* Bytes 60-63 */
+ unsigned char pdev_name[16]; /* Bytes 64-79 */
+ unsigned char rsvd9[16]; /* Bytes 80-95 */
+ unsigned char rsvd10[32]; /* Bytes 96-127 */
+ unsigned char inquiry[36]; /* Bytes 128-163 */
+ unsigned char rsvd11[20]; /* Bytes 164-183 */
+ unsigned char rsvd12[8]; /* Bytes 184-191 */
+ u64 last_read_lba; /* Bytes 192-199 */
+ u64 last_write_lba; /* Bytes 200-207 */
+ u64 cc_lba; /* Bytes 208-215 */
+ u64 rbld_lba; /* Bytes 216-223 */
+ u64 makecc_lba; /* Bytes 224-231 */
+ u64 devinit_lba; /* Bytes 232-239 */
+ u64 migration_lba; /* Bytes 240-247 */
+ u64 patrol_lba; /* Bytes 248-255 */
+ unsigned char rsvd13[256]; /* Bytes 256-511 */
+};
+
+/*
+ * DAC960 V2 Firmware Health Status Buffer structure.
+ */
+struct myrs_fwstat {
+ unsigned int uptime_usecs; /* Bytes 0-3 */
+ unsigned int uptime_msecs; /* Bytes 4-7 */
+ unsigned int seconds; /* Bytes 8-11 */
+ unsigned char rsvd1[4]; /* Bytes 12-15 */
+ unsigned int epoch; /* Bytes 16-19 */
+ unsigned char rsvd2[4]; /* Bytes 20-23 */
+ unsigned int dbg_msgbuf_idx; /* Bytes 24-27 */
+ unsigned int coded_msgbuf_idx; /* Bytes 28-31 */
+ unsigned int cur_timetrace_page; /* Bytes 32-35 */
+ unsigned int cur_prof_page; /* Bytes 36-39 */
+ unsigned int next_evseq; /* Bytes 40-43 */
+ unsigned char rsvd3[4]; /* Bytes 44-47 */
+ unsigned char rsvd4[16]; /* Bytes 48-63 */
+ unsigned char rsvd5[64]; /* Bytes 64-127 */
+};
+
+/*
+ * DAC960 V2 Firmware Get Event reply structure.
+ */
+struct myrs_event {
+ unsigned int ev_seq; /* Bytes 0-3 */
+ unsigned int ev_time; /* Bytes 4-7 */
+ unsigned int ev_code; /* Bytes 8-11 */
+ unsigned char rsvd1; /* Byte 12 */
+ unsigned char channel; /* Byte 13 */
+ unsigned char target; /* Byte 14 */
+ unsigned char lun; /* Byte 15 */
+ unsigned int rsvd2; /* Bytes 16-19 */
+ unsigned int ev_parm; /* Bytes 20-23 */
+ unsigned char sense_data[40]; /* Bytes 24-63 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Control Bits structure.
+ */
+struct myrs_cmd_ctrl {
+ unsigned char fua:1; /* Byte 0 Bit 0 */
+ unsigned char disable_pgout:1; /* Byte 0 Bit 1 */
+ unsigned char rsvd1:1; /* Byte 0 Bit 2 */
+ unsigned char add_sge_mem:1; /* Byte 0 Bit 3 */
+ unsigned char dma_ctrl_to_host:1; /* Byte 0 Bit 4 */
+ unsigned char rsvd2:1; /* Byte 0 Bit 5 */
+ unsigned char no_autosense:1; /* Byte 0 Bit 6 */
+ unsigned char disc_prohibited:1; /* Byte 0 Bit 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Command Timeout structure.
+ */
+struct myrs_cmd_tmo {
+ unsigned char tmo_val:6; /* Byte 0 Bits 0-5 */
+ enum {
+ MYRS_TMO_SCALE_SECONDS = 0,
+ MYRS_TMO_SCALE_MINUTES = 1,
+ MYRS_TMO_SCALE_HOURS = 2,
+ MYRS_TMO_SCALE_RESERVED = 3
+ } __packed tmo_scale:2; /* Byte 0 Bits 6-7 */
+};
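
Editorial note: the 6-bit value plus 2-bit scale above express timeouts from seconds up to hours. A minimal sketch of an encoder, assuming the kernel's min() and DIV_ROUND_UP() helpers (this helper is illustrative, not part of the patch):

	static void myrs_encode_tmo(struct myrs_cmd_tmo *tmo, unsigned int secs)
	{
		if (secs <= 63) {
			tmo->tmo_val = secs;
			tmo->tmo_scale = MYRS_TMO_SCALE_SECONDS;
		} else {
			/* fall back to minutes, rounded up, capped at 63 */
			tmo->tmo_val = min(DIV_ROUND_UP(secs, 60U), 63U);
			tmo->tmo_scale = MYRS_TMO_SCALE_MINUTES;
		}
	}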
+
+/*
+ * DAC960 V2 Firmware Physical Device structure.
+ */
+struct myrs_pdev {
+ unsigned char lun; /* Byte 0 */
+ unsigned char target; /* Byte 1 */
+ unsigned char channel:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Logical Device structure.
+ */
+struct myrs_ldev {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned char rsvd:3; /* Byte 2 Bits 0-2 */
+ unsigned char ctlr:5; /* Byte 2 Bits 3-7 */
+} __packed;
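
Editorial note: both structures pack a full device address into three bytes so it fits the 64-byte mailbox formats below. An illustrative initialization (not from the patch), addressing target 5 on channel 2 of controller 0:

	struct myrs_pdev pdev = {
		.lun = 0,
		.target = 5,
		.channel = 2,	/* 3-bit field */
		.ctlr = 0,	/* 5-bit field */
	};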
+
+/*
+ * DAC960 V2 Firmware Operation Device type.
+ */
+enum myrs_opdev {
+ MYRS_PHYSICAL_DEVICE = 0x00,
+ MYRS_RAID_DEVICE = 0x01,
+ MYRS_PHYSICAL_CHANNEL = 0x02,
+ MYRS_RAID_CHANNEL = 0x03,
+ MYRS_PHYSICAL_CONTROLLER = 0x04,
+ MYRS_RAID_CONTROLLER = 0x05,
+ MYRS_CONFIGURATION_GROUP = 0x10,
+ MYRS_ENCLOSURE = 0x11,
+} __packed;
+
+/*
+ * DAC960 V2 Firmware Translate Physical To Logical Device structure.
+ */
+struct myrs_devmap {
+ unsigned short ldev_num; /* Bytes 0-1 */
+ unsigned short rsvd; /* Bytes 2-3 */
+ unsigned char prev_boot_ctlr; /* Byte 4 */
+ unsigned char prev_boot_channel; /* Byte 5 */
+ unsigned char prev_boot_target; /* Byte 6 */
+ unsigned char prev_boot_lun; /* Byte 7 */
+};
+
+/*
+ * DAC960 V2 Firmware Scatter/Gather List Entry structure.
+ */
+struct myrs_sge {
+ u64 sge_addr; /* Bytes 0-7 */
+ u64 sge_count; /* Bytes 8-15 */
+};
+
+/*
+ * DAC960 V2 Firmware Data Transfer Memory Address structure.
+ */
+union myrs_sgl {
+ struct myrs_sge sge[2]; /* Bytes 0-31 */
+ struct {
+ unsigned short sge0_len; /* Bytes 0-1 */
+ unsigned short sge1_len; /* Bytes 2-3 */
+ unsigned short sge2_len; /* Bytes 4-5 */
+ unsigned short rsvd; /* Bytes 6-7 */
+ u64 sge0_addr; /* Bytes 8-15 */
+ u64 sge1_addr; /* Bytes 16-23 */
+ u64 sge2_addr; /* Bytes 24-31 */
+ } ext;
+};
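
Editorial note: the union gives two views of the 32-byte transfer descriptor: two inline 16-byte scatter/gather entries, or an extended form with three length/address pairs. A minimal sketch for the common single-segment case (the helper name is mine, not the driver's):

	static void myrs_fill_single_sge(union myrs_sgl *sgl,
					 dma_addr_t addr, size_t len)
	{
		sgl->sge[0].sge_addr = addr;
		sgl->sge[0].sge_count = len;
		sgl->sge[1].sge_addr = 0;
		sgl->sge[1].sge_count = 0;
	}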
+
+/*
+ * 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+ */
+union myrs_cmd_mbox {
+ unsigned int words[16]; /* Words 0-15 */
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } common;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned char cdb[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_10;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size; /* Bytes 4-7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ unsigned char cdb_len; /* Byte 21 */
+ unsigned short rsvd; /* Bytes 22-23 */
+ u64 cdb_addr; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } SCSI_255;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short rsvd1; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd2[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ctlr_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } ldev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char rsvd[10]; /* Bytes 22-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } pdev_info;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned short evnum_upper; /* Bytes 16-17 */
+ unsigned char ctlr_num; /* Byte 18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned short evnum_lower; /* Bytes 22-23 */
+ unsigned char rsvd[8]; /* Bytes 24-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } get_event;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ union {
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ };
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_devstate state; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } set_devstate;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_ldev ldev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char restore_consistency:1; /* Byte 22 Bit 0 */
+ unsigned char initialized_area_only:1; /* Byte 22 Bit 1 */
+ unsigned char rsvd1:6; /* Byte 22 Bits 2-7 */
+ unsigned char rsvd2[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } cc;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ unsigned char first_cmd_mbox_size_kb; /* Byte 4 */
+ unsigned char first_stat_mbox_size_kb; /* Byte 5 */
+ unsigned char second_cmd_mbox_size_kb; /* Byte 6 */
+ unsigned char second_stat_mbox_size_kb; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ unsigned int rsvd1:24; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ unsigned char fwstat_buf_size_kb; /* Byte 22 */
+ unsigned char rsvd2; /* Byte 23 */
+ u64 fwstat_buf_addr; /* Bytes 24-31 */
+ u64 first_cmd_mbox_addr; /* Bytes 32-39 */
+ u64 first_stat_mbox_addr; /* Bytes 40-47 */
+ u64 second_cmd_mbox_addr; /* Bytes 48-55 */
+ u64 second_stat_mbox_addr; /* Bytes 56-63 */
+ } set_mbox;
+ struct {
+ unsigned short id; /* Bytes 0-1 */
+ enum myrs_cmd_opcode opcode; /* Byte 2 */
+ struct myrs_cmd_ctrl control; /* Byte 3 */
+ u32 dma_size:24; /* Bytes 4-6 */
+ unsigned char dma_num; /* Byte 7 */
+ u64 sense_addr; /* Bytes 8-15 */
+ struct myrs_pdev pdev; /* Bytes 16-18 */
+ struct myrs_cmd_tmo tmo; /* Byte 19 */
+ unsigned char sense_len; /* Byte 20 */
+ enum myrs_ioctl_opcode ioctl_opcode; /* Byte 21 */
+ enum myrs_opdev opdev; /* Byte 22 */
+ unsigned char rsvd[9]; /* Bytes 23-31 */
+ union myrs_sgl dma_addr; /* Bytes 32-63 */
+ } dev_op;
+};
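
Editorial note: every view of the union shares the same first 16 bytes (id, opcode, control bits, transfer size, sense address); bytes 16-31 are reinterpreted per command type. A hedged sketch of populating the ctlr_info view for a get-controller-info ioctl; the MYRS_CMD_OP_IOCTL and MYRS_IOCTL_GET_CTLR_INFO enumerator names are assumed from earlier in this header and may differ:

	static void myrs_build_ctlr_info_cmd(union myrs_cmd_mbox *mbox,
					     unsigned short id,
					     dma_addr_t info_addr)
	{
		memset(mbox, 0, sizeof(*mbox));
		mbox->ctlr_info.id = id;
		mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;	/* assumed name */
		mbox->ctlr_info.control.dma_ctrl_to_host = 1;
		mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO; /* assumed name */
		mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
		mbox->ctlr_info.dma_addr.sge[0].sge_addr = info_addr;
		mbox->ctlr_info.dma_addr.sge[0].sge_count = sizeof(struct myrs_ctlr_info);
	}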
+
+/*
+ * DAC960 V2 Firmware Controller Status Mailbox structure.
+ */
+struct myrs_stat_mbox {
+ unsigned short id; /* Bytes 0-1 */
+ unsigned char status; /* Byte 2 */
+ unsigned char sense_len; /* Byte 3 */
+ int residual; /* Bytes 4-7 */
+};
+
+struct myrs_cmdblk {
+ union myrs_cmd_mbox mbox;
+ unsigned char status;
+ unsigned char sense_len;
+ int residual;
+ struct completion *complete;
+ struct myrs_sge *sgl;
+ dma_addr_t sgl_addr;
+ unsigned char *dcdb;
+ dma_addr_t dcdb_dma;
+ unsigned char *sense;
+ dma_addr_t sense_addr;
+};
+
+/*
+ * DAC960 Driver Controller structure.
+ */
+struct myrs_hba {
+ void __iomem *io_base;
+ void __iomem *mmio_base;
+ phys_addr_t io_addr;
+ phys_addr_t pci_addr;
+ unsigned int irq;
+
+ unsigned char model_name[28];
+ unsigned char fw_version[12];
+
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
+
+ unsigned int epoch;
+ unsigned int next_evseq;
+ /* Monitor flags */
+ bool needs_update;
+ bool disable_enc_msg;
+
+ struct workqueue_struct *work_q;
+ char work_q_name[20];
+ struct delayed_work monitor_work;
+ unsigned long primary_monitor_time;
+ unsigned long secondary_monitor_time;
+
+ spinlock_t queue_lock;
+
+ struct dma_pool *sg_pool;
+ struct dma_pool *sense_pool;
+ struct dma_pool *dcdb_pool;
+
+ void (*write_cmd_mbox)(union myrs_cmd_mbox *next_mbox,
+ union myrs_cmd_mbox *cmd_mbox);
+ void (*get_cmd_mbox)(void __iomem *base);
+ void (*disable_intr)(void __iomem *base);
+ void (*reset)(void __iomem *base);
+
+ dma_addr_t cmd_mbox_addr;
+ size_t cmd_mbox_size;
+ union myrs_cmd_mbox *first_cmd_mbox;
+ union myrs_cmd_mbox *last_cmd_mbox;
+ union myrs_cmd_mbox *next_cmd_mbox;
+ union myrs_cmd_mbox *prev_cmd_mbox1;
+ union myrs_cmd_mbox *prev_cmd_mbox2;
+
+ dma_addr_t stat_mbox_addr;
+ size_t stat_mbox_size;
+ struct myrs_stat_mbox *first_stat_mbox;
+ struct myrs_stat_mbox *last_stat_mbox;
+ struct myrs_stat_mbox *next_stat_mbox;
+
+ struct myrs_cmdblk dcmd_blk;
+ struct myrs_cmdblk mcmd_blk;
+ struct mutex dcmd_mutex;
+
+ struct myrs_fwstat *fwstat_buf;
+ dma_addr_t fwstat_addr;
+
+ struct myrs_ctlr_info *ctlr_info;
+ struct mutex cinfo_mutex;
+
+ struct myrs_event *event_buf;
+};
+
+typedef unsigned char (*enable_mbox_t)(void __iomem *base, dma_addr_t addr);
+typedef int (*myrs_hwinit_t)(struct pci_dev *pdev,
+ struct myrs_hba *c, void __iomem *base);
+
+struct myrs_privdata {
+ myrs_hwinit_t hw_init;
+ irq_handler_t irq_handler;
+ unsigned int mmio_size;
+};
+
+/*
+ * DAC960 GEM Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_GEM_mmio_size 0x600
+
+enum DAC960_GEM_reg_offset {
+ DAC960_GEM_IDB_READ_OFFSET = 0x214,
+ DAC960_GEM_IDB_CLEAR_OFFSET = 0x218,
+ DAC960_GEM_ODB_READ_OFFSET = 0x224,
+ DAC960_GEM_ODB_CLEAR_OFFSET = 0x228,
+ DAC960_GEM_IRQSTS_OFFSET = 0x208,
+ DAC960_GEM_IRQMASK_READ_OFFSET = 0x22C,
+ DAC960_GEM_IRQMASK_CLEAR_OFFSET = 0x230,
+ DAC960_GEM_CMDMBX_OFFSET = 0x510,
+ DAC960_GEM_CMDSTS_OFFSET = 0x518,
+ DAC960_GEM_ERRSTS_READ_OFFSET = 0x224,
+ DAC960_GEM_ERRSTS_CLEAR_OFFSET = 0x228,
+};
+
+/*
+ * DAC960 GEM Series Inbound Door Bell Register.
+ */
+#define DAC960_GEM_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_GEM_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_GEM_IDB_GEN_IRQ 0x04
+#define DAC960_GEM_IDB_CTRL_RESET 0x08
+#define DAC960_GEM_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_GEM_IDB_HWMBOX_FULL 0x01
+#define DAC960_GEM_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 GEM Series Outbound Door Bell Register.
+ */
+#define DAC960_GEM_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_GEM_ODB_MMBOX_ACK_IRQ 0x02
+#define DAC960_GEM_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_GEM_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 GEM Series Interrupt Mask Register.
+ */
+#define DAC960_GEM_IRQMASK_HWMBOX_IRQ 0x01
+#define DAC960_GEM_IRQMASK_MMBOX_IRQ 0x02
+
+/*
+ * DAC960 GEM Series Error Status Register.
+ */
+#define DAC960_GEM_ERRSTS_PENDING 0x20
+
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit PCI address space register. The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
+ * without HIGHMEM, dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+ union {
+ u64 wq;
+ uint wl[2];
+ } u;
+
+ u.wq = addr;
+
+ writel(u.wl[0], write_address);
+ writel(u.wl[1], write_address + 4);
+}
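
Editorial note: a usage sketch, with an assumed controller-state variable cs; the offset shown is the GEM command mailbox, but any 64-bit address register works the same way:

	dma_addr_writeql(cs->cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);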
+
+/*
+ * DAC960 BA Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_BA_mmio_size 0x80
+
+enum DAC960_BA_reg_offset {
+ DAC960_BA_IRQSTS_OFFSET = 0x30,
+ DAC960_BA_IRQMASK_OFFSET = 0x34,
+ DAC960_BA_CMDMBX_OFFSET = 0x50,
+ DAC960_BA_CMDSTS_OFFSET = 0x58,
+ DAC960_BA_IDB_OFFSET = 0x60,
+ DAC960_BA_ODB_OFFSET = 0x61,
+ DAC960_BA_ERRSTS_OFFSET = 0x63,
+};
+
+/*
+ * DAC960 BA Series Inbound Door Bell Register.
+ */
+#define DAC960_BA_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_BA_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_BA_IDB_GEN_IRQ 0x04
+#define DAC960_BA_IDB_CTRL_RESET 0x08
+#define DAC960_BA_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_BA_IDB_HWMBOX_EMPTY 0x01
+#define DAC960_BA_IDB_INIT_DONE 0x02
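
Editorial note: on the BA interface the doorbell registers are byte-wide, so posting a hardware-mailbox command is a single writeb(). A minimal sketch modeled on the defines above (the helper name is mine; the real functions live in myrs.c):

	static inline void myrs_ba_hw_mbox_new_cmd(void __iomem *base)
	{
		writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD,
		       base + DAC960_BA_IDB_OFFSET);
	}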
+
+/*
+ * DAC960 BA Series Outbound Door Bell Register.
+ */
+#define DAC960_BA_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_BA_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_BA_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_BA_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 BA Series Interrupt Mask Register.
+ */
+#define DAC960_BA_IRQMASK_DISABLE_IRQ 0x04
+#define DAC960_BA_IRQMASK_DISABLEW_I2O 0x08
+
+/*
+ * DAC960 BA Series Error Status Register.
+ */
+#define DAC960_BA_ERRSTS_PENDING 0x04
+
+/*
+ * DAC960 LP Series Controller Interface Register Offsets.
+ */
+
+#define DAC960_LP_mmio_size 0x80
+
+enum DAC960_LP_reg_offset {
+ DAC960_LP_CMDMBX_OFFSET = 0x10,
+ DAC960_LP_CMDSTS_OFFSET = 0x18,
+ DAC960_LP_IDB_OFFSET = 0x20,
+ DAC960_LP_ODB_OFFSET = 0x2C,
+ DAC960_LP_ERRSTS_OFFSET = 0x2E,
+ DAC960_LP_IRQSTS_OFFSET = 0x30,
+ DAC960_LP_IRQMASK_OFFSET = 0x34,
+};
+
+/*
+ * DAC960 LP Series Inbound Door Bell Register.
+ */
+#define DAC960_LP_IDB_HWMBOX_NEW_CMD 0x01
+#define DAC960_LP_IDB_HWMBOX_ACK_STS 0x02
+#define DAC960_LP_IDB_GEN_IRQ 0x04
+#define DAC960_LP_IDB_CTRL_RESET 0x08
+#define DAC960_LP_IDB_MMBOX_NEW_CMD 0x10
+
+#define DAC960_LP_IDB_HWMBOX_FULL 0x01
+#define DAC960_LP_IDB_INIT_IN_PROGRESS 0x02
+
+/*
+ * DAC960 LP Series Outbound Door Bell Register.
+ */
+#define DAC960_LP_ODB_HWMBOX_ACK_IRQ 0x01
+#define DAC960_LP_ODB_MMBOX_ACK_IRQ 0x02
+
+#define DAC960_LP_ODB_HWMBOX_STS_AVAIL 0x01
+#define DAC960_LP_ODB_MMBOX_STS_AVAIL 0x02
+
+/*
+ * DAC960 LP Series Interrupt Mask Register.
+ */
+#define DAC960_LP_IRQMASK_DISABLE_IRQ 0x04
+
+/*
+ * DAC960 LP Series Error Status Register.
+ */
+#define DAC960_LP_ERRSTS_PENDING 0x04
+
+#endif /* _MYRS_H */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 8620ac5d6e41..5aac3e801903 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -2638,7 +2638,7 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* setup DMA
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
goto scsi_unregister;
}
@@ -2646,7 +2646,9 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate autoparam DMA resource.
*/
- data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
+ data->autoparam = dma_alloc_coherent(&pdev->dev,
+ sizeof(nsp32_autoparam), &(data->auto_paddr),
+ GFP_KERNEL);
if (data->autoparam == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto scsi_unregister;
@@ -2655,8 +2657,8 @@ static int nsp32_detect(struct pci_dev *pdev)
/*
* allocate scatter-gather DMA resource.
*/
- data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
- &(data->sg_paddr));
+ data->sg_list = dma_alloc_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
+ &data->sg_paddr, GFP_KERNEL);
if (data->sg_list == NULL) {
nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
goto free_autoparam;
@@ -2761,11 +2763,11 @@ static int nsp32_detect(struct pci_dev *pdev)
free_irq(host->irq, data);
free_sg_list:
- pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&pdev->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
free_autoparam:
- pci_free_consistent(pdev, sizeof(nsp32_autoparam),
+ dma_free_coherent(&pdev->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
scsi_unregister:
@@ -2780,12 +2782,12 @@ static int nsp32_release(struct Scsi_Host *host)
nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;
if (data->autoparam) {
- pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
+ dma_free_coherent(&data->Pci->dev, sizeof(nsp32_autoparam),
data->autoparam, data->auto_paddr);
}
if (data->sg_list) {
- pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
+ dma_free_coherent(&data->Pci->dev, NSP32_SG_TABLE_SIZE,
data->sg_list, data->sg_paddr);
}
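
Editorial note: the nsp32 hunks above follow the mechanical conversion pattern used throughout this series, shown schematically for a hypothetical driver:

	/* before */
	buf = pci_alloc_consistent(pdev, size, &dma);
	pci_free_consistent(pdev, size, buf, dma);

	/* after: generic DMA API on &pdev->dev, with an explicit GFP_KERNEL */
	buf = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, dma);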
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 67b14576fff2..e19fa883376f 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -445,7 +445,7 @@ static void _put_request(struct request *rq)
* code paths.
*/
if (unlikely(rq->bio))
- blk_end_request(rq, BLK_STS_IOERR, blk_rq_bytes(rq));
+ blk_mq_end_request(rq, BLK_STS_IOERR);
else
blk_put_request(rq);
}
diff --git a/drivers/scsi/pm8001/pm8001_defs.h b/drivers/scsi/pm8001/pm8001_defs.h
index 199527dbaaa1..48e0624ecc68 100644
--- a/drivers/scsi/pm8001/pm8001_defs.h
+++ b/drivers/scsi/pm8001/pm8001_defs.h
@@ -132,4 +132,12 @@ enum pm8001_hba_info_flags {
PM8001F_RUN_TIME = (1U << 1),
};
+/*
+ * Phy Status
+ */
+#define PHY_LINK_DISABLE 0x00
+#define PHY_LINK_DOWN 0x01
+#define PHY_STATE_LINK_UP_SPCV 0x2
+#define PHY_STATE_LINK_UP_SPC 0x1
+
#endif
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 4dd6cad330e8..d0bb357034d8 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -1479,6 +1479,12 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
} else {
u32 producer_index;
void *pi_virt = circularQ->pi_virt;
+ /* A spurious interrupt can occur during setup if we are
+ * kexec-ing and the driver does a doorbell access with the
+ * pre-kexec outbound queue interrupt setup still in place.
+ */
+ if (!pi_virt)
+ break;
/* Update the producer index from SPC */
producer_index = pm8001_read_32(pi_virt);
circularQ->producer_index = cpu_to_le32(producer_index);
@@ -2414,7 +2420,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3810,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
" status = %x\n", status));
if (status == 0) {
phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
break;
@@ -4196,12 +4203,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
void
pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
@@ -4248,13 +4255,13 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -4287,10 +4294,10 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4369,7 +4376,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
u32 opc = OPC_INB_SATA_HOST_OPSTART;
memset(&sata_cmd, 0, sizeof(sata_cmd));
circularQ = &pm8001_ha->inbnd_q_tbl[0];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
diff --git a/drivers/scsi/pm8001/pm8001_hwi.h b/drivers/scsi/pm8001/pm8001_hwi.h
index e4867e690c84..6d91e2446542 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.h
+++ b/drivers/scsi/pm8001/pm8001_hwi.h
@@ -131,10 +131,6 @@
#define LINKRATE_30 (0x02 << 8)
#define LINKRATE_60 (0x04 << 8)
-/* for phy state */
-
-#define PHY_STATE_LINK_UP_SPC 0x1
-
/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */
#define GSM_SM_BASE 0x4F0000
struct mpi_msg_hdr{
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 7a697ca68501..d71e7e4ec29c 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -121,7 +121,7 @@ static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
phy->pm8001_ha = pm8001_ha;
sas_phy->enabled = (phy_id < pm8001_ha->chip->n_phy) ? 1 : 0;
sas_phy->class = SAS;
@@ -152,7 +152,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
- pci_free_consistent(pm8001_ha->pdev,
+ dma_free_coherent(&pm8001_ha->pdev->dev,
(pm8001_ha->memoryMap.region[i].total_len +
pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -501,30 +501,12 @@ static int pci_go_44(struct pci_dev *pdev)
{
int rc;
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(44))) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(44));
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "44-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
dev_printk(KERN_ERR, &pdev->dev,
"32-bit DMA enable failed\n");
- return rc;
- }
- rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
}
return rc;
}
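
Editorial note: for readability, pci_go_44() as it reads after this change, reconstructed from the hunk above (the file itself is authoritative):

	static int pci_go_44(struct pci_dev *pdev)
	{
		int rc;

		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		if (rc) {
			rc = dma_set_mask_and_coherent(&pdev->dev,
						       DMA_BIT_MASK(32));
			if (rc)
				dev_printk(KERN_ERR, &pdev->dev,
					   "32-bit DMA enable failed\n");
		}
		return rc;
	}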
@@ -1067,6 +1049,7 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
if (rc)
goto err_out_shost;
scsi_scan_host(pm8001_ha->shost);
+ pm8001_ha->flags = PM8001F_RUN_TIME;
return 0;
err_out_shost:
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 947d6017d004..b3be49d41375 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -116,8 +116,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr,
u64 align_offset = 0;
if (align)
align_offset = (dma_addr_t)align - 1;
- mem_virt_alloc = pci_zalloc_consistent(pdev, mem_size + align,
- &mem_dma_handle);
+ mem_virt_alloc = dma_zalloc_coherent(&pdev->dev, mem_size + align,
+ &mem_dma_handle, GFP_KERNEL);
if (!mem_virt_alloc) {
pm8001_printk("memory allocation error\n");
return -1;
@@ -157,9 +157,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
+ struct sas_ha_struct *sas_ha;
+ struct pm8001_phy *phy;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
pm8001_ha = sas_phy->ha->lldd_ha;
+ phy = &pm8001_ha->phy[phy_id];
pm8001_ha->phy[phy_id].enable_completion = &completion;
switch (func) {
case PHY_FUNC_SET_LINK_RATE:
@@ -172,7 +175,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
pm8001_ha->phy[phy_id].maximum_linkrate =
rates->maximum_linkrate;
}
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -180,7 +183,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_HARD_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -188,7 +191,7 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_HARD_RESET);
break;
case PHY_FUNC_LINK_RESET:
- if (pm8001_ha->phy[phy_id].phy_state == 0) {
+ if (pm8001_ha->phy[phy_id].phy_state == PHY_LINK_DISABLE) {
PM8001_CHIP_DISP->phy_start_req(pm8001_ha, phy_id);
wait_for_completion(&completion);
}
@@ -200,6 +203,25 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
PHY_LINK_RESET);
break;
case PHY_FUNC_DISABLE:
+ if (pm8001_ha->chip_id != chip_8001) {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPCV) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ } else {
+ if (pm8001_ha->phy[phy_id].phy_state ==
+ PHY_STATE_LINK_UP_SPC) {
+ sas_ha = pm8001_ha->sas;
+ sas_phy_disconnected(&phy->sas_phy);
+ sas_ha->notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+ phy->phy_attached = 0;
+ }
+ }
PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id);
break;
case PHY_FUNC_GET_EVENTS:
@@ -374,6 +396,13 @@ static int pm8001_task_exec(struct sas_task *task,
return 0;
}
pm8001_ha = pm8001_find_ha_by_dev(task->dev);
+ if (pm8001_ha->controller_fatal_error) {
+ struct task_status_struct *ts = &t->task_status;
+
+ ts->resp = SAS_TASK_UNDELIVERED;
+ t->task_done(t);
+ return 0;
+ }
PM8001_IO_DBG(pm8001_ha, pm8001_printk("pm8001_task_exec device \n "));
spin_lock_irqsave(&pm8001_ha->lock, flags);
do {
@@ -466,7 +495,7 @@ err_out:
dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc);
if (!sas_protocol_ata(t->task_proto))
if (n_elem)
- dma_unmap_sg(pm8001_ha->dev, t->scatter, n_elem,
+ dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter,
t->data_dir);
out_done:
spin_unlock_irqrestore(&pm8001_ha->lock, flags);
@@ -504,9 +533,9 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
switch (task->task_proto) {
case SAS_PROTOCOL_SMP:
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dma_unmap_sg(pm8001_ha->dev, &task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
break;
case SAS_PROTOCOL_SATA:
@@ -1020,13 +1049,11 @@ int pm8001_I_T_nexus_event_handler(struct domain_device *dev)
struct pm8001_device *pm8001_dev;
struct pm8001_hba_info *pm8001_ha;
struct sas_phy *phy;
- u32 device_id = 0;
if (!dev || !dev->lldd_dev)
return -1;
pm8001_dev = dev->lldd_dev;
- device_id = pm8001_dev->device_id;
pm8001_ha = pm8001_find_ha_by_dev(dev);
PM8001_EH_DBG(pm8001_ha,
@@ -1159,7 +1186,6 @@ int pm8001_abort_task(struct sas_task *task)
{
unsigned long flags;
u32 tag;
- u32 device_id;
struct domain_device *dev ;
struct pm8001_hba_info *pm8001_ha;
struct scsi_lun lun;
@@ -1173,7 +1199,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
- device_id = pm8001_dev->device_id;
phy_id = pm8001_dev->attached_phy;
rc = pm8001_find_tag(task, &tag);
if (rc == 0) {
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 80b4dd6df0c2..f88b0d33c385 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -58,7 +58,7 @@
#include "pm8001_defs.h"
#define DRV_NAME "pm80xx"
-#define DRV_VERSION "0.1.38"
+#define DRV_VERSION "0.1.39"
#define PM8001_FAIL_LOGGING 0x01 /* Error message logging */
#define PM8001_INIT_LOGGING 0x02 /* driver init logging */
#define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */
@@ -538,6 +538,7 @@ struct pm8001_hba_info {
u32 logging_level;
u32 fw_status;
u32 smp_exp_mode;
+ bool controller_fatal_error;
const struct firmware *fw_image;
struct isr_param irq_vector[PM8001_MAX_MSIX_VEC];
u32 reset_in_progress;
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 42f0405601ad..63e4f7d34d6c 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -577,6 +577,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size);
pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity);
+ /* Update Fatal error interrupt vector */
+ pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt |=
+ ((pm8001_ha->number_of_intr - 1) << 8);
pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT,
pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt);
pm8001_mw32(address, MAIN_EVENT_CRC_CHECK,
@@ -1110,6 +1113,9 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
return -EBUSY;
}
+ /* Initialize the controller fatal error flag */
+ pm8001_ha->controller_fatal_error = false;
+
/* Initialize pci space address eg: mpi offset */
init_pci_device_addresses(pm8001_ha);
init_default_table_values(pm8001_ha);
@@ -1218,13 +1224,17 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha)
u32 bootloader_state;
u32 ibutton0, ibutton1;
- /* Check if MPI is in ready state to reset */
- if (mpi_uninit_check(pm8001_ha) != 0) {
- PM8001_FAIL_DBG(pm8001_ha,
- pm8001_printk("MPI state is not ready\n"));
- return -1;
+ /* Process MPI table uninitialization only if FW is ready */
+ if (!pm8001_ha->controller_fatal_error) {
+ /* Check if MPI is in ready state to reset */
+ if (mpi_uninit_check(pm8001_ha) != 0) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "MPI state is not ready scratch1 :0x%x\n",
+ regval));
+ return -1;
+ }
}
-
/* checked for reset register normal state; 0x0 */
regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET);
PM8001_INIT_DBG(pm8001_ha,
@@ -2123,7 +2133,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
sata_resp = &psataPayload->sata_resp[0];
resp = (struct ata_task_resp *)ts->buf;
if (t->ata_task.dma_xfer == 0 &&
- t->data_dir == PCI_DMA_FROMDEVICE) {
+ t->data_dir == DMA_FROM_DEVICE) {
len = sizeof(struct pio_setup_fis);
PM8001_IO_DBG(pm8001_ha,
pm8001_printk("PIO read len = %d\n", len));
@@ -3118,8 +3128,9 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n",
status, phy_id));
if (status == 0) {
- phy->phy_state = 1;
- if (pm8001_ha->flags == PM8001F_RUN_TIME)
+ phy->phy_state = PHY_LINK_DOWN;
+ if (pm8001_ha->flags == PM8001F_RUN_TIME &&
+ phy->enable_completion != NULL)
complete(phy->enable_completion);
}
return 0;
@@ -3211,7 +3222,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
return 0;
}
phy->phy_attached = 0;
- phy->phy_state = 0;
+ phy->phy_state = PHY_LINK_DISABLE;
break;
case HW_EVENT_PORT_INVALID:
PM8001_MSG_DBG(pm8001_ha,
@@ -3384,13 +3395,14 @@ static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phyid =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phyid];
PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("phy:0x%x status:0x%x\n",
phyid, status));
- if (status == 0)
- phy->phy_state = 0;
+ if (status == PHY_STOP_SUCCESS ||
+ status == PHY_STOP_ERR_DEVICE_ATTACHED)
+ phy->phy_state = PHY_LINK_DISABLE;
return 0;
}
@@ -3752,6 +3764,46 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
}
+static void print_scratchpad_registers(struct pm8001_hba_info *pm8001_ha)
+{
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_1:0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_1)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_2: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_2)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_3: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_3)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_4: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_4)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_HOST_SCRATCH_PAD_5: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_5)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_0: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_6)));
+ PM8001_FAIL_DBG(pm8001_ha,
+ pm8001_printk("MSGU_RSVD_SCRATCH_PAD_1: 0x%x\n",
+ pm8001_cr32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_7)));
+}
+
static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
{
struct outbound_queue_table *circularQ;
@@ -3759,10 +3811,28 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u8 uninitialized_var(bc);
u32 ret = MPI_IO_STATUS_FAIL;
unsigned long flags;
+ u32 regval;
+ if (vec == (pm8001_ha->number_of_intr - 1)) {
+ regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
+ SCRATCH_PAD_MIPSALL_READY) {
+ pm8001_ha->controller_fatal_error = true;
+ PM8001_FAIL_DBG(pm8001_ha, pm8001_printk(
+ "Firmware Fatal error! Regval:0x%x\n", regval));
+ print_scratchpad_registers(pm8001_ha);
+ return ret;
+ }
+ }
spin_lock_irqsave(&pm8001_ha->lock, flags);
circularQ = &pm8001_ha->outbnd_q_tbl[vec];
do {
+ /* A spurious interrupt can occur during setup if we are
+ * kexec-ing and the driver does a doorbell access with the
+ * pre-kexec outbound queue interrupt setup still in place.
+ */
+ if (!circularQ->pi_virt)
+ break;
ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
if (MPI_IO_STATUS_SUCCESS == ret) {
/* process the outbound message */
@@ -3785,12 +3855,12 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
return ret;
}
-/* PCI_DMA_... to our direction translation. */
+/* DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
- [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
- [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */
- [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */
- [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */
+ [DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */
+ [DMA_TO_DEVICE] = DATA_DIR_OUT, /* OUTBOUND */
+ [DMA_FROM_DEVICE] = DATA_DIR_IN, /* INBOUND */
+ [DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */
};
static void build_smp_cmd(u32 deviceID, __le32 hTag,
@@ -3832,13 +3902,13 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
* DMA-map SMP request, response buffers
*/
sg_req = &task->smp_task.smp_req;
- elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, DMA_TO_DEVICE);
if (!elem)
return -ENOMEM;
req_len = sg_dma_len(sg_req);
sg_resp = &task->smp_task.smp_resp;
- elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+ elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, DMA_FROM_DEVICE);
if (!elem) {
rc = -ENOMEM;
goto err_out;
@@ -3929,10 +3999,10 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
err_out_2:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
err_out:
dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
return rc;
}
@@ -4156,7 +4226,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_INB_NUM;
circularQ = &pm8001_ha->inbnd_q_tbl[q_index];
- if (task->data_dir == PCI_DMA_NONE) {
+ if (task->data_dir == DMA_NONE) {
ATAP = 0x04; /* no data*/
PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n"));
} else if (likely(!task->ata_task.device_control_reg_update)) {
@@ -4606,9 +4676,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha,
void pm8001_set_phy_profile(struct pm8001_hba_info *pm8001_ha,
u32 length, u8 *buf)
{
- u32 page_code, i;
+ u32 i;
- page_code = SAS_PHY_ANALOG_SETTINGS_PAGE;
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
mpi_set_phy_profile_req(pm8001_ha,
SAS_PHY_ANALOG_SETTINGS_PAGE, i, length, (u32 *)buf);
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index 889e69ce3689..84d7426441bf 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -170,6 +170,10 @@
#define LINKRATE_60 (0x04 << 8)
#define LINKRATE_120 (0x08 << 8)
+/*phy_stop*/
+#define PHY_STOP_SUCCESS 0x00
+#define PHY_STOP_ERR_DEVICE_ATTACHED 0x1046
+
/* phy_profile */
#define SAS_PHY_ANALOG_SETTINGS_PAGE 0x04
#define PHY_DWORD_LENGTH 0xC
@@ -216,8 +220,6 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
-/* for phy state */
-#define PHY_STATE_LINK_UP_SPCV 0x2
/*
Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second.
Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128
@@ -1384,6 +1386,9 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
+#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+ SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_RAAE_READY)
/* boot loader state */
#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 0a5dd5595dd3..d5a4f17fce51 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2855,12 +2855,12 @@ static int qedf_set_fcoe_pf_param(struct qedf_ctx *qedf)
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
qedf->num_queues);
- qedf->p_cpuq = pci_alloc_consistent(qedf->pdev,
+ qedf->p_cpuq = dma_alloc_coherent(&qedf->pdev->dev,
qedf->num_queues * sizeof(struct qedf_glbl_q_params),
- &qedf->hw_p_cpuq);
+ &qedf->hw_p_cpuq, GFP_KERNEL);
if (!qedf->p_cpuq) {
- QEDF_ERR(&(qedf->dbg_ctx), "pci_alloc_consistent failed.\n");
+ QEDF_ERR(&(qedf->dbg_ctx), "dma_alloc_coherent failed.\n");
return 1;
}
@@ -2929,7 +2929,7 @@ static void qedf_free_fcoe_pf_param(struct qedf_ctx *qedf)
if (qedf->p_cpuq) {
size = qedf->num_queues * sizeof(struct qedf_glbl_q_params);
- pci_free_consistent(qedf->pdev, size, qedf->p_cpuq,
+ dma_free_coherent(&qedf->pdev->dev, size, qedf->p_cpuq,
qedf->hw_p_cpuq);
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e5bd035ebad0..105b0e4d7818 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -806,11 +806,11 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
memset(&qedi->pf_params.iscsi_pf_params, 0,
sizeof(qedi->pf_params.iscsi_pf_params));
- qedi->p_cpuq = pci_alloc_consistent(qedi->pdev,
+ qedi->p_cpuq = dma_alloc_coherent(&qedi->pdev->dev,
qedi->num_queues * sizeof(struct qedi_glbl_q_params),
- &qedi->hw_p_cpuq);
+ &qedi->hw_p_cpuq, GFP_KERNEL);
if (!qedi->p_cpuq) {
- QEDI_ERR(&qedi->dbg_ctx, "pci_alloc_consistent fail\n");
+ QEDI_ERR(&qedi->dbg_ctx, "dma_alloc_coherent fail\n");
rval = -1;
goto err_alloc_mem;
}
@@ -871,7 +871,7 @@ static void qedi_free_iscsi_pf_param(struct qedi_ctx *qedi)
if (qedi->p_cpuq) {
size = qedi->num_queues * sizeof(struct qedi_glbl_q_params);
- pci_free_consistent(qedi->pdev, size, qedi->p_cpuq,
+ dma_free_coherent(&qedi->pdev->dev, size, qedi->p_cpuq,
qedi->hw_p_cpuq);
}
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 390775d5c918..15a50cc7e4b3 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1750,7 +1750,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
uint8_t *sp, *tbuf;
dma_addr_t p_tbuf;
- tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
+ tbuf = dma_alloc_coherent(&ha->pdev->dev, 8000, &p_tbuf, GFP_KERNEL);
if (!tbuf)
return -ENOMEM;
#endif
@@ -1841,7 +1841,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha)
out:
#if DUMP_IT_BACK
- pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
+ dma_free_coherent(&ha->pdev->dev, 8000, tbuf, p_tbuf);
#endif
return err;
}
@@ -4259,8 +4259,8 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->devnum = devnum; /* specifies microcode load address */
#ifdef QLA_64BIT_PTR
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4270,7 +4270,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
ha->host_no);
#else
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "scsi(%li): Unable to set a "
"suitable DMA mask - aborting\n", ha->host_no);
error = -ENODEV;
@@ -4278,17 +4278,17 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
#endif
- ha->request_ring = pci_alloc_consistent(ha->pdev,
+ ha->request_ring = dma_alloc_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
- &ha->request_dma);
+ &ha->request_dma, GFP_KERNEL);
if (!ha->request_ring) {
printk(KERN_INFO "qla1280: Failed to get request memory\n");
goto error_put_host;
}
- ha->response_ring = pci_alloc_consistent(ha->pdev,
+ ha->response_ring = dma_alloc_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
- &ha->response_dma);
+ &ha->response_dma, GFP_KERNEL);
if (!ha->response_ring) {
printk(KERN_INFO "qla1280: Failed to get response memory\n");
goto error_free_request_ring;
@@ -4370,11 +4370,11 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
release_region(host->io_port, 0xff);
#endif
error_free_response_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
ha->response_ring, ha->response_dma);
error_free_request_ring:
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
ha->request_ring, ha->request_dma);
error_put_host:
@@ -4404,10 +4404,10 @@ qla1280_remove_one(struct pci_dev *pdev)
release_region(host->io_port, 0xff);
#endif
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
ha->request_ring, ha->request_dma);
- pci_free_consistent(ha->pdev,
+ dma_free_coherent(&ha->pdev->dev,
((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
ha->response_ring, ha->response_dma);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 4888b999e82f..b28f159fdaee 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -158,9 +158,17 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return 0;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
if (IS_NOCACHE_VPD_TYPE(ha))
ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
ha->nvram_size);
+ mutex_unlock(&ha->optrom_mutex);
+
return memory_read_from_buffer(buf, count, &off, ha->nvram,
ha->nvram_size);
}
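
Editorial note: each sysfs handler touched in this file now follows the same guard: take optrom_mutex, fail fast if the chip is down, and release the mutex on every exit path. The recurring shape, sketched:

	mutex_lock(&ha->optrom_mutex);
	if (qla2x00_chip_is_down(vha)) {
		mutex_unlock(&ha->optrom_mutex);
		return -EAGAIN;	/* or 0, matching the handler's contract */
	}
	/* ... flash/NVRAM access ... */
	mutex_unlock(&ha->optrom_mutex);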
@@ -208,10 +216,17 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
- count);
+ count);
+ mutex_unlock(&ha->optrom_mutex);
ql_dbg(ql_dbg_user, vha, 0x7060,
"Setting ISP_ABORT_NEEDED\n");
@@ -322,6 +337,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
size = ha->optrom_size - start;
mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
switch (val) {
case 0:
if (ha->optrom_state != QLA_SREADING &&
@@ -499,8 +518,14 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
qla27xx_find_valid_image(vha) == QLA27XX_SECONDARY_IMAGE)
faddr = ha->flt_region_vpd_sec << 2;
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
ha->isp_ops->read_optrom(vha, ha->vpd, faddr,
ha->vpd_size);
+ mutex_unlock(&ha->optrom_mutex);
}
return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}
@@ -518,9 +543,6 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
- if (qla2x00_chip_is_down(vha))
- return 0;
-
if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
!ha->isp_ops->write_nvram)
return 0;
@@ -531,16 +553,25 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return -EAGAIN;
}
+ mutex_lock(&ha->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&ha->optrom_mutex);
+ return -EAGAIN;
+ }
+
/* Write NVRAM. */
ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
/* Update flash version information for 4Gb & above. */
- if (!IS_FWI2_CAPABLE(ha))
+ if (!IS_FWI2_CAPABLE(ha)) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
+ }
tmp_data = vmalloc(256);
if (!tmp_data) {
+ mutex_unlock(&ha->optrom_mutex);
ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
return -ENOMEM;
@@ -548,6 +579,8 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
ha->isp_ops->get_flash_version(vha, tmp_data);
vfree(tmp_data);
+ mutex_unlock(&ha->optrom_mutex);
+
return count;
}
@@ -573,10 +606,15 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN) || off != 0 || count < SFP_DEV_SIZE)
return 0;
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
rval = qla2x00_read_sfp_dev(vha, buf, count);
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval)
return -EIO;
@@ -785,9 +823,11 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
if (unlikely(pci_channel_offline(ha->pdev)))
return 0;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
if (ha->xgmac_data)
goto do_read;
@@ -795,6 +835,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
@@ -806,6 +847,8 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
@@ -842,13 +885,16 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
if (ha->dcbx_tlv)
goto do_read;
-
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
return 0;
+ }
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return -ENOMEM;
@@ -859,6 +905,9 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
+
+ mutex_unlock(&vha->hw->optrom_mutex);
+
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7079,
"Unable to read DCBX TLV (%x).\n", rval);
@@ -1159,6 +1208,34 @@ qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
}
static ssize_t
+qla_zio_threshold_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+ return scnprintf(buf, PAGE_SIZE, "%d exchanges\n",
+ vha->hw->last_zio_threshold);
+}
+
+static ssize_t
+qla_zio_threshold_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (vha->hw->zio_mode != QLA_ZIO_MODE_6)
+ return -EINVAL;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+ if (val < 0 || val > 256)
+ return -ERANGE;
+
+ atomic_set(&vha->hw->zio_threshold, val);
+ return strlen(buf);
+}
+
+static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1184,15 +1261,17 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (IS_QLA2100(ha) || IS_QLA2200(ha))
return -EPERM;
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
- if (sscanf(buf, "%d", &val) != 1)
- return -EINVAL;
-
if (val)
rval = ha->isp_ops->beacon_on(vha);
else
@@ -1201,6 +1280,8 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
if (rval != QLA_SUCCESS)
count = 0;
+ mutex_unlock(&vha->hw->optrom_mutex);
+
return count;
}
@@ -1370,18 +1451,24 @@ qla2x00_thermal_temp_show(struct device *dev,
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
+ int rc;
+ mutex_lock(&vha->hw->optrom_mutex);
if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
}
if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
goto done;
}
- if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ rc = qla2x00_get_thermal_temp(vha, &temp);
+ mutex_unlock(&vha->hw->optrom_mutex);
+ if (rc == QLA_SUCCESS)
return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
done:
@@ -1402,13 +1489,24 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
}
- if (qla2x00_chip_is_down(vha))
+ mutex_lock(&vha->hw->optrom_mutex);
+ if (qla2x00_chip_is_down(vha)) {
+ mutex_unlock(&vha->hw->optrom_mutex);
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_firmware_state(vha, state);
- if (rval != QLA_SUCCESS)
+ goto out;
+ } else if (vha->hw->flags.eeh_busy) {
+ mutex_unlock(&vha->hw->optrom_mutex);
+ goto out;
+ }
+
+ rval = qla2x00_get_firmware_state(vha, state);
+ mutex_unlock(&vha->hw->optrom_mutex);
+out:
+ if (rval != QLA_SUCCESS) {
memset(state, -1, sizeof(state));
+ rval = qla2x00_get_firmware_state(vha, state);
+ }
return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
state[0], state[1], state[2], state[3], state[4], state[5]);
@@ -1534,6 +1632,433 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
 ha->max_speed_sup ? "32Gbps" : "16Gbps");
}
+/* ----- */
+
+static ssize_t
+qlini_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Supported options: enabled | disabled | dual | exclusive\n");
+
+ /* --- */
+ len += scnprintf(buf + len, PAGE_SIZE-len, "Current selection: ");
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_EXCLUSIVE);
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DISABLED);
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_ENABLED);
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ QLA2XXX_INI_MODE_STR_DUAL);
+ break;
+ }
+ len += scnprintf(buf + len, PAGE_SIZE-len, "\n");
+
+ return len;
+}
+
+static char *mode_to_str[] = {
+ "exclusive",
+ "disabled",
+ "enabled",
+ "dual",
+};
+
+#define NEED_EXCH_OFFLOAD(_exchg) ((_exchg) > FW_DEF_EXCHANGES_CNT)
+static int qla_set_ini_mode(scsi_qla_host_t *vha, int op)
+{
+ int rc = 0;
+ enum {
+ NO_ACTION,
+ MODE_CHANGE_ACCEPT,
+ MODE_CHANGE_NO_ACTION,
+ TARGET_STILL_ACTIVE,
+ };
+ int action = NO_ACTION;
+ int set_mode = 0;
+ u8 eo_toggle = 0; /* exchange offload flipped */
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle) {
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ action = MODE_CHANGE_ACCEPT;
+ /* active_mode is target only, reset it to dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ switch (op) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+ /*
+ * The number of exchanges to be
+ * offloaded was tweaked, or the
+ * offload option was flipped.
+ */
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else
+ action = NO_ACTION;
+
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xexchoffld !=
+ vha->u_ql2xexchoffld) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = MODE_CHANGE_NO_ACTION;
+ } else
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL: /* exclusive -> dual */
+ if (qla_tgt_mode_enabled(vha)) {
+ action = MODE_CHANGE_ACCEPT;
+ set_mode = 1;
+ } else
+ action = MODE_CHANGE_ACCEPT;
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha))
+ action = TARGET_STILL_ACTIVE;
+ else {
+ if (vha->hw->flags.fw_started)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ switch (op) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+ if (((vha->ql2xiniexchg != vha->u_ql2xiniexchg) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg)) ||
+ eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ case QLA2XXX_INI_MODE_DISABLED:
+ action = MODE_CHANGE_ACCEPT;
+ break;
+ default:
+ action = MODE_CHANGE_NO_ACTION;
+ break;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DUAL:
+ switch (op) {
+ case QLA2XXX_INI_MODE_DUAL:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld +
+ vha->ql2xiniexchg) !=
+ (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_ACCEPT;
+ else
+ action = NO_ACTION;
+ } else {
+ if (NEED_EXCH_OFFLOAD(vha->u_ql2xexchoffld +
+ vha->u_ql2xiniexchg) !=
+ vha->hw->flags.exchoffld_enabled)
+ eo_toggle = 1;
+
+ if ((((vha->ql2xexchoffld + vha->ql2xiniexchg)
+ != (vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) &&
+ NEED_EXCH_OFFLOAD(vha->u_ql2xiniexchg +
+ vha->u_ql2xexchoffld)) || eo_toggle)
+ action = MODE_CHANGE_NO_ACTION;
+ else
+ action = NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ /* turning off initiator mode */
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_NO_ACTION;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ set_mode = 1;
+ action = MODE_CHANGE_ACCEPT;
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ break;
+
+ case QLA2XXX_INI_MODE_ENABLED:
+ if (qla_tgt_mode_enabled(vha) ||
+ qla_dual_mode_enabled(vha)) {
+ action = TARGET_STILL_ACTIVE;
+ } else {
+ action = MODE_CHANGE_ACCEPT;
+ }
+ }
+ break;
+ }
+
+ switch (action) {
+ case MODE_CHANGE_ACCEPT:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode change accepted. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ if (set_mode)
+ qlt_set_mode(vha);
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+
+ case MODE_CHANGE_NO_ACTION:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode is set. No action taken. From %s to %s, Tgt exchg %d|%d. ini exchg %d|%d\n",
+ mode_to_str[vha->qlini_mode], mode_to_str[op],
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld,
+ vha->ql2xiniexchg, vha->u_ql2xiniexchg);
+ vha->qlini_mode = op;
+ vha->ql2xexchoffld = vha->u_ql2xexchoffld;
+ vha->ql2xiniexchg = vha->u_ql2xiniexchg;
+ break;
+
+ case TARGET_STILL_ACTIVE:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Target Mode is active. Unable to change Mode.\n");
+ break;
+
+ case NO_ACTION:
+ default:
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Mode unchange. No action taken. %d|%d pct %d|%d.\n",
+ vha->qlini_mode, op,
+ vha->ql2xexchoffld, vha->u_ql2xexchoffld);
+ break;
+ }
+
+ return rc;
+}
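Most branches of the switch above reduce to one test: a mode re-init is needed if the requested exchange count changed while exchange offload is required, or if the offload requirement itself flipped relative to what the running firmware was started with. A standalone distillation of that test (FW_DEF_EXCHANGES_CNT's value here is an assumption; the real one lives in the driver headers):

#include <stdbool.h>
#include <stdio.h>

#define FW_DEF_EXCHANGES_CNT 2048	/* illustrative value */
#define NEED_EXCH_OFFLOAD(exchg) ((exchg) > FW_DEF_EXCHANGES_CNT)

/* Returns true when the mode change must be accepted: either the
 * requested exchange count changed while offload is required, or the
 * offload option flipped relative to the running firmware. */
static bool exch_offload_changed(unsigned cur, unsigned req, bool offload_on)
{
	bool eo_toggle = NEED_EXCH_OFFLOAD(req) != offload_on;

	return ((cur != req) && NEED_EXCH_OFFLOAD(req)) || eo_toggle;
}

int main(void)
{
	/* same count, offload state matches: no action */
	printf("%d\n", exch_offload_changed(4096, 4096, true));	/* 0 */
	/* count raised past the default while offload required */
	printf("%d\n", exch_offload_changed(2048, 4096, true));	/* 1 */
	/* offload flag flipped even though the count is unchanged */
	printf("%d\n", exch_offload_changed(1024, 1024, true));	/* 1 */
	return 0;
}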
+
+static ssize_t
+qlini_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int ini;
+
+ if (!buf)
+ return -EINVAL;
+
+ if (strncasecmp(QLA2XXX_INI_MODE_STR_EXCLUSIVE, buf,
+ strlen(QLA2XXX_INI_MODE_STR_EXCLUSIVE)) == 0)
+ ini = QLA2XXX_INI_MODE_EXCLUSIVE;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DISABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DISABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_DISABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_ENABLED, buf,
+ strlen(QLA2XXX_INI_MODE_STR_ENABLED)) == 0)
+ ini = QLA2XXX_INI_MODE_ENABLED;
+ else if (strncasecmp(QLA2XXX_INI_MODE_STR_DUAL, buf,
+ strlen(QLA2XXX_INI_MODE_STR_DUAL)) == 0)
+ ini = QLA2XXX_INI_MODE_DUAL;
+ else
+ return -EINVAL;
+
+ qla_set_ini_mode(vha, ini);
+ return strlen(buf);
+}
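Note why the store routine compares only strlen(keyword) bytes with strncasecmp(): echo(1) appends a newline to the buffer, so an exact match would always fail. A standalone illustration of the same parse (the keyword table mirrors mode_to_str above; return values are illustrative indices):

#include <stdio.h>
#include <string.h>
#include <strings.h>

static const char *modes[] = { "exclusive", "disabled", "enabled", "dual" };

static int parse_mode(const char *buf)
{
	unsigned i;

	/* prefix compare tolerates the trailing '\n' from echo(1) */
	for (i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		if (strncasecmp(modes[i], buf, strlen(modes[i])) == 0)
			return (int)i;
	return -1;
}

int main(void)
{
	printf("%d\n", parse_mode("Enabled\n"));	/* 2 */
	printf("%d\n", parse_mode("dual"));		/* 3 */
	printf("%d\n", parse_mode("bogus\n"));		/* -1 */
	return 0;
}

This is safe here because no keyword is a prefix of another; with overlapping keywords the match order would matter.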
+
+static ssize_t
+ql2xexchoffld_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xexchoffld, vha->ql2xexchoffld);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xexchoffld_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xexchoffld = val;
+ return strlen(buf);
+}
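Two different out-of-range policies coexist in these attributes: qla_zio_threshold_store() above rejects bad values with -ERANGE, while this store (and ql2xiniexchg_store below) silently clamps to [0, FW_MAX_EXCHANGES_CNT]. A small sketch of the two policies side by side (the constant's value is illustrative):

#include <errno.h>
#include <stdio.h>

#define FW_MAX_EXCHANGES_CNT 32768	/* illustrative value */

/* reject: the caller sees an error for out-of-range input */
static int store_reject(int val, int *out)
{
	if (val < 0 || val > 256)
		return -ERANGE;
	*out = val;
	return 0;
}

/* clamp: every input is accepted, silently bounded */
static void store_clamp(int val, int *out)
{
	if (val > FW_MAX_EXCHANGES_CNT)
		val = FW_MAX_EXCHANGES_CNT;
	else if (val < 0)
		val = 0;
	*out = val;
}

int main(void)
{
	int v = 0;

	printf("reject(300) -> %d\n", store_reject(300, &v));	/* -34 */
	store_clamp(1 << 20, &v);
	printf("clamp(1<<20) -> %d\n", v);			/* 32768 */
	return 0;
}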
+
+static ssize_t
+ql2xiniexchg_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int len = 0;
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "target exchange: new %d : current: %d\n\n",
+ vha->u_ql2xiniexchg, vha->ql2xiniexchg);
+
+ len += scnprintf(buf + len, PAGE_SIZE-len,
+ "Please (re)set operating mode via \"/sys/class/scsi_host/host%ld/qlini_mode\" to load new setting.\n",
+ vha->host_no);
+
+ return len;
+}
+
+static ssize_t
+ql2xiniexchg_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ if (val > FW_MAX_EXCHANGES_CNT)
+ val = FW_MAX_EXCHANGES_CNT;
+ else if (val < 0)
+ val = 0;
+
+ vha->u_ql2xiniexchg = val;
+ return strlen(buf);
+}
+
static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1581,6 +2106,13 @@ static DEVICE_ATTR(allow_cna_fw_dump, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(pep_version, S_IRUGO, qla2x00_pep_version_show, NULL);
static DEVICE_ATTR(min_link_speed, S_IRUGO, qla2x00_min_link_speed_show, NULL);
static DEVICE_ATTR(max_speed_sup, S_IRUGO, qla2x00_max_speed_sup_show, NULL);
+static DEVICE_ATTR(zio_threshold, 0644,
+ qla_zio_threshold_show,
+ qla_zio_threshold_store);
+static DEVICE_ATTR_RW(qlini_mode);
+static DEVICE_ATTR_RW(ql2xexchoffld);
+static DEVICE_ATTR_RW(ql2xiniexchg);
+
struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_driver_version,
@@ -1617,9 +2149,28 @@ struct device_attribute *qla2x00_host_attrs[] = {
&dev_attr_pep_version,
&dev_attr_min_link_speed,
&dev_attr_max_speed_sup,
+ &dev_attr_zio_threshold,
+ NULL, /* reserve for qlini_mode */
+ NULL, /* reserve for ql2xiniexchg */
+ NULL, /* reserve for ql2xexchoffld */
NULL,
};
+void qla_insert_tgt_attrs(void)
+{
+ struct device_attribute **attr;
+
+ /* advance to empty slot */
+ for (attr = &qla2x00_host_attrs[0]; *attr; ++attr)
+ continue;
+
+ *attr = &dev_attr_qlini_mode;
+ attr++;
+ *attr = &dev_attr_ql2xiniexchg;
+ attr++;
+ *attr = &dev_attr_ql2xexchoffld;
+}
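The NULL slots reserved in qla2x00_host_attrs[] above are what makes this work: the array keeps its NULL terminator, and qla_insert_tgt_attrs() walks to the first reserved slot and appends the three target-mode attributes at runtime. Reduced to strings, the pattern looks like this (a sketch; the kernel type is struct device_attribute *):

#include <stdio.h>

static const char *host_attrs[] = {
	"driver_version",
	"fw_version",
	NULL, NULL, NULL,	/* reserved for late-registered attrs */
	NULL,			/* list terminator */
};

static void insert_tgt_attrs(void)
{
	const char **attr;

	/* advance to the first empty (reserved) slot */
	for (attr = host_attrs; *attr; ++attr)
		continue;

	*attr++ = "qlini_mode";
	*attr++ = "ql2xiniexchg";
	*attr = "ql2xexchoffld";
}

int main(void)
{
	const char **attr;

	insert_tgt_attrs();
	for (attr = host_attrs; *attr; ++attr)
		printf("%s\n", *attr);
	return 0;
}

The reserved count must match what gets appended, or the final terminator would be overwritten; the three NULLs above line up with the three attributes filled in.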
+
/* Host attributes. */
static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index c11a89be292c..4a9fd8d944d6 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -2487,7 +2487,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
vha = shost_priv(host);
}
- if (qla2x00_reset_active(vha)) {
+ if (qla2x00_chip_is_down(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_request->msgcode);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9dc9c4a6382..26b93c563f92 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -262,8 +262,8 @@ struct name_list_extended {
struct get_name_list_extended *l;
dma_addr_t ldma;
struct list_head fcports;
- spinlock_t fcports_lock;
u32 size;
+ u8 sent;
};
/*
* Timeout timer counts in seconds
@@ -519,6 +519,7 @@ struct srb_iocb {
enum {
TYPE_SRB,
TYPE_TGT_CMD,
+ TYPE_TGT_TMCMD, /* task management */
};
typedef struct srb {
@@ -2280,7 +2281,6 @@ struct ct_sns_desc {
enum discovery_state {
DSC_DELETED,
DSC_GNN_ID,
- DSC_GID_PN,
DSC_GNL,
DSC_LOGIN_PEND,
DSC_LOGIN_FAILED,
@@ -2305,7 +2305,6 @@ enum login_state { /* FW control Target side */
enum fcport_mgt_event {
FCME_RELOGIN = 1,
FCME_RSCN,
- FCME_GIDPN_DONE,
FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */
FCME_PRLI_DONE,
FCME_GNL_DONE,
@@ -2351,7 +2350,7 @@ typedef struct fc_port {
unsigned int login_succ:1;
unsigned int query:1;
unsigned int id_changed:1;
- unsigned int rscn_rcvd:1;
+ unsigned int scan_needed:1;
struct work_struct nvme_del_work;
struct completion nvme_del_done;
@@ -2375,11 +2374,13 @@ typedef struct fc_port {
unsigned long expires;
struct list_head del_list_entry;
struct work_struct free_work;
-
+ struct work_struct reg_work;
+ uint64_t jiffies_at_registration;
struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
uint16_t tgt_id;
uint16_t old_tgt_id;
+ uint16_t sec_since_registration;
uint8_t fcp_prio;
@@ -2412,6 +2413,7 @@ typedef struct fc_port {
struct qla_tgt_sess *tgt_session;
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
+ enum discovery_state next_disc_state;
enum login_state fw_login_state;
unsigned long dm_login_expire;
unsigned long plogi_nack_done_deadline;
@@ -3212,17 +3214,14 @@ enum qla_work_type {
QLA_EVT_ASYNC_LOGOUT,
QLA_EVT_ASYNC_LOGOUT_DONE,
QLA_EVT_ASYNC_ADISC,
- QLA_EVT_ASYNC_ADISC_DONE,
QLA_EVT_UEVENT,
QLA_EVT_AENFX,
- QLA_EVT_GIDPN,
QLA_EVT_GPNID,
QLA_EVT_UNMAP,
QLA_EVT_NEW_SESS,
QLA_EVT_GPDB,
QLA_EVT_PRLI,
QLA_EVT_GPSC,
- QLA_EVT_UPD_FCPORT,
QLA_EVT_GNL,
QLA_EVT_NACK,
QLA_EVT_RELOGIN,
@@ -3483,6 +3482,9 @@ struct qla_qpair {
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
uint16_t cpuid;
+ uint16_t retry_term_cnt;
+ uint32_t retry_term_exchg_addr;
+ uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
};
@@ -4184,6 +4186,10 @@ struct qla_hw_data {
atomic_t nvme_active_aen_cnt;
uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
+
+ atomic_t zio_threshold;
+ uint16_t last_zio_threshold;
+#define DEFAULT_ZIO_THRESHOLD 64
};
#define FW_ABILITY_MAX_SPEED_MASK 0xFUL
@@ -4263,10 +4269,11 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
-#define SET_ZIO_THRESHOLD_NEEDED 28
+#define SET_NVME_ZIO_THRESHOLD_NEEDED 28
#define DETECT_SFP_CHANGE 29
#define N2N_LOGIN_NEEDED 30
#define IOCB_WORK_ACTIVE 31
+#define SET_ZIO_THRESHOLD_NEEDED 32
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */
@@ -4369,6 +4376,13 @@ typedef struct scsi_qla_host {
atomic_t vref_count;
struct qla8044_reset_template reset_tmplt;
uint16_t bbcr;
+
+ uint16_t u_ql2xexchoffld;
+ uint16_t u_ql2xiniexchg;
+ uint16_t qlini_mode;
+ uint16_t ql2xexchoffld;
+ uint16_t ql2xiniexchg;
+
struct name_list_extended gnl;
/* Count of active session/fcport */
int fcport_count;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 178974896b5c..3673fcdb033a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -54,7 +54,7 @@ extern void qla2x00_abort_isp_cleanup(scsi_qla_host_t *);
extern void qla2x00_quiesce_io(scsi_qla_host_t *);
extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
-
+void qla_register_fcport_fn(struct work_struct *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
@@ -73,8 +73,6 @@ extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
-extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
- uint16_t *);
struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *,
enum qla_work_type);
extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *);
@@ -109,6 +107,7 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *, port_id_t *, u8 *, u8*,
int qla24xx_fcport_handle_login(struct scsi_qla_host *, fc_port_t *);
int qla24xx_detect_sfp(scsi_qla_host_t *vha);
int qla24xx_post_gpdb_work(struct scsi_qla_host *, fc_port_t *, u8);
+
void qla2x00_async_prlo_done(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
extern int qla2x00_post_async_prlo_work(struct scsi_qla_host *, fc_port_t *,
@@ -118,6 +117,8 @@ extern int qla2x00_post_async_prlo_done_work(struct scsi_qla_host *,
int qla_post_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
void qla_do_iidma_work(struct scsi_qla_host *vha, fc_port_t *fcport);
int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
+void qla_rscn_replay(fc_port_t *fcport);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -158,6 +159,7 @@ extern int ql2xnvmeenable;
extern int ql2xautodetectsfp;
extern int ql2xenablemsix;
extern int qla2xuseresexchforels;
+extern int ql2xexlogins;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -208,7 +210,7 @@ extern void qla2x00_disable_board_on_pci_error(struct work_struct *);
extern void qla2x00_sp_compl(void *, int);
extern void qla2xxx_qpair_sp_free_dma(void *);
extern void qla2xxx_qpair_sp_compl(void *, int);
-extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
+extern void qla24xx_sched_upd_fcport(fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
@@ -644,9 +646,6 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
extern int qla2x00_chk_ms_status(scsi_qla_host_t *, ms_iocb_entry_t *,
struct ct_sns_rsp *, const char *);
extern void qla2x00_async_iocb_timeout(void *data);
-extern int qla24xx_async_gidpn(scsi_qla_host_t *, fc_port_t *);
-int qla24xx_post_gidpn_work(struct scsi_qla_host *, fc_port_t *);
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *, struct event_arg *);
extern void qla2x00_free_fcport(fc_port_t *);
@@ -677,6 +676,7 @@ void qla_scan_work_fn(struct work_struct *);
*/
struct device_attribute;
extern struct device_attribute *qla2x00_host_attrs[];
+extern struct device_attribute *qla2x00_host_attrs_dm[];
struct fc_function_template;
extern struct fc_function_template qla2xxx_transport_functions;
extern struct fc_function_template qla2xxx_transport_vport_functions;
@@ -690,7 +690,7 @@ extern int qla2x00_echo_test(scsi_qla_host_t *,
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
-
+void qla_insert_tgt_attrs(void);
/*
* Global Function Prototypes in qla_dfs.c source file.
*/
@@ -897,5 +897,6 @@ void qlt_unknown_atio_work_fn(struct work_struct *);
void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
void qlt_remove_target_resources(struct qla_hw_data *);
void qlt_clr_qp_table(struct scsi_qla_host *vha);
+void qlt_set_mode(struct scsi_qla_host *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0038d879b9d..90cfa394f942 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -2973,237 +2973,6 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
}
}
-/* GID_PN completion processing. */
-void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
-{
- fc_port_t *fcport = ea->fcport;
-
- ql_dbg(ql_dbg_disc, vha, 0x201d,
- "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
- __func__, fcport->port_name, fcport->disc_state,
- fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
- fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
-
- if (fcport->disc_state == DSC_DELETE_PEND)
- return;
-
- if (ea->sp->gen2 != fcport->login_gen) {
- /* PLOGI/PRLI/LOGO came in while cmd was out.*/
- ql_dbg(ql_dbg_disc, vha, 0x201e,
- "%s %8phC generation changed rscn %d|%d n",
- __func__, fcport->port_name, fcport->last_rscn_gen,
- fcport->rscn_gen);
- return;
- }
-
- if (!ea->rc) {
- if (ea->sp->gen1 == fcport->rscn_gen) {
- fcport->scan_state = QLA_FCPORT_FOUND;
- fcport->flags |= FCF_FABRIC_DEVICE;
-
- if (fcport->d_id.b24 == ea->id.b24) {
- /* cable plugged into the same place */
- switch (vha->host->active_mode) {
- case MODE_TARGET:
- if (fcport->fw_login_state ==
- DSC_LS_PRLI_COMP) {
- u16 data[2];
- /*
- * Late RSCN was delivered.
- * Remote port already login'ed.
- */
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__,
- fcport->port_name);
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- }
- break;
- case MODE_INITIATOR:
- case MODE_DUAL:
- default:
- ql_dbg(ql_dbg_disc, vha, 0x201f,
- "%s %d %8phC post %s\n", __func__,
- __LINE__, fcport->port_name,
- (atomic_read(&fcport->state) ==
- FCS_ONLINE) ? "adisc" : "gnl");
-
- if (atomic_read(&fcport->state) ==
- FCS_ONLINE) {
- u16 data[2];
-
- data[0] = data[1] = 0;
- qla2x00_post_async_adisc_work(
- vha, fcport, data);
- } else {
- qla24xx_post_gnl_work(vha,
- fcport);
- }
- break;
- }
- } else { /* fcport->d_id.b24 != ea->id.b24 */
- fcport->d_id.b24 = ea->id.b24;
- fcport->id_changed = 1;
- if (fcport->deleted != QLA_SESS_DELETED) {
- ql_dbg(ql_dbg_disc, vha, 0x2021,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- }
- }
- } else { /* ea->sp->gen1 != fcport->rscn_gen */
- ql_dbg(ql_dbg_disc, vha, 0x2022,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- /* rscn came in while cmd was out */
- qla24xx_post_gidpn_work(vha, fcport);
- }
- } else { /* ea->rc */
- /* cable pulled */
- if (ea->sp->gen1 == fcport->rscn_gen) {
- if (ea->sp->gen2 == fcport->login_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x2042,
- "%s %d %8phC post del sess\n", __func__,
- __LINE__, fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2045,
- "%s %d %8phC login\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_fcport_handle_login(vha, fcport);
- }
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x2049,
- "%s %d %8phC post gidpn\n", __func__, __LINE__,
- fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
- }
- }
-} /* gidpn_event */
-
-static void qla2x00_async_gidpn_sp_done(void *s, int res)
-{
- struct srb *sp = s;
- struct scsi_qla_host *vha = sp->vha;
- fc_port_t *fcport = sp->fcport;
- u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
- struct event_arg ea;
-
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
-
- memset(&ea, 0, sizeof(ea));
- ea.fcport = fcport;
- ea.id.b.domain = id[0];
- ea.id.b.area = id[1];
- ea.id.b.al_pa = id[2];
- ea.sp = sp;
- ea.rc = res;
- ea.event = FCME_GIDPN_DONE;
-
- if (res == QLA_FUNCTION_TIMEOUT) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s WWPN %8phC timed out.\n",
- sp->name, fcport->port_name);
- qla24xx_post_gidpn_work(sp->vha, fcport);
- sp->free(sp);
- return;
- } else if (res) {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s fail res %x, WWPN %8phC\n",
- sp->name, res, fcport->port_name);
- } else {
- ql_dbg(ql_dbg_disc, vha, 0x204f,
- "Async done-%s good WWPN %8phC ID %3phC\n",
- sp->name, fcport->port_name, id);
- }
-
- qla2x00_fcport_event_handler(vha, &ea);
-
- sp->free(sp);
-}
-
-int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
-{
- int rval = QLA_FUNCTION_FAILED;
- struct ct_sns_req *ct_req;
- srb_t *sp;
-
- if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
- return rval;
-
- fcport->disc_state = DSC_GID_PN;
- fcport->scan_state = QLA_FCPORT_SCAN;
- sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
- if (!sp)
- goto done;
-
- fcport->flags |= FCF_ASYNC_SENT;
- sp->type = SRB_CT_PTHRU_CMD;
- sp->name = "gidpn";
- sp->gen1 = fcport->rscn_gen;
- sp->gen2 = fcport->login_gen;
-
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
-
- /* CT_IU preamble */
- ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
- GID_PN_RSP_SIZE);
-
- /* GIDPN req */
- memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
- WWN_SIZE);
-
- /* req & rsp use the same buffer */
- sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
- sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
- sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
- sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
- sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
-
- sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
- sp->done = qla2x00_async_gidpn_sp_done;
-
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
- ql_dbg(ql_dbg_disc, vha, 0x20a4,
- "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
- sp->name, fcport->port_name,
- sp->handle, fcport->loop_id, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa);
- return rval;
-
-done_free_sp:
- sp->free(sp);
-done:
- fcport->flags &= ~FCF_ASYNC_ACTIVE;
- return rval;
-}
-
-int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
-{
- struct qla_work_evt *e;
- int ls;
-
- ls = atomic_read(&vha->loop_state);
- if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
- test_bit(UNLOADING, &vha->dpc_flags))
- return 0;
-
- e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
- if (!e)
- return QLA_FUNCTION_FAILED;
-
- e->u.fcport.fcport = fcport;
- fcport->flags |= FCF_ASYNC_ACTIVE;
- return qla2x00_post_work(vha, e);
-}
-
int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
{
struct qla_work_evt *e;
@@ -3237,9 +3006,6 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -3261,6 +3027,9 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC \n",
sp->name, res, fcport->port_name);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
if (res == (DID_ERROR << 16)) {
/* entry status error */
goto done;
@@ -3272,7 +3041,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
ql_dbg(ql_dbg_disc, vha, 0x2019,
"GPSC command unsupported, disabling query.\n");
ha->flags.gpsc_supported = 0;
- res = QLA_SUCCESS;
+ goto done;
}
} else {
switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
@@ -3305,7 +3074,6 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res)
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
-done:
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPSC_DONE;
ea.rc = res;
@@ -3313,6 +3081,7 @@ done:
ea.sp = sp;
qla2x00_fcport_event_handler(vha, &ea);
+done:
sp->free(sp);
}
@@ -3355,15 +3124,15 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla24xx_async_gpsc_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x205e,
"Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
sp->name, fcport->port_name, sp->handle,
fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -3442,26 +3211,10 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
if (ea->rc) {
/* cable is disconnected */
list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
- if (fcport->d_id.b24 == ea->id.b24) {
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- fcport->port_name,
- fcport->disc_state);
+ if (fcport->d_id.b24 == ea->id.b24)
fcport->scan_state = QLA_FCPORT_SCAN;
- switch (fcport->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- fcport->port_name);
- qlt_schedule_sess_for_deletion(fcport);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(fcport);
}
} else {
/* cable is connected */
@@ -3470,34 +3223,19 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
list) {
if ((conflict->d_id.b24 == ea->id.b24) &&
- (fcport != conflict)) {
- /* 2 fcports with conflict Nport ID or
+ (fcport != conflict))
+ /*
+ * Two fcports have conflicting NPort IDs, or
 * an existing fcport has an NPort ID
 * conflict with a new fcport.
*/
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC DS %d\n",
- __func__, __LINE__,
- conflict->port_name,
- conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
- }
+
+ qlt_schedule_sess_for_deletion(conflict);
}
+ fcport->scan_needed = 0;
fcport->rscn_gen++;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->flags |= FCF_FABRIC_DEVICE;
@@ -3548,19 +3286,7 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
conflict->disc_state);
conflict->scan_state = QLA_FCPORT_SCAN;
- switch (conflict->disc_state) {
- case DSC_DELETED:
- case DSC_DELETE_PEND:
- break;
- default:
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict->port_name);
- qlt_schedule_sess_for_deletion
- (conflict);
- break;
- }
+ qlt_schedule_sess_for_deletion(conflict);
}
}
@@ -3724,13 +3450,14 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
sp->done = qla2x00_async_gpnid_sp_done;
+ ql_dbg(ql_dbg_disc, vha, 0x2067,
+ "Async-%s hdl=%x ID %3phC.\n", sp->name,
+ sp->handle, ct_req->req.port_id.port_id);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Async-%s hdl=%x ID %3phC.\n", sp->name,
- sp->handle, ct_req->req.port_id.port_id);
return rval;
done_free_sp:
@@ -3896,9 +3623,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fc_port_t *fcport;
u32 i, rc;
bool found;
- struct fab_scan_rp *rp;
+ struct fab_scan_rp *rp, *trp;
unsigned long flags;
u8 recheck = 0;
+ u16 dup = 0, dup_cnt = 0;
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
@@ -3929,6 +3657,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
for (i = 0; i < vha->hw->max_fibre_devices; i++) {
u64 wwn;
+ int k;
rp = &vha->scan.l[i];
found = false;
@@ -3937,6 +3666,20 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
if (wwn == 0)
continue;
+ /* Remove duplicate NPORT ID entries from the switch database */
+ for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
+ trp = &vha->scan.l[k];
+ if (rp->id.b24 == trp->id.b24) {
+ dup = 1;
+ dup_cnt++;
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose,
+ vha, 0xffff,
+ "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
+ rp->id.b24, rp->port_name, trp->port_name);
+ memset(trp, 0, sizeof(*trp));
+ }
+ }
+
if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
continue;
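The inner loop added above is a quadratic in-place dedup: later entries carrying the same 24-bit NPort ID are zeroed so the outer scan, which skips wwn == 0 slots, never sees them again. The same shape as a runnable standalone sketch (struct scan_rp is a stand-in for the driver's fab_scan_rp):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct scan_rp {
	uint32_t id;	/* 24-bit NPort ID */
	uint64_t wwn;	/* 0 means "empty slot", skipped by the scan */
};

static unsigned dedup_by_id(struct scan_rp *l, unsigned n)
{
	unsigned i, k, dup_cnt = 0;

	for (i = 0; i < n; i++) {
		if (l[i].wwn == 0)
			continue;
		for (k = i + 1; k < n; k++) {
			if (l[i].id == l[k].id) {
				dup_cnt++;
				memset(&l[k], 0, sizeof(l[k]));
			}
		}
	}
	return dup_cnt;
}

int main(void)
{
	struct scan_rp l[] = {
		{ 0x010203, 0x1111 },
		{ 0x010203, 0x2222 },	/* same ID, different WWN */
		{ 0x0a0b0c, 0x3333 },
	};

	printf("%u duplicate(s) cleared\n", dedup_by_id(l, 3));	/* 1 */
	return 0;
}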
@@ -3951,7 +3694,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
continue;
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
fcport->scan_state = QLA_FCPORT_FOUND;
found = true;
/*
@@ -3976,25 +3719,30 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
+ if (dup) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Detected %d duplicate NPORT ID(s) from switch data base\n",
+ dup_cnt);
+ }
+
/*
* Logout all previous fabric dev marked lost, except FCP2 devices.
*/
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
continue;
}
if (fcport->scan_state != QLA_FCPORT_FOUND) {
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
if ((qla_dual_mode_enabled(vha) ||
qla_ini_mode_enabled(vha)) &&
atomic_read(&fcport->state) == FCS_ONLINE) {
- qla2x00_mark_device_lost(vha, fcport,
- ql2xplogiabsentdevice, 0);
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- (fcport->flags & FCF_FCP2_DEVICE) == 0) {
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
+ if (fcport->flags & FCF_FCP2_DEVICE)
+ fcport->logout_on_delete = 0;
ql_dbg(ql_dbg_disc, vha, 0x20f0,
"%s %d %8phC post del sess\n",
__func__, __LINE__,
@@ -4005,7 +3753,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
}
}
} else {
- if (fcport->rscn_rcvd ||
+ if (fcport->scan_needed ||
fcport->disc_state != DSC_LOGIN_COMPLETE) {
if (fcport->login_retry == 0) {
fcport->login_retry =
@@ -4015,7 +3763,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
fcport->port_name, fcport->loop_id,
fcport->login_retry);
}
- fcport->rscn_rcvd = 0;
+ fcport->scan_needed = 0;
qla24xx_fcport_handle_login(vha, fcport);
}
}
@@ -4030,7 +3778,7 @@ out:
if (recheck) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
- if (fcport->rscn_rcvd) {
+ if (fcport->scan_needed) {
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
break;
@@ -4039,6 +3787,41 @@ out:
}
}
+static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.iosb.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
+
+static int qla2x00_post_nvme_gpnft_work(struct scsi_qla_host *vha,
+ srb_t *sp, int cmd)
+{
+ struct qla_work_evt *e;
+
+ if (cmd != QLA_EVT_GPNFT)
+ return QLA_PARAMETER_ERROR;
+
+ e = qla2x00_alloc_work(vha, cmd);
+ if (!e)
+ return QLA_FUNCTION_FAILED;
+
+ e->u.gpnft.fc4_type = FC4_TYPE_NVME;
+ e->u.gpnft.sp = sp;
+
+ return qla2x00_post_work(vha, e);
+}
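These two helpers exist because the sp_done callback below can fire in interrupt context, where the sp's DMA buffers must not be freed; the completion instead posts the sp to a work element and only falls back to immediate cleanup when queueing fails. A userspace model of that contract (struct req and the fixed-size queue are illustrative, not driver structures):

#include <stdio.h>
#include <stdlib.h>

struct req { void (*fn)(struct req *); };

#define QDEPTH 8
static struct req *queue[QDEPTH];
static unsigned qlen;

static int post_work(struct req *r)	/* callable from "irq" context */
{
	if (qlen == QDEPTH)
		return -1;		/* queueing failed */
	queue[qlen++] = r;
	return 0;
}

static void free_req(struct req *r) { free(r); }

static void done_handler(struct req *r)	/* the completion path */
{
	if (post_work(r)) {
		free_req(r);	/* cleanup here to prevent a leak */
		fprintf(stderr, "queue full, request dropped\n");
	}
}

static void worker_run(void)		/* "process context" */
{
	while (qlen) {
		struct req *r = queue[--qlen];
		r->fn(r);
	}
}

static void finish(struct req *r)
{
	puts("finished in process context");
	free_req(r);
}

int main(void)
{
	struct req *r = malloc(sizeof(*r));

	if (!r)
		return 1;
	r->fn = finish;
	done_handler(r);
	worker_run();
	return 0;
}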
+
static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
struct srb *sp)
{
@@ -4139,120 +3922,85 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
{
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
- struct qla_work_evt *e;
struct ct_sns_req *ct_req =
(struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
u16 cmd = be16_to_cpu(ct_req->command);
u8 fc4_type = sp->gen2;
unsigned long flags;
+ int rc;
/* gen2 field is holding the fc4type */
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async done-%s res %x FC4Type %x\n",
sp->name, res, sp->gen2);
+ del_timer(&sp->u.iocb_cmd.timer);
+ sp->rc = res;
if (res) {
unsigned long flags;
- sp->free(sp);
- spin_lock_irqsave(&vha->work_lock, flags);
- vha->scan.scan_flags &= ~SF_SCANNING;
- vha->scan.scan_retry++;
- spin_unlock_irqrestore(&vha->work_lock, flags);
- if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
- } else {
- ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
- "Async done-%s rescan failed on all retries\n",
- sp->name);
+ const char *name = sp->name;
+ /*
+ * We are in interrupt context; queue this sp
+ * up for GNNFT_DONE work so that its resources
+ * can be freed from process context.
+ */
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ if (rc) {
+ /* Cleanup here to prevent memory leak */
+ qla24xx_sp_unmap(vha, sp);
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "Async done-%s rescan failed on all retries.\n",
+ name);
+ }
}
return;
}
- if (!res)
- qla2x00_find_free_fcp_nvme_slot(vha, sp);
+ qla2x00_find_free_fcp_nvme_slot(vha, sp);
if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
cmd == GNN_FT_CMD) {
- del_timer(&sp->u.iocb_cmd.timer);
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT);
- if (!e) {
- /*
- * please ignore kernel warning. Otherwise,
- * we have mem leak.
- */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
-
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ sp->rc = res;
+ rc = qla2x00_post_nvme_gpnft_work(vha, sp, QLA_EVT_GPNFT);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- return;
}
- e->u.gpnft.fc4_type = FC4_TYPE_NVME;
- sp->rc = res;
- e->u.gpnft.sp = sp;
-
- qla2x00_post_work(vha, e);
return;
}
- if (cmd == GPN_FT_CMD)
- e = qla2x00_alloc_work(vha, QLA_EVT_GPNFT_DONE);
- else
- e = qla2x00_alloc_work(vha, QLA_EVT_GNNFT_DONE);
- if (!e) {
- /* please ignore kernel warning. Otherwise, we have mem leak. */
- if (sp->u.iocb_cmd.u.ctarg.req) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.req_allocated_size,
- sp->u.iocb_cmd.u.ctarg.req,
- sp->u.iocb_cmd.u.ctarg.req_dma);
- sp->u.iocb_cmd.u.ctarg.req = NULL;
- }
- if (sp->u.iocb_cmd.u.ctarg.rsp) {
- dma_free_coherent(&vha->hw->pdev->dev,
- sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
- sp->u.iocb_cmd.u.ctarg.rsp,
- sp->u.iocb_cmd.u.ctarg.rsp_dma);
- sp->u.iocb_cmd.u.ctarg.rsp = NULL;
- }
+ if (cmd == GPN_FT_CMD) {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GPNFT_DONE);
+ } else {
+ rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
+ QLA_EVT_GNNFT_DONE);
+ }
- ql_dbg(ql_dbg_disc, vha, 0xffff,
- "Async done-%s unable to alloc work element\n",
- sp->name);
- sp->free(sp);
+ if (rc) {
+ qla24xx_sp_unmap(vha, sp);
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
return;
}
-
- sp->rc = res;
- e->u.iosb.sp = sp;
-
- qla2x00_post_work(vha, e);
}
/*
@@ -4285,11 +4033,13 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
vha->scan.scan_flags &= ~SF_SCANNING;
spin_unlock_irqrestore(&vha->work_lock, flags);
WARN_ON(1);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
goto done_free_sp;
}
ql_dbg(ql_dbg_disc, vha, 0xfffff,
- "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
+ "%s: FC4Type %x, CT-PASSTHRU %s command ctarg rsp size %d, ctarg req size %d\n",
__func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
sp->u.iocb_cmd.u.ctarg.req_size);
@@ -4318,8 +4068,12 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
sp->done = qla2x00_async_gpnft_gnnft_sp_done;
rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
+ if (rval != QLA_SUCCESS) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_flags &= ~SF_SCANNING;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
goto done_free_sp;
+ }
ql_dbg(ql_dbg_disc, vha, 0xffff,
"Async-%s hdl=%x FC4Type %x.\n", sp->name,
@@ -4351,7 +4105,6 @@ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
{
ql_dbg(ql_dbg_disc, vha, 0xffff,
"%s enter\n", __func__);
- del_timer(&sp->u.iocb_cmd.timer);
qla24xx_async_gnnft(vha, sp, sp->gen2);
}
@@ -4444,9 +4197,9 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
- rspsz = sizeof(struct ct_sns_gpnft_rsp) +
- ((vha->hw->max_fibre_devices - 1) *
- sizeof(struct ct_sns_gpn_ft_data));
+ rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
+ memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
+ memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
/* CT_IU preamble */
@@ -4644,9 +4397,6 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, fcport->port_name);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b934977c5c26..c72d8012fe2a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -52,12 +52,14 @@ qla2x00_sp_timeout(struct timer_list *t)
struct srb_iocb *iocb;
struct req_que *req;
unsigned long flags;
+ struct qla_hw_data *ha = sp->vha->hw;
- spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ WARN_ON_ONCE(irqs_disabled());
+ spin_lock_irqsave(&ha->hardware_lock, flags);
req = sp->qpair->req;
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
- spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
iocb->timeout(sp);
}
@@ -245,6 +247,12 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
}
+ ql_dbg(ql_dbg_disc, vha, 0x2072,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
fcport->flags |= FCF_LOGIN_NEEDED;
@@ -252,11 +260,6 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
}
- ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
return rval;
done_free_sp:
@@ -301,15 +304,16 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
sp->done = qla2x00_async_logout_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
"Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -396,6 +400,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
ql_dbg(ql_dbg_disc, vha, 0x2066,
"%s %8phC: adisc fail: post delete\n",
__func__, ea->fcport->port_name);
+ /* deleted = 0 and logout_on_delete = 1: force firmware cleanup */
+ fcport->deleted = 0;
+ fcport->logout_on_delete = 1;
qlt_schedule_sess_for_deletion(ea->fcport);
return;
}
@@ -410,9 +417,8 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea)
__func__, ea->fcport->port_name);
return;
} else if (ea->sp->gen1 != ea->fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name);
- qla24xx_post_gidpn_work(vha, ea->fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -487,13 +493,15 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
sp->done = qla2x00_async_adisc_sp_done;
if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
lio->u.logio.flags |= SRB_LOGIN_RETRIED;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
"Async-adisc - hdl=%x loopid=%x portid=%06x %8phC.\n",
sp->handle, fcport->loop_id, fcport->d_id.b24, fcport->port_name);
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
+
return rval;
done_free_sp:
@@ -536,11 +544,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
}
if (fcport->last_rscn_gen != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20df,
- "%s %8phC rscn gen changed rscn %d|%d \n",
- __func__, fcport->port_name,
- fcport->last_rscn_gen, fcport->rscn_gen);
- qla24xx_post_gidpn_work(vha, fcport);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
} else if (fcport->last_login_gen != fcport->login_gen) {
ql_dbg(ql_dbg_disc, vha, 0x20e0,
@@ -787,6 +792,10 @@ qla24xx_async_gnl_sp_done(void *s, int res)
sp->name, res, sp->u.iocb_cmd.u.mbx.in_mb[1],
sp->u.iocb_cmd.u.mbx.in_mb[2]);
+ if (res == QLA_FUNCTION_TIMEOUT)
+ return;
+
+ sp->fcport->flags &= ~(FCF_ASYNC_SENT|FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.sp = sp;
ea.rc = res;
@@ -814,25 +823,24 @@ qla24xx_async_gnl_sp_done(void *s, int res)
(loop_id & 0x7fff));
}
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
INIT_LIST_HEAD(&h);
fcport = tf = NULL;
if (!list_empty(&vha->gnl.fcports))
list_splice_init(&vha->gnl.fcports, &h);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
list_for_each_entry_safe(fcport, tf, &h, gnl_entry) {
list_del_init(&fcport->gnl_entry);
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- spin_unlock(&vha->hw->tgt.sess_lock);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ea.fcport = fcport;
qla2x00_fcport_event_handler(vha, &ea);
}
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
/* create new fcport if fw has knowledge of new sessions */
for (i = 0; i < n; i++) {
port_id_t id;
@@ -865,6 +873,8 @@ qla24xx_async_gnl_sp_done(void *s, int res)
}
}
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ vha->gnl.sent = 0;
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp->free(sp);
@@ -884,27 +894,24 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport)
ql_dbg(ql_dbg_disc, vha, 0x20d9,
"Async-gnlist WWPN %8phC \n", fcport->port_name);
- spin_lock_irqsave(&vha->gnl.fcports_lock, flags);
- if (!list_empty(&fcport->gnl_entry)) {
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
- rval = QLA_SUCCESS;
- goto done;
- }
-
- spin_lock(&vha->hw->tgt.sess_lock);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ fcport->flags |= FCF_ASYNC_SENT;
fcport->disc_state = DSC_GNL;
fcport->last_rscn_gen = fcport->rscn_gen;
fcport->last_login_gen = fcport->login_gen;
- spin_unlock(&vha->hw->tgt.sess_lock);
list_add_tail(&fcport->gnl_entry, &vha->gnl.fcports);
- spin_unlock_irqrestore(&vha->gnl.fcports_lock, flags);
+ if (vha->gnl.sent) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ return QLA_SUCCESS;
+ }
+ vha->gnl.sent = 1;
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
if (!sp)
goto done;
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_MB_IOCB;
sp->name = "gnlist";
sp->gen1 = fcport->rscn_gen;
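The gnl.sent flag added here coalesces concurrent GNL requests: every caller enqueues its fcport under tgt.sess_lock, but only the transition of sent from 0 to 1 actually issues the mailbox command, and the completion drains the whole list before clearing sent. Modeled standalone (ints stand in for fcports; the fixed array is a toy with no overflow handling):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sess_lock = PTHREAD_MUTEX_INITIALIZER;
static int waiters[16];
static unsigned nwait;
static int sent;

static int request_gnl(int id)	/* returns 1 if this caller issues the cmd */
{
	int issue;

	pthread_mutex_lock(&sess_lock);
	waiters[nwait++] = id;
	issue = !sent;		/* only the 0 -> 1 transition issues */
	sent = 1;
	pthread_mutex_unlock(&sess_lock);
	return issue;
}

static void gnl_completion(void)
{
	pthread_mutex_lock(&sess_lock);
	while (nwait)		/* drain everyone who piggybacked */
		printf("notify waiter %d\n", waiters[--nwait]);
	sent = 0;		/* the next request issues a fresh cmd */
	pthread_mutex_unlock(&sess_lock);
}

int main(void)
{
	printf("issued=%d\n", request_gnl(1));	/* issued=1 */
	printf("issued=%d\n", request_gnl(2));	/* issued=0, piggybacks */
	gnl_completion();
	return 0;
}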
@@ -970,8 +977,13 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
"Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n",
sp->name, res, fcport->port_name, mb[1], mb[2]);
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ if (res == QLA_FUNCTION_TIMEOUT) {
+ dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
+ sp->u.iocb_cmd.u.mbx.in_dma);
+ return;
+ }
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
memset(&ea, 0, sizeof(ea));
ea.event = FCME_GPDB_DONE;
ea.fcport = fcport;
@@ -1147,14 +1159,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
sp->done = qla24xx_async_gpdb_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_disc, vha, 0x20dc,
"Async-%s %8phC hndl %x opt %x\n",
sp->name, fcport->port_name, sp->handle, opt);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
return rval;
done_free_sp:
@@ -1182,11 +1193,9 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
vha->fcport_count++;
ea->fcport->login_succ = 1;
- ql_dbg(ql_dbg_disc, vha, 0x20d6,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, ea->fcport->port_name,
- vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, ea->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(ea->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else if (ea->fcport->login_succ) {
/*
* We have an existing session. A late RSCN delivery
@@ -1226,6 +1235,19 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea)
else
ls = pd->current_login_state & 0xf;
+ if (ea->sp->gen2 != fcport->login_gen) {
+ /* target side must have changed it. */
+
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC generation changed\n",
+ __func__, fcport->port_name);
+ return;
+ } else if (ea->sp->gen1 != fcport->rscn_gen) {
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
+ return;
+ }
+
switch (ls) {
case PDS_PRLI_COMPLETE:
__qla24xx_parse_gpdb(vha, fcport, pd);
@@ -1280,7 +1302,8 @@ static void qla_chk_n2n_b4_login(struct scsi_qla_host *vha, fc_port_t *fcport)
login = 1;
}
- if (login) {
+ if (login && fcport->login_retry) {
+ fcport->login_retry--;
if (fcport->loop_id == FC_NO_LOOP_ID) {
fcport->fw_login_state = DSC_LS_PORT_UNAVAIL;
rc = qla2x00_find_new_loop_id(vha, fcport);
@@ -1304,14 +1327,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
{
u16 data[2];
u64 wwn;
+ u16 sec;
- ql_dbg(ql_dbg_disc, vha, 0x20d8,
- "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d retry %d lid %d scan %d\n",
+ ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8,
+ "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, fcport->login_pause, fcport->flags,
fcport->conflict, fcport->last_rscn_gen, fcport->rscn_gen,
- fcport->login_gen, fcport->login_retry,
- fcport->loop_id, fcport->scan_state);
+ fcport->login_gen, fcport->loop_id, fcport->scan_state);
if (fcport->scan_state != QLA_FCPORT_FOUND)
return 0;
@@ -1410,22 +1433,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
break;
case DSC_LOGIN_FAILED:
- fcport->login_retry--;
- ql_dbg(ql_dbg_disc, vha, 0x20d0,
- "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
if (N2N_TOPO(vha->hw))
qla_chk_n2n_b4_login(vha, fcport);
else
- qla24xx_post_gidpn_work(vha, fcport);
+ qlt_schedule_sess_for_deletion(fcport);
break;
case DSC_LOGIN_COMPLETE:
/* recheck login state */
- ql_dbg(ql_dbg_disc, vha, 0x20d1,
- "%s %d %8phC post adisc\n",
- __func__, __LINE__, fcport->port_name);
- fcport->login_retry--;
data[0] = data[1] = 0;
qla2x00_post_async_adisc_work(vha, fcport, data);
break;
@@ -1435,6 +1450,22 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
qla24xx_post_prli_work(vha, fcport);
break;
+ case DSC_UPD_FCPORT:
+ sec = jiffies_to_msecs(jiffies -
+ fcport->jiffies_at_registration)/1000;
+ if (fcport->sec_since_registration < sec && sec &&
+ !(sec % 60)) {
+ fcport->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, fcport->vha, 0xffff,
+ "%s %8phC - Slow Rport registration(%d Sec)\n",
+ __func__, fcport->port_name, sec);
+ }
+
+ if (fcport->next_disc_state != DSC_DELETE_PEND)
+ fcport->next_disc_state = DSC_ADISC;
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+
default:
break;
}
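/*
 * [Editor's sketch, not part of the patch] The DSC_UPD_FCPORT case above
 * rate-limits its "Slow Rport registration" message: it only fires when a
 * new, non-zero multiple of 60 seconds has elapsed since the operation
 * started. A minimal standalone form of the idiom, with hypothetical names
 * (started_jiffies, last_reported_sec):
 */
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/types.h>

static void warn_if_slow(unsigned long started_jiffies,
			 u16 *last_reported_sec, const char *what)
{
	u16 sec = jiffies_to_msecs(jiffies - started_jiffies) / 1000;

	/* Report once per minute boundary, and never at t=0. */
	if (*last_reported_sec < sec && sec && !(sec % 60)) {
		*last_reported_sec = sec;
		pr_warn("%s still pending after %d sec\n", what, sec);
	}
}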
@@ -1513,7 +1544,6 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n",
__func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
return;
}
@@ -1533,7 +1563,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
{
fc_port_t *f, *tf;
uint32_t id = 0, mask, rid;
- unsigned long flags;
fc_port_t *fcport;
switch (ea->event) {
@@ -1548,10 +1577,16 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
return;
switch (ea->id.b.rsvd_1) {
case RSCN_PORT_ADDR:
+#define BIGSCAN 1
+#if defined(BIGSCAN) && BIGSCAN > 0
+ {
+ unsigned long flags;
fcport = qla2x00_find_fcport_by_nportid
(vha, &ea->id, 1);
- if (fcport)
- fcport->rscn_rcvd = 1;
+ if (fcport) {
+ fcport->scan_needed = 1;
+ fcport->rscn_gen++;
+ }
spin_lock_irqsave(&vha->work_lock, flags);
if (vha->scan.scan_flags == 0) {
@@ -1561,7 +1596,26 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
schedule_delayed_work(&vha->scan.scan_work, 5);
}
spin_unlock_irqrestore(&vha->work_lock, flags);
-
+ }
+#else
+ {
+ int rc;
+ fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1);
+ if (!fcport) {
+ /* cable moved */
+ rc = qla24xx_post_gpnid_work(vha, &ea->id);
+ if (rc) {
+ ql_log(ql_log_warn, vha, 0xd044,
+ "RSCN GPNID work failed %06x\n",
+ ea->id.b24);
+ }
+ } else {
+ ea->fcport = fcport;
+ fcport->scan_needed = 1;
+ qla24xx_handle_rscn_event(fcport, ea);
+ }
+ }
+#endif
break;
case RSCN_AREA_ADDR:
case RSCN_DOM_ADDR:
@@ -1597,9 +1651,6 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
}
break;
- case FCME_GIDPN_DONE:
- qla24xx_handle_gidpn_event(vha, ea);
- break;
case FCME_GNL_DONE:
qla24xx_handle_gnl_done_event(vha, ea);
break;
@@ -1639,6 +1690,34 @@ void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea)
}
}
+/*
+ * RSCN(s) came in for this fcport, but they could not be consumed
+ * by the fcport at the time
+ */
+void qla_rscn_replay(fc_port_t *fcport)
+{
+ struct event_arg ea;
+
+ switch (fcport->disc_state) {
+ case DSC_DELETE_PEND:
+ return;
+ default:
+ break;
+ }
+
+ if (fcport->scan_needed) {
+ memset(&ea, 0, sizeof(ea));
+ ea.event = FCME_RSCN;
+ ea.id = fcport->d_id;
+ ea.id.b.rsvd_1 = RSCN_PORT_ADDR;
+#if defined(BIGSCAN) && BIGSCAN > 0
+ qla2x00_fcport_event_handler(fcport->vha, &ea);
+#else
+ qla24xx_post_gpnid_work(fcport->vha, &ea.id);
+#endif
+ }
+}
+
static void
qla2x00_tmf_iocb_timeout(void *data)
{
@@ -1684,15 +1763,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
tm_iocb->u.tmf.data = tag;
sp->done = qla2x00_tmf_sp_done;
- rval = qla2x00_start_sp(sp);
- if (rval != QLA_SUCCESS)
- goto done_free_sp;
-
ql_dbg(ql_dbg_taskm, vha, 0x802f,
"Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
sp->handle, fcport->loop_id, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS)
+ goto done_free_sp;
wait_for_completion(&tm_iocb->u.tmf.comp);
rval = tm_iocb->u.tmf.data;
@@ -1747,47 +1825,46 @@ int
qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait)
{
scsi_qla_host_t *vha = cmd_sp->vha;
- fc_port_t *fcport = cmd_sp->fcport;
struct srb_iocb *abt_iocb;
srb_t *sp;
int rval = QLA_FUNCTION_FAILED;
- sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
+ sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport,
+ GFP_KERNEL);
if (!sp)
goto done;
abt_iocb = &sp->u.iocb_cmd;
sp->type = SRB_ABT_CMD;
sp->name = "abort";
+ sp->qpair = cmd_sp->qpair;
if (wait)
sp->flags = SRB_WAKEUP_ON_COMP;
abt_iocb->timeout = qla24xx_abort_iocb_timeout;
init_completion(&abt_iocb->u.abt.comp);
- qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha));
+ /* The FW may send the ABTS twice; each attempt has a 20s timeout. */
+ qla2x00_init_timer(sp, 42);
abt_iocb->u.abt.cmd_hndl = cmd_sp->handle;
-
- if (vha->flags.qpairs_available && cmd_sp->qpair)
- abt_iocb->u.abt.req_que_no =
- cpu_to_le16(cmd_sp->qpair->req->id);
- else
- abt_iocb->u.abt.req_que_no = cpu_to_le16(vha->req->id);
+ abt_iocb->u.abt.req_que_no = cpu_to_le16(cmd_sp->qpair->req->id);
sp->done = qla24xx_abort_sp_done;
+ ql_dbg(ql_dbg_async, vha, 0x507c,
+ "Abort command issued - hdl=%x, type=%x\n",
+ cmd_sp->handle, cmd_sp->type);
+
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS)
goto done_free_sp;
- ql_dbg(ql_dbg_async, vha, 0x507c,
- "Abort command issued - hdl=%x, target_id=%x\n",
- cmd_sp->handle, fcport->tgt_id);
-
if (wait) {
wait_for_completion(&abt_iocb->u.abt.comp);
rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ?
QLA_SUCCESS : QLA_FUNCTION_FAILED;
+ } else {
+ goto done;
}
done_free_sp:
@@ -1803,19 +1880,17 @@ qla24xx_async_abort_command(srb_t *sp)
uint32_t handle;
fc_port_t *fcport = sp->fcport;
+ struct qla_qpair *qpair = sp->qpair;
struct scsi_qla_host *vha = fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = vha->req;
-
- if (vha->flags.qpairs_available && sp->qpair)
- req = sp->qpair->req;
+ struct req_que *req = qpair->req;
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -1876,7 +1951,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d data %x|%x iop %x|%x\n",
__func__, fcport->port_name, fcport->disc_state,
fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
- ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1,
+ ea->sp->gen1, fcport->rscn_gen,
ea->data[0], ea->data[1], ea->iop[0], ea->iop[1]);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
@@ -1898,9 +1973,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
return;
} else if (ea->sp->gen1 != fcport->rscn_gen) {
- ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
- __func__, __LINE__, fcport->port_name);
- qla24xx_post_gidpn_work(vha, fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20d3,
+ "%s %8phC RSCN generation changed\n",
+ __func__, fcport->port_name);
+ qla_rscn_replay(fcport);
+ qlt_schedule_sess_for_deletion(fcport);
return;
}
@@ -1952,25 +2029,15 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
cid.b.rsvd_1 = 0;
ql_dbg(ql_dbg_disc, vha, 0x20ec,
- "%s %d %8phC LoopID 0x%x in use post gnl\n",
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
__func__, __LINE__, ea->fcport->port_name,
- ea->fcport->loop_id);
+ ea->fcport->loop_id, cid.b24);
- if (IS_SW_RESV_ADDR(cid)) {
- set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
- ea->fcport->loop_id = FC_NO_LOOP_ID;
- } else {
- qla2x00_clear_loop_id(ea->fcport);
- }
+ set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
+ ea->fcport->loop_id = FC_NO_LOOP_ID;
qla24xx_post_gnl_work(vha, ea->fcport);
break;
case MBS_PORT_ID_USED:
- ql_dbg(ql_dbg_disc, vha, 0x20ed,
- "%s %d %8phC NPortId %02x%02x%02x inuse post gidpn\n",
- __func__, __LINE__, ea->fcport->port_name,
- ea->fcport->d_id.b.domain, ea->fcport->d_id.b.area,
- ea->fcport->d_id.b.al_pa);
-
lid = ea->iop[1] & 0xffff;
qlt_find_sess_invalidate_other(vha,
wwn_to_u64(ea->fcport->port_name),
@@ -1989,8 +2056,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. post gidpn\n",
__func__, __LINE__, ea->fcport->port_name,
ea->fcport->d_id.b24, lid);
- qla2x00_clear_loop_id(ea->fcport);
- qla24xx_post_gidpn_work(vha, ea->fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ed,
"%s %d %8phC NPortId %06x inuse with loopid 0x%x. sched delete\n",
@@ -2018,26 +2083,6 @@ qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
return;
}
-void
-qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
- uint16_t *data)
-{
- fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (data[0] == MBS_COMMAND_COMPLETE) {
- qla2x00_update_fcport(vha, fcport);
-
- return;
- }
-
- /* Retry login. */
- if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- else
- qla2x00_mark_device_lost(vha, fcport, 1, 0);
-
- return;
-}
-
/****************************************************************************/
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
@@ -3527,6 +3572,11 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
qla24xx_detect_sfp(vha);
+ if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+ qla27xx_set_zio_threshold(vha,
+ ha->last_zio_threshold);
+
rval = qla2x00_set_exlogins_buffer(vha);
if (rval != QLA_SUCCESS)
goto failed;
@@ -4015,6 +4065,7 @@ next_check:
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
QLA_FW_STARTED(ha);
+ vha->u_ql2xexchoffld = vha->u_ql2xiniexchg = 0;
}
return (rval);
@@ -4728,6 +4779,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport = NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
+ INIT_WORK(&fcport->reg_work, qla_register_fcport_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
INIT_LIST_HEAD(&fcport->list);
@@ -4853,19 +4905,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
*/
if (qla_tgt_mode_enabled(vha) ||
qla_dual_mode_enabled(vha)) {
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha)) {
- spin_lock_irqsave(&ha->tgt.atio_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 0);
- spin_unlock_irqrestore(
- &ha->tgt.atio_lock, flags);
- } else {
- spin_lock_irqsave(&ha->hardware_lock,
- flags);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(
- &ha->hardware_lock, flags);
- }
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock,
+ flags);
}
}
}
@@ -4958,6 +5001,19 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
(uint8_t *)ha->gid_list,
entries * sizeof(struct gid_list_info));
+ if (entries == 0) {
+ spin_lock_irqsave(&vha->work_lock, flags);
+ vha->scan.scan_retry++;
+ spin_unlock_irqrestore(&vha->work_lock, flags);
+
+ if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ }
+ } else {
+ vha->scan.scan_retry = 0;
+ }
+
list_for_each_entry(fcport, &vha->vp_fcports, list) {
fcport->scan_state = QLA_FCPORT_SCAN;
}
@@ -5223,20 +5279,20 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
void
qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- fcport->vha = vha;
-
if (IS_SW_RESV_ADDR(fcport->d_id))
return;
+ ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n",
+ __func__, fcport->port_name);
+
+ fcport->disc_state = DSC_UPD_FCPORT;
+ fcport->login_retry = vha->hw->login_retry_count;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
- fcport->disc_state = DSC_LOGIN_COMPLETE;
fcport->deleted = 0;
fcport->logout_on_delete = 1;
fcport->login_retry = vha->hw->login_retry_count;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
- qla2x00_iidma_fcport(vha, fcport);
-
switch (vha->hw->current_topology) {
case ISP_CFG_N:
case ISP_CFG_NL:
@@ -5246,6 +5302,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_iidma_fcport(vha, fcport);
+
if (fcport->fc4f_nvme) {
qla_nvme_register_remote(vha, fcport);
fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -5274,6 +5332,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
if (fcport->id_changed) {
fcport->id_changed = 0;
@@ -5290,7 +5350,36 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla24xx_post_gpsc_work(vha, fcport);
}
}
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
+
+ fcport->disc_state = DSC_LOGIN_COMPLETE;
+}
+
+void qla_register_fcport_fn(struct work_struct *work)
+{
+ fc_port_t *fcport = container_of(work, struct fc_port, reg_work);
+ u32 rscn_gen = fcport->rscn_gen;
+ u16 data[2];
+
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
+
+ qla2x00_update_fcport(fcport->vha, fcport);
+
+ if (rscn_gen != fcport->rscn_gen) {
+ /* RSCN(s) came in during registration */
+ switch (fcport->next_disc_state) {
+ case DSC_DELETE_PEND:
+ qlt_schedule_sess_for_deletion(fcport);
+ break;
+ case DSC_ADISC:
+ data[0] = data[1] = 0;
+ qla2x00_post_async_adisc_work(fcport->vha, fcport,
+ data);
+ break;
+ default:
+ break;
+ }
+ }
}
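/*
 * [Editor's sketch, not part of the patch] qla_register_fcport_fn() above
 * snapshots rscn_gen before the potentially long registration and compares
 * it afterwards to catch RSCNs that arrived in the meantime. The bare
 * generation-counter pattern, with hypothetical names:
 */
#include <linux/types.h>

struct tracked {
	u32 gen;	/* bumped by the event path on every new event */
};

static void guarded_op(struct tracked *t,
		       void (*long_running_op)(struct tracked *),
		       void (*handle_missed)(struct tracked *))
{
	u32 snap = t->gen;	/* snapshot before blocking */

	long_running_op(t);	/* events may arrive while this runs */

	if (snap != t->gen)	/* counter moved: our view is stale */
		handle_missed(t);
}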
/*
@@ -6494,6 +6583,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->link_data_rate = PORT_SPEED_UNKNOWN;
SAVE_TOPO(ha);
ha->flags.rida_fmt2 = 0;
ha->flags.n2n_ae = 0;
@@ -6622,6 +6712,20 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
return status;
}
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(vha))
+ return 0;
+ break;
+ case QLA2XXX_INI_MODE_ENABLED:
+ default:
+ break;
+ }
+
ha->isp_ops->get_flash_version(vha, req->ring);
ha->isp_ops->nvram_config(vha);
@@ -6682,7 +6786,7 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
* The next call disables the board
* completely.
*/
- ha->isp_ops->reset_adapter(vha);
+ qla2x00_abort_isp_cleanup(vha);
vha->flags.online = 0;
clear_bit(ISP_ABORT_RETRY,
&vha->dpc_flags);
@@ -7142,7 +7246,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
icb->firmware_options_2 &= cpu_to_le32(
~(BIT_3 | BIT_2 | BIT_1 | BIT_0));
- vha->flags.process_response_queue = 0;
if (ha->zio_mode != QLA_ZIO_DISABLED) {
ha->zio_mode = QLA_ZIO_MODE_6;
@@ -7153,7 +7256,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
icb->firmware_options_2 |= cpu_to_le32(
(uint32_t)ha->zio_mode);
icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
- vha->flags.process_response_queue = 1;
}
if (rval) {
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 4351736b2426..512c3c37b447 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -209,7 +209,8 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha)
}
static inline srb_t *
-qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
+qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
+ fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
@@ -225,7 +226,9 @@ qla2xxx_get_qpair_sp(struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->iocbs = 1;
- sp->vha = qpair->vha;
+ sp->vha = vha;
+ sp->qpair = qpair;
+ sp->cmd_type = TYPE_SRB;
INIT_LIST_HEAD(&sp->elem);
done:
@@ -246,19 +249,17 @@ qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
srb_t *sp = NULL;
uint8_t bail;
+ struct qla_qpair *qpair;
QLA_VHA_MARK_BUSY(vha, bail);
if (unlikely(bail))
return NULL;
- sp = mempool_alloc(vha->hw->srb_mempool, flag);
+ qpair = vha->hw->base_qpair;
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, flag);
if (!sp)
goto done;
- memset(sp, 0, sizeof(*sp));
- sp->fcport = fcport;
- sp->cmd_type = TYPE_SRB;
- sp->iocbs = 1;
sp->vha = vha;
done:
if (!sp)
@@ -270,7 +271,7 @@ static inline void
qla2x00_rel_sp(srb_t *sp)
{
QLA_VHA_MARK_NOT_BUSY(sp->vha);
- mempool_free(sp, sp->vha->hw->srb_mempool);
+ qla2xxx_rel_qpair_sp(sp->qpair, sp);
}
static inline void
@@ -317,13 +318,13 @@ static inline bool
qla_is_exch_offld_enabled(struct scsi_qla_host *vha)
{
if (qla_ini_mode_enabled(vha) &&
- (ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_tgt_mode_enabled(vha) &&
- (ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
+ (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT))
return true;
else if (qla_dual_mode_enabled(vha) &&
- ((ql2xiniexchg + ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
+ ((vha->ql2xiniexchg + vha->ql2xexchoffld) > FW_DEF_EXCHANGES_CNT))
return true;
else
return false;
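/*
 * [Editor's sketch, not part of the patch] The qla_inline.h hunk above
 * turns qla2x00_get_sp() into a thin wrapper over the per-queue-pair
 * allocator, so single-queue callers share one code path with multiqueue
 * callers. The refactoring shape, with hypothetical names:
 */
#include <linux/types.h>

struct qpair;					/* per-queue context */

void *alloc_from_qpair(struct qpair *qp, gfp_t flag); /* real allocator */
struct qpair *base_qpair(void);			/* default queue pair */

/* Legacy single-queue entry point: delegate rather than duplicate. */
static inline void *alloc_default(gfp_t flag)
{
	return alloc_from_qpair(base_qpair(), flag);
}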
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ac8e097419..86fb8b21aa71 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1526,12 +1526,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_SUCCESS;
@@ -1725,12 +1719,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&ha->iobase->isp24.hccr);
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -1880,11 +1868,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_SUCCESS;
@@ -2287,8 +2270,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
logio->control_flags =
cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->se_sess ||
- !sp->fcport->keep_nport_handle)
+ if (!sp->fcport->keep_nport_handle)
logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
@@ -2659,7 +2641,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
struct qla_hw_data *ha = vha->hw;
int rval = QLA_SUCCESS;
void *ptr, *resp_ptr;
- dma_addr_t ptr_dma;
/* Alloc SRB structure */
sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
@@ -2691,7 +2672,6 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ptr = elsio->u.els_plogi.els_plogi_pyld =
dma_alloc_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
&elsio->u.els_plogi.els_plogi_pyld_dma, GFP_KERNEL);
- ptr_dma = elsio->u.els_plogi.els_plogi_pyld_dma;
if (!elsio->u.els_plogi.els_plogi_pyld) {
rval = QLA_FUNCTION_FAILED;
@@ -3314,19 +3294,21 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
{
struct srb_iocb *aio = &sp->u.iocb_cmd;
scsi_qla_host_t *vha = sp->vha;
- struct req_que *req = vha->req;
+ struct req_que *req = sp->qpair->req;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
- abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ if (sp->fcport) {
+ abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
+ abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
+ abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
+ }
abt_iocb->handle_to_abort =
cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
aio->u.abt.cmd_hndl));
- abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
- abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
- abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = cpu_to_le16(aio->u.abt.req_que_no);
/* Send the command to the firmware */
@@ -3455,12 +3437,13 @@ qla2x00_start_sp(srb_t *sp)
int rval;
scsi_qla_host_t *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qp = sp->qpair;
void *pkt;
unsigned long flags;
rval = QLA_FUNCTION_FAILED;
- spin_lock_irqsave(&ha->hardware_lock, flags);
- pkt = qla2x00_alloc_iocbs(vha, sp);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ pkt = __qla2x00_alloc_iocbs(sp->qpair, sp);
if (!pkt) {
ql_log(ql_log_warn, vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
@@ -3538,9 +3521,9 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(vha, ha->req_q_map[0]);
+ qla2x00_start_iocbs(vha, qp->req);
done:
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
return rval;
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 36cbb29c84f6..d73b04e40590 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1850,11 +1850,12 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
uint16_t state_flags;
struct nvmefc_fcp_req *fd;
- uint16_t ret = 0;
+ uint16_t ret = QLA_SUCCESS;
+ uint16_t comp_status = le16_to_cpu(sts->comp_status);
iocb = &sp->u.iocb_cmd;
fcport = sp->fcport;
- iocb->u.nvme.comp_status = le16_to_cpu(sts->comp_status);
+ iocb->u.nvme.comp_status = comp_status;
state_flags = le16_to_cpu(sts->state_flags);
fd = iocb->u.nvme.desc;
@@ -1892,28 +1893,35 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
fd->transferred_length = fd->payload_length -
le32_to_cpu(sts->residual_len);
- switch (le16_to_cpu(sts->comp_status)) {
+ if (unlikely(comp_status != CS_COMPLETE))
+ ql_log(ql_log_warn, fcport->vha, 0x5060,
+ "NVME-%s ERR Handling - hdl=%x status(%x) tr_len:%x resid=%x ox_id=%x\n",
+ sp->name, sp->handle, comp_status,
+ fd->transferred_length, le32_to_cpu(sts->residual_len),
+ sts->ox_id);
+
+ /*
+ * On a transport error, fail the request (the HBA rejects it);
+ * otherwise the transport layer will handle the status.
+ */
+ switch (comp_status) {
case CS_COMPLETE:
- ret = QLA_SUCCESS;
break;
- case CS_ABORTED:
+
case CS_RESET:
case CS_PORT_UNAVAILABLE:
case CS_PORT_LOGGED_OUT:
+ fcport->nvme_flag |= NVME_FLAG_RESETTING;
+ /* fall through */
+ case CS_ABORTED:
case CS_PORT_BUSY:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s ERR Handling - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
fd->transferred_length = 0;
iocb->u.nvme.rsp_pyld_len = 0;
ret = QLA_ABORTED;
break;
+ case CS_DATA_UNDERRUN:
+ break;
default:
- ql_log(ql_log_warn, fcport->vha, 0x5060,
- "NVME-%s error - hdl=%x completion status(%x) resid=%x ox_id=%x\n",
- sp->name, sp->handle, sts->comp_status,
- le32_to_cpu(sts->residual_len), sts->ox_id);
ret = QLA_FUNCTION_FAILED;
break;
}
@@ -2837,6 +2845,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ELS_IOCB_TYPE:
case ABORT_IOCB_TYPE:
case MBX_IOCB_TYPE:
+ default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(sp, res);
@@ -2847,7 +2856,6 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
case ABTS_RESP_24XX:
case CTIO_TYPE7:
case CTIO_CRC2:
- default:
return 1;
}
fatal:
@@ -3121,6 +3129,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
uint16_t mb[8];
struct rsp_que *rsp;
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3181,22 +3190,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x504f,
"Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -3210,6 +3210,12 @@ qla24xx_intr_handler(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
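/*
 * [Editor's sketch, not part of the patch] The handler above now only
 * notes, inside the hardware_lock region, that ATIO work is pending, and
 * runs qlt_24xx_process_atio_queue() once after that lock is dropped,
 * under the dedicated atio_lock. The shape of the pattern (example_hw,
 * hw_lock, atio_lock and process_atio_queue are placeholders):
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_hw {
	spinlock_t hw_lock;	/* covers register/response access */
	spinlock_t atio_lock;	/* covers the ATIO ring */
};

static void process_atio_queue(struct example_hw *hw) { /* ... */ }

static irqreturn_t intr_handler_sketch(int irq, void *dev_id)
{
	struct example_hw *hw = dev_id;
	unsigned long flags;
	bool process_atio = false;

	spin_lock_irqsave(&hw->hw_lock, flags);
	/* ... decode interrupt status; on an ATIO event: ... */
	process_atio = true;
	spin_unlock_irqrestore(&hw->hw_lock, flags);

	/* Take the second lock only after the first is released. */
	if (process_atio) {
		spin_lock_irqsave(&hw->atio_lock, flags);
		process_atio_queue(hw);
		spin_unlock_irqrestore(&hw->atio_lock, flags);
	}
	return IRQ_HANDLED;
}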
@@ -3256,6 +3262,7 @@ qla24xx_msix_default(int irq, void *dev_id)
uint32_t hccr;
uint16_t mb[8];
unsigned long flags;
+ bool process_atio = false;
rsp = (struct rsp_que *) dev_id;
if (!rsp) {
@@ -3312,22 +3319,13 @@ qla24xx_msix_default(int irq, void *dev_id)
qla24xx_process_response_queue(vha, rsp);
break;
case INTR_ATIO_QUE_UPDATE_27XX:
- case INTR_ATIO_QUE_UPDATE:{
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
+ case INTR_ATIO_QUE_UPDATE:
+ process_atio = true;
break;
- }
- case INTR_ATIO_RSP_QUE_UPDATE: {
- unsigned long flags2;
- spin_lock_irqsave(&ha->tgt.atio_lock, flags2);
- qlt_24xx_process_atio_queue(vha, 1);
- spin_unlock_irqrestore(&ha->tgt.atio_lock, flags2);
-
+ case INTR_ATIO_RSP_QUE_UPDATE:
+ process_atio = true;
qla24xx_process_response_queue(vha, rsp);
break;
- }
default:
ql_dbg(ql_dbg_async, vha, 0x5051,
"Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -3338,6 +3336,12 @@ qla24xx_msix_default(int irq, void *dev_id)
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (process_atio) {
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
+ qlt_24xx_process_atio_queue(vha, 0);
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
+ }
+
return IRQ_HANDLED;
}
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2c6c2cd5a0d0..2f3e5075ae76 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -60,6 +60,7 @@ static struct rom_cmd {
{ MBC_GET_ADAPTER_LOOP_ID },
{ MBC_READ_SFP },
{ MBC_GET_RNID_PARAMS },
+ { MBC_GET_SET_ZIO_THRESHOLD },
};
static int is_rom_cmd(uint16_t cmd)
@@ -189,7 +190,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
goto premature_exit;
}
- ha->flags.mbox_busy = 1;
+
/* Save mailbox command for debug */
ha->mcp = mcp;
@@ -198,12 +199,13 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (ha->flags.purge_mbox || chip_reset != ha->chip_reset) {
+ if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
+ ha->flags.mbox_busy) {
rval = QLA_ABORTED;
- ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
goto premature_exit;
}
+ ha->flags.mbox_busy = 1;
/* Load mailbox registers. */
if (IS_P3P_TYPE(ha))
@@ -254,9 +256,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
+
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
@@ -274,6 +277,16 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_inc(&ha->num_pend_mbx_stage3);
if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
mcp->tov * HZ)) {
+ if (chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
+ atomic_dec(&ha->num_pend_mbx_stage2);
+ atomic_dec(&ha->num_pend_mbx_stage3);
+ rval = QLA_ABORTED;
+ goto premature_exit;
+ }
ql_dbg(ql_dbg_mbx, vha, 0x117a,
"cmd=%x Timeout.\n", command);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -282,7 +295,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
} else if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
atomic_dec(&ha->num_pend_mbx_stage2);
atomic_dec(&ha->num_pend_mbx_stage3);
rval = QLA_ABORTED;
@@ -300,9 +315,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
+ ha->flags.mbox_busy = 0;
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
- ha->flags.mbox_busy = 0;
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
@@ -320,7 +335,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
while (!ha->flags.mbox_int) {
if (ha->flags.purge_mbox ||
chip_reset != ha->chip_reset) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
atomic_dec(&ha->num_pend_mbx_stage2);
rval = QLA_ABORTED;
goto premature_exit;
@@ -363,7 +381,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ha->mcp = NULL;
@@ -436,7 +457,10 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
* then only PCI ERR flag would be set.
* we will do premature exit for above case.
*/
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock,
+ flags);
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -451,8 +475,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
rval = QLA_FUNCTION_TIMEOUT;
}
}
-
+ spin_lock_irqsave(&ha->hardware_lock, flags);
ha->flags.mbox_busy = 0;
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Clean up */
ha->mcp = NULL;
@@ -493,7 +518,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
- } else if (!abort_active) {
+ } else if (current == ha->dpc_thread) {
/* call abort directly since we are in the DPC thread */
ql_dbg(ql_dbg_mbx, vha, 0x101d,
"Timeout, calling abort_isp.\n");
@@ -1486,7 +1511,6 @@ qla2x00_abort_target(struct fc_port *fcport, uint64_t l, int tag)
struct req_que *req;
struct rsp_que *rsp;
- l = l;
vha = fcport->vha;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
@@ -3072,22 +3096,25 @@ qla24xx_abort_command(srb_t *sp)
struct scsi_qla_host *vha = fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct req_que *req = vha->req;
+ struct qla_qpair *qpair = sp->qpair;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
"Entered %s.\n", __func__);
if (vha->flags.qpairs_available && sp->qpair)
req = sp->qpair->req;
+ else
+ return QLA_FUNCTION_FAILED;
if (ql2xasynctmfenable)
return qla24xx_async_abort_command(sp);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
@@ -3762,10 +3789,7 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
mcp->mb[0] = MBC_PORT_PARAMS;
mcp->mb[1] = loop_id;
mcp->mb[2] = BIT_0;
- if (IS_CNA_CAPABLE(vha->hw))
- mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
- else
- mcp->mb[3] = port_speed & (BIT_2|BIT_1|BIT_0);
+ mcp->mb[3] = port_speed & (BIT_5|BIT_4|BIT_3|BIT_2|BIT_1|BIT_0);
mcp->mb[9] = vha->vp_idx;
mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
mcp->in_mb = MBX_3|MBX_1|MBX_0;
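/*
 * [Editor's sketch, not part of the patch] The qla_mbx.c hunks above move
 * every test and update of ha->flags.mbox_busy under ha->hardware_lock,
 * so checking and claiming the mailbox becomes one atomic step. Reduced
 * to its core (mbox_owner, busy and lock are placeholders):
 */
#include <linux/errno.h>
#include <linux/spinlock.h>

struct mbox_owner {
	spinlock_t lock;
	bool busy;
};

static int claim_mbox(struct mbox_owner *mb)
{
	unsigned long flags;

	spin_lock_irqsave(&mb->lock, flags);
	if (mb->busy) {			/* someone else owns the mailbox */
		spin_unlock_irqrestore(&mb->lock, flags);
		return -EBUSY;
	}
	mb->busy = true;		/* claim while still holding the lock */
	spin_unlock_irqrestore(&mb->lock, flags);
	return 0;
}

static void release_mbox(struct mbox_owner *mb)
{
	unsigned long flags;

	spin_lock_irqsave(&mb->lock, flags);
	mb->busy = false;
	spin_unlock_irqrestore(&mb->lock, flags);
}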
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 20d9dc39f0fb..7e78e7eff783 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -506,7 +506,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return -EBUSY;
/* Alloc SRB structure */
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
return -EBUSY;
@@ -607,7 +607,7 @@ void qla_nvme_abort(struct qla_hw_data *ha, struct srb *sp, int res)
{
int rval;
- if (!test_bit(ABORT_ISP_ACTIVE, &sp->vha->dpc_flags)) {
+ if (ha->flags.fw_started) {
rval = ha->isp_ops->abort_command(sp);
if (!rval && !qla_nvme_wait_on_command(sp))
ql_log(ql_log_warn, NULL, 0x2112,
@@ -660,9 +660,6 @@ void qla_nvme_delete(struct scsi_qla_host *vha)
__func__, fcport);
nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0);
- init_completion(&fcport->nvme_del_done);
- nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
- wait_for_completion(&fcport->nvme_del_done);
}
if (vha->nvme_local_port) {
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de2bc78449e7..121e18b3b9f8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -3699,8 +3699,8 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
/* Wait for pending cmds (physical and virtual) to complete */
- if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
- WAIT_HOST) == QLA_SUCCESS) {
+ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0,
+ WAIT_HOST)) {
ql_dbg(ql_dbg_init, vha, 0x00b3,
"Done wait for "
"pending commands.\n");
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 42b8f0d3e580..8794e54f43a9 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -14,6 +14,8 @@
#include <linux/kobject.h>
#include <linux/slab.h>
#include <linux/blk-mq-pci.h>
+#include <linux/refcount.h>
+
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -204,7 +206,7 @@ int ql2xasynctmfenable = 1;
module_param(ql2xasynctmfenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xasynctmfenable,
"Enables issue of TM IOCBs asynchronously via IOCB mechanism"
- "Default is 0 - Issue TM IOCBs via mailbox mechanism.");
+ "Default is 1 - Issue TM IOCBs via mailbox mechanism.");
int ql2xdontresethba;
module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR);
@@ -391,12 +393,14 @@ static void qla_init_base_qpair(struct scsi_qla_host *vha, struct req_que *req,
struct qla_hw_data *ha = vha->hw;
rsp->qpair = ha->base_qpair;
rsp->req = req;
+ ha->base_qpair->hw = ha;
ha->base_qpair->req = req;
ha->base_qpair->rsp = rsp;
ha->base_qpair->vha = vha;
ha->base_qpair->qp_lock_ptr = &ha->hardware_lock;
ha->base_qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
ha->base_qpair->msix = &ha->msix_entries[QLA_MSIX_RSP_Q];
+ ha->base_qpair->srb_mempool = ha->srb_mempool;
INIT_LIST_HEAD(&ha->base_qpair->hints_list);
ha->base_qpair->enable_class_2 = ql2xenableclass2;
/* init qpair to this cpu. Will adjust at run time. */
@@ -1012,7 +1016,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
else
goto qc24_target_busy;
- sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+ sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
if (!sp)
goto qc24_host_busy;
@@ -1212,10 +1216,14 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}
-static void
+static int
sp_get(struct srb *sp)
{
- atomic_inc(&sp->ref_count);
+ if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count))
+ /* ref_count was already zero; sp is being freed */
+ return ENXIO;
+ else
+ return 0;
}
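/*
 * [Editor's sketch, not part of the patch] The sp_get() change above is
 * the classic "get only if still alive" conversion: a bare atomic_inc()
 * can revive an object whose count already reached zero and is being
 * freed, whereas refcount_inc_not_zero() refuses that reference. Minimal
 * usage with a hypothetical object type:
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct object {
	refcount_t ref;
	/* ... payload ... */
};

static struct object *object_get(struct object *obj)
{
	/* Fails (returns false) once the count has reached zero. */
	return refcount_inc_not_zero(&obj->ref) ? obj : NULL;
}

static void object_put(struct object *obj)
{
	if (refcount_dec_and_test(&obj->ref))
		kfree(obj);	/* last reference dropped */
}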
#define ISP_REG_DISCONNECT 0xffffffffU
@@ -1273,38 +1281,51 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
unsigned long flags;
int rval, wait = 0;
struct qla_hw_data *ha = vha->hw;
+ struct qla_qpair *qpair;
if (qla2x00_isp_reg_stat(ha)) {
ql_log(ql_log_info, vha, 0x8042,
"PCI/Register disconnect, exiting.\n");
return FAILED;
}
- if (!CMD_SP(cmd))
- return SUCCESS;
ret = fc_block_scsi_eh(cmd);
if (ret != 0)
return ret;
ret = SUCCESS;
- id = cmd->device->id;
- lun = cmd->device->lun;
-
- spin_lock_irqsave(&ha->hardware_lock, flags);
sp = (srb_t *) CMD_SP(cmd);
- if (!sp) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (!sp)
+ return SUCCESS;
+
+ qpair = sp->qpair;
+ if (!qpair)
+ return SUCCESS;
+
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ if (!CMD_SP(cmd)) {
+ /*
+ * An interrupt could have cleared the pointer as part of
+ * done & free.
+ */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
return SUCCESS;
}
+ if (sp_get(sp)) {
+ /* ref_count is already 0 */
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
+ id = cmd->device->id;
+ lun = cmd->device->lun;
+
ql_dbg(ql_dbg_taskm, vha, 0x8002,
"Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n",
vha->host_no, id, lun, sp, cmd, sp->handle);
/* Get a reference to the sp and drop the lock.*/
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = ha->isp_ops->abort_command(sp);
if (rval) {
if (rval == QLA_FUNCTION_PARAMETER_ERROR)
@@ -1320,14 +1341,29 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
wait = 1;
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
- sp->done(sp, 0);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_lock_irqsave(qpair->qp_lock_ptr, flags);
+ /*
+ * Clear the slot in the outstanding_cmds array if we can't find the
+ * command to reclaim the resources.
+ */
+ if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+ vha->req->outstanding_cmds[sp->handle] = NULL;
+
+ /*
+ * sp->done will do ref_count--
+ * sp_get() took an extra count above
+ */
+ sp->done(sp, DID_RESET << 16);
/* Did the command return during mailbox execution? */
if (ret == FAILED && !CMD_SP(cmd))
ret = SUCCESS;
+ if (!CMD_SP(cmd))
+ wait = 0;
+
+ spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
+
/* Wait for the command to be returned. */
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
@@ -1721,7 +1757,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
struct req_que *req;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_cmd *cmd;
- uint8_t trace = 0;
if (!ha->req_q_map)
return;
@@ -1731,64 +1766,68 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
- if (sp->cmd_type == TYPE_SRB) {
+ switch (sp->cmd_type) {
+ case TYPE_SRB:
if (sp->type == SRB_NVME_CMD ||
sp->type == SRB_NVME_LS) {
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- qla_nvme_abort(ha, sp, res);
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
+ if (!sp_get(sp)) {
+ /* got sp */
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr,
+ flags);
+ qla_nvme_abort(ha, sp, res);
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
} else if (GET_CMD_SP(sp) &&
!ha->flags.eeh_busy &&
(!test_bit(ABORT_ISP_ACTIVE,
&vha->dpc_flags)) &&
+ !qla2x00_isp_reg_stat(ha) &&
(sp->type == SRB_SCSI_CMD)) {
/*
- * Don't abort commands in
- * adapter during EEH
- * recovery as it's not
+ * Don't abort commands in adapter
+ * during EEH recovery as it's not
* accessible/responding.
*
- * Get a reference to the sp
- * and drop the lock. The
- * reference ensures this
- * sp->done() call and not the
- * call in qla2xxx_eh_abort()
- * ends the SCSI command (with
- * result 'res').
- */
- sp_get(sp);
- spin_unlock_irqrestore(qp->qp_lock_ptr,
- flags);
- status = qla2xxx_eh_abort(
- GET_CMD_SP(sp));
- spin_lock_irqsave(qp->qp_lock_ptr,
- flags);
- /*
- * Get rid of extra reference
- * if immediate exit from
- * ql2xxx_eh_abort
+ * Get a reference to the sp and drop
+ * the lock. The reference ensures this
+ * sp->done() call and not the call in
+ * qla2xxx_eh_abort() ends the SCSI cmd
+ * (with result 'res').
*/
- if (status == FAILED &&
- (qla2x00_isp_reg_stat(ha)))
- atomic_dec(
- &sp->ref_count);
+ if (!sp_get(sp)) {
+ spin_unlock_irqrestore
+ (qp->qp_lock_ptr, flags);
+ status = qla2xxx_eh_abort(
+ GET_CMD_SP(sp));
+ spin_lock_irqsave
+ (qp->qp_lock_ptr, flags);
+ }
}
sp->done(sp, res);
- } else {
+ break;
+ case TYPE_TGT_CMD:
if (!vha->hw->tgt.tgt_ops || !tgt ||
qla_ini_mode_enabled(vha)) {
- if (!trace)
- ql_dbg(ql_dbg_tgt_mgt,
- vha, 0xf003,
- "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
- vha->dpc_flags);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
+ "HOST-ABORT-HNDLR: dpc_flags=%lx. Target mode disabled\n",
+ vha->dpc_flags);
continue;
}
cmd = (struct qla_tgt_cmd *)sp;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+ break;
+ case TYPE_TGT_TMCMD:
+ /*
+ * Currently, only the ABTS response is placed on the
+ * outstanding_cmds[] array
+ */
+ ha->tgt.tgt_ops->free_mcmd(
+ (struct qla_tgt_mgmt_cmd *)sp);
+ break;
+ default:
+ break;
}
}
}
@@ -2708,7 +2747,7 @@ static void qla2x00_iocb_work_fn(struct work_struct *work)
struct scsi_qla_host, iocb_work);
struct qla_hw_data *ha = vha->hw;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- int i = 20;
+ int i = 2;
unsigned long flags;
if (test_bit(UNLOADING, &base_vha->dpc_flags))
@@ -2819,6 +2858,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
atomic_set(&ha->num_pend_mbx_stage1, 0);
atomic_set(&ha->num_pend_mbx_stage2, 0);
atomic_set(&ha->num_pend_mbx_stage3, 0);
+ atomic_set(&ha->zio_threshold, DEFAULT_ZIO_THRESHOLD);
+ ha->last_zio_threshold = DEFAULT_ZIO_THRESHOLD;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -4249,29 +4290,34 @@ static void
qla2x00_number_of_exch(scsi_qla_host_t *vha, u32 *ret_cnt, u16 max_cnt)
{
u32 temp;
+ struct init_cb_81xx *icb = (struct init_cb_81xx *)&vha->hw->init_cb;
*ret_cnt = FW_DEF_EXCHANGES_CNT;
if (max_cnt > vha->hw->max_exchg)
max_cnt = vha->hw->max_exchg;
if (qla_ini_mode_enabled(vha)) {
- if (ql2xiniexchg > max_cnt)
- ql2xiniexchg = max_cnt;
+ if (vha->ql2xiniexchg > max_cnt)
+ vha->ql2xiniexchg = max_cnt;
+
+ if (vha->ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xiniexchg;
- if (ql2xiniexchg > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xiniexchg;
} else if (qla_tgt_mode_enabled(vha)) {
- if (ql2xexchoffld > max_cnt)
- ql2xexchoffld = max_cnt;
+ if (vha->ql2xexchoffld > max_cnt) {
+ vha->ql2xexchoffld = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
+ }
- if (ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
- *ret_cnt = ql2xexchoffld;
+ if (vha->ql2xexchoffld > FW_DEF_EXCHANGES_CNT)
+ *ret_cnt = vha->ql2xexchoffld;
} else if (qla_dual_mode_enabled(vha)) {
- temp = ql2xiniexchg + ql2xexchoffld;
+ temp = vha->ql2xiniexchg + vha->ql2xexchoffld;
if (temp > max_cnt) {
- ql2xiniexchg -= (temp - max_cnt)/2;
- ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
+ vha->ql2xiniexchg -= (temp - max_cnt)/2;
+ vha->ql2xexchoffld -= (((temp - max_cnt)/2) + 1);
temp = max_cnt;
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
if (temp > FW_DEF_EXCHANGES_CNT)
@@ -4309,6 +4355,12 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (totsz != ha->exchoffld_size) {
qla2x00_free_exchoffld_buffer(ha);
+ if (actual_cnt <= FW_DEF_EXCHANGES_CNT) {
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ return QLA_SUCCESS;
+ }
+
ha->exchoffld_size = totsz;
ql_log(ql_log_info, vha, 0xd016,
@@ -4341,6 +4393,15 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
return -ENOMEM;
}
+ } else if (!ha->exchoffld_buf || (actual_cnt <= FW_DEF_EXCHANGES_CNT)) {
+ /* pathological case */
+ qla2x00_free_exchoffld_buffer(ha);
+ ha->exchoffld_size = 0;
+ ha->flags.exchoffld_enabled = 0;
+ ql_log(ql_log_info, vha, 0xd016,
+ "Exchange offload not enable: offld size=%d, actual count=%d entry sz=0x%x, total sz=0x%x.\n",
+ ha->exchoffld_size, actual_cnt, size, totsz);
+ return 0;
}
/* Now configure the dma buffer */
@@ -4356,7 +4417,7 @@ qla2x00_set_exchoffld_buffer(scsi_qla_host_t *vha)
if (qla_ini_mode_enabled(vha))
icb->exchange_count = 0;
else
- icb->exchange_count = cpu_to_le16(ql2xexchoffld);
+ icb->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
}
return rval;
@@ -4564,6 +4625,10 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
vha->host_no = host->host_no;
vha->hw = ha;
+ vha->qlini_mode = ql2x_ini_mode;
+ vha->ql2xexchoffld = ql2xexchoffld;
+ vha->ql2xiniexchg = ql2xiniexchg;
+
INIT_LIST_HEAD(&vha->vp_fcports);
INIT_LIST_HEAD(&vha->work_list);
INIT_LIST_HEAD(&vha->list);
@@ -4579,7 +4644,6 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
- spin_lock_init(&vha->gnl.fcports_lock);
init_waitqueue_head(&vha->fcport_waitQ);
init_waitqueue_head(&vha->vref_waitq);
@@ -4710,7 +4774,6 @@ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC);
-qla2x00_post_async_work(adisc_done, QLA_EVT_ASYNC_ADISC_DONE);
qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO);
qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE);
@@ -4761,16 +4824,25 @@ qlafx00_post_aenfx_work(struct scsi_qla_host *vha, uint32_t evtcode,
return qla2x00_post_work(vha, e);
}
-int qla24xx_post_upd_fcport_work(struct scsi_qla_host *vha, fc_port_t *fcport)
+void qla24xx_sched_upd_fcport(fc_port_t *fcport)
{
- struct qla_work_evt *e;
+ unsigned long flags;
- e = qla2x00_alloc_work(vha, QLA_EVT_UPD_FCPORT);
- if (!e)
- return QLA_FUNCTION_FAILED;
+ if (IS_SW_RESV_ADDR(fcport->d_id))
+ return;
- e->u.fcport.fcport = fcport;
- return qla2x00_post_work(vha, e);
+ spin_lock_irqsave(&fcport->vha->work_lock, flags);
+ if (fcport->disc_state == DSC_UPD_FCPORT) {
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+ return;
+ }
+ fcport->jiffies_at_registration = jiffies;
+ fcport->sec_since_registration = 0;
+ fcport->next_disc_state = DSC_DELETED;
+ fcport->disc_state = DSC_UPD_FCPORT;
+ spin_unlock_irqrestore(&fcport->vha->work_lock, flags);
+
+ queue_work(system_unbound_wq, &fcport->reg_work);
}
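/*
 * [Editor's sketch, not part of the patch] qla24xx_sched_upd_fcport()
 * above queues the registration work at most once: the DSC_UPD_FCPORT
 * state doubles as a "work already scheduled" marker, tested and set
 * under work_lock. The bare shape (reg_ctx and scheduled are
 * placeholders):
 */
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct reg_ctx {
	spinlock_t lock;
	bool scheduled;		/* stands in for disc_state */
	struct work_struct work;
};

static void sched_once(struct reg_ctx *ctx)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	if (ctx->scheduled) {	/* already queued: nothing to do */
		spin_unlock_irqrestore(&ctx->lock, flags);
		return;
	}
	ctx->scheduled = true;
	spin_unlock_irqrestore(&ctx->lock, flags);

	/* unbound wq: the handler may sleep and run on any CPU */
	queue_work(system_unbound_wq, &ctx->work);
}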
static
@@ -4808,10 +4880,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP)
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_FCP)
fcport->fc4_type = FC4_TYPE_FCP_SCSI;
- if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) {
+ if (e->u.new_sess.fc4_type & FS_FC4TYPE_NVME) {
fcport->fc4_type = FC4_TYPE_OTHER;
fcport->fc4f_nvme = FC4_TYPE_NVME;
}
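/*
 * [Editor's sketch, not part of the patch] The fc4_type test above now
 * uses a bitmask instead of equality, so a port advertising both FCP and
 * NVMe matches both branches. Illustration (the BIT values are assumed,
 * not taken from the driver headers):
 */
#include <linux/bits.h>
#include <linux/types.h>

#define EX_FC4TYPE_FCP	BIT(0)
#define EX_FC4TYPE_NVME	BIT(1)

static bool wants_fcp(u8 fc4_type)
{
	return fc4_type & EX_FC4TYPE_FCP;   /* true even if NVMe is also set */
}

static bool wants_nvme(u8 fc4_type)
{
	return fc4_type & EX_FC4TYPE_NVME;
}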
@@ -4990,19 +5062,12 @@ qla2x00_do_work(struct scsi_qla_host *vha)
qla2x00_async_adisc(vha, e->u.logio.fcport,
e->u.logio.data);
break;
- case QLA_EVT_ASYNC_ADISC_DONE:
- qla2x00_async_adisc_done(vha, e->u.logio.fcport,
- e->u.logio.data);
- break;
case QLA_EVT_UEVENT:
qla2x00_uevent_emit(vha, e->u.uevent.code);
break;
case QLA_EVT_AENFX:
qlafx00_process_aen(vha, e);
break;
- case QLA_EVT_GIDPN:
- qla24xx_async_gidpn(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GPNID:
qla24xx_async_gpnid(vha, &e->u.gpnid.id);
break;
@@ -5025,9 +5090,6 @@ qla2x00_do_work(struct scsi_qla_host *vha)
case QLA_EVT_GPSC:
qla24xx_async_gpsc(vha, e->u.fcport.fcport);
break;
- case QLA_EVT_UPD_FCPORT:
- qla2x00_update_fcport(vha, e->u.fcport.fcport);
- break;
case QLA_EVT_GNL:
qla24xx_async_gnl(vha, e->u.fcport.fcport);
break;
@@ -6041,12 +6103,29 @@ qla2x00_do_dpc(void *data)
if (test_and_clear_bit
(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
!test_bit(UNLOADING, &base_vha->dpc_flags)) {
+ bool do_reset = true;
+
+ switch (base_vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_ENABLED:
+ break;
+ case QLA2XXX_INI_MODE_DISABLED:
+ if (!qla_tgt_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ case QLA2XXX_INI_MODE_DUAL:
+ if (!qla_dual_mode_enabled(base_vha) &&
+ !ha->flags.fw_started)
+ do_reset = false;
+ break;
+ default:
+ break;
+ }
- ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
- "ISP abort scheduled.\n");
- if (!(test_and_set_bit(ABORT_ISP_ACTIVE,
+ if (do_reset && !(test_and_set_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags))) {
-
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
+ "ISP abort scheduled.\n");
if (ha->isp_ops->abort_isp(base_vha)) {
/* failed. retry later */
set_bit(ISP_ABORT_NEEDED,
@@ -6054,10 +6133,9 @@ qla2x00_do_dpc(void *data)
}
clear_bit(ABORT_ISP_ACTIVE,
&base_vha->dpc_flags);
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
+ "ISP abort end.\n");
}
-
- ql_dbg(ql_dbg_dpc, base_vha, 0x4008,
- "ISP abort end.\n");
}
if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
@@ -6183,17 +6261,28 @@ intr_on_check:
mutex_unlock(&ha->mq_lock);
}
- if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(SET_NVME_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
ql_log(ql_log_info, base_vha, 0xffffff,
"nvme: SET ZIO Activity exchange threshold to %d.\n",
ha->nvme_last_rptd_aen);
- if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+ if (qla27xx_set_zio_threshold(base_vha,
+ ha->nvme_last_rptd_aen)) {
ql_log(ql_log_info, base_vha, 0xffffff,
- "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
}
}
+ if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED,
+ &base_vha->dpc_flags)) {
+ ql_log(ql_log_info, base_vha, 0xffffff,
+ "SET ZIO Activity exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ qla27xx_set_zio_threshold(base_vha,
+ ha->last_zio_threshold);
+ }
+
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -6406,13 +6495,24 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
- if (!vha->vp_idx &&
- atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
- ha->zio_mode == QLA_ZIO_MODE_6) {
+ if (!vha->vp_idx && (atomic_read(&ha->nvme_active_aen_cnt) !=
+ ha->nvme_last_rptd_aen) && ha->zio_mode == QLA_ZIO_MODE_6) {
ql_log(ql_log_info, vha, 0x3002,
- "nvme: Sched: Set ZIO exchange threshold to %d.\n",
- ha->nvme_last_rptd_aen);
+ "nvme: Sched: Set ZIO exchange threshold to %d.\n",
+ ha->nvme_last_rptd_aen);
ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+ set_bit(SET_NVME_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
+ start_dpc++;
+ }
+
+ if (!vha->vp_idx &&
+ (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
+ (ha->zio_mode == QLA_ZIO_MODE_6) &&
+ (IS_QLA83XX(ha) || IS_QLA27XX(ha))) {
+ ql_log(ql_log_info, vha, 0x3002,
+ "Sched: Set ZIO exchange threshold to %d.\n",
+ ha->last_zio_threshold);
+ ha->last_zio_threshold = atomic_read(&ha->zio_threshold);
set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
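/*
 * [Editor's sketch, not part of the patch] The timer hunk above follows
 * the driver's usual split: the timer (atomic context) only detects that
 * the ZIO threshold changed and sets a dpc flag; the dpc kernel thread
 * later issues the mailbox command, which may sleep. Schematically (the
 * flag bit and function names are placeholders):
 */
#include <linux/bitops.h>

#define EX_SET_ZIO_NEEDED	0	/* assumed dpc_flags bit */

/* Timer callback: atomic context, just record the request. */
static void timer_side(unsigned long *dpc_flags, int cur, int *last)
{
	if (cur != *last) {
		*last = cur;
		set_bit(EX_SET_ZIO_NEEDED, dpc_flags);
	}
}

/* DPC thread: process context, apply_threshold() may sleep. */
static void dpc_side(unsigned long *dpc_flags, int last,
		     void (*apply_threshold)(int))
{
	if (test_and_clear_bit(EX_SET_ZIO_NEEDED, dpc_flags))
		apply_threshold(last);
}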
@@ -6839,8 +6939,6 @@ qla2xxx_pci_resume(struct pci_dev *pdev)
"The device failed to resume I/O from slot/link_reset.\n");
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
-
ha->flags.eeh_busy = 0;
}
@@ -6946,6 +7044,9 @@ qla2x00_module_init(void)
if (ql2xextended_error_logging == 1)
ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ if (ql2x_ini_mode == QLA2XXX_INI_MODE_DUAL)
+ qla_insert_tgt_attrs();
+
qla2xxx_transport_template =
fc_attach_transport(&qla2xxx_transport_functions);
if (!qla2xxx_transport_template) {
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8c811b251d42..39828207bc1d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -141,6 +141,8 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *,
struct abts_recv_from_24xx *);
static void qlt_send_busy(struct qla_qpair *, struct atio_from_isp *,
uint16_t);
+static int qlt_check_reserve_free_req(struct qla_qpair *qpair, uint32_t);
+static inline uint32_t qlt_make_handle(struct qla_qpair *);
/*
* Global Variables
@@ -541,7 +543,6 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
qlt_response_pkt(host, rsp, pkt);
break;
}
-
default:
qlt_response_pkt(vha, rsp, pkt);
break;
@@ -600,14 +601,9 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_succ = 1;
vha->fcport_count++;
-
- ql_dbg(ql_dbg_disc, vha, 0x20f3,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__,
- sp->fcport->port_name,
- vha->fcport_count);
- sp->fcport->disc_state = DSC_UPD_FCPORT;
- qla24xx_post_upd_fcport_work(vha, sp->fcport);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ qla24xx_sched_upd_fcport(sp->fcport);
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
} else {
sp->fcport->login_retry = 0;
sp->fcport->disc_state = DSC_LOGIN_COMPLETE;
@@ -1230,11 +1226,12 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
unsigned long flags;
+ u16 sec;
- if (sess->disc_state == DSC_DELETE_PEND)
+ switch (sess->disc_state) {
+ case DSC_DELETE_PEND:
return;
-
- if (sess->disc_state == DSC_DELETED) {
+ case DSC_DELETED:
if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
wake_up_all(&tgt->waitQ);
if (sess->vha->fcport_count == 0)
@@ -1243,11 +1240,26 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
!sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
return;
+ break;
+ case DSC_UPD_FCPORT:
+ /*
+ * This port is not done reporting to the upper layer;
+ * let it finish.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec && !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+ "%s %8phC : Slow Rport registration(%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ return;
+ default:
+ break;
}
- if (sess->deleted == QLA_SESS_DELETED)
- sess->logout_on_delete = 0;
-
spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
spin_unlock_irqrestore(&sess->vha->work_lock, flags);
@@ -1261,7 +1273,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
qla24xx_chk_fcp_state(sess);
ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
- "Scheduling sess %p for deletion\n", sess);
+ "Scheduling sess %p for deletion %8phC\n",
+ sess, sess->port_name);
INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn);
WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
@@ -1479,27 +1492,14 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&ha->optrom_mutex);
mutex_lock(&qla_tgt_mutex);
- if (!vha->fc_vport) {
- struct Scsi_Host *sh = vha->host;
- struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
- bool npiv_vports;
-
- spin_lock_irqsave(sh->host_lock, flags);
- npiv_vports = (fc_host->npiv_vports_inuse);
- spin_unlock_irqrestore(sh->host_lock, flags);
-
- if (npiv_vports) {
- mutex_unlock(&qla_tgt_mutex);
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
- "NPIV is in use. Can not stop target\n");
- return -EPERM;
- }
- }
+
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
mutex_unlock(&qla_tgt_mutex);
+ mutex_unlock(&ha->optrom_mutex);
return -EPERM;
}
@@ -1537,6 +1537,8 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event_timeout(tgt->waitQ, test_tgt_sess_count(tgt), 10*HZ);
+ mutex_unlock(&ha->optrom_mutex);
+
return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -1566,6 +1568,15 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
tgt);
+
+ switch (vha->qlini_mode) {
+ case QLA2XXX_INI_MODE_EXCLUSIVE:
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ break;
+ default:
+ break;
+ }
}
EXPORT_SYMBOL(qlt_stop_phase2);
@@ -1715,6 +1726,94 @@ static void qlt_send_notify_ack(struct qla_qpair *qpair,
qla2x00_start_iocbs(vha, qpair->req);
}
+static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd)
+{
+ struct scsi_qla_host *vha = mcmd->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct abts_resp_to_24xx *resp;
+ uint32_t f_ctl, h;
+ uint8_t *p;
+ int rc;
+ struct abts_recv_from_24xx *abts = &mcmd->orig_iocb.abts;
+ struct qla_qpair *qpair = mcmd->qpair;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe006,
+ "Sending task mgmt ABTS response (ha=%p, status=%x)\n",
+ ha, mcmd->fc_tm_rsp);
+
+ rc = qlt_check_reserve_free_req(qpair, 1);
+ if (rc) {
+ ql_dbg(ql_dbg_tgt, vha, 0xe04a,
+ "qla_target(%d): %s failed: unable to allocate request packet\n",
+ vha->vp_idx, __func__);
+ return -EAGAIN;
+ }
+
+ resp = (struct abts_resp_to_24xx *)qpair->req->ring_ptr;
+ memset(resp, 0, sizeof(*resp));
+
+ h = qlt_make_handle(qpair);
+ if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+ /*
+ * CTIO type 7 from the firmware doesn't provide a way to
+ * know the initiator's LOOP ID, hence we can't find
+		 * the session and, therefore, the command.
+ */
+ return -EAGAIN;
+ } else {
+ qpair->req->outstanding_cmds[h] = (srb_t *)mcmd;
+ }
+
+ resp->handle = MAKE_HANDLE(qpair->req->id, h);
+ resp->entry_type = ABTS_RESP_24XX;
+ resp->entry_count = 1;
+ resp->nport_handle = abts->nport_handle;
+ resp->vp_index = vha->vp_idx;
+ resp->sof_type = abts->sof_type;
+ resp->exchange_address = abts->exchange_address;
+ resp->fcp_hdr_le = abts->fcp_hdr_le;
+ f_ctl = cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
+ F_CTL_LAST_SEQ | F_CTL_END_SEQ |
+ F_CTL_SEQ_INITIATIVE);
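+	/* Copy the 24-bit F_CTL value byte by byte into the LE header. */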
+ p = (uint8_t *)&f_ctl;
+ resp->fcp_hdr_le.f_ctl[0] = *p++;
+ resp->fcp_hdr_le.f_ctl[1] = *p++;
+ resp->fcp_hdr_le.f_ctl[2] = *p;
+
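+	/* Swap S_ID and D_ID: the response goes back to the ABTS sender. */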
+ resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
+ resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
+ resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
+ resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
+ resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
+ resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
+
+ resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
+ if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
+ resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
+ resp->payload.ba_acct.low_seq_cnt = 0x0000;
+ resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
+ resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
+ resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
+ } else {
+ resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
+ resp->payload.ba_rjt.reason_code =
+ BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
+ /* Other bytes are zero */
+ }
+
+ vha->vha_tgt.qla_tgt->abts_resp_expected++;
+
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ return rc;
+}
+
/*
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
@@ -1742,6 +1841,7 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
}
resp->entry_type = ABTS_RESP_24XX;
+ resp->handle = QLA_TGT_SKIP_HANDLE;
resp->entry_count = 1;
resp->nport_handle = abts->nport_handle;
resp->vp_index = vha->vp_idx;
@@ -1799,15 +1899,13 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair,
 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
*/
static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
- struct abts_resp_from_24xx_fw *entry)
+ struct qla_qpair *qpair, response_t *pkt, struct qla_tgt_mgmt_cmd *mcmd)
{
struct ctio7_to_24xx *ctio;
+ u16 tmp;
+ struct abts_recv_from_24xx *entry;
- ql_dbg(ql_dbg_tgt, vha, 0xe007,
- "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
-
- ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(
- vha->hw->base_qpair, NULL);
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(qpair, NULL);
if (ctio == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe04b,
"qla_target(%d): %s failed: unable to allocate "
@@ -1815,6 +1913,13 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
return;
}
+ if (mcmd)
+ /* abts from remote port */
+ entry = &mcmd->orig_iocb.abts;
+ else
+ /* abts from this driver. */
+ entry = (struct abts_recv_from_24xx *)pkt;
+
/*
 * On entry we have the firmware's response to an ABTS response that we
 * generated, so its ID fields are reversed.
@@ -1826,56 +1931,48 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio->vp_index = vha->vp_idx;
- ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
- ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
- ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
ctio->exchange_addr = entry->exchange_addr_to_abort;
- ctio->u.status1.flags = cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
- CTIO7_FLAGS_TERMINATE);
- ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
+ tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE);
- /* Memory Barrier */
- wmb();
- qla2x00_start_iocbs(vha, vha->req);
+ if (mcmd) {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2];
- qlt_24xx_send_abts_resp(vha->hw->base_qpair,
- (struct abts_recv_from_24xx *)entry,
- FCP_TMF_CMPL, true);
-}
-
-static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
-{
- struct qla_tgt_sess_op *op;
- struct qla_tgt_cmd *cmd;
- unsigned long flags;
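+		/* Replay the aborted I/O's exchange attributes in flags bits 9+. */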
+ if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID)
+ tmp |= (mcmd->abort_io_attr << 9);
+ else if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
+ } else {
+ ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
+ ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
+ ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
- spin_lock_irqsave(&vha->cmd_list_lock, flags);
- list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
+ if (qpair->retry_term_cnt & 1)
+ tmp |= (0x4 << 9);
}
+ ctio->u.status1.flags = cpu_to_le16(tmp);
+ ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
- list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
- if (tag == op->atio.u.isp24.exchange_addr) {
- op->aborted = true;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
+ ql_dbg(ql_dbg_tgt, vha, 0xe007,
+ "Sending retry TERM EXCH CTIO7 flags %04xh oxid %04xh attr valid %x\n",
+ le16_to_cpu(ctio->u.status1.flags),
+ le16_to_cpu(ctio->u.status1.ox_id),
+ (mcmd && mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) ? 1 : 0);
- list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
- if (tag == cmd->atio.u.isp24.exchange_addr) {
- cmd->aborted = 1;
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
- return 1;
- }
- }
- spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+ /* Memory Barrier */
+ wmb();
+ if (qpair->reqq_start_iocbs)
+ qpair->reqq_start_iocbs(qpair);
+ else
+ qla2x00_start_iocbs(vha, qpair->req);
+
+ if (mcmd)
+ qlt_build_abts_resp_iocb(mcmd);
+ else
+ qlt_24xx_send_abts_resp(qpair,
+ (struct abts_recv_from_24xx *)entry, FCP_TMF_CMPL, true);
- return 0;
}
/* drop cmds for the given lun
@@ -1970,9 +2067,8 @@ static void qlt_do_tmr_work(struct work_struct *work)
spin_lock_irqsave(mcmd->qpair->qp_lock_ptr, flags);
switch (mcmd->tmr_func) {
case QLA_TGT_ABTS:
- qlt_24xx_send_abts_resp(mcmd->qpair,
- &mcmd->orig_iocb.abts,
- FCP_TMF_REJECTED, false);
+ mcmd->fc_tm_rsp = FCP_TMF_REJECTED;
+ qlt_build_abts_resp_iocb(mcmd);
break;
case QLA_TGT_LUN_RESET:
case QLA_TGT_CLEAR_TS:
@@ -2007,12 +2103,6 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct qla_tgt_mgmt_cmd *mcmd;
struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
- if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
- /* send TASK_ABORT response immediately */
- qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false);
- return 0;
- }
-
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
vha->vp_idx, abts->exchange_addr_to_abort);
@@ -2025,7 +2115,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
return -ENOMEM;
}
memset(mcmd, 0, sizeof(*mcmd));
-
+ mcmd->cmd_type = TYPE_TGT_TMCMD;
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
mcmd->reset_count = ha->base_qpair->chip_reset;
@@ -2047,6 +2137,8 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
if (abort_cmd && abort_cmd->qpair) {
mcmd->qpair = abort_cmd->qpair;
mcmd->se_cmd.cpuid = abort_cmd->se_cmd.cpuid;
+ mcmd->abort_io_attr = abort_cmd->atio.u.isp24.attr;
+ mcmd->flags = QLA24XX_MGMT_ABORT_IO_ATTR_VALID;
}
}
@@ -2264,6 +2356,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct qla_qpair *qpair = mcmd->qpair;
+ bool free_mcmd = true;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
"TM response mcmd (%p) status %#x state %#x",
@@ -2302,10 +2395,10 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
&mcmd->orig_iocb.imm_ntfy, 0, 0, 0, 0, 0, 0);
}
} else {
- if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
- qlt_24xx_send_abts_resp(qpair, &mcmd->orig_iocb.abts,
- mcmd->fc_tm_rsp, false);
- else
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX) {
+ qlt_build_abts_resp_iocb(mcmd);
+ free_mcmd = false;
+ } else
qlt_24xx_send_task_mgmt_ctio(qpair, mcmd,
mcmd->fc_tm_rsp);
}
@@ -2317,7 +2410,9 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
* descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
* qlt_xmit_tm_rsp() returns here..
*/
- ha->tgt.tgt_ops->free_mcmd(mcmd);
+ if (free_mcmd)
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+
spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
}
EXPORT_SYMBOL(qlt_xmit_tm_rsp);
@@ -2330,7 +2425,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
BUG_ON(cmd->sg_cnt == 0);
prm->sg = (struct scatterlist *)cmd->sg;
- prm->seg_cnt = pci_map_sg(cmd->qpair->pdev, cmd->sg,
+ prm->seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev, cmd->sg,
cmd->sg_cnt, cmd->dma_data_direction);
if (unlikely(prm->seg_cnt == 0))
goto out_err;
@@ -2357,7 +2452,7 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
if (cmd->prot_sg_cnt) {
prm->prot_sg = cmd->prot_sg;
- prm->prot_seg_cnt = pci_map_sg(cmd->qpair->pdev,
+ prm->prot_seg_cnt = dma_map_sg(&cmd->qpair->pdev->dev,
cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (unlikely(prm->prot_seg_cnt == 0))
@@ -2392,12 +2487,12 @@ static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
qpair = cmd->qpair;
- pci_unmap_sg(qpair->pdev, cmd->sg, cmd->sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->sg, cmd->sg_cnt,
cmd->dma_data_direction);
cmd->sg_mapped = 0;
if (cmd->prot_sg_cnt)
- pci_unmap_sg(qpair->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+ dma_unmap_sg(&qpair->pdev->dev, cmd->prot_sg, cmd->prot_sg_cnt,
cmd->dma_data_direction);
if (!cmd->ctx)
@@ -3289,7 +3384,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
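+	/* cmd_lock serializes cmd_sent_to_fw against the release path. */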
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3367,7 +3465,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
qlt_load_data_segments(&prm);
cmd->state = QLA_TGT_STATE_NEED_DATA;
+ spin_lock(&cmd->cmd_lock);
cmd->cmd_sent_to_fw = 1;
+ spin_unlock(&cmd->cmd_lock);
+ cmd->ctio_flags = le16_to_cpu(pkt->u.status0.flags);
/* Memory Barrier */
wmb();
@@ -3825,10 +3926,10 @@ static int qlt_term_ctio_exchange(struct qla_qpair *qpair, void *ctio,
/* ha->hardware_lock supposed to be held on entry */
-static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
+static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
struct rsp_que *rsp, uint32_t handle, void *ctio)
{
- struct qla_tgt_cmd *cmd = NULL;
+ void *cmd = NULL;
struct req_que *req;
int qid = GET_QID(handle);
uint32_t h = handle & ~QLA_TGT_HANDLE_MASK;
@@ -3857,7 +3958,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
- cmd = (struct qla_tgt_cmd *)req->outstanding_cmds[h];
+ cmd = (void *) req->outstanding_cmds[h];
if (unlikely(cmd == NULL)) {
ql_dbg(ql_dbg_async, vha, 0xe053,
"qla_target(%d): Suspicious: unable to find the command with handle %x req->id %d rsp->id %d\n",
@@ -3930,7 +4031,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
return;
}
- cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
+ cmd = (struct qla_tgt_cmd *)qlt_ctio_to_cmd(vha, rsp, handle, ctio);
if (cmd == NULL)
return;
@@ -3941,12 +4042,20 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
if (unlikely(status != CTIO_SUCCESS)) {
switch (status & 0xFFFF) {
+ case CTIO_INVALID_RX_ID:
+ if (printk_ratelimit())
+ dev_info(&vha->hw->pdev->dev,
+ "qla_target(%d): CTIO with INVALID_RX_ID ATIO attr %x CTIO Flags %x|%x\n",
+ vha->vp_idx, cmd->atio.u.isp24.attr,
+ ((cmd->ctio_flags >> 9) & 0xf),
+ cmd->ctio_flags);
+
+ break;
case CTIO_LIP_RESET:
case CTIO_TARGET_RESET:
case CTIO_ABORTED:
/* driver request abort via Terminate exchange */
case CTIO_TIMEOUT:
- case CTIO_INVALID_RX_ID:
/* They are OK */
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
"qla_target(%d): CTIO with "
@@ -3973,7 +4082,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
* Session is already logged out, but we need
* to notify initiator, who's not aware of this
*/
- cmd->sess->logout_on_delete = 0;
cmd->sess->send_els_logo = 1;
ql_dbg(ql_dbg_disc, vha, 0x20f8,
"%s %d %8phC post del sess\n",
@@ -4711,6 +4819,12 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
sess = qlt_find_sess_invalidate_other(vha, wwn,
port_id, loop_id, &conflict_sess);
spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d Term INOT due to WWN=0 lid=%d, NportID %06X ",
+ __func__, __LINE__, loop_id, port_id.b24);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
}
if (IS_SW_RESV_ADDR(port_id)) {
@@ -4752,6 +4866,32 @@ static int qlt_handle_login(struct scsi_qla_host *vha,
goto out;
}
+ if (sess->disc_state == DSC_UPD_FCPORT) {
+ u16 sec;
+
+ /*
+ * Remote port registration is still going on from
+ * previous login. Allow it to finish before we
+ * accept the new login.
+ */
+ sess->next_disc_state = DSC_DELETE_PEND;
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration) / 1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %8phC - Slow Rport registration (%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+
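+		/* No conflicting session holds the PLOGI ack; free it now. */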
+ if (!conflict_sess)
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ goto out;
+ }
+
qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
sess->d_id = port_id;
sess->login_gen++;
@@ -4910,6 +5050,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
if (sess != NULL) {
bool delete = false;
+ int sec;
spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
switch (sess->fw_login_state) {
case DSC_LS_PLOGI_PEND:
@@ -4922,9 +5063,24 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
switch (sess->disc_state) {
+ case DSC_UPD_FCPORT:
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock,
+ flags);
+
+ sec = jiffies_to_msecs(jiffies -
+ sess->jiffies_at_registration)/1000;
+ if (sess->sec_since_registration < sec && sec &&
+ !(sec % 5)) {
+ sess->sec_since_registration = sec;
+ ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
+				    "%s %8phC : Slow Rport registration (%d Sec)\n",
+ __func__, sess->port_name, sec);
+ }
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ return 0;
+
case DSC_LOGIN_PEND:
case DSC_GPDB:
- case DSC_UPD_FCPORT:
case DSC_LOGIN_COMPLETE:
case DSC_ADISC:
delete = false;
@@ -5608,6 +5764,101 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
tgt->atio_irq_cmd_count--;
}
+/*
+ * The qpair lock is assumed to be held.
+ * rc = 0 : send terminate & ABTS response
+ * rc != 0: do not send terminate & ABTS response
+ */
+static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
+ struct qla_qpair *qpair, struct abts_resp_from_24xx_fw *entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rc = 0;
+
+ /*
+	 * Detect an unresolved exchange. If the same ABTS is unable
+	 * to terminate an existing command and keeps looping between
+	 * FW & driver, force a FW dump. Within a single jiffy we
+	 * should see multiple loops.
+ */
+ if (qpair->retry_term_exchg_addr == entry->exchange_addr_to_abort &&
+ qpair->retry_term_jiff == jiffies) {
+ /* found existing exchange */
+ qpair->retry_term_cnt++;
+ if (qpair->retry_term_cnt >= 5) {
+ rc = EIO;
+ qpair->retry_term_cnt = 0;
+ ql_log(ql_log_warn, vha, 0xffff,
+			    "Unable to send ABTS Response. Dumping firmware.\n");
+ ql_dump_buffer(ql_dbg_tgt_mgt + ql_dbg_buffer,
+ vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
+
+ if (qpair == ha->base_qpair)
+ ha->isp_ops->fw_dump(vha, 1);
+ else
+ ha->isp_ops->fw_dump(vha, 0);
+
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ } else if (qpair->retry_term_jiff != jiffies) {
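+		/* New jiffy: start tracking this exchange address afresh. */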
+ qpair->retry_term_exchg_addr = entry->exchange_addr_to_abort;
+ qpair->retry_term_cnt = 0;
+ qpair->retry_term_jiff = jiffies;
+ }
+
+ return rc;
+}
+
+
+static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, response_t *pkt)
+{
+ struct abts_resp_from_24xx_fw *entry =
+ (struct abts_resp_from_24xx_fw *)pkt;
+ u32 h = pkt->handle & ~QLA_TGT_HANDLE_MASK;
+ struct qla_tgt_mgmt_cmd *mcmd;
+ struct qla_hw_data *ha = vha->hw;
+
+ mcmd = (struct qla_tgt_mgmt_cmd *)qlt_ctio_to_cmd(vha, rsp,
+ pkt->handle, pkt);
+ if (mcmd == NULL && h != QLA_TGT_SKIP_HANDLE) {
+ ql_dbg(ql_dbg_async, vha, 0xe064,
+ "qla_target(%d): ABTS Comp without mcmd\n",
+ vha->vp_idx);
+ return;
+ }
+
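+	/* A SKIP_HANDLE completion has no mcmd; keep the caller's vha. */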
+ if (mcmd)
+ vha = mcmd->vha;
+ vha->vha_tgt.qla_tgt->abts_resp_expected--;
+
+ ql_dbg(ql_dbg_tgt, vha, 0xe038,
+ "ABTS_RESP_24XX: compl_status %x\n",
+ entry->compl_status);
+
+ if (le16_to_cpu(entry->compl_status) != ABTS_RESP_COMPL_SUCCESS) {
+ if ((entry->error_subcode1 == 0x1E) &&
+ (entry->error_subcode2 == 0)) {
+ if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ return;
+ }
+ qlt_24xx_retry_term_exchange(vha, rsp->qpair,
+ pkt, mcmd);
+ } else {
+ ql_dbg(ql_dbg_tgt, vha, 0xe063,
+ "qla_target(%d): ABTS_RESP_24XX failed %x (subcode %x:%x)",
+ vha->vp_idx, entry->compl_status,
+ entry->error_subcode1,
+ entry->error_subcode2);
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+ } else {
+ ha->tgt.tgt_ops->free_mcmd(mcmd);
+ }
+}
+
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_response_pkt(struct scsi_qla_host *vha,
@@ -5740,41 +5991,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha,
case ABTS_RESP_24XX:
if (tgt->abts_resp_expected > 0) {
- struct abts_resp_from_24xx_fw *entry =
- (struct abts_resp_from_24xx_fw *)pkt;
- ql_dbg(ql_dbg_tgt, vha, 0xe038,
- "ABTS_RESP_24XX: compl_status %x\n",
- entry->compl_status);
- tgt->abts_resp_expected--;
- if (le16_to_cpu(entry->compl_status) !=
- ABTS_RESP_COMPL_SUCCESS) {
- if ((entry->error_subcode1 == 0x1E) &&
- (entry->error_subcode2 == 0)) {
- /*
- * We've got a race here: aborted
- * exchange not terminated, i.e.
- * response for the aborted command was
- * sent between the abort request was
- * received and processed.
- * Unfortunately, the firmware has a
- * silly requirement that all aborted
- * exchanges must be explicitely
- * terminated, otherwise it refuses to
- * send responses for the abort
- * requests. So, we have to
- * (re)terminate the exchange and retry
- * the abort response.
- */
- qlt_24xx_retry_term_exchange(vha,
- entry);
- } else
- ql_dbg(ql_dbg_tgt, vha, 0xe063,
- "qla_target(%d): ABTS_RESP_24XX "
- "failed %x (subcode %x:%x)",
- vha->vp_idx, entry->compl_status,
- entry->error_subcode1,
- entry->error_subcode2);
- }
+ qlt_handle_abts_completion(vha, rsp, pkt);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe064,
"qla_target(%d): Unexpected ABTS_RESP_24XX "
@@ -5964,10 +6181,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
case MODE_DUAL:
if (newfcport) {
if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
- ql_dbg(ql_dbg_disc, vha, 0x20fe,
- "%s %d %8phC post upd_fcport fcp_cnt %d\n",
- __func__, __LINE__, fcport->port_name, vha->fcport_count);
- qla24xx_post_upd_fcport_work(vha, fcport);
+ qla24xx_sched_upd_fcport(fcport);
} else {
ql_dbg(ql_dbg_disc, vha, 0x20ff,
"%s %d %8phC post gpsc fcp_cnt %d\n",
@@ -6413,6 +6627,9 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
if (!(host->hostt->supported_mode & MODE_TARGET))
continue;
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ continue;
+
spin_lock_irqsave(&ha->hardware_lock, flags);
if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
@@ -6475,15 +6692,15 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
EXPORT_SYMBOL(qlt_lport_deregister);
/* Must be called under HW lock */
-static void qlt_set_mode(struct scsi_qla_host *vha)
+void qlt_set_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
case QLA2XXX_INI_MODE_EXCLUSIVE:
vha->host->active_mode = MODE_TARGET;
break;
case QLA2XXX_INI_MODE_ENABLED:
- vha->host->active_mode = MODE_UNKNOWN;
+ vha->host->active_mode = MODE_INITIATOR;
break;
case QLA2XXX_INI_MODE_DUAL:
vha->host->active_mode = MODE_DUAL;
@@ -6496,7 +6713,7 @@ static void qlt_set_mode(struct scsi_qla_host *vha)
/* Must be called under HW lock */
static void qlt_clear_mode(struct scsi_qla_host *vha)
{
- switch (ql2x_ini_mode) {
+ switch (vha->qlini_mode) {
case QLA2XXX_INI_MODE_DISABLED:
vha->host->active_mode = MODE_UNKNOWN;
break;
@@ -6532,12 +6749,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
dump_stack();
return;
}
+ if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
+ return;
spin_lock_irqsave(&ha->hardware_lock, flags);
tgt->tgt_stopped = 0;
qlt_set_mode(vha);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ mutex_lock(&ha->optrom_mutex);
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+ "%s.\n", __func__);
if (vha->vp_idx) {
qla24xx_disable_vp(vha);
qla24xx_enable_vp(vha);
@@ -6546,6 +6768,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
qla2xxx_wake_dpc(base_vha);
qla2x00_wait_for_hba_online(base_vha);
}
+ mutex_unlock(&ha->optrom_mutex);
}
EXPORT_SYMBOL(qlt_enable_vha);
@@ -6767,7 +6990,7 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6846,14 +7069,6 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
}
void
@@ -6881,7 +7096,7 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
if (qla_tgt_mode_enabled(vha))
nv->exchange_count = cpu_to_le16(0xFFFF);
else /* dual */
- nv->exchange_count = cpu_to_le16(ql2xexchoffld);
+ nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
/* Enable target mode */
nv->firmware_options_1 |= cpu_to_le32(BIT_4);
@@ -6957,15 +7172,6 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
}
-
- /* disable ZIO at start time. */
- if (!vha->flags.init_done) {
- uint32_t tmp;
- tmp = le32_to_cpu(icb->firmware_options_2);
- tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
- icb->firmware_options_2 = cpu_to_le32(tmp);
- }
-
}
void
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 199d3ba1916d..721da593b1bc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -900,6 +900,7 @@ struct qla_tgt_cmd {
unsigned int aborted:1;
unsigned int data_work:1;
unsigned int data_work_free:1;
+ unsigned int released:1;
struct scatterlist *sg; /* cmd data buffer SG vector */
int sg_cnt; /* SG segments count */
@@ -908,6 +909,7 @@ struct qla_tgt_cmd {
u64 unpacked_lun;
enum dma_data_direction dma_data_direction;
+ uint16_t ctio_flags;
uint16_t vp_idx;
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
@@ -956,16 +958,20 @@ struct qla_tgt_sess_work_param {
};
struct qla_tgt_mgmt_cmd {
+ uint8_t cmd_type;
+ uint8_t pad[3];
uint16_t tmr_func;
uint8_t fc_tm_rsp;
+ uint8_t abort_io_attr;
struct fc_port *sess;
struct qla_qpair *qpair;
struct scsi_qla_host *vha;
struct se_cmd se_cmd;
struct work_struct free_work;
unsigned int flags;
+#define QLA24XX_MGMT_SEND_NACK BIT_0
+#define QLA24XX_MGMT_ABORT_IO_ATTR_VALID BIT_1
uint32_t reset_count;
-#define QLA24XX_MGMT_SEND_NACK 1
struct work_struct work;
uint64_t unpacked_lun;
union {
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3850b28518e5..12bafff71a1a 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.00.00.08-k"
+#define QLA2XXX_VERSION "10.00.00.11-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e03d12a5f986..65053c066680 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -277,14 +277,25 @@ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
static void tcm_qla2xxx_complete_free(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ bool released = false;
+ unsigned long flags;
cmd->cmd_in_wq = 0;
WARN_ON(cmd->trc_flags & TRC_CMD_FREE);
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
cmd->qpair->tgt_counters.qla_core_ret_sta_ctio++;
cmd->trc_flags |= TRC_CMD_FREE;
- transport_generic_free_cmd(&cmd->se_cmd, 0);
+ cmd->cmd_sent_to_fw = 0;
+ if (cmd->released)
+ released = true;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ if (released)
+ qlt_free_cmd(cmd);
+ else
+ transport_generic_free_cmd(&cmd->se_cmd, 0);
}
/*
@@ -325,6 +336,7 @@ static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd;
+ unsigned long flags;
if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
@@ -332,9 +344,16 @@ static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
qlt_free_mcmd(mcmd);
return;
}
-
cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
- qlt_free_cmd(cmd);
+
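+	/*
+	 * If the command is still outstanding in the firmware, only mark it
+	 * released; the completion path will call qlt_free_cmd() later.
+	 */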
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->cmd_sent_to_fw) {
+ cmd->released = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ }
}
static void tcm_qla2xxx_release_session(struct kref *kref)
@@ -405,7 +424,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
se_cmd->pi_err = 0;
/*
- * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
+ * qla_target.c:qlt_rdy_to_xfer() will call dma_map_sg() to setup
* the SGL mappings into PCIe memory for incoming FCP WRITE data.
*/
return qlt_rdy_to_xfer(cmd);
@@ -499,6 +518,7 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
@@ -506,6 +526,25 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
*/
cmd->cmd_in_wq = 0;
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_sent_to_fw = 0;
+
+ if (cmd->released) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_free_cmd(cmd);
+ return;
+ }
+
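+	/* An aborted command is handed to free_cmd rather than passed up. */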
+ cmd->data_work = 1;
+ if (cmd->aborted) {
+ cmd->data_work_free = 1;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
cmd->qpair->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -718,10 +757,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
cmd->sg_cnt = 0;
cmd->offset = 0;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
- if (cmd->trc_flags & TRC_XMIT_STATUS) {
- pr_crit("Multiple calls for status = %p.\n", cmd);
- dump_stack();
- }
cmd->trc_flags |= TRC_XMIT_STATUS;
if (se_cmd->data_direction == DMA_FROM_DEVICE) {
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 52b1a0bc93c9..1ef74aa2d00a 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -766,12 +766,10 @@ int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
while (drvr_wait) {
if (ql4xxx_lock_drvr(a) == 0) {
ssleep(QL4_LOCK_DRVR_SLEEP);
- if (drvr_wait) {
- DEBUG2(printk("scsi%ld: %s: Waiting for "
- "Global Init Semaphore(%d)...\n",
- a->host_no,
- __func__, drvr_wait));
- }
+ DEBUG2(printk("scsi%ld: %s: Waiting for "
+ "Global Init Semaphore(%d)...\n",
+ a->host_no,
+ __func__, drvr_wait));
drvr_wait -= QL4_LOCK_DRVR_SLEEP;
} else {
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349dce57..051164f755a4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3382,7 +3382,7 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
if (task->data_count) {
task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data,
task->data_count,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -3437,7 +3437,7 @@ static void qla4xxx_task_cleanup(struct iscsi_task *task)
if (task->data_count) {
dma_unmap_single(&ha->pdev->dev, task_data->data_dma,
- task->data_count, PCI_DMA_TODEVICE);
+ task->data_count, DMA_TO_DEVICE);
}
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n",
@@ -9020,25 +9020,16 @@ static void qla4xxx_remove_adapter(struct pci_dev *pdev)
/**
* qla4xxx_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
- *
- * At exit, the @ha's flags.enable_64bit_addressing set to indicated
- * supported addressing method.
*/
static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
{
- int retval;
-
/* Update our PCI device dma_mask for full 64 bit mask */
- if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
- if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
- dev_dbg(&ha->pdev->dev,
- "Failed to set 64 bit PCI consistent mask; "
- "using 32 bit.\n");
- retval = pci_set_consistent_dma_mask(ha->pdev,
- DMA_BIT_MASK(32));
- }
- } else
- retval = pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ if (dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(64))) {
+ dev_dbg(&ha->pdev->dev,
+ "Failed to set 64 bit PCI consistent mask; "
+ "using 32 bit.\n");
+ dma_set_mask_and_coherent(&ha->pdev->dev, DMA_BIT_MASK(32));
+ }
}
static int qla4xxx_slave_alloc(struct scsi_device *sdev)
@@ -9824,7 +9815,6 @@ qla4xxx_pci_resume(struct pci_dev *pdev)
__func__);
}
- pci_cleanup_aer_uncorrect_error_status(pdev);
clear_bit(AF_EEH_BUSY, &ha->flags);
}
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index ea88906d2cc5..5c3d6e1e0145 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,8 +63,7 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
* emulated RAID devices, so start with SCSI */
struct raid_internal *i = ac_to_raid_internal(cont);
-#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
- if (scsi_is_sdev_device(dev)) {
+ if (IS_ENABLED(CONFIG_SCSI) && scsi_is_sdev_device(dev)) {
struct scsi_device *sdev = to_scsi_device(dev);
if (i->f->cookie != sdev->host->hostt)
@@ -72,7 +71,6 @@ static int raid_match(struct attribute_container *cont, struct device *dev)
return i->f->is_raid(dev);
}
-#endif
/* FIXME: look at other subsystems too */
return 0;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b7a8fdfeb2f4..c736d61b1648 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -338,9 +338,6 @@ int scsi_block_when_processing_errors(struct scsi_device *sdev)
online = scsi_device_online(sdev);
- SCSI_LOG_ERROR_RECOVERY(5, sdev_printk(KERN_INFO, sdev,
- "%s: rtn: %d\n", __func__, online));
-
return online;
}
EXPORT_SYMBOL(scsi_block_when_processing_errors);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62348412ed1b..c7fccbb8f554 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1201,8 +1201,8 @@ int scsi_init_io(struct scsi_cmnd *cmd)
count = blk_rq_map_integrity_sg(rq->q, rq->bio,
prot_sdb->table.sgl);
- BUG_ON(unlikely(count > ivecs));
- BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));
+ BUG_ON(count > ivecs);
+ BUG_ON(count > queue_max_integrity_segments(rq->q));
cmd->prot_sdb = prot_sdb;
cmd->prot_sdb->table.nents = count;
@@ -2753,6 +2753,7 @@ scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
switch (oldstate) {
case SDEV_RUNNING:
case SDEV_CREATED_BLOCK:
+ case SDEV_OFFLINE:
break;
default:
goto illegal;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0cd16e80b019..0a165b2b3e81 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -612,7 +612,6 @@ sas_phy_protocol_attr(identify.target_port_protocols,
sas_phy_simple_attr(identify.sas_address, sas_address, "0x%016llx\n",
unsigned long long);
sas_phy_simple_attr(identify.phy_identifier, phy_identifier, "%d\n", u8);
-//sas_phy_simple_attr(port_identifier, port_identifier, "%d\n", int);
sas_phy_linkspeed_attr(negotiated_linkrate);
sas_phy_linkspeed_attr(minimum_linkrate_hw);
sas_phy_linkspeed_rw_attr(minimum_linkrate);
@@ -1802,7 +1801,6 @@ sas_attach_transport(struct sas_function_template *ft)
SETUP_PHY_ATTRIBUTE(device_type);
SETUP_PHY_ATTRIBUTE(sas_address);
SETUP_PHY_ATTRIBUTE(phy_identifier);
- //SETUP_PHY_ATTRIBUTE(port_identifier);
SETUP_PHY_ATTRIBUTE(negotiated_linkrate);
SETUP_PHY_ATTRIBUTE(minimum_linkrate_hw);
SETUP_PHY_ATTRIBUTE_RW(minimum_linkrate);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b762d0fd773c..3bb2b3351e35 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1272,8 +1272,6 @@ static int sd_init_command(struct scsi_cmnd *cmd)
case REQ_OP_READ:
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
- case REQ_OP_ZONE_REPORT:
- return sd_zbc_setup_report_cmnd(cmd);
case REQ_OP_ZONE_RESET:
return sd_zbc_setup_reset_cmnd(cmd);
default:
@@ -1802,6 +1800,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.revalidate_disk = sd_revalidate_disk,
.unlock_native_capacity = sd_unlock_native_capacity,
+ .report_zones = sd_zbc_report_zones,
.pr_ops = &sd_pr_ops,
};
@@ -1953,16 +1952,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
- case REQ_OP_ZONE_REPORT:
- if (!result) {
- good_bytes = scsi_bufflen(SCpnt)
- - scsi_get_resid(SCpnt);
- scsi_set_resid(SCpnt, 0);
- } else {
- good_bytes = 0;
- scsi_set_resid(SCpnt, blk_rq_bytes(req));
- }
- break;
default:
/*
* In case of bogus fw or device, we could end up having
@@ -3425,8 +3414,6 @@ static int sd_remove(struct device *dev)
del_gendisk(sdkp->disk);
sd_shutdown(dev);
- sd_zbc_remove(sdkp);
-
free_opal_dev(sdkp->opal_dev);
blk_register_region(devt, SD_MINORS, NULL,
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a7d4f50b67d4..1d63f3a23ffb 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -76,7 +76,6 @@ struct scsi_disk {
#ifdef CONFIG_BLK_DEV_ZONED
u32 nr_zones;
u32 zone_blocks;
- u32 zone_shift;
u32 zones_optimal_open;
u32 zones_optimal_nonseq;
u32 zones_max_open;
@@ -271,12 +270,13 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)
#ifdef CONFIG_BLK_DEV_ZONED
extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
-extern void sd_zbc_remove(struct scsi_disk *sdkp);
extern void sd_zbc_print_zones(struct scsi_disk *sdkp);
-extern int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd);
extern int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd);
extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
struct scsi_sense_hdr *sshdr);
+extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
#else /* CONFIG_BLK_DEV_ZONED */
@@ -286,15 +286,8 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
return 0;
}
-static inline void sd_zbc_remove(struct scsi_disk *sdkp) {}
-
static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {}
-static inline int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
-{
- return BLKPREP_INVALID;
-}
-
static inline int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
{
return BLKPREP_INVALID;
@@ -304,6 +297,8 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd,
unsigned int good_bytes,
struct scsi_sense_hdr *sshdr) {}
+#define sd_zbc_report_zones NULL
+
#endif /* CONFIG_BLK_DEV_ZONED */
#endif /* _SCSI_DISK_H */
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 412c1787dcd9..e06c48c866e4 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -62,16 +62,22 @@ static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
}
/**
- * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
+ * sd_zbc_do_report_zones - Issue a REPORT ZONES scsi command.
* @sdkp: The target disk
* @buf: Buffer to use for the reply
* @buflen: the buffer size
* @lba: Start LBA of the report
+ * @partial: Do partial report
*
* For internal use during device validation.
+ * Using partial=true can significantly speed up execution of a report zones
+ * command because the disk does not have to count all possible matching
+ * zones and will only report the number of zones that fit in the command
+ * reply buffer.
*/
-static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, sector_t lba)
+static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
+ unsigned int buflen, sector_t lba,
+ bool partial)
{
struct scsi_device *sdp = sdkp->device;
const int timeout = sdp->request_queue->rq_timeout;
@@ -85,6 +91,8 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
cmd[1] = ZI_REPORT_ZONES;
put_unaligned_be64(lba, &cmd[2]);
put_unaligned_be32(buflen, &cmd[10]);
+ if (partial)
+ cmd[14] = ZBC_REPORT_ZONE_PARTIAL;
memset(buf, 0, buflen);
result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
@@ -110,108 +118,56 @@ static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
}
/**
- * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
- * @cmd: The command to setup
+ * sd_zbc_report_zones - Disk report zones operation.
+ * @disk: The target disk
+ * @sector: Start 512B sector of the report
+ * @zones: Array of zone descriptors
+ * @nr_zones: Number of descriptors in the array
+ * @gfp_mask: Memory allocation mask
*
- * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
+ * Execute a report zones command on the target disk.
*/
-int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
+int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask)
{
- struct request *rq = cmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- sector_t lba, sector = blk_rq_pos(rq);
- unsigned int nr_bytes = blk_rq_bytes(rq);
- int ret;
-
- WARN_ON(nr_bytes == 0);
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ unsigned int i, buflen, nrz = *nr_zones;
+ unsigned char *buf;
+ size_t offset = 0;
+ int ret = 0;
if (!sd_is_zoned(sdkp))
/* Not a zoned device */
- return BLKPREP_KILL;
-
- ret = scsi_init_io(cmd);
- if (ret != BLKPREP_OK)
- return ret;
-
- cmd->cmd_len = 16;
- memset(cmd->cmnd, 0, cmd->cmd_len);
- cmd->cmnd[0] = ZBC_IN;
- cmd->cmnd[1] = ZI_REPORT_ZONES;
- lba = sectors_to_logical(sdkp->device, sector);
- put_unaligned_be64(lba, &cmd->cmnd[2]);
- put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
- /* Do partial report for speeding things up */
- cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
-
- cmd->sc_data_direction = DMA_FROM_DEVICE;
- cmd->sdb.length = nr_bytes;
- cmd->transfersize = sdkp->device->sector_size;
- cmd->allowed = 0;
-
- return BLKPREP_OK;
-}
-
-/**
- * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
- * @scmd: The completed report zones command
- * @good_bytes: reply size in bytes
- *
- * Convert all reported zone descriptors to struct blk_zone. The conversion
- * is done in-place, directly in the request specified sg buffer.
- */
-static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
- unsigned int good_bytes)
-{
- struct request *rq = scmd->request;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
- struct sg_mapping_iter miter;
- struct blk_zone_report_hdr hdr;
- struct blk_zone zone;
- unsigned int offset, bytes = 0;
- unsigned long flags;
- u8 *buf;
-
- if (good_bytes < 64)
- return;
+ return -EOPNOTSUPP;
- memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
-
- sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
- SG_MITER_TO_SG | SG_MITER_ATOMIC);
-
- local_irq_save(flags);
- while (sg_miter_next(&miter) && bytes < good_bytes) {
+ /*
+ * Get a reply buffer for the number of requested zones plus a header.
+ * For ATA, buffers must be aligned to 512B.
+ */
+ buflen = roundup((nrz + 1) * 64, 512);
+ buf = kmalloc(buflen, gfp_mask);
+ if (!buf)
+ return -ENOMEM;
- buf = miter.addr;
- offset = 0;
+ ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
+ sectors_to_logical(sdkp->device, sector), true);
+ if (ret)
+ goto out_free_buf;
- if (bytes == 0) {
- /* Set the report header */
- hdr.nr_zones = min_t(unsigned int,
- (good_bytes - 64) / 64,
- get_unaligned_be32(&buf[0]) / 64);
- memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
- offset += 64;
- bytes += 64;
- }
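+	/* Bytes 0-3 of the reply give the zone list length (64B per zone). */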
+ nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64);
+ for (i = 0; i < nrz; i++) {
+ offset += 64;
+ sd_zbc_parse_report(sdkp, buf + offset, zones);
+ zones++;
+ }
- /* Parse zone descriptors */
- while (offset < miter.length && hdr.nr_zones) {
- WARN_ON(offset > miter.length);
- buf = miter.addr + offset;
- sd_zbc_parse_report(sdkp, buf, &zone);
- memcpy(buf, &zone, sizeof(struct blk_zone));
- offset += 64;
- bytes += 64;
- hdr.nr_zones--;
- }
+ *nr_zones = nrz;
- if (!hdr.nr_zones)
- break;
+out_free_buf:
+ kfree(buf);
- }
- sg_miter_stop(&miter);
- local_irq_restore(flags);
+ return ret;
}
/**
@@ -294,30 +250,23 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE_SAME:
break;
-
- case REQ_OP_ZONE_REPORT:
-
- if (!result)
- sd_zbc_report_zones_complete(cmd, good_bytes);
- break;
-
}
}
/**
- * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
+ * sd_zbc_check_zoned_characteristics - Check zoned block device characteristics
* @sdkp: Target disk
* @buf: Buffer where to store the VPD page data
*
- * Read VPD page B6.
+ * Read VPD page B6, get information and check that reads are unconstrained.
*/
-static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
- unsigned char *buf)
+static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp,
+ unsigned char *buf)
{
if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
sd_printk(KERN_NOTICE, sdkp,
- "Unconstrained-read check failed\n");
+ "Read zoned characteristics VPD page failed\n");
return -ENODEV;
}
@@ -335,43 +284,17 @@ static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
}
- return 0;
-}
-
-/**
- * sd_zbc_check_capacity - Check reported capacity.
- * @sdkp: Target disk
- * @buf: Buffer to use for commands
- *
- * ZBC drive may report only the capacity of the first conventional zones at
- * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
- * Check this here. If the disk reported only its conventional zones capacity,
- * get the total capacity by doing a report zones.
- */
-static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
-{
- sector_t lba;
- int ret;
-
- if (sdkp->rc_basis != 0)
- return 0;
-
- /* Do a report zone to get the maximum LBA to check capacity */
- ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
- if (ret)
- return ret;
-
- /* The max_lba field is the capacity of this device */
- lba = get_unaligned_be64(&buf[8]);
- if (lba + 1 == sdkp->capacity)
- return 0;
-
- if (sdkp->first_scan)
- sd_printk(KERN_WARNING, sdkp,
- "Changing capacity from %llu to max LBA+1 %llu\n",
- (unsigned long long)sdkp->capacity,
- (unsigned long long)lba + 1);
- sdkp->capacity = lba + 1;
+ /*
+ * Check for unconstrained reads: host-managed devices with
+	 * constrained reads (drives that fail reads past the write pointer)
+ * are not supported.
+ */
+ if (!sdkp->urswrz) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_NOTICE, sdkp,
+				  "constrained-read devices are not supported\n");
+ return -ENODEV;
+ }
return 0;
}
@@ -379,24 +302,27 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
#define SD_ZBC_BUF_SIZE 131072U
/**
- * sd_zbc_check_zone_size - Check the device zone sizes
+ * sd_zbc_check_zones - Check the device capacity and zone sizes
* @sdkp: Target disk
*
- * Check that all zones of the device are equal. The last zone can however
- * be smaller. The zone size must also be a power of two number of LBAs.
+ * Check that the device capacity as reported by READ CAPACITY matches the
+ * max_lba value (plus one) of the report zones command reply. Also check that
+ * all zones of the device have an equal size, only allowing the last zone of
+ * the disk to have a smaller size (runt zone). The zone size must also be a
+ * power of two.
*
* Returns the zone size in number of blocks upon success or an error code
* upon failure.
*/
-static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
+static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks)
{
u64 zone_blocks = 0;
- sector_t block = 0;
+ sector_t max_lba, block = 0;
unsigned char *buf;
unsigned char *rec;
unsigned int buf_len;
unsigned int list_length;
- s64 ret;
+ int ret;
u8 same;
/* Get a buffer */
@@ -404,11 +330,28 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
if (!buf)
return -ENOMEM;
- /* Do a report zone to get the same field */
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
+ /* Do a report zone to get max_lba and the same field */
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0, false);
if (ret)
goto out_free;
+ if (sdkp->rc_basis == 0) {
+ /* The max_lba field is the capacity of this device */
+ max_lba = get_unaligned_be64(&buf[8]);
+ if (sdkp->capacity != max_lba + 1) {
+ if (sdkp->first_scan)
+ sd_printk(KERN_WARNING, sdkp,
+ "Changing capacity from %llu to max LBA+1 %llu\n",
+ (unsigned long long)sdkp->capacity,
+ (unsigned long long)max_lba + 1);
+ sdkp->capacity = max_lba + 1;
+ }
+ }
+
+ /*
+ * Check same field: for any value other than 0, we know that all zones
+ * have the same size.
+ */
same = buf[4] & 0x0f;
if (same > 0) {
rec = &buf[64];
@@ -445,8 +388,8 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
}
if (block < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf,
- SD_ZBC_BUF_SIZE, block);
+ ret = sd_zbc_do_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
+ block, true);
if (ret)
goto out_free;
}
@@ -470,9 +413,10 @@ out:
if (sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp,
"Zone size too large\n");
- ret = -ENODEV;
+ ret = -EFBIG;
} else {
- ret = zone_blocks;
+ *zblocks = zone_blocks;
+ ret = 0;
}
out_free:
@@ -481,191 +425,11 @@ out_free:
return ret;
}
-/**
- * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
- * @nr_zones: Number of zones to allocate space for.
- * @numa_node: NUMA node to allocate the memory from.
- */
-static inline unsigned long *
-sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
-{
- return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
- GFP_KERNEL, numa_node);
-}
-
-/**
- * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
- * @sdkp: disk used
- * @buf: report reply buffer
- * @buflen: length of @buf
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @seq_zones_bitmap: bitmap of sequential zones to set
- *
- * Parse reported zone descriptors in @buf to identify sequential zones and
- * set the reported zone bit in @seq_zones_bitmap accordingly.
- * Since read-only and offline zones cannot be written, do not
- * mark them as sequential in the bitmap.
- * Return the LBA after the last zone reported.
- */
-static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
- unsigned int buflen, u32 zone_shift,
- unsigned long *seq_zones_bitmap)
-{
- sector_t lba, next_lba = sdkp->capacity;
- unsigned int buf_len, list_length;
- unsigned char *rec;
- u8 type, cond;
-
- list_length = get_unaligned_be32(&buf[0]) + 64;
- buf_len = min(list_length, buflen);
- rec = buf + 64;
-
- while (rec < buf + buf_len) {
- type = rec[0] & 0x0f;
- cond = (rec[1] >> 4) & 0xf;
- lba = get_unaligned_be64(&rec[16]);
- if (type != ZBC_ZONE_TYPE_CONV &&
- cond != ZBC_ZONE_COND_READONLY &&
- cond != ZBC_ZONE_COND_OFFLINE)
- set_bit(lba >> zone_shift, seq_zones_bitmap);
- next_lba = lba + get_unaligned_be64(&rec[8]);
- rec += 64;
- }
-
- return next_lba;
-}
-
-/**
- * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
- * @sdkp: target disk
- * @zone_shift: logarithm base 2 of the number of blocks in a zone
- * @nr_zones: number of zones to set up a seq zone bitmap for
- *
- * Allocate a zone bitmap and initialize it by identifying sequential zones.
- */
-static unsigned long *
-sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
- u32 nr_zones)
-{
- struct request_queue *q = sdkp->disk->queue;
- unsigned long *seq_zones_bitmap;
- sector_t lba = 0;
- unsigned char *buf;
- int ret = -ENOMEM;
-
- seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
- if (!seq_zones_bitmap)
- return ERR_PTR(-ENOMEM);
-
- buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- goto out;
-
- while (lba < sdkp->capacity) {
- ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
- if (ret)
- goto out;
- lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
- zone_shift, seq_zones_bitmap);
- }
-
- if (lba != sdkp->capacity) {
- /* Something went wrong */
- ret = -EIO;
- }
-
-out:
- kfree(buf);
- if (ret) {
- kfree(seq_zones_bitmap);
- return ERR_PTR(ret);
- }
- return seq_zones_bitmap;
-}
-
-static void sd_zbc_cleanup(struct scsi_disk *sdkp)
-{
- struct request_queue *q = sdkp->disk->queue;
-
- kfree(q->seq_zones_bitmap);
- q->seq_zones_bitmap = NULL;
-
- kfree(q->seq_zones_wlock);
- q->seq_zones_wlock = NULL;
-
- q->nr_zones = 0;
-}
-
-static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
-{
- struct request_queue *q = sdkp->disk->queue;
- u32 zone_shift = ilog2(zone_blocks);
- u32 nr_zones;
- int ret;
-
- /* chunk_sectors indicates the zone size */
- blk_queue_chunk_sectors(q,
- logical_to_sectors(sdkp->device, zone_blocks));
- nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
-
- /*
- * Initialize the device request queue information if the number
- * of zones changed.
- */
- if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
- unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
- size_t zone_bitmap_size;
-
- if (nr_zones) {
- seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
- q->node);
- if (!seq_zones_wlock) {
- ret = -ENOMEM;
- goto err;
- }
-
- seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
- zone_shift, nr_zones);
- if (IS_ERR(seq_zones_bitmap)) {
- ret = PTR_ERR(seq_zones_bitmap);
- kfree(seq_zones_wlock);
- goto err;
- }
- }
- zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
- sizeof(unsigned long);
- blk_mq_freeze_queue(q);
- if (q->nr_zones != nr_zones) {
- /* READ16/WRITE16 is mandatory for ZBC disks */
- sdkp->device->use_16_for_rw = 1;
- sdkp->device->use_10_for_rw = 0;
-
- sdkp->zone_blocks = zone_blocks;
- sdkp->zone_shift = zone_shift;
- sdkp->nr_zones = nr_zones;
- q->nr_zones = nr_zones;
- swap(q->seq_zones_wlock, seq_zones_wlock);
- swap(q->seq_zones_bitmap, seq_zones_bitmap);
- } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size) != 0) {
- memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
- zone_bitmap_size);
- }
- blk_mq_unfreeze_queue(q);
- kfree(seq_zones_wlock);
- kfree(seq_zones_bitmap);
- }
-
- return 0;
-
-err:
- sd_zbc_cleanup(sdkp);
- return ret;
-}
-
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
{
- int64_t zone_blocks;
+ struct gendisk *disk = sdkp->disk;
+ unsigned int nr_zones;
+ u32 zone_blocks;
int ret;
if (!sd_is_zoned(sdkp))
@@ -675,26 +439,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
*/
return 0;
- /* Get zoned block device characteristics */
- ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
- if (ret)
- goto err;
-
- /*
- * Check for unconstrained reads: host-managed devices with
- * constrained reads (drives failing read after write pointer)
- * are not supported.
- */
- if (!sdkp->urswrz) {
- if (sdkp->first_scan)
- sd_printk(KERN_NOTICE, sdkp,
- "constrained reads devices are not supported\n");
- ret = -ENODEV;
- goto err;
- }
-
- /* Check capacity */
- ret = sd_zbc_check_capacity(sdkp, buf);
+ /* Check zoned block device characteristics (unconstrained reads) */
+ ret = sd_zbc_check_zoned_characteristics(sdkp, buf);
if (ret)
goto err;
@@ -702,33 +448,44 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
* Check zone size: only devices with a constant zone size (except
 * a possible last runt zone) that is a power of 2 are supported.
*/
- zone_blocks = sd_zbc_check_zone_size(sdkp);
- ret = -EFBIG;
- if (zone_blocks != (u32)zone_blocks)
- goto err;
- ret = zone_blocks;
- if (ret < 0)
+ ret = sd_zbc_check_zones(sdkp, &zone_blocks);
+ if (ret != 0)
goto err;
/* The drive satisfies the kernel restrictions: set it up */
- ret = sd_zbc_setup(sdkp, zone_blocks);
- if (ret)
- goto err;
+ blk_queue_chunk_sectors(sdkp->disk->queue,
+ logical_to_sectors(sdkp->device, zone_blocks));
+ nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks);
+
+ /* READ16/WRITE16 is mandatory for ZBC disks */
+ sdkp->device->use_16_for_rw = 1;
+ sdkp->device->use_10_for_rw = 0;
+
+ /*
+	 * If something changed, revalidate the disk zone bitmaps once we know
+	 * the capacity, that is, on the second revalidate execution during
+	 * disk scan and always during a normal revalidate.
+ */
+ if (sdkp->first_scan)
+ return 0;
+ if (sdkp->zone_blocks != zone_blocks ||
+ sdkp->nr_zones != nr_zones ||
+ disk->queue->nr_zones != nr_zones) {
+ ret = blk_revalidate_disk_zones(disk);
+ if (ret != 0)
+ goto err;
+ sdkp->zone_blocks = zone_blocks;
+ sdkp->nr_zones = nr_zones;
+ }
return 0;
err:
sdkp->capacity = 0;
- sd_zbc_cleanup(sdkp);
return ret;
}
-void sd_zbc_remove(struct scsi_disk *sdkp)
-{
- sd_zbc_cleanup(sdkp);
-}
-
void sd_zbc_print_zones(struct scsi_disk *sdkp)
{
if (!sd_is_zoned(sdkp) || !sdkp->capacity)
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8a254bb46a9b..c6ad00703c5b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -822,7 +822,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_end_request_all(srp->rq, BLK_STS_IOERR);
+ blk_put_request(srp->rq);
srp->rq = NULL;
}
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 2112ea6723c6..a25a07a0b7f0 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -349,16 +349,16 @@ static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
static int pqi_map_single(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *sg_descriptor, void *buffer,
- size_t buffer_length, int data_direction)
+ size_t buffer_length, enum dma_data_direction data_direction)
{
dma_addr_t bus_address;
- if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
+ if (!buffer || buffer_length == 0 || data_direction == DMA_NONE)
return 0;
- bus_address = pci_map_single(pci_dev, buffer, buffer_length,
+ bus_address = dma_map_single(&pci_dev->dev, buffer, buffer_length,
data_direction);
- if (pci_dma_mapping_error(pci_dev, bus_address))
+ if (dma_mapping_error(&pci_dev->dev, bus_address))
return -ENOMEM;
put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
@@ -370,15 +370,15 @@ static int pqi_map_single(struct pci_dev *pci_dev,
static void pqi_pci_unmap(struct pci_dev *pci_dev,
struct pqi_sg_descriptor *descriptors, int num_descriptors,
- int data_direction)
+ enum dma_data_direction data_direction)
{
int i;
- if (data_direction == PCI_DMA_NONE)
+ if (data_direction == DMA_NONE)
return;
for (i = 0; i < num_descriptors; i++)
- pci_unmap_single(pci_dev,
+ dma_unmap_single(&pci_dev->dev,
(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
get_unaligned_le32(&descriptors[i].length),
data_direction);
@@ -387,10 +387,9 @@ static void pqi_pci_unmap(struct pci_dev *pci_dev,
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
struct pqi_raid_path_request *request, u8 cmd,
u8 *scsi3addr, void *buffer, size_t buffer_length,
- u16 vpd_page, int *pci_direction)
+ u16 vpd_page, enum dma_data_direction *dir)
{
u8 *cdb;
- int pci_dir;
memset(request, 0, sizeof(*request));
@@ -458,23 +457,21 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
switch (request->data_direction) {
case SOP_READ_FLAG:
- pci_dir = PCI_DMA_FROMDEVICE;
+ *dir = DMA_FROM_DEVICE;
break;
case SOP_WRITE_FLAG:
- pci_dir = PCI_DMA_TODEVICE;
+ *dir = DMA_TO_DEVICE;
break;
case SOP_NO_DIRECTION_FLAG:
- pci_dir = PCI_DMA_NONE;
+ *dir = DMA_NONE;
break;
default:
- pci_dir = PCI_DMA_BIDIRECTIONAL;
+ *dir = DMA_BIDIRECTIONAL;
break;
}
- *pci_direction = pci_dir;
-
return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
- buffer, buffer_length, pci_dir);
+ buffer, buffer_length, *dir);
}
static inline void pqi_reinit_io_request(struct pqi_io_request *io_request)
@@ -516,21 +513,19 @@ static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
struct bmic_identify_controller *buffer)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
- sizeof(*buffer), 0, &pci_direction);
+ sizeof(*buffer), 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -538,21 +533,19 @@ static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
- &pci_direction);
+ &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -562,13 +555,13 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
u16 bmic_device_index;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
@@ -579,9 +572,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -590,8 +581,8 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
struct bmic_flush_cache *flush_cache;
+ enum dma_data_direction dir;
/*
* Don't bother trying to flush the cache if the controller is
@@ -608,16 +599,14 @@ static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
SA_FLUSH_CACHE, RAID_CTLR_LUNID, flush_cache,
- sizeof(*flush_cache), 0, &pci_direction);
+ sizeof(*flush_cache), 0, &dir);
if (rc)
goto out;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
out:
kfree(flush_cache);
@@ -629,20 +618,18 @@ static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
{
int rc;
struct pqi_raid_path_request request;
- int pci_direction;
+ enum dma_data_direction dir;
rc = pqi_build_raid_path_request(ctrl_info, &request,
BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
- buffer_length, 0, &pci_direction);
+ buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
0, NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -793,20 +780,18 @@ static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
void *buffer, size_t buffer_length)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
rc = pqi_build_raid_path_request(ctrl_info, &request,
- cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
+ cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &dir);
if (rc)
return rc;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
-
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
return rc;
}
@@ -1089,7 +1074,7 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
- int pci_direction;
+ enum dma_data_direction dir;
struct pqi_raid_path_request request;
struct raid_map *raid_map;
@@ -1099,15 +1084,14 @@ static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
rc = pqi_build_raid_path_request(ctrl_info, &request,
CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
- sizeof(*raid_map), 0, &pci_direction);
+ sizeof(*raid_map), 0, &dir);
if (rc)
goto error;
rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
NULL, NO_TIMEOUT);
- pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- pci_direction);
+ pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir);
if (rc)
goto error;
@@ -3822,7 +3806,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor,
capability, sizeof(*capability),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -3831,7 +3815,7 @@ static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
pqi_pci_unmap(ctrl_info->pci_dev,
&request.data.report_device_capability.sg_descriptor, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4158,7 +4142,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4167,7 +4151,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
if (rc)
goto out;
@@ -4194,7 +4178,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
rc = pqi_map_single(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors,
event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (rc)
goto out;
@@ -4203,7 +4187,7 @@ static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
pqi_pci_unmap(ctrl_info->pci_dev,
request.data.report_event_configuration.sg_descriptors, 1,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
out:
kfree(event_config);
@@ -5534,7 +5518,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
rc = pqi_map_single(ctrl_info->pci_dev,
&request.sg_descriptors[0], kernel_buffer,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ iocommand.buf_size, DMA_BIDIRECTIONAL);
if (rc)
goto out;
@@ -5548,7 +5532,7 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
if (iocommand.buf_size > 0)
pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
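
All the smartpqi hunks above apply one conversion: the deprecated pci_* DMA
wrappers and PCI_DMA_* flags give way to the generic DMA API, which operates
on &pci_dev->dev and enum dma_data_direction. A hedged sketch of the
resulting map/unmap pairing (the example_* names are hypothetical, not the
driver's actual helpers):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int example_map(struct pci_dev *pdev, void *buf, size_t len,
		       enum dma_data_direction dir, dma_addr_t *out)
{
	dma_addr_t addr;

	if (dir == DMA_NONE)
		return 0;			/* nothing to map */

	/* dma_map_single() takes a struct device *, hence &pdev->dev */
	addr = dma_map_single(&pdev->dev, buf, len, dir);
	if (dma_mapping_error(&pdev->dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}

static void example_unmap(struct pci_dev *pdev, dma_addr_t addr,
			  size_t len, enum dma_data_direction dir)
{
	if (dir != DMA_NONE)
		dma_unmap_single(&pdev->dev, addr, len, dir);
}
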
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4c9f06..ea91658c7060 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -316,9 +316,9 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
put_unaligned_le32(ctrl_info->max_io_slots,
&base_struct->error_buffer_num_elements);
- bus_address = pci_map_single(ctrl_info->pci_dev, base_struct,
- sizeof(*base_struct), PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(ctrl_info->pci_dev, bus_address)) {
+ bus_address = dma_map_single(&ctrl_info->pci_dev->dev, base_struct,
+ sizeof(*base_struct), DMA_TO_DEVICE);
+ if (dma_mapping_error(&ctrl_info->pci_dev->dev, bus_address)) {
rc = -ENOMEM;
goto out;
}
@@ -331,9 +331,8 @@ int sis_init_base_struct_addr(struct pqi_ctrl_info *ctrl_info)
rc = sis_send_sync_cmd(ctrl_info, SIS_CMD_INIT_BASE_STRUCT_ADDRESS,
&params);
- pci_unmap_single(ctrl_info->pci_dev, bus_address, sizeof(*base_struct),
- PCI_DMA_TODEVICE);
-
+ dma_unmap_single(&ctrl_info->pci_dev->dev, bus_address,
+ sizeof(*base_struct), DMA_TO_DEVICE);
out:
kfree(base_struct_unaligned);
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index b106596cc0cf..e9ccfb97773f 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -111,8 +111,8 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON((((unsigned long)buf) % SNIC_SG_DESC_ALIGN) != 0);
- pa = pci_map_single(snic->pdev, buf, buf_len, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, buf, buf_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"Rpt-tgt rspbuf %p: PCI DMA Mapping Failed\n",
buf);
@@ -138,7 +138,8 @@ snic_queue_report_tgt_req(struct snic *snic)
ret = snic_queue_wq_desc(snic, rqi->req, rqi->req_len);
if (ret) {
- pci_unmap_single(snic->pdev, pa, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, buf_len,
+ DMA_FROM_DEVICE);
kfree(buf);
rqi->sge_va = 0;
snic_release_untagged_req(snic, rqi);
diff --git a/drivers/scsi/snic/snic_io.c b/drivers/scsi/snic/snic_io.c
index 8e69548395b9..159ee94d2a55 100644
--- a/drivers/scsi/snic/snic_io.c
+++ b/drivers/scsi/snic/snic_io.c
@@ -102,7 +102,8 @@ snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct snic_req_info *rqi = NULL;
unsigned long flags;
- pci_unmap_single(snic->pdev, buf->dma_addr, buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
rqi = req_to_rqi(req);
spin_lock_irqsave(&snic->spl_cmd_lock, flags);
@@ -172,8 +173,8 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
snic_print_desc(__func__, os_buf, len);
/* Map request buffer */
- pa = pci_map_single(snic->pdev, os_buf, len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ pa = dma_map_single(&snic->pdev->dev, os_buf, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost, "qdesc: PCI DMA Mapping Fail.\n");
return -ENOMEM;
@@ -186,7 +187,7 @@ snic_queue_wq_desc(struct snic *snic, void *os_buf, u16 len)
spin_lock_irqsave(&snic->wq_lock[q_num], flags);
desc_avail = snic_wqdesc_avail(snic, q_num, req->hdr.type);
if (desc_avail <= 0) {
- pci_unmap_single(snic->pdev, pa, len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&snic->pdev->dev, pa, len, DMA_TO_DEVICE);
req->req_pa = 0;
spin_unlock_irqrestore(&snic->wq_lock[q_num], flags);
atomic64_inc(&snic->s_stats.misc.wq_alloc_fail);
@@ -350,29 +351,29 @@ snic_req_free(struct snic *snic, struct snic_req_info *rqi)
if (rqi->abort_req) {
if (rqi->abort_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->abort_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->abort_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->dr_req) {
if (rqi->dr_req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->dr_req->req_pa,
sizeof(struct snic_host_req),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi->dr_req, snic->req_pool[SNIC_REQ_TM_CACHE]);
}
if (rqi->req->req_pa)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
rqi->req->req_pa,
rqi->req_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
mempool_free(rqi, snic->req_pool[rqi->rq_pool_type]);
}
@@ -384,10 +385,10 @@ snic_pci_unmap_rsp_buf(struct snic *snic, struct snic_req_info *rqi)
sgd = req_to_sgl(rqi_to_req(rqi));
SNIC_BUG_ON(sgd[0].addr == 0);
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(sgd[0].addr),
le32_to_cpu(sgd[0].len),
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
/*
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 7cf70aaec0ba..5295277d6325 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -435,37 +435,17 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* limitation for the device. Try 43-bit first, and
* fail to 32-bit.
*/
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(43));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
if (ret) {
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
SNIC_HOST_ERR(shost,
"No Usable DMA Configuration, aborting %d\n",
ret);
-
- goto err_rel_regions;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 32-bit DMA for consistent allocations, aborting: %d\n",
- ret);
-
- goto err_rel_regions;
- }
- } else {
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(43));
- if (ret) {
- SNIC_HOST_ERR(shost,
- "Unable to obtain 43-bit DMA for consistent allocations. aborting: %d\n",
- ret);
-
goto err_rel_regions;
}
}
-
/* Map vNIC resources from BAR0 */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");
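
The probe-time mask setup collapses into dma_set_mask_and_coherent(), which
sets both the streaming and the coherent mask in one call, so the separate
pci_set_consistent_dma_mask() fallback branches disappear. A sketch of the
pattern (the example_* name is hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int example_set_dma_mask(struct pci_dev *pdev)
{
	int ret;

	/* Try the device's preferred 43-bit mask, fall back to 32-bit */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret)
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return ret;	/* non-zero: no usable DMA configuration */
}
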
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index d9b2e46424aa..b3650c989ed4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -146,10 +146,10 @@ snic_release_req_buf(struct snic *snic,
CMD_FLAGS(sc));
if (req->u.icmnd.sense_addr)
- pci_unmap_single(snic->pdev,
+ dma_unmap_single(&snic->pdev->dev,
le64_to_cpu(req->u.icmnd.sense_addr),
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
scsi_dma_unmap(sc);
@@ -185,12 +185,11 @@ snic_queue_icmnd_req(struct snic *snic,
}
}
- pa = pci_map_single(snic->pdev,
+ pa = dma_map_single(&snic->pdev->dev,
sc->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
-
- if (pci_dma_mapping_error(snic->pdev, pa)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&snic->pdev->dev, pa)) {
SNIC_HOST_ERR(snic->shost,
"QIcmnd:PCI Map Failed for sns buf %p tag %x\n",
sc->sense_buffer, snic_cmd_tag(sc));
@@ -2001,7 +2000,7 @@ snic_dr_finish(struct snic *snic, struct scsi_cmnd *sc)
}
dr_failed:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
if (rqi)
CMD_SP(sc) = NULL;
spin_unlock_irqrestore(io_lock, flags);
@@ -2604,7 +2603,7 @@ snic_internal_abort_io(struct snic *snic, struct scsi_cmnd *sc, int tmf)
ret = SUCCESS;
skip_internal_abts:
- SNIC_BUG_ON(!spin_is_locked(io_lock));
+ lockdep_assert_held(io_lock);
spin_unlock_irqrestore(io_lock, flags);
return ret;
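
Swapping SNIC_BUG_ON(!spin_is_locked(...)) for lockdep_assert_held() matters
on uniprocessor builds, where spin_is_locked() always returns false and the
assertion would fire spuriously; the lockdep form also checks that the
current context is the holder and compiles away when lockdep is disabled.
A sketch:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static void example_finish(spinlock_t *io_lock)
{
	lockdep_assert_held(io_lock);	/* caller must hold io_lock */
	/* ... teardown that relies on io_lock being held ... */
}
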
diff --git a/drivers/scsi/snic/vnic_dev.c b/drivers/scsi/snic/vnic_dev.c
index dad5fc66effb..05e374f80946 100644
--- a/drivers/scsi/snic/vnic_dev.c
+++ b/drivers/scsi/snic/vnic_dev.c
@@ -225,10 +225,9 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
svnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
- ring->size_unaligned,
- &ring->base_addr_unaligned);
-
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+ ring->size_unaligned, &ring->base_addr_unaligned,
+ GFP_KERNEL);
if (!ring->descs_unaligned) {
pr_err("Failed to allocate ring (size=%d), aborting\n",
(int)ring->size);
@@ -251,7 +250,7 @@ int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void svnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
ring->size_unaligned,
ring->descs_unaligned,
ring->base_addr_unaligned);
@@ -470,9 +469,9 @@ int svnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_alloc_consistent(vdev->pdev,
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ &vdev->fw_info_pa, GFP_KERNEL);
if (!vdev->fw_info)
return -ENOMEM;
@@ -534,8 +533,8 @@ int svnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = VNIC_DVCMD_TMO;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats), &vdev->stats_pa, GFP_KERNEL);
if (!vdev->stats)
return -ENOMEM;
}
@@ -607,9 +606,9 @@ int svnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
int wait = VNIC_DVCMD_TMO;
if (!vdev->notify) {
- vdev->notify = pci_alloc_consistent(vdev->pdev,
+ vdev->notify = dma_alloc_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
- &vdev->notify_pa);
+ &vdev->notify_pa, GFP_KERNEL);
if (!vdev->notify)
return -ENOMEM;
}
@@ -697,21 +696,21 @@ void svnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_notify),
vdev->notify,
vdev->notify_pa);
if (vdev->linkstatus)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(u32),
vdev->linkstatus,
vdev->linkstatus_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_stats),
vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
+ dma_free_coherent(&vdev->pdev->dev,
sizeof(struct vnic_devcmd_fw_info),
vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
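
pci_alloc_consistent() was a hard-coded GFP_ATOMIC wrapper around
dma_alloc_coherent(); the conversion makes the allocation context explicit,
GFP_KERNEL here since these setup paths may sleep. A sketch of the pairing
(the example_* names are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *example_alloc_ring(struct pci_dev *pdev, size_t size,
				dma_addr_t *dma)
{
	/* GFP_KERNEL is now explicit instead of the wrapper's GFP_ATOMIC */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void example_free_ring(struct pci_dev *pdev, size_t size,
			      void *cpu_addr, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, cpu_addr, dma);
}
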
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0b1421cdf8a0..c9a55d0f076d 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -60,30 +60,6 @@ static u8 sun3x_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sun3x_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int sun3x_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void sun3x_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void sun3x_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int sun3x_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -182,10 +158,6 @@ static int sun3x_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sun3x_esp_ops = {
.esp_write8 = sun3x_esp_write8,
.esp_read8 = sun3x_esp_read8,
- .map_single = sun3x_esp_map_single,
- .map_sg = sun3x_esp_map_sg,
- .unmap_single = sun3x_esp_unmap_single,
- .unmap_sg = sun3x_esp_unmap_sg,
.irq_pending = sun3x_esp_irq_pending,
.reset_dma = sun3x_esp_reset_dma,
.dma_drain = sun3x_esp_dma_drain,
@@ -246,7 +218,7 @@ static int esp_sun3x_probe(struct platform_device *dev)
dev_set_drvdata(&dev->dev, esp);
- err = scsi_esp_register(esp, &dev->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 747ee64a78e1..a11efbcb7f8b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -80,7 +80,7 @@ static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
static int esp_sbus_map_regs(struct esp *esp, int hme)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct resource *res;
/* On HME, two reg sets exist, first is DVMA,
@@ -100,11 +100,9 @@ static int esp_sbus_map_regs(struct esp *esp, int hme)
static int esp_sbus_map_command_block(struct esp *esp)
{
- struct platform_device *op = esp->dev;
-
- esp->command_block = dma_alloc_coherent(&op->dev, 16,
+ esp->command_block = dma_alloc_coherent(esp->dev, 16,
&esp->command_block_dma,
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!esp->command_block)
return -ENOMEM;
return 0;
@@ -113,7 +111,7 @@ static int esp_sbus_map_command_block(struct esp *esp)
static int esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
host->irq = op->archdata.irqs[0];
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
@@ -121,7 +119,7 @@ static int esp_sbus_register_irq(struct esp *esp)
static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -143,7 +141,7 @@ done:
static void esp_get_differential(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
dp = op->dev.of_node;
@@ -155,7 +153,7 @@ static void esp_get_differential(struct esp *esp)
static void esp_get_clock_params(struct esp *esp)
{
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *bus_dp, *dp;
int fmhz;
@@ -172,7 +170,7 @@ static void esp_get_clock_params(struct esp *esp)
static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
- struct platform_device *op = esp->dev;
+ struct platform_device *op = to_platform_device(esp->dev);
struct device_node *dp;
u8 bursts, val;
@@ -212,38 +210,6 @@ static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
return sbus_readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_single(&op->dev, buf, sz, dir);
-}
-
-static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- return dma_map_sg(&op->dev, sg, num_sg, dir);
-}
-
-static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_single(&op->dev, addr, sz, dir);
-}
-
-static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- struct platform_device *op = esp->dev;
-
- dma_unmap_sg(&op->dev, sg, num_sg, dir);
-}
-
static int sbus_esp_irq_pending(struct esp *esp)
{
if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
@@ -255,14 +221,13 @@ static void sbus_esp_reset_dma(struct esp *esp)
{
int can_do_burst16, can_do_burst32, can_do_burst64;
int can_do_sbus64, lim;
- struct platform_device *op;
+ struct platform_device *op = to_platform_device(esp->dev);
u32 val;
can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
can_do_burst64 = 0;
can_do_sbus64 = 0;
- op = esp->dev;
if (sbus_can_dma_64bit())
can_do_sbus64 = 1;
if (sbus_can_burst64())
@@ -474,10 +439,6 @@ static int sbus_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops sbus_esp_ops = {
.esp_write8 = sbus_esp_write8,
.esp_read8 = sbus_esp_read8,
- .map_single = sbus_esp_map_single,
- .map_sg = sbus_esp_map_sg,
- .unmap_single = sbus_esp_unmap_single,
- .unmap_sg = sbus_esp_unmap_sg,
.irq_pending = sbus_esp_irq_pending,
.reset_dma = sbus_esp_reset_dma,
.dma_drain = sbus_esp_dma_drain,
@@ -504,7 +465,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
esp = shost_priv(host);
esp->host = host;
- esp->dev = op;
+ esp->dev = &op->dev;
esp->ops = &sbus_esp_ops;
if (hme)
@@ -540,7 +501,7 @@ static int esp_sbus_probe_one(struct platform_device *op,
dev_set_drvdata(&op->dev, esp);
- err = scsi_esp_register(esp, &op->dev);
+ err = scsi_esp_register(esp);
if (err)
goto fail_free_irq;
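
With esp->dev retyped from a platform_device pointer to a plain
struct device * (see esp_sbus_probe_one() above), the places that still
need bus specifics recover the platform device through to_platform_device(),
a container_of() wrapper. A sketch (archdata.irqs is the sparc-specific
field used above):

#include <linux/platform_device.h>

static unsigned int example_get_irq(struct device *dev)
{
	/* container_of(dev, struct platform_device, dev) under the hood */
	struct platform_device *op = to_platform_device(dev);

	return op->archdata.irqs[0];
}
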
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index bd3f6e2d6834..0a2a54517b15 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -4370,6 +4370,13 @@ static void sym_nego_rejected(struct sym_hcb *np, struct sym_tcb *tp, struct sym
OUTB(np, HS_PRT, HS_BUSY);
}
+#define sym_printk(lvl, tp, cp, fmt, v...) do { \
+ if (cp) \
+ scmd_printk(lvl, cp->cmd, fmt, ##v); \
+ else \
+ starget_printk(lvl, tp->starget, fmt, ##v); \
+} while (0)
+
/*
* chip exception handler for programmed interrupts.
*/
@@ -4415,7 +4422,7 @@ static void sym_int_sir(struct sym_hcb *np)
* been selected with ATN. We do not want to handle that.
*/
case SIR_SEL_ATN_NO_MSG_OUT:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG OUT phase after selection with ATN\n");
goto out_stuck;
/*
@@ -4423,7 +4430,7 @@ static void sym_int_sir(struct sym_hcb *np)
* having reselected the initiator.
*/
case SIR_RESEL_NO_MSG_IN:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No MSG IN phase after reselection\n");
goto out_stuck;
/*
@@ -4431,7 +4438,7 @@ static void sym_int_sir(struct sym_hcb *np)
* an IDENTIFY.
*/
case SIR_RESEL_NO_IDENTIFY:
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"No IDENTIFY after reselection\n");
goto out_stuck;
/*
@@ -4460,7 +4467,7 @@ static void sym_int_sir(struct sym_hcb *np)
case SIR_RESEL_ABORTED:
np->lastmsg = np->msgout[0];
np->msgout[0] = M_NOOP;
- scmd_printk(KERN_WARNING, cp->cmd,
+ sym_printk(KERN_WARNING, tp, cp,
"message %x sent on bad reselection\n", np->lastmsg);
goto out;
/*
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e09fe6ab3572..2ddd426323e9 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -109,3 +109,22 @@ config SCSI_UFS_HISI
Select this if you have UFS controller on Hisilicon chipset.
If unsure, say N.
+
+config SCSI_UFS_BSG
+ bool "Universal Flash Storage BSG device node"
+ depends on SCSI_UFSHCD
+ select BLK_DEV_BSGLIB
+ help
+ Universal Flash Storage (UFS) is a SCSI transport specification for
+ accessing flash storage on digital cameras, mobile phones and
+ consumer electronic devices.
+ A UFS controller communicates with a UFS device by exchanging
+ UFS Protocol Information Units (UPIUs).
+ UPIUs can not only be used as a transport layer for the SCSI protocol
+ but are also used by the UFS native command set.
+ This transport driver supports exchanging UFS protocol information units
+ with a UFS device. See also the ufshcd driver, which is a SCSI driver
+ that supports UFS devices.
+
+ Select this if you need a bsg device node for your UFS controller.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 2c50f03d8c4a..aca481329828 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -4,7 +4,8 @@ obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-objs := ufshcd.o ufs-sysfs.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 75ee5906b966..3aeadb14aae1 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -16,7 +16,6 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
-#include <linux/phy/phy-qcom-ufs.h>
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
@@ -70,20 +69,27 @@ static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
}
static int ufs_qcom_host_clk_get(struct device *dev,
- const char *name, struct clk **clk_out)
+ const char *name, struct clk **clk_out, bool optional)
{
struct clk *clk;
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ if (!IS_ERR(clk)) {
*clk_out = clk;
+ return 0;
}
+ err = PTR_ERR(clk);
+
+ if (optional && err == -ENOENT) {
+ *clk_out = NULL;
+ return 0;
+ }
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get %s err %d\n", name, err);
+
return err;
}
@@ -104,11 +110,9 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->tx_l1_sync_clk);
+ clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@@ -132,24 +136,21 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
- if (host->hba->lanes_per_direction > 1) {
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
+ if (err)
+ goto disable_tx_l0;
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
- }
+ if (err)
+ goto disable_rx_l1;
host->is_lane_clks_enabled = true;
goto out;
disable_rx_l1:
- if (host->hba->lanes_per_direction > 1)
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -163,25 +164,25 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
int err = 0;
struct device *dev = host->hba->dev;
- err = ufs_qcom_host_clk_get(dev,
- "rx_lane0_sync_clk", &host->rx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "rx_lane0_sync_clk",
+ &host->rx_l0_sync_clk, false);
if (err)
goto out;
- err = ufs_qcom_host_clk_get(dev,
- "tx_lane0_sync_clk", &host->tx_l0_sync_clk);
+ err = ufs_qcom_host_clk_get(dev, "tx_lane0_sync_clk",
+ &host->tx_l0_sync_clk, false);
if (err)
goto out;
/* In case of single lane per direction, don't read lane1 clocks */
if (host->hba->lanes_per_direction > 1) {
err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
+ &host->rx_l1_sync_clk, false);
if (err)
goto out;
err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
+ &host->tx_l1_sync_clk, true);
}
out:
return err;
@@ -189,22 +190,9 @@ out:
static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
u32 tx_lanes;
- int err = 0;
-
- err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
- if (err)
- goto out;
-
- err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
- __func__);
-out:
- return err;
+ return ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
}
static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
@@ -932,10 +920,8 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
{
u32 val;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
struct ufs_qcom_dev_params ufs_qcom_cap;
int ret = 0;
- int res = 0;
if (!dev_req_params) {
pr_err("%s: incoming dev_req_params is NULL\n", __func__);
@@ -1002,12 +988,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
}
val = ~(MAX_U32 << dev_req_params->lane_tx);
- res = ufs_qcom_phy_set_tx_lane_enable(phy, val);
- if (res) {
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable() failed res = %d\n",
- __func__, res);
- ret = res;
- }
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
@@ -1264,10 +1244,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
}
}
- /* update phy revision information before calling phy_init() */
- ufs_qcom_phy_save_controller_version(host->generic_phy,
- host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
-
err = ufs_qcom_init_lane_clks(host);
if (err)
goto out_variant_clear;
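
ufs_qcom_host_clk_get() now takes an optional flag so a clock the DT simply
does not wire up (-ENOENT) is tolerated rather than failing the probe; the
tx_lane1_sync_clk lookup above is the only caller passing true, and probe
deferral is no longer logged as an error either. A sketch of the lookup
logic (the example_* name is hypothetical; newer kernels expose the same
idea as devm_clk_get_optional()):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>

static int example_clk_get(struct device *dev, const char *name,
			   struct clk **out, bool optional)
{
	struct clk *clk = devm_clk_get(dev, name);

	if (!IS_ERR(clk)) {
		*out = clk;
		return 0;
	}
	if (optional && PTR_ERR(clk) == -ENOENT) {
		*out = NULL;	/* clk_prepare_enable(NULL) etc. are no-ops */
		return 0;
	}
	return PTR_ERR(clk);
}
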
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 295f4bef6a0e..c114826316eb 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -129,11 +129,6 @@ enum {
MASK_CLK_NS_REG = 0xFFFC00,
};
-enum ufs_qcom_phy_init_type {
- UFS_PHY_INIT_FULL,
- UFS_PHY_INIT_CFG_RESTORE,
-};
-
/* QCOM UFS debug print bit mask */
#define UFS_QCOM_DBG_PRINT_REGS_EN BIT(0)
#define UFS_QCOM_DBG_PRINT_ICE_REGS_EN BIT(1)
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14e5bf7af0bb..58087d3916d0 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,9 +38,9 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include <uapi/scsi/scsi_bsg_ufs.h>
-#define MAX_CDB_SIZE 16
-#define GENERAL_UPIU_REQUEST_SIZE 32
+#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
#define QUERY_DESC_HDR_SIZE 2
@@ -414,6 +414,7 @@ enum {
MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
+ MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -433,65 +434,6 @@ enum ufs_dev_pwr_mode {
};
/**
- * struct utp_upiu_header - UPIU header structure
- * @dword_0: UPIU header DW-0
- * @dword_1: UPIU header DW-1
- * @dword_2: UPIU header DW-2
- */
-struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
-};
-
-/**
- * struct utp_upiu_cmd - Command UPIU structure
- * @data_transfer_len: Data Transfer Length DW-3
- * @cdb: Command Descriptor Block CDB DW-4 to DW-7
- */
-struct utp_upiu_cmd {
- __be32 exp_data_transfer_len;
- u8 cdb[MAX_CDB_SIZE];
-};
-
-/**
- * struct utp_upiu_query - upiu request buffer structure for
- * query request.
- * @opcode: command to perform B-0
- * @idn: a value that indicates the particular type of data B-1
- * @index: Index to further identify data B-2
- * @selector: Index to further identify data B-3
- * @reserved_osf: spec reserved field B-4,5
- * @length: number of descriptor bytes to read/write B-6,7
- * @value: Attribute value to be written DW-5
- * @reserved: spec reserved DW-6,7
- */
-struct utp_upiu_query {
- u8 opcode;
- u8 idn;
- u8 index;
- u8 selector;
- __be16 reserved_osf;
- __be16 length;
- __be32 value;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_req - general upiu request structure
- * @header:UPIU header structure DW-0 to DW-2
- * @sc: fields structure for scsi command DW-3 to DW-7
- * @qr: fields structure for query request DW-3 to DW-7
- */
-struct utp_upiu_req {
- struct utp_upiu_header header;
- union {
- struct utp_upiu_cmd sc;
- struct utp_upiu_query qr;
- };
-};
-
-/**
* struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
@@ -520,36 +462,6 @@ struct utp_upiu_rsp {
};
/**
- * struct utp_upiu_task_req - Task request UPIU structure
- * @header - UPIU header structure DW0 to DW-2
- * @input_param1: Input parameter 1 DW-3
- * @input_param2: Input parameter 2 DW-4
- * @input_param3: Input parameter 3 DW-5
- * @reserved: Reserved double words DW-6 to DW-7
- */
-struct utp_upiu_task_req {
- struct utp_upiu_header header;
- __be32 input_param1;
- __be32 input_param2;
- __be32 input_param3;
- __be32 reserved[2];
-};
-
-/**
- * struct utp_upiu_task_rsp - Task Management Response UPIU structure
- * @header: UPIU header structure DW0-DW-2
- * @output_param1: Ouput parameter 1 DW3
- * @output_param2: Output parameter 2 DW4
- * @reserved: Reserved double words DW-5 to DW-7
- */
-struct utp_upiu_task_rsp {
- struct utp_upiu_header header;
- __be32 output_param1;
- __be32 output_param2;
- __be32 reserved[3];
-};
-
-/**
* struct ufs_query_req - parameters for building a query request
* @query_func: UPIU header query function
* @upiu_req: the query request data
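
GENERAL_UPIU_REQUEST_SIZE becomes sizeof(struct utp_upiu_req) now that the
UPIU structures live in the shared uapi header; the layout still works out
to the previous hard-coded 32 bytes (a 12-byte header plus a 20-byte
command/query union). A standalone sketch of that invariant, with the
structures re-declared locally for illustration:

#include <assert.h>
#include <stdint.h>

struct upiu_header { uint32_t dword_0, dword_1, dword_2; };	/* 12 bytes */
struct upiu_cmd { uint32_t exp_data_transfer_len; uint8_t cdb[16]; };

struct upiu_req {
	struct upiu_header header;
	struct upiu_cmd sc;	/* the query variant is also 20 bytes */
};

/* GENERAL_UPIU_REQUEST_SIZE used to be the literal 32 */
static_assert(sizeof(struct upiu_req) == 32, "UPIU request must stay 32 bytes");

int main(void) { return 0; }
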
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
new file mode 100644
index 000000000000..e5f8e54bf644
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bsg endpoint that supports UPIUs
+ *
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#include "ufs_bsg.h"
+
+static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len,
+ struct utp_upiu_query *qr)
+{
+ int desc_size = be16_to_cpu(qr->length);
+ int desc_id = qr->idn;
+ int ret;
+
+ if (desc_size <= 0)
+ return -EINVAL;
+
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, desc_len);
+ if (ret || !*desc_len)
+ return -EINVAL;
+
+ *desc_len = min_t(int, *desc_len, desc_size);
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_size(struct ufs_hba *hba,
+ unsigned int request_len,
+ unsigned int reply_len,
+ int desc_len, enum query_opcode desc_op)
+{
+ int min_req_len = sizeof(struct ufs_bsg_request);
+ int min_rsp_len = sizeof(struct ufs_bsg_reply);
+
+ if (desc_op == UPIU_QUERY_OPCODE_WRITE_DESC)
+ min_req_len += desc_len;
+
+ if (min_req_len > request_len || min_rsp_len > reply_len) {
+ dev_err(hba->dev, "not enough space assigned\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ufs_bsg_verify_query_params(struct ufs_hba *hba,
+ struct ufs_bsg_request *bsg_request,
+ unsigned int request_len,
+ unsigned int reply_len,
+ uint8_t *desc_buff, int *desc_len,
+ enum query_opcode desc_op)
+{
+ struct utp_upiu_query *qr;
+
+ if (desc_op == UPIU_QUERY_OPCODE_READ_DESC) {
+ dev_err(hba->dev, "unsupported opcode %d\n", desc_op);
+ return -ENOTSUPP;
+ }
+
+ if (desc_op != UPIU_QUERY_OPCODE_WRITE_DESC)
+ goto out;
+
+ qr = &bsg_request->upiu_req.qr;
+ if (ufs_bsg_get_query_desc_size(hba, desc_len, qr)) {
+ dev_err(hba->dev, "Illegal desc size\n");
+ return -EINVAL;
+ }
+
+ if (ufs_bsg_verify_query_size(hba, request_len, reply_len, *desc_len,
+ desc_op))
+ return -EINVAL;
+
+ desc_buff = (uint8_t *)(bsg_request + 1);
+
+out:
+ return 0;
+}
+
+static int ufs_bsg_request(struct bsg_job *job)
+{
+ struct ufs_bsg_request *bsg_request = job->request;
+ struct ufs_bsg_reply *bsg_reply = job->reply;
+ struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent));
+ unsigned int req_len = job->request_len;
+ unsigned int reply_len = job->reply_len;
+ struct uic_command uc = {};
+ int msgcode;
+ uint8_t *desc_buff = NULL;
+ int desc_len = 0;
+ enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP;
+ int ret;
+
+ ret = ufs_bsg_verify_query_size(hba, req_len, reply_len, 0, desc_op);
+ if (ret)
+ goto out;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ msgcode = bsg_request->msgcode;
+ switch (msgcode) {
+ case UPIU_TRANSACTION_QUERY_REQ:
+ desc_op = bsg_request->upiu_req.qr.opcode;
+ ret = ufs_bsg_verify_query_params(hba, bsg_request, req_len,
+ reply_len, desc_buff,
+ &desc_len, desc_op);
+ if (ret)
+ goto out;
+
+ /* fall through */
+ case UPIU_TRANSACTION_NOP_OUT:
+ case UPIU_TRANSACTION_TASK_REQ:
+ ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req,
+ &bsg_reply->upiu_rsp, msgcode,
+ desc_buff, &desc_len, desc_op);
+ if (ret)
+ dev_err(hba->dev,
+ "exe raw upiu: error code %d\n", ret);
+
+ break;
+ case UPIU_TRANSACTION_UIC_CMD:
+ memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE);
+ ret = ufshcd_send_uic_cmd(hba, &uc);
+ if (ret)
+ dev_dbg(hba->dev,
+ "send uic cmd: error code %d\n", ret);
+
+ memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE);
+
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode);
+
+ break;
+ }
+
+out:
+ bsg_reply->result = ret;
+ job->reply_len = sizeof(struct ufs_bsg_reply) +
+ bsg_reply->reply_payload_rcv_len;
+
+ bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
+/**
+ * ufs_bsg_remove - detach and remove the added ufs-bsg node
+ * @hba: per adapter object
+ *
+ * Should be called when unloading the driver.
+ */
+void ufs_bsg_remove(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+
+ if (!hba->bsg_queue)
+ return;
+
+ bsg_unregister_queue(hba->bsg_queue);
+
+ device_del(bsg_dev);
+ put_device(bsg_dev);
+}
+
+static inline void ufs_bsg_node_release(struct device *dev)
+{
+ put_device(dev->parent);
+}
+
+/**
+ * ufs_bsg_probe - Add ufs bsg device node
+ * @hba: per adapter object
+ *
+ * Called during initial loading of the driver, and before scsi_scan_host.
+ */
+int ufs_bsg_probe(struct ufs_hba *hba)
+{
+ struct device *bsg_dev = &hba->bsg_dev;
+ struct Scsi_Host *shost = hba->host;
+ struct device *parent = &shost->shost_gendev;
+ struct request_queue *q;
+ int ret;
+
+ device_initialize(bsg_dev);
+
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = ufs_bsg_node_release;
+
+ dev_set_name(bsg_dev, "ufs-bsg");
+
+ ret = device_add(bsg_dev);
+ if (ret)
+ goto out;
+
+ q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, 0);
+ if (IS_ERR(q)) {
+ ret = PTR_ERR(q);
+ goto out;
+ }
+
+ hba->bsg_queue = q;
+
+ return 0;
+
+out:
+ dev_err(bsg_dev, "failed to initialize bsg dev %d\n", shost->host_no);
+ put_device(bsg_dev);
+ return ret;
+}
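
From userspace the new node is driven with the standard bsg SG_IO v4
transport. A hedged sketch that issues a NOP OUT UPIU (an all-zero request
header); the /dev/bsg/ufs-bsg path and the 0x00 transaction code are
assumptions inferred from this series, not guaranteed interfaces:

#include <fcntl.h>
#include <linux/bsg.h>
#include <scsi/scsi_bsg_ufs.h>	/* the uapi header this series adds */
#include <scsi/sg.h>		/* SG_IO */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ufs_bsg_request req;
	struct ufs_bsg_reply rsp;
	struct sg_io_v4 io;
	int fd;

	memset(&req, 0, sizeof(req));
	memset(&rsp, 0, sizeof(rsp));
	memset(&io, 0, sizeof(io));

	req.msgcode = 0x00;	/* UPIU_TRANSACTION_NOP_OUT, assumed value */

	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)&rsp;
	io.max_response_len = sizeof(rsp);

	fd = open("/dev/bsg/ufs-bsg", O_RDWR);	/* assumed node path */
	if (fd < 0 || ioctl(fd, SG_IO, &io) < 0) {
		perror("ufs-bsg");
		return 1;
	}
	printf("result %d\n", (int)rsp.result);
	close(fd);
	return 0;
}
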
diff --git a/drivers/scsi/ufs/ufs_bsg.h b/drivers/scsi/ufs/ufs_bsg.h
new file mode 100644
index 000000000000..d09918758631
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_bsg.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef UFS_BSG_H
+#define UFS_BSG_H
+
+#include <linux/bsg-lib.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+
+#include "ufshcd.h"
+#include "ufs.h"
+
+#ifdef CONFIG_SCSI_UFS_BSG
+void ufs_bsg_remove(struct ufs_hba *hba);
+int ufs_bsg_probe(struct ufs_hba *hba);
+#else
+static inline void ufs_bsg_remove(struct ufs_hba *hba) {}
+static inline int ufs_bsg_probe(struct ufs_hba *hba) {return 0; }
+#endif
+
+#endif /* UFS_BSG_H */
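
The header uses the familiar CONFIG-gated stub pattern so ufshcd.c can call
the probe/remove hooks unconditionally and the compiler discards them when
the option is off. A generic sketch (CONFIG_EXAMPLE_FEATURE and the
example_* names are hypothetical):

struct example_hba;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_probe(struct example_hba *hba);
void example_remove(struct example_hba *hba);
#else
/* no-op stubs: callers need no #ifdefs of their own */
static inline int example_probe(struct example_hba *hba) { return 0; }
static inline void example_remove(struct example_hba *hba) {}
#endif
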
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index c55f38ec391c..23d7cca36ff0 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -46,6 +46,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs_bsg.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -326,14 +327,11 @@ static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
const char *str)
{
- struct utp_task_req_desc *descp;
- struct utp_upiu_task_req *task_req;
int off = (int)tag - hba->nutrs;
+ struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- descp = &hba->utmrdl_base_addr[off];
- task_req = (struct utp_upiu_task_req *)descp->task_req_upiu;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &task_req->header,
- &task_req->input_param1);
+ trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
+ &descp->input_param1);
}
static void ufshcd_add_command_trace(struct ufs_hba *hba,
@@ -475,22 +473,13 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
{
- struct utp_task_req_desc *tmrdp;
int tag;
for_each_set_bit(tag, &bitmap, hba->nutmrs) {
- tmrdp = &hba->utmrdl_base_addr[tag];
+ struct utp_task_req_desc *tmrdp = &hba->utmrdl_base_addr[tag];
+
dev_err(hba->dev, "TM[%d] - Task Management Header\n", tag);
- ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
- sizeof(struct request_desc_header));
- dev_err(hba->dev, "TM[%d] - Task Management Request UPIU\n",
- tag);
- ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
- sizeof(struct utp_upiu_req));
- dev_err(hba->dev, "TM[%d] - Task Management Response UPIU\n",
- tag);
- ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
- sizeof(struct utp_task_req_desc));
+ ufshcd_hex_dump("", tmrdp, sizeof(*tmrdp));
}
}
@@ -646,19 +635,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
- * @task_req_descp: pointer to utp_task_req_desc structure
- *
- * This function is used to get the OCS field from UTMRD
- * Returns the OCS field in the UTMRD
- */
-static inline int
-ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
-{
- return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
-}
-
-/**
* ufshcd_get_tm_free_slot - get a free slot for task management request
* @hba: per adapter instance
* @free_slot: pointer to variable with available slot value
@@ -1691,8 +1667,9 @@ static void __ufshcd_release(struct ufs_hba *hba)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ queue_delayed_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work,
+ msecs_to_jiffies(hba->clk_gating.delay_ms));
}
void ufshcd_release(struct ufs_hba *hba)
@@ -1763,6 +1740,34 @@ out:
return count;
}
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
char wq_name[sizeof("ufs_clk_gating_00")];
@@ -2055,8 +2060,7 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
*
* Returns 0 only if success.
*/
-static int
-ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
int ret;
unsigned long flags;
@@ -2238,8 +2242,8 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
- memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE);
+ memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
@@ -2258,7 +2262,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
struct ufs_query *query = &hba->dev_cmd.query;
u16 len = be16_to_cpu(query->request.upiu_req.length);
- u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
/* Query request header */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -2280,7 +2283,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
/* Copy the Descriptor */
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
- memcpy(descp, query->descriptor, len);
+ memcpy(ucd_req_ptr + 1, query->descriptor, len);
memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
@@ -4601,46 +4604,6 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
}
/**
- * ufshcd_task_req_compl - handle task management request completion
- * @hba: per adapter instance
- * @index: index of the completed request
- * @resp: task management service response
- *
- * Returns non-zero value on error, zero on success
- */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
-{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_rsp *task_rsp_upiup;
- unsigned long flags;
- int ocs_value;
- int task_result;
-
- spin_lock_irqsave(hba->host->host_lock, flags);
-
- /* Clear completed tasks from outstanding_tasks */
- __clear_bit(index, &hba->outstanding_tasks);
-
- task_req_descp = hba->utmrdl_base_addr;
- ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
-
- if (ocs_value == OCS_SUCCESS) {
- task_rsp_upiup = (struct utp_upiu_task_rsp *)
- task_req_descp[index].task_rsp_upiu;
- task_result = be32_to_cpu(task_rsp_upiup->output_param1);
- task_result = task_result & MASK_TM_SERVICE_RESP;
- if (resp)
- *resp = (u8)task_result;
- } else {
- dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
- __func__, ocs_value);
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
- return ocs_value;
-}
-
-/**
* ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
* @lrbp: pointer to local reference block of completed command
* @scsi_status: SCSI command status
@@ -5597,28 +5560,12 @@ out:
return err;
}
-/**
- * ufshcd_issue_tm_cmd - issues task management commands to controller
- * @hba: per adapter instance
- * @lun_id: LUN ID to which TM command is sent
- * @task_id: task ID to which the TM command is applicable
- * @tm_function: task management function opcode
- * @tm_response: task management service response return value
- *
- * Returns non-zero value on error, zero on success.
- */
-static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
- u8 tm_function, u8 *tm_response)
+static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
+ struct utp_task_req_desc *treq, u8 tm_function)
{
- struct utp_task_req_desc *task_req_descp;
- struct utp_upiu_task_req *task_req_upiup;
- struct Scsi_Host *host;
+ struct Scsi_Host *host = hba->host;
unsigned long flags;
- int free_slot;
- int err;
- int task_tag;
-
- host = hba->host;
+ int free_slot, task_tag, err;
/*
* Get free slot, sleep if slots are unavailable.
@@ -5629,30 +5576,11 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
ufshcd_hold(hba, false);
spin_lock_irqsave(host->host_lock, flags);
- task_req_descp = hba->utmrdl_base_addr;
- task_req_descp += free_slot;
-
- /* Configure task request descriptor */
- task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
- task_req_descp->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* Configure task request UPIU */
- task_req_upiup =
- (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
task_tag = hba->nutrs + free_slot;
- task_req_upiup->header.dword_0 =
- UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lun_id, task_tag);
- task_req_upiup->header.dword_1 =
- UPIU_HEADER_DWORD(0, tm_function, 0, 0);
- /*
- * The host shall provide the same value for LUN field in the basic
- * header and for Input Parameter.
- */
- task_req_upiup->input_param1 = cpu_to_be32(lun_id);
- task_req_upiup->input_param2 = cpu_to_be32(task_id);
+ treq->req_header.dword_0 |= cpu_to_be32(task_tag);
+
+ memcpy(hba->utmrdl_base_addr + free_slot, treq, sizeof(*treq));
ufshcd_vops_setup_task_mgmt(hba, free_slot, tm_function);
/* send command to the controller */
@@ -5682,8 +5610,15 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
__func__, free_slot);
err = -ETIMEDOUT;
} else {
- err = ufshcd_task_req_compl(hba, free_slot, tm_response);
+ err = 0;
+ memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
+
ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __clear_bit(free_slot, &hba->outstanding_tasks);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
}
clear_bit(free_slot, &hba->tm_condition);
@@ -5695,6 +5630,228 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
}
/**
+ * ufshcd_issue_tm_cmd - issues task management commands to controller
+ * @hba: per adapter instance
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
+ *
+ * Returns non-zero value on error, zero on success.
+ */
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+ u8 tm_function, u8 *tm_response)
+{
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value, err;
+
+ /* Configure task request descriptor */
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ /* Configure task request UPIU */
+ treq.req_header.dword_0 = cpu_to_be32(lun_id << 8) |
+ cpu_to_be32(UPIU_TRANSACTION_TASK_REQ << 24);
+ treq.req_header.dword_1 = cpu_to_be32(tm_function << 16);
+
+ /*
+ * The host shall provide the same value for LUN field in the basic
+ * header and for Input Parameter.
+ */
+ treq.input_param1 = cpu_to_be32(lun_id);
+ treq.input_param2 = cpu_to_be32(task_id);
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_function);
+ if (err == -ETIMEDOUT)
+ return err;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS)
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+ __func__, ocs_value);
+ else if (tm_response)
+ *tm_response = be32_to_cpu(treq.output_param1) &
+ MASK_TM_SERVICE_RESP;
+ return err;
+}
+
+/**
+ * ufshcd_issue_devman_upiu_cmd - API for sending "utrd" type requests
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply
+ * @cmd_type: specifies the type (DEV_CMD_TYPE_*) of the device management command
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * These types of requests use the UTP Transfer Request Descriptor (UTRD).
+ * Therefore, they "ride" the device management infrastructure: they use its
+ * tag and task work queues.
+ *
+ * Since there is only one available tag for device management commands,
+ * the caller is expected to hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ u8 *desc_buff, int *buff_len,
+ int cmd_type,
+ enum query_opcode desc_op)
+{
+ struct ufshcd_lrb *lrbp;
+ int err = 0;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+ u32 upiu_flags;
+
+ down_read(&hba->clk_scaling_lock);
+
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0;
+ lrbp->intr_cmd = true;
+ hba->dev_cmd.type = cmd_type;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ case UFSHCI_VERSION_11:
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ break;
+ default:
+ lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+ break;
+ }
+
+ /* update the task tag in the request upiu */
+ req_upiu->header.dword_0 |= cpu_to_be32(tag);
+
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+
+ /* just copy the upiu request as it is */
+ memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr));
+ if (desc_buff && desc_op == UPIU_QUERY_OPCODE_WRITE_DESC) {
+ /* The Data Segment Area is optional depending upon the query
+ * function value. For WRITE DESCRIPTOR, the data segment
+ * follows right after the tsf.
+ */
+ memcpy(lrbp->ucd_req_ptr + 1, desc_buff, *buff_len);
+ *buff_len = 0;
+ }
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
+
+ hba->dev_cmd.complete = &wait;
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * Ignore the return value here - ufshcd_check_query_response() is
+ * bound to fail since dev_cmd.query and dev_cmd.type were left empty.
+ * Read the response directly, ignoring all errors.
+ */
+ ufshcd_wait_for_dev_cmd(hba, lrbp, QUERY_REQ_TIMEOUT);
+
+ /* just copy the upiu response as it is */
+ memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu));
+
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ up_read(&hba->clk_scaling_lock);
+ return err;
+}
+
+/**
+ * ufshcd_exec_raw_upiu_cmd - API function for sending raw upiu commands
+ * @hba: per-adapter instance
+ * @req_upiu: upiu request
+ * @rsp_upiu: upiu reply - only 8 DW as we do not support scsi commands
+ * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target
+ * @desc_buff: pointer to descriptor buffer, NULL if NA
+ * @buff_len: descriptor size, 0 if NA
+ * @desc_op: descriptor operation
+ *
+ * Supports UTP Transfer requests (NOP and Query), and UTP Task
+ * Management requests.
+ * It is up to the caller to fill the upiu content properly, as it will
+ * be copied without any further input validation.
+ */
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op)
+{
+ int err;
+ int cmd_type = DEV_CMD_TYPE_QUERY;
+ struct utp_task_req_desc treq = { { 0 }, };
+ int ocs_value;
+ u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC;
+
+ if (desc_buff && desc_op != UPIU_QUERY_OPCODE_WRITE_DESC) {
+ err = -ENOTSUPP;
+ goto out;
+ }
+
+ switch (msgcode) {
+ case UPIU_TRANSACTION_NOP_OUT:
+ cmd_type = DEV_CMD_TYPE_NOP;
+ /* fall through */
+ case UPIU_TRANSACTION_QUERY_REQ:
+ ufshcd_hold(hba, false);
+ mutex_lock(&hba->dev_cmd.lock);
+ err = ufshcd_issue_devman_upiu_cmd(hba, req_upiu, rsp_upiu,
+ desc_buff, buff_len,
+ cmd_type, desc_op);
+ mutex_unlock(&hba->dev_cmd.lock);
+ ufshcd_release(hba);
+
+ break;
+ case UPIU_TRANSACTION_TASK_REQ:
+ treq.header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
+ treq.header.dword_2 = cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+
+ memcpy(&treq.req_header, req_upiu, sizeof(*req_upiu));
+
+ err = __ufshcd_issue_tm_cmd(hba, &treq, tm_f);
+ if (err == -ETIMEDOUT)
+ break;
+
+ ocs_value = le32_to_cpu(treq.header.dword_2) & MASK_OCS;
+ if (ocs_value != OCS_SUCCESS) {
+ dev_err(hba->dev, "%s: failed, ocs = 0x%x\n", __func__,
+ ocs_value);
+ break;
+ }
+
+ memcpy(rsp_upiu, &treq.rsp_header, sizeof(*rsp_upiu));
+
+ break;
+ default:
+ err = -EINVAL;
+
+ break;
+ }
+
+out:
+ return err;
+}
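/*
 * A hedged sketch of the expected caller (the real dispatch lives in the
 * new ufs_bsg glue referenced by ufs_bsg_probe()/ufs_bsg_remove() below,
 * not in this hunk): a raw UPIU from user space is forwarded as-is and the
 * reply copied back. The function name here is illustrative only.
 */
static int example_raw_upiu_passthrough(struct ufs_hba *hba,
					struct utp_upiu_req *req,
					struct utp_upiu_req *rsp)
{
	/* the transaction code is the first byte of the big-endian header */
	int msgcode = be32_to_cpu(req->header.dword_0) >> 24;

	/* no descriptor payload in this example, so desc_buff/buff_len are NULL */
	return ufshcd_exec_raw_upiu_cmd(hba, req, rsp, msgcode,
					NULL, NULL, UPIU_QUERY_OPCODE_NOP);
}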
+
+/**
* ufshcd_eh_device_reset_handler - device reset handler registered to
* scsi layer.
* @cmd: SCSI command pointer
@@ -6652,6 +6809,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
hba->clk_scaling.is_allowed = true;
}
+ ufs_bsg_probe(hba);
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
@@ -6666,6 +6825,7 @@ out:
*/
if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
pm_runtime_put_sync(hba->dev);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
}
@@ -7201,12 +7361,9 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba)) {
+ if (ufshcd_is_clkscaling_supported(hba))
if (hba->devfreq)
ufshcd_suspend_clkscaling(hba);
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
- }
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -7875,12 +8032,14 @@ EXPORT_SYMBOL(ufshcd_shutdown);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ ufs_bsg_remove(hba);
ufs_sysfs_remove_nodes(hba->dev);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
if (ufshcd_is_clkscaling_supported(hba))
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
@@ -8027,7 +8186,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
- host->max_cmd_len = MAX_CDB_SIZE;
+ host->max_cmd_len = UFS_CDB_SIZE;
hba->max_pwr_info.is_valid = false;
@@ -8052,6 +8211,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_init_clk_gating(hba);
+ ufshcd_init_clk_scaling(hba);
+
/*
* In order to avoid any spurious interrupt immediately after
* registering UFS controller interrupt handler, clear any pending UFS
@@ -8090,21 +8251,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_remove_scsi_host;
}
- if (ufshcd_is_clkscaling_supported(hba)) {
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
-
- ufshcd_clkscaling_init_sysfs(hba);
- }
-
/*
* Set the default power management level for runtime and system PM.
* Default power saving mode is to keep UFS link in Hibern8 state
@@ -8142,6 +8288,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
+ ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
out_disable:
hba->is_irq_enabled = false;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 33fdd3f281ae..1a1c2b487a4e 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -702,6 +702,9 @@ struct ufs_hba {
struct rw_semaphore clk_scaling_lock;
struct ufs_desc_size desc_size;
atomic_t scsi_block_reqs_cnt;
+
+ struct device bsg_dev;
+ struct request_queue *bsg_queue;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -892,6 +895,15 @@ int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
+
+int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
+ struct utp_upiu_req *req_upiu,
+ struct utp_upiu_req *rsp_upiu,
+ int msgcode,
+ u8 *desc_buff, int *buff_len,
+ enum query_opcode desc_op);
+
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index bb5d9c7f3353..6fa889de5ee5 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -433,22 +433,25 @@ struct utp_transfer_req_desc {
__le16 prd_table_offset;
};
-/**
- * struct utp_task_req_desc - UTMRD structure
- * @header: UTMRD header DW-0 to DW-3
- * @task_req_upiu: Pointer to task request UPIU DW-4 to DW-11
- * @task_rsp_upiu: Pointer to task response UPIU DW12 to DW-19
+/*
+ * UTMRD structure.
*/
struct utp_task_req_desc {
-
/* DW 0-3 */
struct request_desc_header header;
- /* DW 4-11 */
- __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
-
- /* DW 12-19 */
- __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+ /* DW 4-11 - Task request UPIU structure */
+ struct utp_upiu_header req_header;
+ __be32 input_param1;
+ __be32 input_param2;
+ __be32 input_param3;
+ __be32 __reserved1[2];
+
+ /* DW 12-19 - Task Management Response UPIU structure */
+ struct utp_upiu_header rsp_header;
+ __be32 output_param1;
+ __be32 output_param2;
+ __be32 __reserved2[3];
};
#endif /* End of Header */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 0cd947f78b5b..6e491023fdd8 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -372,9 +372,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
pvscsi_create_sg(ctx, sg, segs);
e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
- ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
- SGL_SIZE, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sglPA)) {
+ ctx->sglPA = dma_map_single(&adapter->dev->dev,
+ ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
scsi_dma_unmap(cmd);
@@ -389,9 +389,9 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
* In case there is no S/G list, scsi_sglist points
* directly to the buffer.
*/
- ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
+ ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
cmd->sc_data_direction);
- if (pci_dma_mapping_error(adapter->dev, ctx->dataPA)) {
+ if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
return -ENOMEM;
@@ -417,23 +417,23 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
if (count != 0) {
scsi_dma_unmap(cmd);
if (ctx->sglPA) {
- pci_unmap_single(adapter->dev, ctx->sglPA,
- SGL_SIZE, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
+ SGL_SIZE, DMA_TO_DEVICE);
ctx->sglPA = 0;
}
} else
- pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
- cmd->sc_data_direction);
+ dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
+ bufflen, cmd->sc_data_direction);
}
if (cmd->sense_buffer)
- pci_unmap_single(adapter->dev, ctx->sensePA,
- SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
+ SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
- adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &adapter->ringStatePA);
+ adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &adapter->ringStatePA, GFP_KERNEL);
if (!adapter->rings_state)
return -ENOMEM;
@@ -441,17 +441,17 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
pvscsi_ring_pages);
adapter->req_depth = adapter->req_pages
* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
- adapter->req_ring = pci_alloc_consistent(adapter->dev,
- adapter->req_pages * PAGE_SIZE,
- &adapter->reqRingPA);
+ adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
+ GFP_KERNEL);
if (!adapter->req_ring)
return -ENOMEM;
adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
pvscsi_ring_pages);
- adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
- adapter->cmp_pages * PAGE_SIZE,
- &adapter->cmpRingPA);
+ adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
+ GFP_KERNEL);
if (!adapter->cmp_ring)
return -ENOMEM;
@@ -464,9 +464,9 @@ static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
pvscsi_msg_ring_pages);
- adapter->msg_ring = pci_alloc_consistent(adapter->dev,
- adapter->msg_pages * PAGE_SIZE,
- &adapter->msgRingPA);
+ adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
+ adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
+ GFP_KERNEL);
if (!adapter->msg_ring)
return -ENOMEM;
BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));
@@ -708,10 +708,10 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
e->lun[1] = sdev->lun;
if (cmd->sense_buffer) {
- ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
- SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(adapter->dev, ctx->sensePA)) {
+ ctx->sensePA = dma_map_single(&adapter->dev->dev,
+ cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
scmd_printk(KERN_ERR, cmd,
"vmw_pvscsi: Failed to map sense buffer for DMA.\n");
ctx->sensePA = 0;
@@ -740,9 +740,9 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
if (cmd->sense_buffer) {
- pci_unmap_single(adapter->dev, ctx->sensePA,
+ dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
SCSI_SENSE_BUFFERSIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
ctx->sensePA = 0;
}
return -ENOMEM;
@@ -1218,21 +1218,21 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
}
if (adapter->rings_state)
- pci_free_consistent(adapter->dev, PAGE_SIZE,
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
adapter->rings_state, adapter->ringStatePA);
if (adapter->req_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->req_pages * PAGE_SIZE,
adapter->req_ring, adapter->reqRingPA);
if (adapter->cmp_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->cmp_pages * PAGE_SIZE,
adapter->cmp_ring, adapter->cmpRingPA);
if (adapter->msg_ring)
- pci_free_consistent(adapter->dev,
+ dma_free_coherent(&adapter->dev->dev,
adapter->msg_pages * PAGE_SIZE,
adapter->msg_ring, adapter->msgRingPA);
}
@@ -1291,8 +1291,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
u32 numPhys = 16;
dev = pvscsi_dev(adapter);
- config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
- &configPagePA);
+ config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
+ &configPagePA, GFP_KERNEL);
if (!config_page) {
dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
goto exit;
@@ -1326,7 +1326,8 @@ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
} else
dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
header->hostStatus, header->scsiStatus);
- pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA);
+ dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
+ configPagePA);
exit:
return numPhys;
}
@@ -1346,11 +1347,9 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (pci_enable_device(pdev))
return error;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
- } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
+ } else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
} else {
printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
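/*
 * The pvscsi changes above follow one mechanical pattern; a minimal sketch
 * of the legacy-to-generic DMA API mapping, assuming a PCI driver with a
 * struct pci_dev *pdev (names illustrative):
 *
 *	pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE)
 *		-> dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE)
 *	pci_alloc_consistent(pdev, sz, &pa)
 *		-> dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_KERNEL)
 *	pci_set_dma_mask(pdev, m) + pci_set_consistent_dma_mask(pdev, m)
 *		-> dma_set_mask_and_coherent(&pdev->dev, m)
 */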
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index bb70882e6b56..ca8e3abeb2c7 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -9,8 +9,6 @@
*
* Copyright (C) 2013 Tuomas Vainikka (tuomas.vainikka@aalto.fi) for
* Blizzard 1230 DMA and probe function fixes
- *
- * Copyright (C) 2017 Finn Thain for PIO code from Mac ESP driver adapted here
*/
/*
* ZORRO bus code from:
@@ -159,7 +157,6 @@ struct fastlane_dma_registers {
struct zorro_esp_priv {
struct esp *esp; /* our ESP instance - for Scsi_host* */
void __iomem *board_base; /* virtual address (Zorro III board) */
- int error; /* PIO error flag */
int zorro3; /* board is Zorro III */
unsigned char ctrl_data; /* shadow copy of ctrl_reg */
};
@@ -182,30 +179,6 @@ static u8 zorro_esp_read8(struct esp *esp, unsigned long reg)
return readb(esp->regs + (reg * 4UL));
}
-static dma_addr_t zorro_esp_map_single(struct esp *esp, void *buf,
- size_t sz, int dir)
-{
- return dma_map_single(esp->dev, buf, sz, dir);
-}
-
-static int zorro_esp_map_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- return dma_map_sg(esp->dev, sg, num_sg, dir);
-}
-
-static void zorro_esp_unmap_single(struct esp *esp, dma_addr_t addr,
- size_t sz, int dir)
-{
- dma_unmap_single(esp->dev, addr, sz, dir);
-}
-
-static void zorro_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
- int num_sg, int dir)
-{
- dma_unmap_sg(esp->dev, sg, num_sg, dir);
-}
-
static int zorro_esp_irq_pending(struct esp *esp)
{
/* check ESP status register; DMA has no status reg. */
@@ -245,7 +218,7 @@ static int fastlane_esp_irq_pending(struct esp *esp)
static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
u32 dma_len)
{
- return dma_len > 0xFFFFFF ? 0xFFFFFF : dma_len;
+ return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}
static void zorro_esp_reset_dma(struct esp *esp)
@@ -274,192 +247,29 @@ static void fastlane_esp_dma_invalidate(struct esp *esp)
z_writel(0, zep->board_base);
}
-/*
- * Programmed IO routines follow.
- */
-
-static inline unsigned int zorro_esp_wait_for_fifo(struct esp *esp)
-{
- int i = 500000;
-
- do {
- unsigned int fbytes = zorro_esp_read8(esp, ESP_FFLAGS)
- & ESP_FF_FBYTES;
-
- if (fbytes)
- return fbytes;
-
- udelay(2);
- } while (--i);
-
- pr_err("FIFO is empty (sreg %02x)\n",
- zorro_esp_read8(esp, ESP_STATUS));
- return 0;
-}
-
-static inline int zorro_esp_wait_for_intr(struct esp *esp)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- int i = 500000;
-
- do {
- esp->sreg = zorro_esp_read8(esp, ESP_STATUS);
- if (esp->sreg & ESP_STAT_INTR)
- return 0;
-
- udelay(2);
- } while (--i);
-
- pr_err("IRQ timeout (sreg %02x)\n", esp->sreg);
- zep->error = 1;
- return 1;
-}
-
-/*
- * PIO macros as used in mac_esp.c.
- * Note that addr and fifo arguments are local-scope variables declared
- * in zorro_esp_send_pio_cmd(), the macros are only used in that function,
- * and addr and fifo are referenced in each use of the macros so there
- * is no need to pass them as macro parameters.
- */
-#define ZORRO_ESP_PIO_LOOP(operands, reg1) \
- asm volatile ( \
- "1: moveb " operands "\n" \
- " subqw #1,%1 \n" \
- " jbne 1b \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_PIO_FILL(operands, reg1) \
- asm volatile ( \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " moveb " operands "\n" \
- " subqw #8,%1 \n" \
- " subqw #8,%1 \n" \
- : "+a" (addr), "+r" (reg1) \
- : "a" (fifo));
-
-#define ZORRO_ESP_FIFO_SIZE 16
-
-static void zorro_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
- u32 dma_count, int write, u8 cmd)
-{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
- u8 __iomem *fifo = esp->regs + ESP_FDATA * 16;
- u8 phase = esp->sreg & ESP_STAT_PMASK;
-
- cmd &= ~ESP_CMD_DMA;
-
- if (write) {
- u8 *dst = (u8 *)addr;
- u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);
-
- scsi_esp_cmd(esp, cmd);
-
- while (1) {
- if (!zorro_esp_wait_for_fifo(esp))
- break;
-
- *dst++ = zorro_esp_read8(esp, ESP_FDATA);
- --esp_count;
-
- if (!esp_count)
- break;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & mask) {
- zep->error = 1;
- break;
- }
-
- if (phase == ESP_MIP)
- scsi_esp_cmd(esp, ESP_CMD_MOK);
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- } else { /* unused, as long as we only handle MIP here */
- scsi_esp_cmd(esp, ESP_CMD_FLUSH);
-
- if (esp_count >= ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", esp_count)
-
- scsi_esp_cmd(esp, cmd);
-
- while (esp_count) {
- unsigned int n;
-
- if (zorro_esp_wait_for_intr(esp))
- break;
-
- if ((esp->sreg & ESP_STAT_PMASK) != phase)
- break;
-
- esp->ireg = zorro_esp_read8(esp, ESP_INTRPT);
- if (esp->ireg & ~ESP_INTR_BSERV) {
- zep->error = 1;
- break;
- }
-
- n = ZORRO_ESP_FIFO_SIZE -
- (zorro_esp_read8(esp, ESP_FFLAGS) & ESP_FF_FBYTES);
- if (n > esp_count)
- n = esp_count;
-
- if (n == ZORRO_ESP_FIFO_SIZE)
- ZORRO_ESP_PIO_FILL("%0@+,%2@", esp_count)
- else {
- esp_count -= n;
- ZORRO_ESP_PIO_LOOP("%0@+,%2@", n)
- }
-
- scsi_esp_cmd(esp, ESP_CMD_TI);
- }
- }
-}
-
/* Blizzard 1230/60 SCSI-IV DMA */
static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/*
* Use PIO if transferring message bytes to esp->command_block_dma.
* PIO requires a virtual address, so substitute esp->command_block
* for addr.
*/
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ /* Clear the results of a possible prior esp->ops->send_dma_cmd() */
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -484,7 +294,6 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -494,18 +303,19 @@ static void zorro_esp_send_blz1230_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz1230II_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -529,7 +339,6 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -539,18 +348,19 @@ static void zorro_esp_send_blz1230II_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct blz2060_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
if (write)
/* DMA receive */
dma_sync_single_for_device(esp->dev, addr, esp_count,
@@ -574,7 +384,6 @@ static void zorro_esp_send_blz2060_dma_cmd(struct esp *esp, u32 addr,
scsi_esp_cmd(esp, ESP_CMD_DMA);
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
scsi_esp_cmd(esp, cmd);
}
@@ -589,17 +398,18 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -635,21 +445,21 @@ static void zorro_esp_send_cyber_dma_cmd(struct esp *esp, u32 addr,
static void zorro_esp_send_cyberII_dma_cmd(struct esp *esp, u32 addr,
u32 esp_count, u32 dma_count, int write, u8 cmd)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
struct cyberII_dma_registers __iomem *dregs = esp->dma_regs;
u8 phase = esp->sreg & ESP_STAT_PMASK;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -681,17 +491,18 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
u8 phase = esp->sreg & ESP_STAT_PMASK;
unsigned char *ctrl_data = &zep->ctrl_data;
- zep->error = 0;
/* Use PIO if transferring message bytes to esp->command_block_dma */
if (phase == ESP_MIP && addr == esp->command_block_dma) {
- zorro_esp_send_pio_cmd(esp, (u32) esp->command_block,
- esp_count, dma_count, write, cmd);
+ esp_send_pio_cmd(esp, (u32)esp->command_block, esp_count,
+ dma_count, write, cmd);
return;
}
+ esp->send_cmd_error = 0;
+ esp->send_cmd_residual = 0;
+
zorro_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
zorro_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
- zorro_esp_write8(esp, (esp_count >> 16) & 0xff, ESP_TCHI);
if (write) {
/* DMA receive */
@@ -724,14 +535,7 @@ static void zorro_esp_send_fastlane_dma_cmd(struct esp *esp, u32 addr,
static int zorro_esp_dma_error(struct esp *esp)
{
- struct zorro_esp_priv *zep = dev_get_drvdata(esp->dev);
-
- /* check for error in case we've been doing PIO */
- if (zep->error == 1)
- return 1;
-
- /* do nothing - there seems to be no way to check for DMA errors */
- return 0;
+ return esp->send_cmd_error;
}
/* per-board ESP driver ops */
@@ -739,10 +543,6 @@ static int zorro_esp_dma_error(struct esp *esp)
static const struct esp_driver_ops blz1230_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -755,10 +555,6 @@ static const struct esp_driver_ops blz1230_esp_ops = {
static const struct esp_driver_ops blz1230II_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -771,10 +567,6 @@ static const struct esp_driver_ops blz1230II_esp_ops = {
static const struct esp_driver_ops blz2060_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -787,10 +579,6 @@ static const struct esp_driver_ops blz2060_esp_ops = {
static const struct esp_driver_ops cyber_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = cyber_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -803,10 +591,6 @@ static const struct esp_driver_ops cyber_esp_ops = {
static const struct esp_driver_ops cyberII_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = zorro_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -819,10 +603,6 @@ static const struct esp_driver_ops cyberII_esp_ops = {
static const struct esp_driver_ops fastlane_esp_ops = {
.esp_write8 = zorro_esp_write8,
.esp_read8 = zorro_esp_read8,
- .map_single = zorro_esp_map_single,
- .map_sg = zorro_esp_map_sg,
- .unmap_single = zorro_esp_unmap_single,
- .unmap_sg = zorro_esp_unmap_sg,
.irq_pending = fastlane_esp_irq_pending,
.dma_length_limit = zorro_esp_dma_length_limit,
.reset_dma = zorro_esp_reset_dma,
@@ -1039,6 +819,8 @@ static int zorro_esp_probe(struct zorro_dev *z,
goto fail_unmap_fastlane;
}
+ esp->fifo_reg = esp->regs + ESP_FDATA * 4;
+
/* Check whether a Blizzard 12x0 or CyberstormII really has SCSI */
if (zdd->scsi_option) {
zorro_esp_write8(esp, (ESP_CONFIG1_PENABLE | 7), ESP_CFG1);
@@ -1082,7 +864,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
}
/* register the chip */
- err = scsi_esp_register(esp, &z->dev);
+ err = scsi_esp_register(esp);
if (err) {
err = -ENOMEM;
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index 95b00d28ad6e..55eda5863a6b 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slimbus.h>
#include "slimbus.h"
@@ -32,6 +33,10 @@ static int slim_device_match(struct device *dev, struct device_driver *drv)
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(drv);
+ /* Attempt an OF style match first */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
return !!slim_match(sbdrv->id_table, sbdev);
}
@@ -39,8 +44,23 @@ static int slim_device_probe(struct device *dev)
{
struct slim_device *sbdev = to_slim_device(dev);
struct slim_driver *sbdrv = to_slim_driver(dev->driver);
+ int ret;
- return sbdrv->probe(sbdev);
+ ret = sbdrv->probe(sbdev);
+ if (ret)
+ return ret;
+
+ /* try getting the logical address after probe */
+ ret = slim_get_logical_addr(sbdev);
+ if (!ret) {
+ if (sbdrv->device_status)
+ sbdrv->device_status(sbdev, sbdev->status);
+ } else {
+ dev_err(&sbdev->dev, "Failed to get logical address\n");
+ ret = -EPROBE_DEFER;
+ }
+
+ return ret;
}
static int slim_device_remove(struct device *dev)
@@ -57,11 +77,24 @@ static int slim_device_remove(struct device *dev)
return 0;
}
+static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct slim_device *sbdev = to_slim_device(dev);
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
+}
+
struct bus_type slimbus_bus = {
.name = "slimbus",
.match = slim_device_match,
.probe = slim_device_probe,
.remove = slim_device_remove,
+ .uevent = slim_device_uevent,
};
EXPORT_SYMBOL_GPL(slimbus_bus);
@@ -77,7 +110,7 @@ EXPORT_SYMBOL_GPL(slimbus_bus);
int __slim_driver_register(struct slim_driver *drv, struct module *owner)
{
/* an OF match table or an ID table, plus probe, are mandatory */
- if (!drv->id_table || !drv->probe)
+ if (!(drv->driver.of_match_table || drv->id_table) || !drv->probe)
return -EINVAL;
drv->driver.bus = &slimbus_bus;
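/*
 * With the relaxed check above, a client may register with only an OF
 * match table; a minimal sketch, assuming a hypothetical binding (the
 * compatible string and names below are illustrative, not a real driver):
 */
static int example_slim_probe(struct slim_device *sbdev)
{
	return 0; /* probe is still mandatory */
}

static const struct of_device_id example_slim_of_match[] = {
	{ .compatible = "example,slim-codec" },
	{ }
};

static struct slim_driver example_slim_driver = {
	.probe = example_slim_probe,
	.driver = {
		.name = "example-slim-codec",
		.of_match_table = example_slim_of_match,
	},
};
module_slim_driver(example_slim_driver);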
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 8be4d6786c61..7218fb963d0a 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -1004,6 +1004,7 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
struct slim_eaddr *ea, u8 *laddr)
{
struct slim_val_inf msg = {0};
+ u8 failed_ea[6] = {0, 0, 0, 0, 0, 0};
struct slim_msg_txn txn;
u8 wbuf[10] = {0};
u8 rbuf[10] = {0};
@@ -1034,6 +1035,9 @@ static int qcom_slim_ngd_get_laddr(struct slim_controller *ctrl,
return ret;
}
+ if (!memcmp(rbuf, failed_ea, 6))
+ return -ENXIO;
+
*laddr = rbuf[6];
return ret;
@@ -1234,8 +1238,17 @@ static int qcom_slim_ngd_enable(struct qcom_slim_ngd_ctrl *ctrl, bool enable)
pm_runtime_resume(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put(ctrl->dev);
+
+ ret = slim_register_controller(&ctrl->ctrl);
+ if (ret) {
+ dev_err(ctrl->dev, "error adding slim controller\n");
+ return ret;
+ }
+
+ dev_info(ctrl->dev, "SLIM controller registered\n");
} else {
qcom_slim_qmi_exit(ctrl);
+ slim_unregister_controller(&ctrl->ctrl);
}
return 0;
@@ -1342,7 +1355,6 @@ static int of_qcom_slim_ngd_register(struct device *parent,
ngd->base = ctrl->base + ngd->id * data->offset +
(ngd->id - 1) * data->size;
ctrl->ngd = ngd;
- platform_driver_register(&qcom_slim_ngd_driver);
return 0;
}
@@ -1357,11 +1369,6 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
int ret;
ctrl->ctrl.dev = dev;
- ret = slim_register_controller(&ctrl->ctrl);
- if (ret) {
- dev_err(dev, "error adding slim controller\n");
- return ret;
- }
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, QCOM_SLIM_NGD_AUTOSUSPEND);
@@ -1371,7 +1378,7 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
ret = qcom_slim_ngd_qmi_svc_event_init(ctrl);
if (ret) {
dev_err(&pdev->dev, "QMI service registration failed:%d", ret);
- goto err;
+ return ret;
}
INIT_WORK(&ctrl->m_work, qcom_slim_ngd_master_worker);
@@ -1383,14 +1390,12 @@ static int qcom_slim_ngd_probe(struct platform_device *pdev)
}
return 0;
-err:
- slim_unregister_controller(&ctrl->ctrl);
wq_err:
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
destroy_workqueue(ctrl->mwq);
- return 0;
+ return ret;
}
static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
@@ -1441,6 +1446,7 @@ static int qcom_slim_ngd_ctrl_probe(struct platform_device *pdev)
init_completion(&ctrl->reconf);
init_completion(&ctrl->qmi.qmi_comp);
+ platform_driver_register(&qcom_slim_ngd_driver);
return of_qcom_slim_ngd_register(dev, ctrl);
}
@@ -1456,7 +1462,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
struct qcom_slim_ngd_ctrl *ctrl = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
- slim_unregister_controller(&ctrl->ctrl);
+ qcom_slim_ngd_enable(ctrl, false);
qcom_slim_ngd_exit_dma(ctrl);
qcom_slim_ngd_qmi_svc_event_deinit(&ctrl->qmi);
if (ctrl->mwq)
@@ -1467,7 +1473,7 @@ static int qcom_slim_ngd_remove(struct platform_device *pdev)
return 0;
}
-static int qcom_slim_ngd_runtime_idle(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_idle(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
@@ -1477,8 +1483,7 @@ static int qcom_slim_ngd_runtime_idle(struct device *dev)
return -EAGAIN;
}
-#ifdef CONFIG_PM
-static int qcom_slim_ngd_runtime_suspend(struct device *dev)
+static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
{
struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
int ret = 0;
@@ -1491,7 +1496,6 @@ static int qcom_slim_ngd_runtime_suspend(struct device *dev)
return ret;
}
-#endif
static const struct dev_pm_ops qcom_slim_ngd_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 5abb08ffb74d..ffc5311c0ed8 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -383,7 +383,7 @@ int __init dove_init_pmu(void)
domains_node = of_get_child_by_name(np_pmu, "domains");
if (!domains_node) {
- pr_err("%s: failed to find domains sub-node\n", np_pmu->name);
+ pr_err("%pOFn: failed to find domains sub-node\n", np_pmu);
return 0;
}
@@ -396,7 +396,7 @@ int __init dove_init_pmu(void)
pmu->pmc_base = of_iomap(pmu->of_node, 0);
pmu->pmu_base = of_iomap(pmu->of_node, 1);
if (!pmu->pmc_base || !pmu->pmu_base) {
- pr_err("%s: failed to map PMU\n", np_pmu->name);
+ pr_err("%pOFn: failed to map PMU\n", np_pmu);
iounmap(pmu->pmu_base);
iounmap(pmu->pmc_base);
kfree(pmu);
@@ -414,7 +414,7 @@ int __init dove_init_pmu(void)
break;
domain->pmu = pmu;
- domain->base.name = kstrdup(np->name, GFP_KERNEL);
+ domain->base.name = kasprintf(GFP_KERNEL, "%pOFn", np);
if (!domain->base.name) {
kfree(domain);
break;
@@ -444,7 +444,7 @@ int __init dove_init_pmu(void)
/* Loss of the interrupt controller is not a fatal error. */
parent_irq = irq_of_parse_and_map(pmu->of_node, 0);
if (!parent_irq) {
- pr_err("%s: no interrupt specified\n", np_pmu->name);
+ pr_err("%pOFn: no interrupt specified\n", np_pmu);
} else {
ret = dove_init_pmu_irq(pmu, parent_irq);
if (ret)
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 9b17f72349ed..321a92613a7e 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -310,6 +310,37 @@ int dpaa2_io_service_rearm(struct dpaa2_io *d,
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
+ * dpaa2_io_service_pull_fq() - pull (dequeue) frames from a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @s: the dpaa2_io_store object for the result.
+ *
+ * Return 0 for success, or error code for failure.
+ */
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s)
+{
+ struct qbman_pull_desc pd;
+ int err;
+
+ qbman_pull_desc_clear(&pd);
+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
+ qbman_pull_desc_set_numframes(&pd, (u8)s->max);
+ qbman_pull_desc_set_fq(&pd, fqid);
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ s->swp = d->swp;
+ err = qbman_swp_pull(d->swp, &pd);
+ if (err)
+ s->swp = NULL;
+
+ return err;
+}
+EXPORT_SYMBOL(dpaa2_io_service_pull_fq);
+
+/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
* @d: the given DPIO service.
* @channelid: the given channel id.
@@ -342,6 +373,33 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue.
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ *
+ * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
+ u32 fqid,
+ const struct dpaa2_fd *fd)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_no_orp(&ed, 0);
+ qbman_eq_desc_set_fq(&ed, fqid);
+
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);
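/*
 * A hedged usage sketch for the two new exports, assuming the caller
 * already owns a frame descriptor and a dpaa2_io_store (the fqids and
 * names are illustrative; passing a NULL service lets service_select()
 * pick any available DPIO):
 *
 *	err = dpaa2_io_service_enqueue_fq(NULL, tx_fqid, &fd);
 *	(-EBUSY means the enqueue ring was not ready; retry later)
 *
 *	err = dpaa2_io_service_pull_fq(NULL, rx_fqid, store);
 *	(then poll dpaa2_io_store_next() until the dequeue completes)
 */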
+
+/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 6fd5fef5f39b..109b38de3176 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -419,7 +419,7 @@ static size_t fqd_sz, pfdr_sz;
static int zero_priv_mem(phys_addr_t addr, size_t sz)
{
/* map as cacheable, non-guarded */
- void __iomem *tmpp = ioremap_prot(addr, sz, 0);
+ void __iomem *tmpp = ioremap_cache(addr, sz);
if (!tmpp)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/qe_tdm.c b/drivers/soc/fsl/qe/qe_tdm.c
index f744c214f680..f78c34647ca2 100644
--- a/drivers/soc/fsl/qe/qe_tdm.c
+++ b/drivers/soc/fsl/qe/qe_tdm.c
@@ -131,7 +131,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
return -EINVAL;
}
@@ -153,7 +153,7 @@ int ucc_of_parse_tdm(struct device_node *np, struct ucc_tdm *utdm,
pdev = of_find_device_by_node(np2);
if (!pdev) {
ret = -EINVAL;
- pr_err("%s: failed to lookup pdev\n", np2->name);
+ pr_err("%pOFn: failed to lookup pdev\n", np2);
of_node_put(np2);
goto err_miss_siram_property;
}
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 57af8a537332..4bda793ba6ae 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -219,7 +219,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
adev->domain_id = id->domain_id;
adev->version = id->svc_version;
if (np)
- strncpy(adev->name, np->name, APR_NAME_SIZE);
+ snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
else
strncpy(adev->name, id->name, APR_NAME_SIZE);
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 6dff8682155f..6f86a726bb45 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -392,21 +392,21 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = of_property_read_u32(node, "reg", &id);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- node->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ node, error);
return -EINVAL;
}
if (id >= pmu->info->num_domains) {
- dev_err(pmu->dev, "%s: invalid domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: invalid domain id %d\n",
+ node, id);
return -EINVAL;
}
pd_info = &pmu->info->domain_info[id];
if (!pd_info) {
- dev_err(pmu->dev, "%s: undefined domain id %d\n",
- node->name, id);
+ dev_err(pmu->dev, "%pOFn: undefined domain id %d\n",
+ node, id);
return -EINVAL;
}
@@ -424,8 +424,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (!pd->clks)
return -ENOMEM;
} else {
- dev_dbg(pmu->dev, "%s: doesn't have clocks: %d\n",
- node->name, pd->num_clks);
+ dev_dbg(pmu->dev, "%pOFn: doesn't have clocks: %d\n",
+ node, pd->num_clks);
pd->num_clks = 0;
}
@@ -434,8 +434,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
if (IS_ERR(pd->clks[i].clk)) {
error = PTR_ERR(pd->clks[i].clk);
dev_err(pmu->dev,
- "%s: failed to get clk at index %d: %d\n",
- node->name, i, error);
+ "%pOFn: failed to get clk at index %d: %d\n",
+ node, i, error);
return error;
}
}
@@ -486,8 +486,8 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
error = rockchip_pd_power(pd, true);
if (error) {
dev_err(pmu->dev,
- "failed to power on domain '%s': %d\n",
- node->name, error);
+ "failed to power on domain '%pOFn': %d\n",
+ node, error);
goto err_unprepare_clocks;
}
@@ -575,24 +575,24 @@ static int rockchip_pm_add_subdomain(struct rockchip_pmu *pmu,
error = of_property_read_u32(parent, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- parent->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ parent, error);
goto err_out;
}
parent_domain = pmu->genpd_data.domains[idx];
error = rockchip_pm_add_one_domain(pmu, np);
if (error) {
- dev_err(pmu->dev, "failed to handle node %s: %d\n",
- np->name, error);
+ dev_err(pmu->dev, "failed to handle node %pOFn: %d\n",
+ np, error);
goto err_out;
}
error = of_property_read_u32(np, "reg", &idx);
if (error) {
dev_err(pmu->dev,
- "%s: failed to retrieve domain id (reg): %d\n",
- np->name, error);
+ "%pOFn: failed to retrieve domain id (reg): %d\n",
+ np, error);
goto err_out;
}
child_domain = pmu->genpd_data.domains[idx];
@@ -683,16 +683,16 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
for_each_available_child_of_node(np, node) {
error = rockchip_pm_add_one_domain(pmu, node);
if (error) {
- dev_err(dev, "failed to handle node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
error = rockchip_pm_add_subdomain(pmu, node);
if (error < 0) {
- dev_err(dev, "failed to handle subdomain node %s: %d\n",
- node->name, error);
+ dev_err(dev, "failed to handle subdomain node %pOFn: %d\n",
+ node, error);
of_node_put(node);
goto err_out;
}
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 2d6f3fcf3211..acbe63e925d5 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -796,7 +796,7 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
id = tegra_powergate_lookup(pmc, np->name);
if (id < 0) {
- pr_err("powergate lookup failed for %s: %d\n", np->name, id);
+ pr_err("powergate lookup failed for %pOFn: %d\n", np, id);
goto free_mem;
}
@@ -816,13 +816,13 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = tegra_powergate_of_get_clks(pg, np);
if (err < 0) {
- pr_err("failed to get clocks for %s: %d\n", np->name, err);
+ pr_err("failed to get clocks for %pOFn: %d\n", np, err);
goto set_available;
}
err = tegra_powergate_of_get_resets(pg, np, off);
if (err < 0) {
- pr_err("failed to get resets for %s: %d\n", np->name, err);
+ pr_err("failed to get resets for %pOFn: %d\n", np, err);
goto remove_clks;
}
@@ -851,15 +851,15 @@ static void tegra_powergate_add(struct tegra_pmc *pmc, struct device_node *np)
err = pm_genpd_init(&pg->genpd, NULL, off);
if (err < 0) {
- pr_err("failed to initialise PM domain %s: %d\n", np->name,
+ pr_err("failed to initialise PM domain %pOFn: %d\n", np,
err);
goto remove_resets;
}
err = of_genpd_add_provider_simple(np, &pg->genpd);
if (err < 0) {
- pr_err("failed to add PM domain provider for %s: %d\n",
- np->name, err);
+ pr_err("failed to add PM domain provider for %pOFn: %d\n",
+ np, err);
goto remove_genpd;
}
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 224d7ddeeb76..bbd4e5bc8707 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -544,15 +544,15 @@ static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
if (_size)
*_size = resource_size(&res);
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index 6755f2af5619..b5d5673c255c 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -1382,15 +1382,15 @@ static void __iomem *knav_queue_map_reg(struct knav_device *kdev,
ret = of_address_to_resource(node, index, &res);
if (ret) {
- dev_err(kdev->dev, "Can't translate of node(%s) address for index(%d)\n",
- node->name, index);
+ dev_err(kdev->dev, "Can't translate of node(%pOFn) address for index(%d)\n",
+ node, index);
return ERR_PTR(ret);
}
regs = devm_ioremap_resource(kdev->dev, &res);
if (IS_ERR(regs))
- dev_err(kdev->dev, "Failed to map register base for index(%d) node(%s)\n",
- index, node->name);
+ dev_err(kdev->dev, "Failed to map register base for index(%d) node(%pOFn)\n",
+ index, node);
return regs;
}
diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c
index dcc0ff9f0c22..1cbfedfc20ef 100644
--- a/drivers/soundwire/bus.c
+++ b/drivers/soundwire/bus.c
@@ -35,6 +35,11 @@ int sdw_add_bus_master(struct sdw_bus *bus)
INIT_LIST_HEAD(&bus->slaves);
INIT_LIST_HEAD(&bus->m_rt_list);
+ /*
+ * Initialize multi_link flag
+ * TODO: populate this flag by reading property from FW node
+ */
+ bus->multi_link = false;
if (bus->ops->read_prop) {
ret = bus->ops->read_prop(bus);
if (ret < 0) {
@@ -175,6 +180,7 @@ static inline int do_transfer_defer(struct sdw_bus *bus,
defer->msg = msg;
defer->length = msg->len;
+ init_completion(&defer->complete);
for (i = 0; i <= retry; i++) {
resp = bus->ops->xfer_msg_defer(bus, msg, defer);
diff --git a/drivers/soundwire/bus.h b/drivers/soundwire/bus.h
index 3b15c4e25a3a..c77de05b8100 100644
--- a/drivers/soundwire/bus.h
+++ b/drivers/soundwire/bus.h
@@ -4,6 +4,8 @@
#ifndef __SDW_BUS_H
#define __SDW_BUS_H
+#define DEFAULT_BANK_SWITCH_TIMEOUT 3000
+
#if IS_ENABLED(CONFIG_ACPI)
int sdw_acpi_find_slaves(struct sdw_bus *bus);
#else
@@ -99,6 +101,7 @@ struct sdw_slave_runtime {
* this stream, can be zero.
* @slave_rt_list: Slave runtime list
* @port_list: List of Master Ports configured for this stream, can be zero.
+ * @stream_node: sdw_stream_runtime master_list node
* @bus_node: sdw_bus m_rt_list node
*/
struct sdw_master_runtime {
@@ -108,6 +111,7 @@ struct sdw_master_runtime {
unsigned int ch_count;
struct list_head slave_rt_list;
struct list_head port_list;
+ struct list_head stream_node;
struct list_head bus_node;
};
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 0a8990e758f9..c5ee97ee7886 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -398,6 +398,69 @@ static int intel_config_stream(struct sdw_intel *sdw,
}
/*
+ * bank switch routines
+ */
+
+static int intel_pre_bank_switch(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ void __iomem *shim = sdw->res->shim;
+ int sync_reg;
+
+ /* Write to register only for multi-link */
+ if (!bus->multi_link)
+ return 0;
+
+ /* Read SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+ sync_reg |= SDW_SHIM_SYNC_CMDSYNC << sdw->instance;
+ intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
+
+ return 0;
+}
+
+static int intel_post_bank_switch(struct sdw_bus *bus)
+{
+ struct sdw_cdns *cdns = bus_to_cdns(bus);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ void __iomem *shim = sdw->res->shim;
+ int sync_reg, ret;
+
+ /* Write to register only for multi-link */
+ if (!bus->multi_link)
+ return 0;
+
+ /* Read SYNC register */
+ sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
+
+ /*
+ * The post_bank_switch() op is called from the bus in a loop for
+ * all the Masters in the stream, with the expectation that
+ * we trigger the bank switch only for the first Master in the list
+ * and do nothing for the other Masters.
+ *
+ * So, set the SYNCGO bit only if the CMDSYNC bit is set for any Master.
+ */
+ if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK))
+ return 0;
+
+ /*
+ * Set SyncGO bit to synchronously trigger a bank switch for
+ * all the masters. A write to SYNCGO bit clears CMDSYNC bit for all
+ * the Masters.
+ */
+ sync_reg |= SDW_SHIM_SYNC_SYNCGO;
+
+ ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
+ SDW_SHIM_SYNC_SYNCGO);
+ if (ret < 0)
+ dev_err(sdw->cdns.dev, "Post bank switch failed: %d", ret);
+
+ return ret;
+}
+
+/*
* DAI routines
*/
@@ -750,6 +813,8 @@ static struct sdw_master_ops sdw_intel_ops = {
.xfer_msg_defer = cdns_xfer_msg_defer,
.reset_page_addr = cdns_reset_page_addr,
.set_bus_conf = cdns_bus_conf,
+ .pre_bank_switch = intel_pre_bank_switch,
+ .post_bank_switch = intel_post_bank_switch,
};
/*
@@ -780,9 +845,6 @@ static int intel_probe(struct platform_device *pdev)
sdw_intel_ops.read_prop = intel_prop_read;
sdw->cdns.bus.ops = &sdw_intel_ops;
- sdw_intel_ops.read_prop = intel_prop_read;
- sdw->cdns.bus.ops = &sdw_intel_ops;
-
platform_set_drvdata(pdev, sdw);
ret = sdw_add_bus_master(&sdw->cdns.bus);
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index d1ea6b4d0ad3..5c8a20d99878 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -151,7 +151,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level,
struct acpi_device *adev;
if (acpi_bus_get_device(handle, &adev)) {
- dev_err(&adev->dev, "Couldn't find ACPI handle\n");
+ pr_err("%s: Couldn't find ACPI handle\n", __func__);
return AE_NOT_FOUND;
}
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index e5c7e1ef6318..bd879b1a76c8 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -626,9 +626,10 @@ static int sdw_program_params(struct sdw_bus *bus)
return ret;
}
-static int sdw_bank_switch(struct sdw_bus *bus)
+static int sdw_bank_switch(struct sdw_bus *bus, int m_rt_count)
{
int col_index, row_index;
+ bool multi_link;
struct sdw_msg *wr_msg;
u8 *wbuf = NULL;
int ret = 0;
@@ -638,6 +639,8 @@ static int sdw_bank_switch(struct sdw_bus *bus)
if (!wr_msg)
return -ENOMEM;
+ bus->defer_msg.msg = wr_msg;
+
wbuf = kzalloc(sizeof(*wbuf), GFP_KERNEL);
if (!wbuf) {
ret = -ENOMEM;
@@ -658,17 +661,29 @@ static int sdw_bank_switch(struct sdw_bus *bus)
SDW_MSG_FLAG_WRITE, wbuf);
wr_msg->ssp_sync = true;
- ret = sdw_transfer(bus, wr_msg);
+ /*
+ * Set the multi_link flag only when the hardware supports it
+ * and the stream is handled by multiple Masters.
+ */
+ multi_link = bus->multi_link && (m_rt_count > 1);
+
+ if (multi_link)
+ ret = sdw_transfer_defer(bus, wr_msg, &bus->defer_msg);
+ else
+ ret = sdw_transfer(bus, wr_msg);
+
if (ret < 0) {
dev_err(bus->dev, "Slave frame_ctrl reg write failed");
goto error;
}
- kfree(wr_msg);
- kfree(wbuf);
- bus->defer_msg.msg = NULL;
- bus->params.curr_bank = !bus->params.curr_bank;
- bus->params.next_bank = !bus->params.next_bank;
+ if (!multi_link) {
+ kfree(wr_msg);
+ kfree(wbuf);
+ bus->defer_msg.msg = NULL;
+ bus->params.curr_bank = !bus->params.curr_bank;
+ bus->params.next_bank = !bus->params.next_bank;
+ }
return 0;
@@ -679,37 +694,138 @@ error_1:
return ret;
}
+/**
+ * sdw_ml_sync_bank_switch() - Multilink register bank switch
+ * @bus: SDW bus instance
+ *
+ * The caller is expected to free the message buffers on error.
+ */
+static int sdw_ml_sync_bank_switch(struct sdw_bus *bus)
+{
+ unsigned long time_left;
+
+ if (!bus->multi_link)
+ return 0;
+
+ /* Wait for completion of transfer */
+ time_left = wait_for_completion_timeout(&bus->defer_msg.complete,
+ bus->bank_switch_timeout);
+
+ if (!time_left) {
+ dev_err(bus->dev, "Controller timed out on bank switch");
+ return -ETIMEDOUT;
+ }
+
+ bus->params.curr_bank = !bus->params.curr_bank;
+ bus->params.next_bank = !bus->params.next_bank;
+
+ if (bus->defer_msg.msg) {
+ kfree(bus->defer_msg.msg->buf);
+ kfree(bus->defer_msg.msg);
+ }
+
+ return 0;
+}
+
static int do_bank_switch(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt = NULL;
const struct sdw_master_ops *ops;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_bus *bus = NULL;
+ bool multi_link = false;
int ret = 0;
- ops = bus->ops;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ ops = bus->ops;
+
+ if (bus->multi_link) {
+ multi_link = true;
+ mutex_lock(&bus->msg_lock);
+ }
+
+ /* Pre-bank switch */
+ if (ops->pre_bank_switch) {
+ ret = ops->pre_bank_switch(bus);
+ if (ret < 0) {
+ dev_err(bus->dev,
+ "Pre bank switch op failed: %d", ret);
+ goto msg_unlock;
+ }
+ }
- /* Pre-bank switch */
- if (ops->pre_bank_switch) {
- ret = ops->pre_bank_switch(bus);
+ /*
+ * Perform the bank switch operation.
+ * For multi-link cases, the actual bank switch is
+ * synchronized across all Masters and happens later as
+ * part of the post_bank_switch op.
+ */
+ ret = sdw_bank_switch(bus, stream->m_rt_count);
if (ret < 0) {
- dev_err(bus->dev, "Pre bank switch op failed: %d", ret);
- return ret;
+ dev_err(bus->dev, "Bank switch failed: %d", ret);
+ goto error;
+
}
}
- /* Bank switch */
- ret = sdw_bank_switch(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Bank switch failed: %d", ret);
- return ret;
- }
+ /*
+ * For multi-link cases, it is expected that the bank switch is
+ * triggered by post_bank_switch() for the first Master in the list,
+ * and that for the other Masters post_bank_switch() returns without
+ * doing anything.
+ */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ ops = bus->ops;
+
+ /* Post-bank switch */
+ if (ops->post_bank_switch) {
+ ret = ops->post_bank_switch(bus);
+ if (ret < 0) {
+ dev_err(bus->dev,
+ "Post bank switch op failed: %d", ret);
+ goto error;
+ }
+		} else if (bus->multi_link && stream->m_rt_count > 1) {
+			dev_err(bus->dev,
+				"Post bank switch ops not implemented");
+			ret = -EINVAL;
+			goto error;
+		}
+
+ /* Set the bank switch timeout to default, if not set */
+ if (!bus->bank_switch_timeout)
+ bus->bank_switch_timeout = DEFAULT_BANK_SWITCH_TIMEOUT;
- /* Post-bank switch */
- if (ops->post_bank_switch) {
- ret = ops->post_bank_switch(bus);
+ /* Check if bank switch was successful */
+ ret = sdw_ml_sync_bank_switch(bus);
if (ret < 0) {
dev_err(bus->dev,
- "Post bank switch op failed: %d", ret);
+				"multi-link bank switch failed: %d", ret);
+ goto error;
+ }
+
+ mutex_unlock(&bus->msg_lock);
+ }
+
+ return ret;
+
+error:
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+
+ bus = m_rt->bus;
+
+		if (bus->defer_msg.msg) {
+			kfree(bus->defer_msg.msg->buf);
+			kfree(bus->defer_msg.msg);
+		}
+ }
+
+msg_unlock:
+
+ if (multi_link) {
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ if (mutex_is_locked(&bus->msg_lock))
+ mutex_unlock(&bus->msg_lock);
}
}
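
Taken together, do_bank_switch() now makes two passes over the stream's Master list; a condensed sketch of the resulting control flow (locking and error paths elided, names as in the code above):

    list_for_each_entry(m_rt, &stream->master_list, stream_node) {
            ops->pre_bank_switch(m_rt->bus);
            /* deferred when multi-link, immediate otherwise */
            sdw_bank_switch(m_rt->bus, stream->m_rt_count);
    }
    list_for_each_entry(m_rt, &stream->master_list, stream_node) {
            ops->post_bank_switch(m_rt->bus);   /* first Master triggers it */
            sdw_ml_sync_bank_switch(m_rt->bus); /* wait for the deferred write */
    }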
@@ -747,12 +863,29 @@ struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name)
return NULL;
stream->name = stream_name;
+ INIT_LIST_HEAD(&stream->master_list);
stream->state = SDW_STREAM_ALLOCATED;
+ stream->m_rt_count = 0;
return stream;
}
EXPORT_SYMBOL(sdw_alloc_stream);
+static struct sdw_master_runtime
+*sdw_find_master_rt(struct sdw_bus *bus,
+ struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+
+	/* Retrieve the Master runtime handle for this bus, if any */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ if (m_rt->bus == bus)
+ return m_rt;
+ }
+
+ return NULL;
+}
+
/**
* sdw_alloc_master_rt() - Allocates and initialize Master runtime handle
*
@@ -769,12 +902,11 @@ static struct sdw_master_runtime
{
struct sdw_master_runtime *m_rt;
- m_rt = stream->m_rt;
-
/*
* check if Master is already allocated (as a result of Slave adding
* it first), if so skip allocation and go to configure
*/
+ m_rt = sdw_find_master_rt(bus, stream);
if (m_rt)
goto stream_config;
@@ -785,7 +917,7 @@ static struct sdw_master_runtime
/* Initialization of Master runtime handle */
INIT_LIST_HEAD(&m_rt->port_list);
INIT_LIST_HEAD(&m_rt->slave_rt_list);
- stream->m_rt = m_rt;
+ list_add_tail(&m_rt->stream_node, &stream->master_list);
list_add_tail(&m_rt->bus_node, &bus->m_rt_list);
@@ -843,17 +975,21 @@ static void sdw_slave_port_release(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
struct sdw_port_runtime *p_rt, *_p_rt;
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt;
struct sdw_slave_runtime *s_rt;
- list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
- if (s_rt->slave != slave)
- continue;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ list_for_each_entry(s_rt, &m_rt->slave_rt_list, m_rt_node) {
- list_for_each_entry_safe(p_rt, _p_rt,
- &s_rt->port_list, port_node) {
- list_del(&p_rt->port_node);
- kfree(p_rt);
+ if (s_rt->slave != slave)
+ continue;
+
+ list_for_each_entry_safe(p_rt, _p_rt,
+ &s_rt->port_list, port_node) {
+
+ list_del(&p_rt->port_node);
+ kfree(p_rt);
+ }
}
}
}
@@ -870,16 +1006,18 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
struct sdw_stream_runtime *stream)
{
struct sdw_slave_runtime *s_rt, *_s_rt;
- struct sdw_master_runtime *m_rt = stream->m_rt;
+ struct sdw_master_runtime *m_rt;
- /* Retrieve Slave runtime handle */
- list_for_each_entry_safe(s_rt, _s_rt,
- &m_rt->slave_rt_list, m_rt_node) {
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ /* Retrieve Slave runtime handle */
+ list_for_each_entry_safe(s_rt, _s_rt,
+ &m_rt->slave_rt_list, m_rt_node) {
- if (s_rt->slave == slave) {
- list_del(&s_rt->m_rt_node);
- kfree(s_rt);
- return;
+ if (s_rt->slave == slave) {
+ list_del(&s_rt->m_rt_node);
+ kfree(s_rt);
+ return;
+ }
}
}
}
@@ -887,6 +1025,7 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
/**
* sdw_release_master_stream() - Free Master runtime handle
*
+ * @m_rt: Master runtime node
* @stream: Stream runtime handle.
*
* This function is to be called with bus_lock held
@@ -894,9 +1033,9 @@ static void sdw_release_slave_stream(struct sdw_slave *slave,
* handle. If this is called first then sdw_release_slave_stream() will have
* no effect as Slave(s) runtime handle would already be freed up.
*/
-static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
+static void sdw_release_master_stream(struct sdw_master_runtime *m_rt,
+ struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
struct sdw_slave_runtime *s_rt, *_s_rt;
list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
@@ -904,7 +1043,9 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
sdw_release_slave_stream(s_rt->slave, stream);
}
+ list_del(&m_rt->stream_node);
list_del(&m_rt->bus_node);
+ kfree(m_rt);
}
/**
@@ -918,13 +1059,23 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
int sdw_stream_remove_master(struct sdw_bus *bus,
struct sdw_stream_runtime *stream)
{
+ struct sdw_master_runtime *m_rt, *_m_rt;
+
mutex_lock(&bus->bus_lock);
- sdw_release_master_stream(stream);
- sdw_master_port_release(bus, stream->m_rt);
- stream->state = SDW_STREAM_RELEASED;
- kfree(stream->m_rt);
- stream->m_rt = NULL;
+ list_for_each_entry_safe(m_rt, _m_rt,
+ &stream->master_list, stream_node) {
+
+ if (m_rt->bus != bus)
+ continue;
+
+ sdw_master_port_release(bus, m_rt);
+ sdw_release_master_stream(m_rt, stream);
+ stream->m_rt_count--;
+ }
+
+ if (list_empty(&stream->master_list))
+ stream->state = SDW_STREAM_RELEASED;
mutex_unlock(&bus->bus_lock);
@@ -1107,6 +1258,18 @@ int sdw_stream_add_master(struct sdw_bus *bus,
mutex_lock(&bus->bus_lock);
+	/*
+	 * For multi-link streams, add a second (or later) Master only
+	 * if the bus supports it, i.e. bus->multi_link is set.
+	 */
+ if (!bus->multi_link && stream->m_rt_count > 0) {
+ dev_err(bus->dev,
+ "Multilink not supported, link %d", bus->link_id);
+ ret = -EINVAL;
+ goto unlock;
+ }
+
m_rt = sdw_alloc_master_rt(bus, stream_config, stream);
if (!m_rt) {
dev_err(bus->dev,
@@ -1124,10 +1287,12 @@ int sdw_stream_add_master(struct sdw_bus *bus,
if (ret)
goto stream_error;
+ stream->m_rt_count++;
+
goto unlock;
stream_error:
- sdw_release_master_stream(stream);
+ sdw_release_master_stream(m_rt, stream);
unlock:
mutex_unlock(&bus->bus_lock);
return ret;
@@ -1205,7 +1370,7 @@ stream_error:
* we hit error so cleanup the stream, release all Slave(s) and
* Master runtime
*/
- sdw_release_master_stream(stream);
+ sdw_release_master_stream(m_rt, stream);
error:
mutex_unlock(&slave->bus->bus_lock);
return ret;
@@ -1245,33 +1410,82 @@ struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
return NULL;
}
+/**
+ * sdw_acquire_bus_lock() - Acquire bus lock for all Master runtime(s)
+ *
+ * @stream: SoundWire stream
+ *
+ * Acquire bus_lock for each Master runtime (m_rt) that is part of this
+ * stream, so that the bus can be reconfigured.
+ * NOTE: This function is called from SoundWire stream ops; it is
+ * expected that a global lock is held before acquiring bus_lock.
+ */
+static void sdw_acquire_bus_lock(struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
+
+ /* Iterate for all Master(s) in Master list */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+
+ mutex_lock(&bus->bus_lock);
+ }
+}
+
+/**
+ * sdw_release_bus_lock() - Release bus lock for all Master runtime(s)
+ *
+ * @stream: SoundWire stream
+ *
+ * Release the previously held bus_lock after reconfiguring the bus.
+ * NOTE: This function is called from SoundWire stream ops; it is
+ * expected that a global lock is held before releasing bus_lock.
+ */
+static void sdw_release_bus_lock(struct sdw_stream_runtime *stream)
+{
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
+
+ /* Iterate for all Master(s) in Master list */
+ list_for_each_entry_reverse(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ mutex_unlock(&bus->bus_lock);
+ }
+}
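
Since every stream walks master_list in the same order, taking bus_lock in list order and dropping it in reverse yields a consistent nested-lock ordering: two streams that share buses can never each hold one lock while waiting on the other. With two hypothetical buses:

    mutex_lock(&busA->bus_lock);   /* list order: A always before B */
    mutex_lock(&busB->bus_lock);
    /* ... reconfigure both links ... */
    mutex_unlock(&busB->bus_lock); /* reverse order on release */
    mutex_unlock(&busA->bus_lock);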
+
static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
struct sdw_master_prop *prop = NULL;
struct sdw_bus_params params;
int ret;
- prop = &bus->prop;
- memcpy(&params, &bus->params, sizeof(params));
+ /* Prepare Master(s) and Slave(s) port(s) associated with stream */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ prop = &bus->prop;
+ memcpy(&params, &bus->params, sizeof(params));
- /* TODO: Support Asynchronous mode */
- if ((prop->max_freq % stream->params.rate) != 0) {
- dev_err(bus->dev, "Async mode not supported");
- return -EINVAL;
- }
+ /* TODO: Support Asynchronous mode */
+ if ((prop->max_freq % stream->params.rate) != 0) {
+ dev_err(bus->dev, "Async mode not supported");
+ return -EINVAL;
+ }
- /* Increment cumulative bus bandwidth */
- /* TODO: Update this during Device-Device support */
- bus->params.bandwidth += m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ /* Increment cumulative bus bandwidth */
+ /* TODO: Update this during Device-Device support */
+ bus->params.bandwidth += m_rt->stream->params.rate *
+ m_rt->ch_count * m_rt->stream->params.bps;
+
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ goto restore_params;
+ }
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- goto restore_params;
}
ret = do_bank_switch(stream);
@@ -1280,12 +1494,16 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream)
goto restore_params;
}
- /* Prepare port(s) on the new clock configuration */
- ret = sdw_prep_deprep_ports(m_rt, true);
- if (ret < 0) {
- dev_err(bus->dev, "Prepare port(s) failed ret = %d",
- ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+
+ /* Prepare port(s) on the new clock configuration */
+ ret = sdw_prep_deprep_ports(m_rt, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "Prepare port(s) failed ret = %d",
+ ret);
+ return ret;
+ }
}
stream->state = SDW_STREAM_PREPARED;
@@ -1313,35 +1531,40 @@ int sdw_prepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_prepare_stream(stream);
if (ret < 0)
pr_err("Prepare for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_prepare_stream);
static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret;
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
- }
+ /* Enable Master(s) and Slave(s) port(s) associated with stream */
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
- /* Enable port(s) */
- ret = sdw_enable_disable_ports(m_rt, true);
- if (ret < 0) {
- dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
- return ret;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
+
+ /* Enable port(s) */
+ ret = sdw_enable_disable_ports(m_rt, true);
+ if (ret < 0) {
+ dev_err(bus->dev, "Enable port(s) failed ret: %d", ret);
+ return ret;
+ }
}
ret = do_bank_switch(stream);
@@ -1370,37 +1593,42 @@ int sdw_enable_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_enable_stream(stream);
if (ret < 0)
pr_err("Enable for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_enable_stream);
static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret;
- /* Disable port(s) */
- ret = sdw_enable_disable_ports(m_rt, false);
- if (ret < 0) {
- dev_err(bus->dev, "Disable port(s) failed: %d", ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* Disable port(s) */
+ ret = sdw_enable_disable_ports(m_rt, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "Disable port(s) failed: %d", ret);
+ return ret;
+ }
}
-
stream->state = SDW_STREAM_DISABLED;
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
}
return do_bank_switch(stream);
@@ -1422,43 +1650,46 @@ int sdw_disable_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
+ sdw_acquire_bus_lock(stream);
ret = _sdw_disable_stream(stream);
if (ret < 0)
pr_err("Disable for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_disable_stream);
static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
{
- struct sdw_master_runtime *m_rt = stream->m_rt;
- struct sdw_bus *bus = m_rt->bus;
+ struct sdw_master_runtime *m_rt = NULL;
+ struct sdw_bus *bus = NULL;
int ret = 0;
- /* De-prepare port(s) */
- ret = sdw_prep_deprep_ports(m_rt, false);
- if (ret < 0) {
- dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
- return ret;
- }
+ list_for_each_entry(m_rt, &stream->master_list, stream_node) {
+ bus = m_rt->bus;
+ /* De-prepare port(s) */
+ ret = sdw_prep_deprep_ports(m_rt, false);
+ if (ret < 0) {
+ dev_err(bus->dev, "De-prepare port(s) failed: %d", ret);
+ return ret;
+ }
- stream->state = SDW_STREAM_DEPREPARED;
+ /* TODO: Update this during Device-Device support */
+ bus->params.bandwidth -= m_rt->stream->params.rate *
+ m_rt->ch_count * m_rt->stream->params.bps;
- /* TODO: Update this during Device-Device support */
- bus->params.bandwidth -= m_rt->stream->params.rate *
- m_rt->ch_count * m_rt->stream->params.bps;
+ /* Program params */
+ ret = sdw_program_params(bus);
+ if (ret < 0) {
+ dev_err(bus->dev, "Program params failed: %d", ret);
+ return ret;
+ }
- /* Program params */
- ret = sdw_program_params(bus);
- if (ret < 0) {
- dev_err(bus->dev, "Program params failed: %d", ret);
- return ret;
}
+ stream->state = SDW_STREAM_DEPREPARED;
return do_bank_switch(stream);
}
@@ -1478,13 +1709,12 @@ int sdw_deprepare_stream(struct sdw_stream_runtime *stream)
return -EINVAL;
}
- mutex_lock(&stream->m_rt->bus->bus_lock);
-
+ sdw_acquire_bus_lock(stream);
ret = _sdw_deprepare_stream(stream);
if (ret < 0)
		pr_err("De-prepare for stream:%s failed: %d", stream->name, ret);
- mutex_unlock(&stream->m_rt->bus->bus_lock);
+ sdw_release_bus_lock(stream);
return ret;
}
EXPORT_SYMBOL(sdw_deprepare_stream);
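
For orientation, a hedged sketch of the stream lifecycle these changes extend to multiple Masters (error handling elided; bus0, bus1 and the config structures are illustrative):

    struct sdw_stream_runtime *stream = sdw_alloc_stream("playback");

    sdw_stream_add_master(bus0, &sconfig, pconfig, num_ports, stream);
    sdw_stream_add_master(bus1, &sconfig, pconfig, num_ports, stream); /* multi-link */

    sdw_prepare_stream(stream);   /* program params + bank switch */
    sdw_enable_stream(stream);    /* enable ports + bank switch */
    /* ... audio flows ... */
    sdw_disable_stream(stream);
    sdw_deprepare_stream(stream);

    sdw_stream_remove_master(bus0, stream);
    sdw_stream_remove_master(bus1, stream);
    sdw_release_stream(stream);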
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index f756450a8914..7d3a5c94727e 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -83,6 +83,14 @@ config SPI_ATMEL
This selects a driver for the Atmel SPI Controller, present on
many AT91 ARM chips.
+config SPI_AT91_USART
+ tristate "Atmel USART Controller SPI driver"
+ depends on (ARCH_AT91 || COMPILE_TEST)
+ depends on MFD_AT91_USART
+ help
+ This selects a driver for the AT91 USART Controller as SPI Master,
+ present on AT91 and SAMA5 SoC series.
+
config SPI_AU1550
tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index df04dfbe7d70..3575205c5c27 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
obj-$(CONFIG_SPI_ARMADA_3700) += spi-armada-3700.o
obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_AT91_USART) += spi-at91-usart.o
obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
new file mode 100644
index 000000000000..a924657642fa
--- /dev/null
+++ b/drivers/spi/spi-at91-usart.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Driver for AT91 USART Controllers as SPI
+//
+// Copyright (C) 2018 Microchip Technology Inc.
+//
+// Author: Radu Pirea <radu.pirea@microchip.com>
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+
+#define US_CR 0x00
+#define US_MR 0x04
+#define US_IER 0x08
+#define US_IDR 0x0C
+#define US_CSR 0x14
+#define US_RHR 0x18
+#define US_THR 0x1C
+#define US_BRGR 0x20
+#define US_VERSION 0xFC
+
+#define US_CR_RSTRX BIT(2)
+#define US_CR_RSTTX BIT(3)
+#define US_CR_RXEN BIT(4)
+#define US_CR_RXDIS BIT(5)
+#define US_CR_TXEN BIT(6)
+#define US_CR_TXDIS BIT(7)
+
+#define US_MR_SPI_MASTER 0x0E
+#define US_MR_CHRL GENMASK(7, 6)
+#define US_MR_CPHA BIT(8)
+#define US_MR_CPOL BIT(16)
+#define US_MR_CLKO BIT(18)
+#define US_MR_WRDBT BIT(20)
+#define US_MR_LOOP BIT(15)
+
+#define US_IR_RXRDY BIT(0)
+#define US_IR_TXRDY BIT(1)
+#define US_IR_OVRE BIT(5)
+
+#define US_BRGR_SIZE BIT(16)
+
+#define US_MIN_CLK_DIV 0x06
+#define US_MAX_CLK_DIV BIT(16)
+
+#define US_RESET (US_CR_RSTRX | US_CR_RSTTX)
+#define US_DISABLE (US_CR_RXDIS | US_CR_TXDIS)
+#define US_ENABLE (US_CR_RXEN | US_CR_TXEN)
+#define US_OVRE_RXRDY_IRQS (US_IR_OVRE | US_IR_RXRDY)
+
+#define US_INIT \
+ (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
+
+/* Register access macros */
+#define at91_usart_spi_readl(port, reg) \
+ readl_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + US_##reg)
+
+#define at91_usart_spi_readb(port, reg) \
+ readb_relaxed((port)->regs + US_##reg)
+#define at91_usart_spi_writeb(port, reg, value) \
+ writeb_relaxed((value), (port)->regs + US_##reg)
+
+struct at91_usart_spi {
+ struct spi_transfer *current_transfer;
+ void __iomem *regs;
+ struct device *dev;
+ struct clk *clk;
+
+	/* used in the interrupt handler to protect data reading */
+ spinlock_t lock;
+
+ int irq;
+ unsigned int current_tx_remaining_bytes;
+ unsigned int current_rx_remaining_bytes;
+
+ u32 spi_clk;
+ u32 status;
+
+ bool xfer_failed;
+};
+
+static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_TXRDY;
+}
+
+static inline u32 at91_usart_spi_rx_ready(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_RXRDY;
+}
+
+static inline u32 at91_usart_spi_check_overrun(struct at91_usart_spi *aus)
+{
+ return aus->status & US_IR_OVRE;
+}
+
+static inline u32 at91_usart_spi_read_status(struct at91_usart_spi *aus)
+{
+ aus->status = at91_usart_spi_readl(aus, CSR);
+ return aus->status;
+}
+
+static inline void at91_usart_spi_tx(struct at91_usart_spi *aus)
+{
+ unsigned int len = aus->current_transfer->len;
+ unsigned int remaining = aus->current_tx_remaining_bytes;
+ const u8 *tx_buf = aus->current_transfer->tx_buf;
+
+ if (!remaining)
+ return;
+
+ if (at91_usart_spi_tx_ready(aus)) {
+ at91_usart_spi_writeb(aus, THR, tx_buf[len - remaining]);
+ aus->current_tx_remaining_bytes--;
+ }
+}
+
+static inline void at91_usart_spi_rx(struct at91_usart_spi *aus)
+{
+ int len = aus->current_transfer->len;
+ int remaining = aus->current_rx_remaining_bytes;
+ u8 *rx_buf = aus->current_transfer->rx_buf;
+
+ if (!remaining)
+ return;
+
+ rx_buf[len - remaining] = at91_usart_spi_readb(aus, RHR);
+ aus->current_rx_remaining_bytes--;
+}
+
+static inline void
+at91_usart_spi_set_xfer_speed(struct at91_usart_spi *aus,
+ struct spi_transfer *xfer)
+{
+ at91_usart_spi_writel(aus, BRGR,
+ DIV_ROUND_UP(aus->spi_clk, xfer->speed_hz));
+}
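
The value written to BRGR is simply the peripheral clock divided by the requested rate, rounded up, which is also why probe() derives max/min_speed_hz from US_MIN_CLK_DIV and US_MAX_CLK_DIV. A stand-alone check of the arithmetic (the 100 MHz peripheral clock is an assumed example, not a datasheet value):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int spi_clk = 100000000;  /* assumed USART clock */
            unsigned int speed_hz = 15000000;  /* requested transfer speed */
            unsigned int brgr = DIV_ROUND_UP(spi_clk, speed_hz);

            /* brgr = 7 here, giving an actual SCK of ~14.29 MHz */
            printf("BRGR=%u, actual SCK=%u Hz\n", brgr, spi_clk / brgr);
            /* the probe()-time speed limits, US_MIN/MAX_CLK_DIV = 6/65536 */
            printf("max=%u Hz, min=%u Hz\n",
                   DIV_ROUND_UP(spi_clk, 6), DIV_ROUND_UP(spi_clk, 65536));
            return 0;
    }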
+
+static irqreturn_t at91_usart_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_controller *controller = dev_id;
+ struct at91_usart_spi *aus = spi_master_get_devdata(controller);
+
+ spin_lock(&aus->lock);
+ at91_usart_spi_read_status(aus);
+
+ if (at91_usart_spi_check_overrun(aus)) {
+ aus->xfer_failed = true;
+		at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ if (at91_usart_spi_rx_ready(aus)) {
+ at91_usart_spi_rx(aus);
+ spin_unlock(&aus->lock);
+ return IRQ_HANDLED;
+ }
+
+ spin_unlock(&aus->lock);
+
+ return IRQ_NONE;
+}
+
+static int at91_usart_spi_setup(struct spi_device *spi)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(spi->controller);
+ u32 *ausd = spi->controller_state;
+ unsigned int mr = at91_usart_spi_readl(aus, MR);
+ u8 bits = spi->bits_per_word;
+
+ if (bits != 8) {
+ dev_dbg(&spi->dev, "Only 8 bits per word are supported\n");
+ return -EINVAL;
+ }
+
+ if (spi->mode & SPI_CPOL)
+ mr |= US_MR_CPOL;
+ else
+ mr &= ~US_MR_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ mr |= US_MR_CPHA;
+ else
+ mr &= ~US_MR_CPHA;
+
+ if (spi->mode & SPI_LOOP)
+ mr |= US_MR_LOOP;
+ else
+ mr &= ~US_MR_LOOP;
+
+ if (!ausd) {
+ ausd = kzalloc(sizeof(*ausd), GFP_KERNEL);
+ if (!ausd)
+ return -ENOMEM;
+
+ spi->controller_state = ausd;
+ }
+
+ *ausd = mr;
+
+ dev_dbg(&spi->dev,
+		"setup: bpw %u mode 0x%x cs %d -> mr 0x%08x\n",
+ bits, spi->mode, spi->chip_select, mr);
+
+ return 0;
+}
+
+static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_set_xfer_speed(aus, xfer);
+ aus->xfer_failed = false;
+ aus->current_transfer = xfer;
+ aus->current_tx_remaining_bytes = xfer->len;
+ aus->current_rx_remaining_bytes = xfer->len;
+
+ while ((aus->current_tx_remaining_bytes ||
+ aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
+ at91_usart_spi_read_status(aus);
+ at91_usart_spi_tx(aus);
+ cpu_relax();
+ }
+
+ if (aus->xfer_failed) {
+ dev_err(aus->dev, "Overrun!\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ u32 *ausd = spi->controller_state;
+
+ at91_usart_spi_writel(aus, CR, US_ENABLE);
+ at91_usart_spi_writel(aus, IER, US_OVRE_RXRDY_IRQS);
+ at91_usart_spi_writel(aus, MR, *ausd);
+
+ return 0;
+}
+
+static int at91_usart_spi_unprepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+ at91_usart_spi_writel(aus, IDR, US_OVRE_RXRDY_IRQS);
+
+ return 0;
+}
+
+static void at91_usart_spi_cleanup(struct spi_device *spi)
+{
+	u32 *ausd = spi->controller_state;
+
+ spi->controller_state = NULL;
+ kfree(ausd);
+}
+
+static void at91_usart_spi_init(struct at91_usart_spi *aus)
+{
+ at91_usart_spi_writel(aus, MR, US_INIT);
+ at91_usart_spi_writel(aus, CR, US_RESET | US_DISABLE);
+}
+
+static int at91_usart_gpio_setup(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.parent->of_node;
+ int i;
+ int ret;
+ int nb;
+
+ if (!np)
+ return -EINVAL;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ for (i = 0; i < nb; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (cs_gpio < 0)
+ return cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
+ GPIOF_DIR_OUT,
+ dev_name(&pdev->dev));
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int at91_usart_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ struct spi_controller *controller;
+ struct at91_usart_spi *aus;
+ struct clk *clk;
+ int irq;
+ int ret;
+
+ regs = platform_get_resource(to_platform_device(pdev->dev.parent),
+ IORESOURCE_MEM, 0);
+ if (!regs)
+ return -EINVAL;
+
+ irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(pdev->dev.parent, "usart");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = -ENOMEM;
+ controller = spi_alloc_master(&pdev->dev, sizeof(*aus));
+ if (!controller)
+ goto at91_usart_spi_probe_fail;
+
+ ret = at91_usart_gpio_setup(pdev);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
+ controller->dev.of_node = pdev->dev.parent->of_node;
+ controller->bits_per_word_mask = SPI_BPW_MASK(8);
+ controller->setup = at91_usart_spi_setup;
+ controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
+ controller->transfer_one = at91_usart_spi_transfer_one;
+ controller->prepare_message = at91_usart_spi_prepare_message;
+ controller->unprepare_message = at91_usart_spi_unprepare_message;
+ controller->cleanup = at91_usart_spi_cleanup;
+ controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MIN_CLK_DIV);
+ controller->min_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
+ US_MAX_CLK_DIV);
+ platform_set_drvdata(pdev, controller);
+
+ aus = spi_master_get_devdata(controller);
+
+ aus->dev = &pdev->dev;
+ aus->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(aus->regs)) {
+ ret = PTR_ERR(aus->regs);
+ goto at91_usart_spi_probe_fail;
+ }
+
+ aus->irq = irq;
+ aus->clk = clk;
+
+ ret = devm_request_irq(&pdev->dev, irq, at91_usart_spi_interrupt, 0,
+ dev_name(&pdev->dev), controller);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto at91_usart_spi_probe_fail;
+
+ aus->spi_clk = clk_get_rate(clk);
+ at91_usart_spi_init(aus);
+
+ spin_lock_init(&aus->lock);
+ ret = devm_spi_register_master(&pdev->dev, controller);
+ if (ret)
+ goto at91_usart_fail_register_master;
+
+ dev_info(&pdev->dev,
+ "AT91 USART SPI Controller version 0x%x at %pa (irq %d)\n",
+ at91_usart_spi_readl(aus, VERSION),
+ &regs->start, irq);
+
+ return 0;
+
+at91_usart_fail_register_master:
+ clk_disable_unprepare(clk);
+at91_usart_spi_probe_fail:
+ spi_master_put(controller);
+ return ret;
+}
+
+static int at91_usart_spi_remove(struct platform_device *pdev)
+{
+ struct spi_controller *ctlr = platform_get_drvdata(pdev);
+ struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
+
+ clk_disable_unprepare(aus->clk);
+
+ return 0;
+}
+
+static const struct of_device_id at91_usart_spi_dt_ids[] = {
+ { .compatible = "microchip,at91sam9g45-usart-spi"},
+ { /* sentinel */}
+};
+
+MODULE_DEVICE_TABLE(of, at91_usart_spi_dt_ids);
+
+static struct platform_driver at91_usart_spi_driver = {
+ .driver = {
+ .name = "at91_usart_spi",
+ },
+ .probe = at91_usart_spi_probe,
+ .remove = at91_usart_spi_remove,
+};
+
+module_platform_driver(at91_usart_spi_driver);
+
+MODULE_DESCRIPTION("Microchip AT91 USART SPI Controller driver");
+MODULE_AUTHOR("Radu Pirea <radu.pirea@microchip.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:at91_usart_spi");
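
With the controller bound and a chip described under it, the usual spidev smoke test exercises the transfer_one() path; a hedged user-space example (the /dev/spidev0.0 node, the 1 MHz speed and mode-0 defaults are assumptions about the target setup):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/spi/spidev.h>

    int main(void)
    {
            unsigned char tx[4] = { 0xde, 0xad, 0xbe, 0xef }, rx[4] = { 0 };
            struct spi_ioc_transfer xfer = {
                    .tx_buf = (unsigned long)tx,
                    .rx_buf = (unsigned long)rx,
                    .len = sizeof(tx),
                    .speed_hz = 1000000,
                    .bits_per_word = 8,  /* the driver only accepts 8 */
            };
            int fd = open("/dev/spidev0.0", O_RDWR);

            if (fd < 0 || ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 0) {
                    perror("spidev");
                    return 1;
            }
            printf("rx: %02x %02x %02x %02x\n", rx[0], rx[1], rx[2], rx[3]);
            close(fd);
            return 0;
    }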
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a47156101..8cf0617d4ea0 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -223,18 +223,13 @@ static struct dentry *erofs_lookup(struct inode *dir,
if (err == -ENOENT) {
/* negative dentry */
inode = NULL;
- goto negative_out;
- } else if (unlikely(err))
- return ERR_PTR(err);
-
- debugln("%s, %s (nid %llu) found, d_type %u", __func__,
- dentry->d_name.name, nid, d_type);
-
- inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
- if (IS_ERR(inode))
- return ERR_CAST(inode);
-
-negative_out:
+ } else if (unlikely(err)) {
+ inode = ERR_PTR(err);
+ } else {
+ debugln("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == EROFS_FT_DIR);
+ }
return d_splice_alias(inode, dentry);
}
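
The goto-free form works because d_splice_alias() already distinguishes the three outcomes in a single call:

    /* inode == NULL  -> install a negative dentry
     * IS_ERR(inode)  -> propagate the error via an ERR_PTR dentry
     * valid inode    -> splice it in (or return an existing alias)
     */
    return d_splice_alias(inode, dentry);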
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 9f18be14dda6..f38f1f74fcd6 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -49,9 +49,9 @@ struct rtllib_tkip_data {
u32 dot11RSNAStatsTKIPLocalMICFailures;
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16];
@@ -66,8 +66,7 @@ static void *rtllib_tkip_init(int key_idx)
if (priv == NULL)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->tx_tfm_arc4 = NULL;
@@ -81,8 +80,7 @@ static void *rtllib_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
pr_debug("Could not allocate crypto API arc4\n");
priv->rx_tfm_arc4 = NULL;
@@ -100,9 +98,9 @@ static void *rtllib_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -116,9 +114,9 @@ static void rtllib_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -337,7 +335,7 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -349,8 +347,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -420,7 +418,7 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if ((iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
@@ -447,8 +445,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
@@ -664,9 +662,9 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct rtllib_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
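
The same conversion repeats across the rtl8192e/rtl8192u files that follow; the general shape of the sync-skcipher pattern, sketched once with illustrative tfm/key/sg names:

    struct crypto_sync_skcipher *tfm;

    tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0); /* no ASYNC flag */

    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);  /* safe: tfm is known sync */
    crypto_sync_skcipher_setkey(tfm, key, keylen);
    skcipher_request_set_sync_tfm(req, tfm);
    skcipher_request_set_callback(req, 0, NULL, NULL);
    skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
    err = crypto_skcipher_encrypt(req);
    skcipher_request_zero(req);

    crypto_free_sync_skcipher(tfm);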
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index b3343a5d0fd6..d11ec39171d5 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -27,8 +27,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -41,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->tx_tfm = NULL;
goto fail;
}
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm)) {
pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
priv->rx_tfm = NULL;
@@ -61,8 +61,8 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
- crypto_free_skcipher(priv->tx_tfm);
- crypto_free_skcipher(priv->rx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->rx_tfm);
kfree(priv);
}
return NULL;
@@ -74,8 +74,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -135,7 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -146,8 +146,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[3] = crc >> 24;
sg_init_one(&sg, pos, len+4);
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
err = crypto_skcipher_encrypt(req);
@@ -199,11 +199,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
sg_init_one(&sg, pos, plen+4);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
err = crypto_skcipher_decrypt(req);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
index 1088fa0aee0e..829fa4bd253c 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
@@ -53,9 +53,9 @@ struct ieee80211_tkip_data {
int key_idx;
- struct crypto_skcipher *rx_tfm_arc4;
+ struct crypto_sync_skcipher *rx_tfm_arc4;
struct crypto_shash *rx_tfm_michael;
- struct crypto_skcipher *tx_tfm_arc4;
+ struct crypto_sync_skcipher *tx_tfm_arc4;
struct crypto_shash *tx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
@@ -71,8 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
priv->key_idx = key_idx;
- priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->tx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -88,8 +87,7 @@ static void *ieee80211_tkip_init(int key_idx)
goto fail;
}
- priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
- CRYPTO_ALG_ASYNC);
+ priv->rx_tfm_arc4 = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm_arc4)) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
@@ -110,9 +108,9 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
crypto_free_shash(priv->tx_tfm_michael);
- crypto_free_skcipher(priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->tx_tfm_arc4);
crypto_free_shash(priv->rx_tfm_michael);
- crypto_free_skcipher(priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(priv->rx_tfm_arc4);
kfree(priv);
}
@@ -126,9 +124,9 @@ static void ieee80211_tkip_deinit(void *priv)
if (_priv) {
crypto_free_shash(_priv->tx_tfm_michael);
- crypto_free_skcipher(_priv->tx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->tx_tfm_arc4);
crypto_free_shash(_priv->rx_tfm_michael);
- crypto_free_skcipher(_priv->rx_tfm_arc4);
+ crypto_free_sync_skcipher(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@@ -340,7 +338,7 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
icv = skb_put(skb, 4);
crc = ~crc32_le(~0, pos, len);
@@ -348,9 +346,9 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[1] = crc >> 8;
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->tx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
ret = crypto_skcipher_encrypt(req);
@@ -418,7 +416,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pos += 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
@@ -440,10 +438,10 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
- crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+ crypto_sync_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+ skcipher_request_set_sync_tfm(req, tkey->rx_tfm_arc4);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
@@ -663,9 +661,9 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_shash *tfm = tkey->tx_tfm_michael;
- struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm2 = tkey->tx_tfm_arc4;
struct crypto_shash *tfm3 = tkey->rx_tfm_michael;
- struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
+ struct crypto_sync_skcipher *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
index b9f86be9e52b..d4a1bf0caa7a 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
@@ -32,8 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
- struct crypto_skcipher *tx_tfm;
- struct crypto_skcipher *rx_tfm;
+ struct crypto_sync_skcipher *tx_tfm;
+ struct crypto_sync_skcipher *rx_tfm;
};
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
return NULL;
priv->key_idx = keyidx;
- priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->tx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->tx_tfm))
goto free_priv;
- priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+ priv->rx_tfm = crypto_alloc_sync_skcipher("ecb(arc4)", 0, 0);
if (IS_ERR(priv->rx_tfm))
goto free_tx;
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
return priv;
free_tx:
- crypto_free_skcipher(priv->tx_tfm);
+ crypto_free_sync_skcipher(priv->tx_tfm);
free_priv:
kfree(priv);
return NULL;
@@ -70,8 +70,8 @@ static void prism2_wep_deinit(void *priv)
struct prism2_wep_data *_priv = priv;
if (_priv) {
- crypto_free_skcipher(_priv->tx_tfm);
- crypto_free_skcipher(_priv->rx_tfm);
+ crypto_free_sync_skcipher(_priv->tx_tfm);
+ crypto_free_sync_skcipher(_priv->rx_tfm);
}
kfree(priv);
}
@@ -128,7 +128,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
memcpy(key + 3, wep->key, wep->key_len);
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
/* Append little-endian CRC32 and encrypt it to produce ICV */
crc = ~crc32_le(~0, pos, len);
@@ -138,10 +138,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
- crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->tx_tfm, key, klen);
sg_init_one(&sg, pos, len+4);
- skcipher_request_set_tfm(req, wep->tx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->tx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
@@ -193,12 +193,12 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 8;
if (!tcb_desc->bHwSec) {
- SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
- crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+ crypto_sync_skcipher_setkey(wep->rx_tfm, key, klen);
sg_init_one(&sg, pos, plen+4);
- skcipher_request_set_tfm(req, wep->rx_tfm);
+ skcipher_request_set_sync_tfm(req, wep->rx_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
diff --git a/drivers/target/iscsi/cxgbit/cxgbit_cm.c b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
index 8de16016b6de..71888b979ab5 100644
--- a/drivers/target/iscsi/cxgbit/cxgbit_cm.c
+++ b/drivers/target/iscsi/cxgbit/cxgbit_cm.c
@@ -598,9 +598,12 @@ out:
mutex_unlock(&cdev_list_lock);
}
+static void __cxgbit_free_conn(struct cxgbit_sock *csk);
+
void cxgbit_free_np(struct iscsi_np *np)
{
struct cxgbit_np *cnp = np->np_context;
+ struct cxgbit_sock *csk, *tmp;
cnp->com.state = CSK_STATE_DEAD;
if (cnp->com.cdev)
@@ -608,6 +611,13 @@ void cxgbit_free_np(struct iscsi_np *np)
else
cxgbit_free_all_np(cnp);
+ spin_lock_bh(&cnp->np_accept_lock);
+ list_for_each_entry_safe(csk, tmp, &cnp->np_accept_list, accept_node) {
+ list_del_init(&csk->accept_node);
+ __cxgbit_free_conn(csk);
+ }
+ spin_unlock_bh(&cnp->np_accept_lock);
+
np->np_context = NULL;
cxgbit_put_cnp(cnp);
}
@@ -705,9 +715,9 @@ void cxgbit_abort_conn(struct cxgbit_sock *csk)
csk->tid, 600, __func__);
}
-void cxgbit_free_conn(struct iscsi_conn *conn)
+static void __cxgbit_free_conn(struct cxgbit_sock *csk)
{
- struct cxgbit_sock *csk = conn->context;
+ struct iscsi_conn *conn = csk->conn;
bool release = false;
pr_debug("%s: state %d\n",
@@ -716,7 +726,7 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
spin_lock_bh(&csk->lock);
switch (csk->com.state) {
case CSK_STATE_ESTABLISHED:
- if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
+ if (conn && (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)) {
csk->com.state = CSK_STATE_CLOSING;
cxgbit_send_halfclose(csk);
} else {
@@ -741,6 +751,11 @@ void cxgbit_free_conn(struct iscsi_conn *conn)
cxgbit_put_csk(csk);
}
+void cxgbit_free_conn(struct iscsi_conn *conn)
+{
+ __cxgbit_free_conn(conn->context);
+}
+
static void cxgbit_set_emss(struct cxgbit_sock *csk, u16 opt)
{
csk->emss = csk->com.cdev->lldi.mtus[TCPOPT_MSS_G(opt)] -
@@ -803,6 +818,7 @@ void _cxgbit_free_csk(struct kref *kref)
spin_unlock_bh(&cdev->cskq.lock);
cxgbit_free_skb(csk);
+ cxgbit_put_cnp(csk->cnp);
cxgbit_put_cdev(cdev);
kfree(csk);
@@ -1351,6 +1367,7 @@ cxgbit_pass_accept_req(struct cxgbit_device *cdev, struct sk_buff *skb)
goto rel_skb;
}
+ cxgbit_get_cnp(cnp);
cxgbit_get_cdev(cdev);
spin_lock(&cdev->cskq.lock);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index cc756a123fd8..c1d5a173553d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4355,7 +4355,7 @@ int iscsit_close_session(struct iscsi_session *sess)
transport_deregister_session(sess->se_sess);
if (sess->sess_ops->ErrorRecoveryLevel == 2)
- iscsit_free_connection_recovery_entires(sess);
+ iscsit_free_connection_recovery_entries(sess);
iscsit_free_all_ooo_cmdsns(sess);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 718fe9a1b709..1193cf884a28 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -770,21 +770,8 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
pr_err("Time2Retain timer expired for SID: %u, cleaning up"
" iSCSI session.\n", sess->sid);
- {
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&sess->conn_timeout_errors);
- spin_unlock(&tiqn->sess_err_stats.lock);
- }
- }
+ iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
iscsit_close_session(sess);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 5efa42b939a1..a211e8154f4c 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -1169,15 +1169,21 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
na = iscsit_tpg_get_node_attrib(sess);
if (!sess->sess_ops->ErrorRecoveryLevel) {
- pr_debug("Unable to recover from DataOut timeout while"
- " in ERL=0.\n");
+ pr_err("Unable to recover from DataOut timeout while"
+ " in ERL=0, closing iSCSI connection for I_T Nexus"
+ " %s,i,0x%6phN,%s,t,0x%02x\n",
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
if (++cmd->dataout_timeout_retries == na->dataout_timeout_retries) {
- pr_debug("Command ITT: 0x%08x exceeded max retries"
- " for DataOUT timeout %u, closing iSCSI connection.\n",
- cmd->init_task_tag, na->dataout_timeout_retries);
+ pr_err("Command ITT: 0x%08x exceeded max retries"
+ " for DataOUT timeout %u, closing iSCSI connection for"
+ " I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ cmd->init_task_tag, na->dataout_timeout_retries,
+ sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
goto failure;
}
@@ -1224,6 +1230,7 @@ void iscsit_handle_dataout_timeout(struct timer_list *t)
failure:
spin_unlock_bh(&cmd->dataout_timeout_lock);
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 8df9c90f3db3..b08b620b1bf0 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -125,7 +125,7 @@ struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
return NULL;
}
-void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
+void iscsit_free_connection_recovery_entries(struct iscsi_session *sess)
{
struct iscsi_cmd *cmd, *cmd_tmp;
struct iscsi_conn_recovery *cr, *cr_tmp;
diff --git a/drivers/target/iscsi/iscsi_target_erl2.h b/drivers/target/iscsi/iscsi_target_erl2.h
index 93e180d68d07..a39b0caf2337 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.h
+++ b/drivers/target/iscsi/iscsi_target_erl2.h
@@ -13,7 +13,7 @@ extern void iscsit_create_conn_recovery_datain_values(struct iscsi_cmd *, __be32
extern void iscsit_create_conn_recovery_dataout_values(struct iscsi_cmd *);
extern struct iscsi_conn_recovery *iscsit_get_inactive_connection_recovery_entry(
struct iscsi_session *, u16);
-extern void iscsit_free_connection_recovery_entires(struct iscsi_session *);
+extern void iscsit_free_connection_recovery_entries(struct iscsi_session *);
extern int iscsit_remove_active_connection_recovery_entry(
struct iscsi_conn_recovery *, struct iscsi_session *);
extern int iscsit_remove_cmd_from_connection_recovery(struct iscsi_cmd *,
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb90c80ff388..ae3209efd0e0 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -578,7 +578,7 @@ int iscsi_login_post_auth_non_zero_tsih(
}
/*
- * Check for any connection recovery entires containing CID.
+ * Check for any connection recovery entries containing CID.
* We use the original ExpStatSN sent in the first login request
* to acknowledge commands for the failed connection.
*
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index df0a39811dc2..bb98882bdaa7 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -328,10 +328,10 @@ static ssize_t iscsi_stat_tgt_attr_fail_intr_name_show(struct config_item *item,
{
struct iscsi_tiqn *tiqn = iscsi_tgt_attr_tiqn(item);
struct iscsi_login_stats *lstat = &tiqn->login_stats;
- unsigned char buf[224];
+ unsigned char buf[ISCSI_IQN_LEN];
spin_lock(&lstat->lock);
- snprintf(buf, 224, "%s", lstat->last_intr_fail_name[0] ?
+ snprintf(buf, ISCSI_IQN_LEN, "%s", lstat->last_intr_fail_name[0] ?
lstat->last_intr_fail_name : NONE);
spin_unlock(&lstat->lock);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 49be1e41290c..1227872227dc 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -915,6 +915,7 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
void iscsit_handle_nopin_response_timeout(struct timer_list *t)
{
struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer);
+ struct iscsi_session *sess = conn->sess;
iscsit_inc_conn_usage_count(conn);
@@ -925,28 +926,14 @@ void iscsit_handle_nopin_response_timeout(struct timer_list *t)
return;
}
- pr_debug("Did not receive response to NOPIN on CID: %hu on"
- " SID: %u, failing connection.\n", conn->cid,
- conn->sess->sid);
+ pr_err("Did not receive response to NOPIN on CID: %hu, failing"
+ " connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
+ conn->cid, sess->sess_ops->InitiatorName, sess->isid,
+ sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
spin_unlock_bh(&conn->nopin_timer_lock);
- {
- struct iscsi_portal_group *tpg = conn->sess->tpg;
- struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
-
- if (tiqn) {
- spin_lock_bh(&tiqn->sess_err_stats.lock);
- strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- conn->sess->sess_ops->InitiatorName);
- tiqn->sess_err_stats.last_sess_failure_type =
- ISCSI_SESS_ERR_CXN_TIMEOUT;
- tiqn->sess_err_stats.cxn_timeout_errors++;
- atomic_long_inc(&conn->sess->conn_timeout_errors);
- spin_unlock_bh(&tiqn->sess_err_stats.lock);
- }
- }
-
+ iscsit_fill_cxn_timeout_err_stats(sess);
iscsit_cause_connection_reinstatement(conn, 0);
iscsit_dec_conn_usage_count(conn);
}
@@ -1405,3 +1392,22 @@ struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *conn)
return tpg->tpg_tiqn;
}
+
+void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *sess)
+{
+ struct iscsi_portal_group *tpg = sess->tpg;
+ struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
+
+ if (!tiqn)
+ return;
+
+ spin_lock_bh(&tiqn->sess_err_stats.lock);
+ strlcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
+ sess->sess_ops->InitiatorName,
+ sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
+ tiqn->sess_err_stats.last_sess_failure_type =
+ ISCSI_SESS_ERR_CXN_TIMEOUT;
+ tiqn->sess_err_stats.cxn_timeout_errors++;
+ atomic_long_inc(&sess->conn_timeout_errors);
+ spin_unlock_bh(&tiqn->sess_err_stats.lock);
+}
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index d66dfc212624..68e84803b0a1 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -67,5 +67,6 @@ extern int rx_data(struct iscsi_conn *, struct kvec *, int, int);
extern int tx_data(struct iscsi_conn *, struct kvec *, int, int);
extern void iscsit_collect_login_stats(struct iscsi_conn *, u8, u8);
extern struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsi_conn *);
+extern void iscsit_fill_cxn_timeout_err_stats(struct iscsi_session *);
#endif /*** ISCSI_TARGET_UTIL_H ***/
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index ce1321a5cb7b..b5ed9c377060 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -514,7 +514,7 @@ iblock_execute_write_same(struct se_cmd *cmd)
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sectors -= 1;
}
@@ -635,14 +635,15 @@ static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
}
static int
-iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ struct sg_mapping_iter *miter)
{
struct se_device *dev = cmd->se_dev;
struct blk_integrity *bi;
struct bio_integrity_payload *bip;
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
- struct scatterlist *sg;
- int i, rc;
+ int rc;
+ size_t resid, len;
bi = bdev_get_integrity(ib_dev->ibd_bd);
if (!bi) {
@@ -650,31 +651,39 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
return -ENODEV;
}
- bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+ bip = bio_integrity_alloc(bio, GFP_NOIO,
+ min_t(unsigned int, cmd->t_prot_nents, BIO_MAX_PAGES));
if (IS_ERR(bip)) {
pr_err("Unable to allocate bio_integrity_payload\n");
return PTR_ERR(bip);
}
- bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
- dev->prot_length;
- bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+ bip_set_seed(bip, bio->bi_iter.bi_sector);
pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
(unsigned long long)bip->bip_iter.bi_sector);
- for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+ resid = bip->bip_iter.bi_size;
+ while (resid > 0 && sg_miter_next(miter)) {
- rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
- sg->offset);
- if (rc != sg->length) {
+ len = min_t(size_t, miter->length, resid);
+ rc = bio_integrity_add_page(bio, miter->page, len,
+ offset_in_page(miter->addr));
+ if (rc != len) {
pr_err("bio_integrity_add_page() failed; %d\n", rc);
+ sg_miter_stop(miter);
return -ENOMEM;
}
- pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
- sg_page(sg), sg->length, sg->offset);
+ pr_debug("Added bio integrity page: %p length: %zu offset: %lu\n",
+ miter->page, len, offset_in_page(miter->addr));
+
+ resid -= len;
+ if (len < miter->length)
+ miter->consumed -= miter->length - len;
}
+ sg_miter_stop(miter);
return 0;
}
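
Switching to an sg_mapping_iter lets the protection-information pages be consumed in arbitrary chunk sizes across several bios, instead of assuming one scatterlist entry per bio; the core iterator pattern, sketched (process_chunk is a placeholder):

    struct sg_mapping_iter miter;

    sg_miter_start(&miter, sgl, nents, SG_MITER_FROM_SG);
    while (sg_miter_next(&miter)) {
            /* miter.page / miter.addr / miter.length describe this chunk */
            process_chunk(miter.page, offset_in_page(miter.addr), miter.length);
            /* shrink miter.consumed to hand back part of the chunk */
    }
    sg_miter_stop(&miter);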
@@ -686,12 +695,13 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct se_device *dev = cmd->se_dev;
sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
- struct bio *bio, *bio_start;
+ struct bio *bio;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
unsigned bio_cnt;
- int i, op, op_flags = 0;
+ int i, rc, op, op_flags = 0;
+ struct sg_mapping_iter prot_miter;
if (data_direction == DMA_TO_DEVICE) {
struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
@@ -726,13 +736,17 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
if (!bio)
goto fail_free_ibr;
- bio_start = bio;
bio_list_init(&list);
bio_list_add(&list, bio);
refcount_set(&ibr->pending, 2);
bio_cnt = 1;
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type)
+ sg_miter_start(&prot_miter, cmd->t_prot_sg, cmd->t_prot_nents,
+ op == REQ_OP_READ ? SG_MITER_FROM_SG :
+ SG_MITER_TO_SG);
+
for_each_sg(sgl, sg, sgl_nents, i) {
/*
* XXX: if the length the device accepts is shorter than the
@@ -741,6 +755,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
*/
while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
!= sg->length) {
+ if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
+ if (rc)
+ goto fail_put_bios;
+ }
+
if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
iblock_submit_bios(&list);
bio_cnt = 0;
@@ -757,12 +777,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
}
/* Always in 512 byte units for Linux/Block */
- block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+ block_lba += sg->length >> SECTOR_SHIFT;
sg_num--;
}
if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
- int rc = iblock_alloc_bip(cmd, bio_start);
+ rc = iblock_alloc_bip(cmd, bio, &prot_miter);
if (rc)
goto fail_put_bios;
}
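
The iblock changes above size the integrity payload per bio (bio_integrity_bytes()) and walk the protection scatterlist with one sg_mapping_iter shared across all bios of the command, so a chunk may be only partially consumed by one bio; rewinding miter->consumed pushes the remainder back for the next caller. A small runnable sketch of that consume-and-push-back logic over plain buffers (illustrative types, not the kernel sg_miter API):

#include <stddef.h>
#include <stdio.h>

struct chunk { size_t len; };

struct miter {
	const struct chunk *chunks;
	size_t nchunks, idx;	/* which chunk we are on */
	size_t consumed;	/* bytes of chunks[idx] already handed out */
};

/* Hand out up to 'resid' bytes; like the loop above, take
 * min(chunk remainder, resid) and leave what this caller could
 * not use queued for the next one. */
static size_t miter_fill(struct miter *m, size_t resid)
{
	size_t filled = 0;

	while (resid > 0 && m->idx < m->nchunks) {
		size_t avail = m->chunks[m->idx].len - m->consumed;
		size_t len = avail < resid ? avail : resid;

		filled += len;
		resid -= len;
		m->consumed += len;
		if (m->consumed == m->chunks[m->idx].len) {
			m->idx++;	/* chunk fully consumed */
			m->consumed = 0;
		}
		/* else: the remainder stays pending, the analogue of
		 * miter->consumed -= miter->length - len */
	}
	return filled;
}

int main(void)
{
	struct chunk c[] = { { 4096 }, { 4096 } };
	struct miter m = { c, 2, 0, 0 };

	/* Two "bios" each needing 3000 bytes of protection data. */
	printf("%zu %zu\n", miter_fill(&m, 3000), miter_fill(&m, 3000));
	return 0;
}
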
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 9cc3843404d4..cefc641145b3 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -9,7 +9,6 @@
#define IBLOCK_VERSION "4.0"
#define IBLOCK_MAX_CDBS 16
-#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
refcount_t pending;
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index ebac2b49b9c6..1ac1f7d2e6c9 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -360,6 +360,10 @@ static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success,
unsigned int offset;
sense_reason_t ret = TCM_NO_SENSE;
int i, count;
+
+ if (!success)
+ return 0;
+
/*
* From sbc3r22.pdf section 5.48 XDWRITEREAD (10) command
*
@@ -425,14 +429,8 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success,
struct se_device *dev = cmd->se_dev;
sense_reason_t ret = TCM_NO_SENSE;
- /*
- * Only set SCF_COMPARE_AND_WRITE_POST to force a response fall-through
- * within target_complete_ok_work() if the command was successfully
- * sent to the backend driver.
- */
spin_lock_irq(&cmd->t_state_lock);
- if (cmd->transport_state & CMD_T_SENT) {
- cmd->se_cmd_flags |= SCF_COMPARE_AND_WRITE_POST;
+ if (success) {
*post_ret = 1;
if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
@@ -453,7 +451,8 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
int *post_ret)
{
struct se_device *dev = cmd->se_dev;
- struct scatterlist *write_sg = NULL, *sg;
+ struct sg_table write_tbl = { };
+ struct scatterlist *write_sg, *sg;
unsigned char *buf = NULL, *addr;
struct sg_mapping_iter m;
unsigned int offset = 0, len;
@@ -494,14 +493,12 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes
goto out;
}
- write_sg = kmalloc_array(cmd->t_data_nents, sizeof(*write_sg),
- GFP_KERNEL);
- if (!write_sg) {
+ if (sg_alloc_table(&write_tbl, cmd->t_data_nents, GFP_KERNEL) < 0) {
pr_err("Unable to allocate compare_and_write sg\n");
ret = TCM_OUT_OF_RESOURCES;
goto out;
}
- sg_init_table(write_sg, cmd->t_data_nents);
+ write_sg = write_tbl.sgl;
/*
* Setup verify and write data payloads from total NumberLBAs.
*/
@@ -597,7 +594,7 @@ out:
* sbc_compare_and_write() before the original READ I/O submission.
*/
up(&dev->caw_sem);
- kfree(write_sg);
+ sg_free_table(&write_tbl);
kfree(buf);
return ret;
}
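
Switching compare-and-write to sg_alloc_table() also lets the shared out: label call sg_free_table() unconditionally, because a zero-initialized struct sg_table is safe to free even when allocation never happened. A hedged userspace analogue of that zero-init-then-unconditional-cleanup idiom (types here are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct table {
	int *entries;
	size_t nents;
};

static int table_alloc(struct table *t, size_t n)
{
	t->entries = calloc(n, sizeof(*t->entries));
	if (!t->entries)
		return -1;
	t->nents = n;
	return 0;
}

/* Safe on a zero-initialized table, like sg_free_table(). */
static void table_free(struct table *t)
{
	free(t->entries);
	memset(t, 0, sizeof(*t));
}

static int do_compare_and_write(size_t nents, int fail_early)
{
	struct table write_tbl = { 0 };	/* mirrors: struct sg_table write_tbl = { }; */
	int ret = -1;

	if (fail_early)
		goto out;		/* cleanup is still valid here */
	if (table_alloc(&write_tbl, nents) < 0)
		goto out;

	ret = 0;			/* ... use write_tbl.entries ... */
out:
	table_free(&write_tbl);		/* unconditional, like the out: path */
	return ret;
}

int main(void)
{
	printf("%d %d\n", do_compare_and_write(8, 0), do_compare_and_write(8, 1));
	return 0;
}
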
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 86c0156e6c88..4cf33e2cc705 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1778,7 +1778,7 @@ EXPORT_SYMBOL(target_submit_tmr);
void transport_generic_request_failure(struct se_cmd *cmd,
sense_reason_t sense_reason)
{
- int ret = 0, post_ret = 0;
+ int ret = 0;
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
@@ -1789,13 +1789,8 @@ void transport_generic_request_failure(struct se_cmd *cmd,
*/
transport_complete_task_attr(cmd);
- /*
- * Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_sem.
- */
- if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
- cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (cmd->transport_complete_callback)
+ cmd->transport_complete_callback(cmd, false, NULL);
if (transport_check_aborted_status(cmd, 1))
return;
@@ -2012,7 +2007,7 @@ void target_execute_cmd(struct se_cmd *cmd)
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
*
- * If the received CDB has aleady been aborted stop processing it here.
+ * If the received CDB has already been aborted stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
if (__transport_check_aborted_status(cmd, 1)) {
@@ -2516,7 +2511,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
}
/*
- * Determine is the TCM fabric module has already allocated physical
+ * Determine if the TCM fabric module has already allocated physical
* memory, and is directly calling transport_generic_map_mem_to_cmd()
* beforehand.
*/
@@ -2754,7 +2749,7 @@ static void target_release_cmd_kref(struct kref *kref)
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
- if (list_empty(&se_sess->sess_cmd_list))
+ if (se_sess->sess_tearing_down && list_empty(&se_sess->sess_cmd_list))
wake_up(&se_sess->cmd_list_wq);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2907,7 +2902,7 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
spin_lock_irq(&se_sess->sess_cmd_lock);
do {
- ret = wait_event_interruptible_lock_irq_timeout(
+ ret = wait_event_lock_irq_timeout(
se_sess->cmd_list_wq,
list_empty(&se_sess->sess_cmd_list),
se_sess->sess_cmd_lock, 180 * HZ);
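
Two related changes above: the wake-up in target_release_cmd_kref() now fires only once session teardown has started, and the waiter uses the uninterruptible wait_event_lock_irq_timeout() so a signal cannot abort the drain. A rough pthreads analogue of that predicate-plus-timeout wait (illustrative only; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int cmd_count = 0;	/* stands in for sess_cmd_list */
static int tearing_down = 0;	/* stands in for sess_tearing_down */

static void put_cmd(void)
{
	pthread_mutex_lock(&lock);
	cmd_count--;
	/* Like target_release_cmd_kref(): only wake the waiter once
	 * teardown has started and the list has drained. */
	if (tearing_down && cmd_count == 0)
		pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

static void wait_for_cmds(void)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 180;	/* the 180 * HZ timeout above */

	pthread_mutex_lock(&lock);
	tearing_down = 1;
	while (cmd_count != 0) {
		/* Re-check the predicate on every wakeup. */
		if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0) {
			printf("timed out with %d commands pending\n", cmd_count);
			break;
		}
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	cmd_count = 1;
	put_cmd();		/* completes before the wait begins */
	wait_for_cmds();	/* returns at once: list already empty */
	return 0;
}
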
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 2718a933c0c6..70adcfdca8d1 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -391,7 +391,6 @@ out:
struct xcopy_pt_cmd {
bool remote_port;
struct se_cmd se_cmd;
- struct xcopy_op *xcopy_op;
struct completion xpt_passthrough_sem;
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
};
@@ -596,8 +595,6 @@ static int target_xcopy_setup_pt_cmd(
* X-COPY PUSH or X-COPY PULL based upon where the CDB was received.
*/
target_xcopy_init_pt_lun(se_dev, cmd, remote_port);
-
- xpt_cmd->xcopy_op = xop;
target_xcopy_setup_pt_port(xpt_cmd, xop, remote_port);
cmd->tag = 0;
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 3be9519654e5..cf3fad2cb871 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -2,7 +2,7 @@
* TURBOchannel bus services.
*
* Copyright (c) Harald Koerfgen, 1998
- * Copyright (c) 2001, 2003, 2005, 2006 Maciej W. Rozycki
+ * Copyright (c) 2001, 2003, 2005, 2006, 2018 Maciej W. Rozycki
* Copyright (c) 2005 James Simmons
*
* This file is subject to the terms and conditions of the GNU
@@ -10,6 +10,7 @@
* directory of this archive for more details.
*/
#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
@@ -92,6 +93,11 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
tdev->dev.bus = &tc_bus_type;
tdev->slot = slot;
+ /* TURBOchannel has 34-bit DMA addressing (16GiB space). */
+ tdev->dma_mask = DMA_BIT_MASK(34);
+ tdev->dev.dma_mask = &tdev->dma_mask;
+ tdev->dev.coherent_dma_mask = DMA_BIT_MASK(34);
+
for (i = 0; i < 8; i++) {
tdev->firmware[i] =
readb(module + offset + TC_FIRM_VER + 4 * i);
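
For the TURBOchannel DMA masks above: DMA_BIT_MASK(n) is, for n < 64, ((1ULL << n) - 1), so 34 bits gives exactly the 16 GiB window the comment mentions. A tiny self-contained check (macro re-declared locally for illustration; the kernel version also special-cases n == 64):

#include <inttypes.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) ((1ULL << (n)) - 1)

int main(void)
{
	uint64_t mask = DMA_BIT_MASK(34);

	/* 34 address bits => a 16 GiB addressable DMA window. */
	printf("mask = 0x%" PRIx64 " (%" PRIu64 " GiB addressable)\n",
	       mask, (mask + 1) >> 30);
	return 0;
}
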
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 0e69edc77d18..5422523c03f8 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -432,7 +432,7 @@ source "drivers/thermal/samsung/Kconfig"
endmenu
menu "STMicroelectronics thermal drivers"
-depends on ARCH_STI && OF
+depends on (ARCH_STI || ARCH_STM32) && OF
source "drivers/thermal/st/Kconfig"
endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 610344eb3e03..82bb50dc6423 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
obj-$(CONFIG_INTEL_BXT_PMIC_THERMAL) += intel_bxt_pmic_thermal.o
obj-$(CONFIG_INTEL_PCH_THERMAL) += intel_pch_thermal.o
-obj-$(CONFIG_ST_THERMAL) += st/
+obj-y += st/
obj-$(CONFIG_QCOM_TSENS) += qcom/
obj-y += tegra/
obj-$(CONFIG_HISI_THERMAL) += hisi_thermal.o
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 2c2f6d93034e..92f67d40f2e9 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -526,8 +526,8 @@ static int armada_thermal_probe_legacy(struct platform_device *pdev,
/* First memory region points towards the status register */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (IS_ERR(res))
- return PTR_ERR(res);
+ if (!res)
+ return -EIO;
/*
* Edit the resource start address and length to map over all the
diff --git a/drivers/thermal/da9062-thermal.c b/drivers/thermal/da9062-thermal.c
index dd8dd947b7f0..01b0cb994457 100644
--- a/drivers/thermal/da9062-thermal.c
+++ b/drivers/thermal/da9062-thermal.c
@@ -106,7 +106,7 @@ static void da9062_thermal_poll_on(struct work_struct *work)
THERMAL_EVENT_UNSPECIFIED);
delay = msecs_to_jiffies(thermal->zone->passive_delay);
- schedule_delayed_work(&thermal->work, delay);
+ queue_delayed_work(system_freezable_wq, &thermal->work, delay);
return;
}
@@ -125,7 +125,7 @@ static irqreturn_t da9062_thermal_irq_handler(int irq, void *data)
struct da9062_thermal *thermal = data;
disable_irq_nosync(thermal->irq);
- schedule_delayed_work(&thermal->work, 0);
+ queue_delayed_work(system_freezable_wq, &thermal->work, 0);
return IRQ_HANDLED;
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 761d0559c268..c4111a98f1a7 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -55,25 +55,39 @@
#define HI3660_TEMP_STEP (205)
#define HI3660_TEMP_LAG (4000)
-#define HI6220_DEFAULT_SENSOR 2
-#define HI3660_DEFAULT_SENSOR 1
+#define HI6220_CLUSTER0_SENSOR 2
+#define HI6220_CLUSTER1_SENSOR 1
+
+#define HI3660_LITTLE_SENSOR 0
+#define HI3660_BIG_SENSOR 1
+#define HI3660_G3D_SENSOR 2
+#define HI3660_MODEM_SENSOR 3
+
+struct hisi_thermal_data;
struct hisi_thermal_sensor {
+ struct hisi_thermal_data *data;
struct thermal_zone_device *tzd;
+ const char *irq_name;
uint32_t id;
uint32_t thres_temp;
};
+struct hisi_thermal_ops {
+ int (*get_temp)(struct hisi_thermal_sensor *sensor);
+ int (*enable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*disable_sensor)(struct hisi_thermal_sensor *sensor);
+ int (*irq_handler)(struct hisi_thermal_sensor *sensor);
+ int (*probe)(struct hisi_thermal_data *data);
+};
+
struct hisi_thermal_data {
- int (*get_temp)(struct hisi_thermal_data *data);
- int (*enable_sensor)(struct hisi_thermal_data *data);
- int (*disable_sensor)(struct hisi_thermal_data *data);
- int (*irq_handler)(struct hisi_thermal_data *data);
+ const struct hisi_thermal_ops *ops;
+ struct hisi_thermal_sensor *sensor;
struct platform_device *pdev;
struct clk *clk;
- struct hisi_thermal_sensor sensor;
void __iomem *regs;
- int irq;
+ int nr_sensors;
};
/*
@@ -266,30 +280,40 @@ static inline void hi6220_thermal_hdak_set(void __iomem *addr, int value)
(value << 4), addr + HI6220_TEMP0_CFG);
}
-static int hi6220_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi6220_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
hi6220_thermal_alarm_clear(data->regs, 1);
return 0;
}
-static int hi3660_thermal_irq_handler(struct hisi_thermal_data *data)
+static int hi3660_thermal_irq_handler(struct hisi_thermal_sensor *sensor)
{
- hi3660_thermal_alarm_clear(data->regs, data->sensor.id, 1);
+ struct hisi_thermal_data *data = sensor->data;
+
+ hi3660_thermal_alarm_clear(data->regs, sensor->id, 1);
return 0;
}
-static int hi6220_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi6220_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
return hi6220_thermal_get_temperature(data->regs);
}
-static int hi3660_thermal_get_temp(struct hisi_thermal_data *data)
+static int hi3660_thermal_get_temp(struct hisi_thermal_sensor *sensor)
{
- return hi3660_thermal_get_temperature(data->regs, data->sensor.id);
+ struct hisi_thermal_data *data = sensor->data;
+
+ return hi3660_thermal_get_temperature(data->regs, sensor->id);
}
-static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
hi6220_thermal_enable(data->regs, 0);
hi6220_thermal_alarm_enable(data->regs, 0);
@@ -300,16 +324,18 @@ static int hi6220_thermal_disable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_disable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_disable_sensor(struct hisi_thermal_sensor *sensor)
{
+ struct hisi_thermal_data *data = sensor->data;
+
/* disable sensor module */
- hi3660_thermal_alarm_enable(data->regs, data->sensor.id, 0);
+ hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
return 0;
}
-static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi6220_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
int ret;
/* enable clock for tsensor */
@@ -345,10 +371,10 @@ static int hi6220_thermal_enable_sensor(struct hisi_thermal_data *data)
return 0;
}
-static int hi3660_thermal_enable_sensor(struct hisi_thermal_data *data)
+static int hi3660_thermal_enable_sensor(struct hisi_thermal_sensor *sensor)
{
unsigned int value;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_data *data = sensor->data;
/* disable interrupt */
hi3660_thermal_alarm_enable(data->regs, sensor->id, 0);
@@ -371,21 +397,8 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
- data->get_temp = hi6220_thermal_get_temp;
- data->enable_sensor = hi6220_thermal_enable_sensor;
- data->disable_sensor = hi6220_thermal_disable_sensor;
- data->irq_handler = hi6220_thermal_irq_handler;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
-
data->clk = devm_clk_get(dev, "thermal_clk");
if (IS_ERR(data->clk)) {
ret = PTR_ERR(data->clk);
@@ -394,11 +407,14 @@ static int hi6220_thermal_probe(struct hisi_thermal_data *data)
return ret;
}
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor), GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->sensor.id = HI6220_DEFAULT_SENSOR;
+ data->sensor[0].id = HI6220_CLUSTER0_SENSOR;
+ data->sensor[0].irq_name = "tsensor_intr";
+ data->sensor[0].data = data;
+ data->nr_sensors = 1;
return 0;
}
@@ -407,38 +423,34 @@ static int hi3660_thermal_probe(struct hisi_thermal_data *data)
{
struct platform_device *pdev = data->pdev;
struct device *dev = &pdev->dev;
- struct resource *res;
- data->get_temp = hi3660_thermal_get_temp;
- data->enable_sensor = hi3660_thermal_enable_sensor;
- data->disable_sensor = hi3660_thermal_disable_sensor;
- data->irq_handler = hi3660_thermal_irq_handler;
+ data->nr_sensors = 2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- dev_err(dev, "failed to get io address\n");
- return PTR_ERR(data->regs);
- }
+ data->sensor = devm_kzalloc(dev, sizeof(*data->sensor) *
+ data->nr_sensors, GFP_KERNEL);
+ if (!data->sensor)
+ return -ENOMEM;
- data->irq = platform_get_irq(pdev, 0);
- if (data->irq < 0)
- return data->irq;
+ data->sensor[0].id = HI3660_BIG_SENSOR;
+ data->sensor[0].irq_name = "tsensor_a73";
+ data->sensor[0].data = data;
- data->sensor.id = HI3660_DEFAULT_SENSOR;
+ data->sensor[1].id = HI3660_LITTLE_SENSOR;
+ data->sensor[1].irq_name = "tsensor_a53";
+ data->sensor[1].data = data;
return 0;
}
static int hisi_thermal_get_temp(void *__data, int *temp)
{
- struct hisi_thermal_data *data = __data;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = __data;
+ struct hisi_thermal_data *data = sensor->data;
- *temp = data->get_temp(data);
+ *temp = data->ops->get_temp(sensor);
- dev_dbg(&data->pdev->dev, "id=%d, temp=%d, thres=%d\n",
- sensor->id, *temp, sensor->thres_temp);
+ dev_dbg(&data->pdev->dev, "tzd=%p, id=%d, temp=%d, thres=%d\n",
+ sensor->tzd, sensor->id, *temp, sensor->thres_temp);
return 0;
}
@@ -449,38 +461,39 @@ static const struct thermal_zone_of_device_ops hisi_of_thermal_ops = {
static irqreturn_t hisi_thermal_alarm_irq_thread(int irq, void *dev)
{
- struct hisi_thermal_data *data = dev;
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ struct hisi_thermal_sensor *sensor = dev;
+ struct hisi_thermal_data *data = sensor->data;
int temp = 0;
- data->irq_handler(data);
+ data->ops->irq_handler(sensor);
- hisi_thermal_get_temp(data, &temp);
+ hisi_thermal_get_temp(sensor, &temp);
if (temp >= sensor->thres_temp) {
- dev_crit(&data->pdev->dev, "THERMAL ALARM: %d > %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM: %d > %d\n",
+ sensor->id, temp, sensor->thres_temp);
- thermal_zone_device_update(data->sensor.tzd,
+ thermal_zone_device_update(sensor->tzd,
THERMAL_EVENT_UNSPECIFIED);
} else {
- dev_crit(&data->pdev->dev, "THERMAL ALARM stopped: %d < %d\n",
- temp, sensor->thres_temp);
+ dev_crit(&data->pdev->dev,
+ "sensor <%d> THERMAL ALARM stopped: %d < %d\n",
+ sensor->id, temp, sensor->thres_temp);
}
return IRQ_HANDLED;
}
static int hisi_thermal_register_sensor(struct platform_device *pdev,
- struct hisi_thermal_data *data,
struct hisi_thermal_sensor *sensor)
{
int ret, i;
const struct thermal_trip *trip;
sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
- sensor->id, data,
+ sensor->id, sensor,
&hisi_of_thermal_ops);
if (IS_ERR(sensor->tzd)) {
ret = PTR_ERR(sensor->tzd);
@@ -502,14 +515,30 @@ static int hisi_thermal_register_sensor(struct platform_device *pdev,
return 0;
}
+static const struct hisi_thermal_ops hi6220_ops = {
+ .get_temp = hi6220_thermal_get_temp,
+ .enable_sensor = hi6220_thermal_enable_sensor,
+ .disable_sensor = hi6220_thermal_disable_sensor,
+ .irq_handler = hi6220_thermal_irq_handler,
+ .probe = hi6220_thermal_probe,
+};
+
+static const struct hisi_thermal_ops hi3660_ops = {
+ .get_temp = hi3660_thermal_get_temp,
+ .enable_sensor = hi3660_thermal_enable_sensor,
+ .disable_sensor = hi3660_thermal_disable_sensor,
+ .irq_handler = hi3660_thermal_irq_handler,
+ .probe = hi3660_thermal_probe,
+};
+
static const struct of_device_id of_hisi_thermal_match[] = {
{
.compatible = "hisilicon,tsensor",
- .data = hi6220_thermal_probe
+ .data = &hi6220_ops,
},
{
.compatible = "hisilicon,hi3660-tsensor",
- .data = hi3660_thermal_probe
+ .data = &hi3660_ops,
},
{ /* end */ }
};
@@ -527,9 +556,9 @@ static void hisi_thermal_toggle_sensor(struct hisi_thermal_sensor *sensor,
static int hisi_thermal_probe(struct platform_device *pdev)
{
struct hisi_thermal_data *data;
- int (*platform_probe)(struct hisi_thermal_data *);
struct device *dev = &pdev->dev;
- int ret;
+ struct resource *res;
+ int i, ret;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -537,41 +566,50 @@ static int hisi_thermal_probe(struct platform_device *pdev)
data->pdev = pdev;
platform_set_drvdata(pdev, data);
+ data->ops = of_device_get_match_data(dev);
- platform_probe = of_device_get_match_data(dev);
- if (!platform_probe) {
- dev_err(dev, "failed to get probe func\n");
- return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ dev_err(dev, "failed to get io address\n");
+ return PTR_ERR(data->regs);
}
- ret = platform_probe(data);
+ ret = data->ops->probe(data);
if (ret)
return ret;
- ret = hisi_thermal_register_sensor(pdev, data,
- &data->sensor);
- if (ret) {
- dev_err(dev, "failed to register thermal sensor: %d\n", ret);
- return ret;
- }
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- ret = data->enable_sensor(data);
- if (ret) {
- dev_err(dev, "Failed to setup the sensor: %d\n", ret);
- return ret;
- }
+ ret = hisi_thermal_register_sensor(pdev, sensor);
+ if (ret) {
+ dev_err(dev, "failed to register thermal sensor: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = platform_get_irq_byname(pdev, sensor->irq_name);
+ if (ret < 0)
+ return ret;
- if (data->irq) {
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- hisi_thermal_alarm_irq_thread,
- IRQF_ONESHOT, "hisi_thermal", data);
+ ret = devm_request_threaded_irq(dev, ret, NULL,
+ hisi_thermal_alarm_irq_thread,
+ IRQF_ONESHOT, sensor->irq_name,
+ sensor);
if (ret < 0) {
- dev_err(dev, "failed to request alarm irq: %d\n", ret);
+ dev_err(dev, "Failed to request alarm irq: %d\n", ret);
return ret;
}
- }
- hisi_thermal_toggle_sensor(&data->sensor, true);
+ ret = data->ops->enable_sensor(sensor);
+ if (ret) {
+ dev_err(dev, "Failed to setup the sensor: %d\n", ret);
+ return ret;
+ }
+
+ hisi_thermal_toggle_sensor(sensor, true);
+ }
return 0;
}
@@ -579,11 +617,14 @@ static int hisi_thermal_probe(struct platform_device *pdev)
static int hisi_thermal_remove(struct platform_device *pdev)
{
struct hisi_thermal_data *data = platform_get_drvdata(pdev);
- struct hisi_thermal_sensor *sensor = &data->sensor;
+ int i;
- hisi_thermal_toggle_sensor(sensor, false);
+ for (i = 0; i < data->nr_sensors; i++) {
+ struct hisi_thermal_sensor *sensor = &data->sensor[i];
- data->disable_sensor(data);
+ hisi_thermal_toggle_sensor(sensor, false);
+ data->ops->disable_sensor(sensor);
+ }
return 0;
}
@@ -592,8 +633,10 @@ static int hisi_thermal_remove(struct platform_device *pdev)
static int hisi_thermal_suspend(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i;
- data->disable_sensor(data);
+ for (i = 0; i < data->nr_sensors; i++)
+ data->ops->disable_sensor(&data->sensor[i]);
return 0;
}
@@ -601,8 +644,12 @@ static int hisi_thermal_suspend(struct device *dev)
static int hisi_thermal_resume(struct device *dev)
{
struct hisi_thermal_data *data = dev_get_drvdata(dev);
+ int i, ret = 0;
+
+ for (i = 0; i < data->nr_sensors; i++)
+ ret |= data->ops->enable_sensor(&data->sensor[i]);
- return data->enable_sensor(data);
+ return ret;
}
#endif
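
The hisi_thermal refactor replaces per-instance function pointers with one shared const ops table per SoC variant, selected through the of_device_id .data pointer, and hangs each sensor off that data. A compact userspace sketch of the same ops-table-plus-match-table shape (all names illustrative):

#include <stdio.h>
#include <string.h>

struct sensor { int id; };

struct thermal_ops {
	int (*get_temp)(const struct sensor *s);
};

static int hi6220_get_temp(const struct sensor *s) { return 40000 + s->id; }
static int hi3660_get_temp(const struct sensor *s) { return 45000 + s->id; }

static const struct thermal_ops hi6220_ops = { .get_temp = hi6220_get_temp };
static const struct thermal_ops hi3660_ops = { .get_temp = hi3660_get_temp };

/* Stands in for of_device_id entries carrying a .data pointer. */
struct match { const char *compatible; const struct thermal_ops *ops; };

static const struct match match_table[] = {
	{ "hisilicon,tsensor",        &hi6220_ops },
	{ "hisilicon,hi3660-tsensor", &hi3660_ops },
};

static const struct thermal_ops *get_match_data(const char *compat)
{
	for (size_t i = 0; i < sizeof(match_table) / sizeof(match_table[0]); i++)
		if (!strcmp(match_table[i].compatible, compat))
			return match_table[i].ops;
	return NULL;
}

int main(void)
{
	const struct thermal_ops *ops = get_match_data("hisilicon,hi3660-tsensor");
	struct sensor big = { .id = 1 };

	if (ops)
		printf("temp = %d\n", ops->get_temp(&big));
	return 0;
}
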
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index aa452acb60b6..15661549eb67 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -725,7 +725,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
} else {
ret = imx_init_from_tempmon_data(pdev);
if (ret) {
- dev_err(&pdev->dev, "failed to init from from fsl,tempmon-data\n");
+ dev_err(&pdev->dev, "failed to init from fsl,tempmon-data\n");
return ret;
}
}
@@ -762,9 +762,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
if (ret != -EPROBE_DEFER)
dev_err(&pdev->dev,
"failed to get thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
/*
@@ -777,9 +775,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = clk_prepare_enable(data->thermal_clk);
if (ret) {
dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto cpufreq_put;
}
data->tz = thermal_zone_device_register("imx_thermal_zone",
@@ -792,10 +788,7 @@ static int imx_thermal_probe(struct platform_device *pdev)
ret = PTR_ERR(data->tz);
dev_err(&pdev->dev,
"failed to register thermal zone device %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto clk_disable;
}
dev_info(&pdev->dev, "%s CPU temperature grade - max:%dC"
@@ -827,14 +820,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
0, "imx_thermal", data);
if (ret < 0) {
dev_err(&pdev->dev, "failed to request alarm irq: %d\n", ret);
- clk_disable_unprepare(data->thermal_clk);
- thermal_zone_device_unregister(data->tz);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
- return ret;
+ goto thermal_zone_unregister;
}
return 0;
+
+thermal_zone_unregister:
+ thermal_zone_device_unregister(data->tz);
+clk_disable:
+ clk_disable_unprepare(data->thermal_clk);
+cpufreq_put:
+ cpufreq_cooling_unregister(data->cdev);
+ cpufreq_cpu_put(data->policy);
+
+ return ret;
}
static int imx_thermal_remove(struct platform_device *pdev)
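
The imx_thermal hunks collapse four duplicated error paths into a single goto ladder, where each label releases exactly what was acquired before the failing step, in reverse order. A minimal runnable sketch of the idiom, with malloc() standing in for the clk/thermal-zone/IRQ resources:

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_at)
{
	char *clk, *zone, *irq;

	clk = malloc(16);			/* cpufreq/clk setup */
	if (!clk)
		return -1;

	zone = (fail_at == 1) ? NULL : malloc(16);	/* thermal zone */
	if (!zone)
		goto cpufreq_put;

	irq = (fail_at == 2) ? NULL : malloc(16);	/* alarm irq */
	if (!irq)
		goto thermal_zone_unregister;

	free(irq); free(zone); free(clk);	/* normal teardown elsewhere */
	return 0;

thermal_zone_unregister:
	free(zone);
cpufreq_put:
	free(clk);
	return -1;
}

int main(void)
{
	for (int i = 0; i <= 2; i++)
		printf("fail_at=%d -> %d\n", i, probe(i));
	return 0;
}
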
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4f2816559205..4bfdb4a1e47d 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -19,23 +19,34 @@
/*** Private data structures to represent thermal device tree data ***/
/**
- * struct __thermal_bind_param - a match between trip and cooling device
+ * struct __thermal_cooling_bind_param - a cooling device for a trip point
* @cooling_device: a pointer to identify the referred cooling device
- * @trip_id: the trip point index
- * @usage: the percentage (from 0 to 100) of cooling contribution
* @min: minimum cooling state used at this trip point
* @max: maximum cooling state used at this trip point
*/
-struct __thermal_bind_params {
+struct __thermal_cooling_bind_param {
struct device_node *cooling_device;
- unsigned int trip_id;
- unsigned int usage;
unsigned long min;
unsigned long max;
};
/**
+ * struct __thermal_bind_param - a match between trip and cooling device
+ * @tcbp: a pointer to an array of cooling devices
+ * @count: number of elements in array
+ * @trip_id: the trip point index
+ * @usage: the percentage (from 0 to 100) of cooling contribution
+ */
+
+struct __thermal_bind_params {
+ struct __thermal_cooling_bind_param *tcbp;
+ unsigned int count;
+ unsigned int trip_id;
+ unsigned int usage;
+};
+
+/**
* struct __thermal_zone - internal representation of a thermal zone
* @mode: current thermal zone device mode (enabled/disabled)
* @passive_delay: polling interval while passive cooling is activated
@@ -192,25 +203,31 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to bind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- ret = thermal_zone_bind_cooling_device(thermal,
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
+
+ ret = thermal_zone_bind_cooling_device(thermal,
tbp->trip_id, cdev,
- tbp->max,
- tbp->min,
+ tcbp->max,
+ tcbp->min,
tbp->usage);
- if (ret)
- return ret;
+ if (ret)
+ return ret;
+ }
}
}
@@ -221,22 +238,28 @@ static int of_thermal_unbind(struct thermal_zone_device *thermal,
struct thermal_cooling_device *cdev)
{
struct __thermal_zone *data = thermal->devdata;
- int i;
+ struct __thermal_bind_params *tbp;
+ struct __thermal_cooling_bind_param *tcbp;
+ int i, j;
if (!data || IS_ERR(data))
return -ENODEV;
/* find where to unbind */
for (i = 0; i < data->num_tbps; i++) {
- struct __thermal_bind_params *tbp = data->tbps + i;
+ tbp = data->tbps + i;
+
+ for (j = 0; j < tbp->count; j++) {
+ tcbp = tbp->tcbp + j;
- if (tbp->cooling_device == cdev->np) {
- int ret;
+ if (tcbp->cooling_device == cdev->np) {
+ int ret;
- ret = thermal_zone_unbind_cooling_device(thermal,
- tbp->trip_id, cdev);
- if (ret)
- return ret;
+ ret = thermal_zone_unbind_cooling_device(thermal,
+ tbp->trip_id, cdev);
+ if (ret)
+ return ret;
+ }
}
}
@@ -486,8 +509,8 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -655,8 +678,9 @@ static int thermal_of_populate_bind_params(struct device_node *np,
int ntrips)
{
struct of_phandle_args cooling_spec;
+ struct __thermal_cooling_bind_param *__tcbp;
struct device_node *trip;
- int ret, i;
+ int ret, i, count;
u32 prop;
/* Default weight. Usage is optional */
@@ -683,20 +707,44 @@ static int thermal_of_populate_bind_params(struct device_node *np,
goto end;
}
- ret = of_parse_phandle_with_args(np, "cooling-device", "#cooling-cells",
- 0, &cooling_spec);
- if (ret < 0) {
- pr_err("missing cooling_device property\n");
+ count = of_count_phandle_with_args(np, "cooling-device",
+ "#cooling-cells");
+ if (!count) {
+ pr_err("Add a cooling_device property with at least one device\n");
goto end;
}
- __tbp->cooling_device = cooling_spec.np;
- if (cooling_spec.args_count >= 2) { /* at least min and max */
- __tbp->min = cooling_spec.args[0];
- __tbp->max = cooling_spec.args[1];
- } else {
- pr_err("wrong reference to cooling device, missing limits\n");
+
+ __tcbp = kcalloc(count, sizeof(*__tcbp), GFP_KERNEL);
+ if (!__tcbp)
+ goto end;
+
+ for (i = 0; i < count; i++) {
+ ret = of_parse_phandle_with_args(np, "cooling-device",
+ "#cooling-cells", i, &cooling_spec);
+ if (ret < 0) {
+ pr_err("Invalid cooling-device entry\n");
+ goto free_tcbp;
+ }
+
+ __tcbp[i].cooling_device = cooling_spec.np;
+
+ if (cooling_spec.args_count >= 2) { /* at least min and max */
+ __tcbp[i].min = cooling_spec.args[0];
+ __tcbp[i].max = cooling_spec.args[1];
+ } else {
+ pr_err("wrong reference to cooling device, missing limits\n");
+ }
}
+ __tbp->tcbp = __tcbp;
+ __tbp->count = count;
+
+ goto end;
+
+free_tcbp:
+ for (i = i - 1; i >= 0; i--)
+ of_node_put(__tcbp[i].cooling_device);
+ kfree(__tcbp);
end:
of_node_put(trip);
@@ -903,8 +951,16 @@ finish:
return tz;
free_tbps:
- for (i = i - 1; i >= 0; i--)
- of_node_put(tz->tbps[i].cooling_device);
+ for (i = i - 1; i >= 0; i--) {
+ struct __thermal_bind_params *tbp = tz->tbps + i;
+ int j;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
+
kfree(tz->tbps);
free_trips:
for (i = 0; i < tz->ntrips; i++)
@@ -920,10 +976,18 @@ free_tz:
static inline void of_thermal_free_zone(struct __thermal_zone *tz)
{
- int i;
+ struct __thermal_bind_params *tbp;
+ int i, j;
+
+ for (i = 0; i < tz->num_tbps; i++) {
+ tbp = tz->tbps + i;
+
+ for (j = 0; j < tbp->count; j++)
+ of_node_put(tbp->tcbp[j].cooling_device);
+
+ kfree(tbp->tcbp);
+ }
- for (i = 0; i < tz->num_tbps; i++)
- of_node_put(tz->tbps[i].cooling_device);
kfree(tz->tbps);
for (i = 0; i < tz->ntrips; i++)
of_node_put(tz->trips[i].np);
@@ -963,8 +1027,8 @@ int __init of_parse_thermal_zones(void)
tz = thermal_of_build_thermal_zone(child);
if (IS_ERR(tz)) {
- pr_err("failed to build thermal zone %s: %ld\n",
- child->name,
+ pr_err("failed to build thermal zone %pOFn: %ld\n",
+ child,
PTR_ERR(tz));
continue;
}
@@ -998,7 +1062,7 @@ int __init of_parse_thermal_zones(void)
tz->passive_delay,
tz->polling_delay);
if (IS_ERR(zone)) {
- pr_err("Failed to build %s zone %ld\n", child->name,
+ pr_err("Failed to build %pOFn zone %ld\n", child,
PTR_ERR(zone));
kfree(tzp);
kfree(ops);
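
thermal_of_populate_bind_params() now parses a whole cooling-device phandle list, so a mid-loop parse failure must drop only the i references already taken before freeing the array, which is what the free_tcbp loop does. A self-contained sketch of that partial-unwind shape (malloc/free standing in for of_parse_phandle_with_args()/of_node_put()):

#include <stdio.h>
#include <stdlib.h>

static int parse_cooling_devices(int count, int fail_at)
{
	char **refs;
	int i;

	refs = calloc(count, sizeof(*refs));
	if (!refs)
		return -1;

	for (i = 0; i < count; i++) {
		refs[i] = (i == fail_at) ? NULL : malloc(8);
		if (!refs[i])
			goto free_refs;
	}

	/* success: in the kernel, refs would be stashed in __tbp->tcbp */
	for (i = 0; i < count; i++)
		free(refs[i]);
	free(refs);
	return 0;

free_refs:
	for (i = i - 1; i >= 0; i--)	/* same shape as the kernel loop */
		free(refs[i]);		/* of_node_put() analogue */
	free(refs);
	return -1;
}

int main(void)
{
	printf("%d\n", parse_cooling_devices(4, 2));	/* fails at i == 2 */
	printf("%d\n", parse_cooling_devices(4, -1));	/* succeeds */
	return 0;
}
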
diff --git a/drivers/thermal/qcom-spmi-temp-alarm.c b/drivers/thermal/qcom-spmi-temp-alarm.c
index ad4f3a8d6560..b2d5d5bf4a9b 100644
--- a/drivers/thermal/qcom-spmi-temp-alarm.c
+++ b/drivers/thermal/qcom-spmi-temp-alarm.c
@@ -23,6 +23,8 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
+#include "thermal_core.h"
+
#define QPNP_TM_REG_TYPE 0x04
#define QPNP_TM_REG_SUBTYPE 0x05
#define QPNP_TM_REG_STATUS 0x08
@@ -37,9 +39,11 @@
#define STATUS_GEN2_STATE_MASK GENMASK(6, 4)
#define STATUS_GEN2_STATE_SHIFT 4
-#define SHUTDOWN_CTRL1_OVERRIDE_MASK GENMASK(7, 6)
+#define SHUTDOWN_CTRL1_OVERRIDE_S2 BIT(6)
#define SHUTDOWN_CTRL1_THRESHOLD_MASK GENMASK(1, 0)
+#define SHUTDOWN_CTRL1_RATE_25HZ BIT(3)
+
#define ALARM_CTRL_FORCE_ENABLE BIT(7)
/*
@@ -56,12 +60,19 @@
#define TEMP_THRESH_STEP 5000 /* Threshold step: 5 C */
#define THRESH_MIN 0
+#define THRESH_MAX 3
+
+/* Stage 2 Threshold Min: 125 C */
+#define STAGE2_THRESHOLD_MIN 125000
+/* Stage 2 Threshold Max: 140 C */
+#define STAGE2_THRESHOLD_MAX 140000
/* Temperature in Milli Celsius reported during stage 0 if no ADC is present */
#define DEFAULT_TEMP 37000
struct qpnp_tm_chip {
struct regmap *map;
+ struct device *dev;
struct thermal_zone_device *tz_dev;
unsigned int subtype;
long temp;
@@ -69,6 +80,10 @@ struct qpnp_tm_chip {
unsigned int stage;
unsigned int prev_stage;
unsigned int base;
+ /* protects .thresh, .stage and chip registers */
+ struct mutex lock;
+ bool initialized;
+
struct iio_channel *adc;
};
@@ -125,6 +140,8 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
unsigned int stage, stage_new, stage_old;
int ret;
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
return ret;
@@ -163,8 +180,15 @@ static int qpnp_tm_get_temp(void *data, int *temp)
if (!temp)
return -EINVAL;
+ if (!chip->initialized) {
+ *temp = DEFAULT_TEMP;
+ return 0;
+ }
+
if (!chip->adc) {
+ mutex_lock(&chip->lock);
ret = qpnp_tm_update_temp_no_adc(chip);
+ mutex_unlock(&chip->lock);
if (ret < 0)
return ret;
} else {
@@ -180,8 +204,72 @@ static int qpnp_tm_get_temp(void *data, int *temp)
return 0;
}
+static int qpnp_tm_update_critical_trip_temp(struct qpnp_tm_chip *chip,
+ int temp)
+{
+ u8 reg;
+ bool disable_s2_shutdown = false;
+
+ WARN_ON(!mutex_is_locked(&chip->lock));
+
+ /*
+ * Default: S2 and S3 shutdown enabled, thresholds at
+ * 105C/125C/145C, monitoring at 25Hz
+ */
+ reg = SHUTDOWN_CTRL1_RATE_25HZ;
+
+ if (temp == THERMAL_TEMP_INVALID ||
+ temp < STAGE2_THRESHOLD_MIN) {
+ chip->thresh = THRESH_MIN;
+ goto skip;
+ }
+
+ if (temp <= STAGE2_THRESHOLD_MAX) {
+ chip->thresh = THRESH_MAX -
+ ((STAGE2_THRESHOLD_MAX - temp) /
+ TEMP_THRESH_STEP);
+ disable_s2_shutdown = true;
+ } else {
+ chip->thresh = THRESH_MAX;
+
+ if (chip->adc)
+ disable_s2_shutdown = true;
+ else
+ dev_warn(chip->dev,
+ "No ADC is configured and critical temperature is above the maximum stage 2 threshold of 140 C! Configuring stage 2 shutdown at 140 C.\n");
+ }
+
+skip:
+ reg |= chip->thresh;
+ if (disable_s2_shutdown)
+ reg |= SHUTDOWN_CTRL1_OVERRIDE_S2;
+
+ return qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+}
+
+static int qpnp_tm_set_trip_temp(void *data, int trip, int temp)
+{
+ struct qpnp_tm_chip *chip = data;
+ const struct thermal_trip *trip_points;
+ int ret;
+
+ trip_points = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trip_points)
+ return -EINVAL;
+
+ if (trip_points[trip].type != THERMAL_TRIP_CRITICAL)
+ return 0;
+
+ mutex_lock(&chip->lock);
+ ret = qpnp_tm_update_critical_trip_temp(chip, temp);
+ mutex_unlock(&chip->lock);
+
+ return ret;
+}
+
static const struct thermal_zone_of_device_ops qpnp_tm_sensor_ops = {
.get_temp = qpnp_tm_get_temp,
+ .set_trip_temp = qpnp_tm_set_trip_temp,
};
static irqreturn_t qpnp_tm_isr(int irq, void *data)
@@ -193,6 +281,29 @@ static irqreturn_t qpnp_tm_isr(int irq, void *data)
return IRQ_HANDLED;
}
+static int qpnp_tm_get_critical_trip_temp(struct qpnp_tm_chip *chip)
+{
+ int ntrips;
+ const struct thermal_trip *trips;
+ int i;
+
+ ntrips = of_thermal_get_ntrips(chip->tz_dev);
+ if (ntrips <= 0)
+ return THERMAL_TEMP_INVALID;
+
+ trips = of_thermal_get_trip_points(chip->tz_dev);
+ if (!trips)
+ return THERMAL_TEMP_INVALID;
+
+ for (i = 0; i < ntrips; i++) {
+ if (of_thermal_is_trip_valid(chip->tz_dev, i) &&
+ trips[i].type == THERMAL_TRIP_CRITICAL)
+ return trips[i].temperature;
+ }
+
+ return THERMAL_TEMP_INVALID;
+}
+
/*
* This function initializes the internal temp value based on only the
* current thermal stage and threshold. Setup threshold control and
@@ -203,17 +314,20 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
unsigned int stage;
int ret;
u8 reg = 0;
+ int crit_temp;
+
+ mutex_lock(&chip->lock);
ret = qpnp_tm_read(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, &reg);
if (ret < 0)
- return ret;
+ goto out;
chip->thresh = reg & SHUTDOWN_CTRL1_THRESHOLD_MASK;
chip->temp = DEFAULT_TEMP;
ret = qpnp_tm_get_temp_stage(chip);
if (ret < 0)
- return ret;
+ goto out;
chip->stage = ret;
stage = chip->subtype == QPNP_TM_SUBTYPE_GEN1
@@ -224,21 +338,19 @@ static int qpnp_tm_init(struct qpnp_tm_chip *chip)
(stage - 1) * TEMP_STAGE_STEP +
TEMP_THRESH_MIN;
- /*
- * Set threshold and disable software override of stage 2 and 3
- * shutdowns.
- */
- chip->thresh = THRESH_MIN;
- reg &= ~(SHUTDOWN_CTRL1_OVERRIDE_MASK | SHUTDOWN_CTRL1_THRESHOLD_MASK);
- reg |= chip->thresh & SHUTDOWN_CTRL1_THRESHOLD_MASK;
- ret = qpnp_tm_write(chip, QPNP_TM_REG_SHUTDOWN_CTRL1, reg);
+ crit_temp = qpnp_tm_get_critical_trip_temp(chip);
+ ret = qpnp_tm_update_critical_trip_temp(chip, crit_temp);
if (ret < 0)
- return ret;
+ goto out;
/* Enable the thermal alarm PMIC module in always-on mode. */
reg = ALARM_CTRL_FORCE_ENABLE;
ret = qpnp_tm_write(chip, QPNP_TM_REG_ALARM_CTRL, reg);
+ chip->initialized = true;
+
+out:
+ mutex_unlock(&chip->lock);
return ret;
}
@@ -257,6 +369,9 @@ static int qpnp_tm_probe(struct platform_device *pdev)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, chip);
+ chip->dev = &pdev->dev;
+
+ mutex_init(&chip->lock);
chip->map = dev_get_regmap(pdev->dev.parent, NULL);
if (!chip->map)
@@ -302,6 +417,18 @@ static int qpnp_tm_probe(struct platform_device *pdev)
chip->subtype = subtype;
+ /*
+ * Register the sensor before initializing the hardware to be able to
+ * read the trip points. get_temp() returns the default temperature
+ * before the hardware initialization is completed.
+ */
+ chip->tz_dev = devm_thermal_zone_of_sensor_register(
+ &pdev->dev, 0, chip, &qpnp_tm_sensor_ops);
+ if (IS_ERR(chip->tz_dev)) {
+ dev_err(&pdev->dev, "failed to register sensor\n");
+ return PTR_ERR(chip->tz_dev);
+ }
+
ret = qpnp_tm_init(chip);
if (ret < 0) {
dev_err(&pdev->dev, "init failed\n");
@@ -313,12 +440,7 @@ static int qpnp_tm_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- chip->tz_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, chip,
- &qpnp_tm_sensor_ops);
- if (IS_ERR(chip->tz_dev)) {
- dev_err(&pdev->dev, "failed to register sensor\n");
- return PTR_ERR(chip->tz_dev);
- }
+ thermal_zone_device_update(chip->tz_dev, THERMAL_EVENT_UNSPECIFIED);
return 0;
}
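
The stage-2 threshold math above maps a critical trip in the 125..140 C band onto the four hardware thresholds in 5 C steps. A worked, runnable version of just that arithmetic, with the driver's constants copied in (temperatures in millidegrees Celsius):

#include <stdio.h>

#define TEMP_THRESH_STEP	5000
#define THRESH_MIN		0
#define THRESH_MAX		3
#define STAGE2_THRESHOLD_MIN	125000
#define STAGE2_THRESHOLD_MAX	140000

static int pick_thresh(int temp)
{
	if (temp < STAGE2_THRESHOLD_MIN)
		return THRESH_MIN;
	if (temp <= STAGE2_THRESHOLD_MAX)
		return THRESH_MAX - (STAGE2_THRESHOLD_MAX - temp) / TEMP_THRESH_STEP;
	return THRESH_MAX;
}

int main(void)
{
	/* 125..140 C map onto thresholds 0..3 in 5 C steps; e.g. a
	 * 130 C critical trip selects threshold 1 (stage 2 at 130 C). */
	for (int t = 120000; t <= 145000; t += 5000)
		printf("crit=%d mC -> thresh=%d\n", t, pick_thresh(t));
	return 0;
}
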
diff --git a/drivers/thermal/qcom/tsens-8916.c b/drivers/thermal/qcom/tsens-8916.c
index fdf561b8b81d..c6dd620ac029 100644
--- a/drivers/thermal/qcom/tsens-8916.c
+++ b/drivers/thermal/qcom/tsens-8916.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -109,5 +100,6 @@ static const struct tsens_ops ops_8916 = {
const struct tsens_data data_8916 = {
.num_sensors = 5,
.ops = &ops_8916,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
.hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
};
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index 0451277d3a8f..0f0adb302a7b 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -69,7 +60,7 @@ static int suspend_8960(struct tsens_device *tmdev)
{
int ret;
unsigned int mask;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_read(map, THRESHOLD_ADDR, &tmdev->ctx.threshold);
if (ret)
@@ -94,7 +85,7 @@ static int suspend_8960(struct tsens_device *tmdev)
static int resume_8960(struct tsens_device *tmdev)
{
int ret;
- struct regmap *map = tmdev->map;
+ struct regmap *map = tmdev->tm_map;
ret = regmap_update_bits(map, CNTL_ADDR, SW_RST, SW_RST);
if (ret)
@@ -126,12 +117,12 @@ static int enable_8960(struct tsens_device *tmdev, int id)
int ret;
u32 reg, mask;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg);
if (ret)
return ret;
mask = BIT(id + SENSOR0_SHIFT);
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg | SW_RST);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg | SW_RST);
if (ret)
return ret;
@@ -140,7 +131,7 @@ static int enable_8960(struct tsens_device *tmdev, int id)
else
reg |= mask | SLP_CLK_ENA_8660 | EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg);
if (ret)
return ret;
@@ -157,7 +148,7 @@ static void disable_8960(struct tsens_device *tmdev)
mask <<= SENSOR0_SHIFT;
mask |= EN;
- ret = regmap_read(tmdev->map, CNTL_ADDR, &reg_cntl);
+ ret = regmap_read(tmdev->tm_map, CNTL_ADDR, &reg_cntl);
if (ret)
return;
@@ -168,7 +159,7 @@ static void disable_8960(struct tsens_device *tmdev)
else
reg_cntl &= ~SLP_CLK_ENA_8660;
- regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
}
static int init_8960(struct tsens_device *tmdev)
@@ -176,8 +167,8 @@ static int init_8960(struct tsens_device *tmdev)
int ret, i;
u32 reg_cntl;
- tmdev->map = dev_get_regmap(tmdev->dev, NULL);
- if (!tmdev->map)
+ tmdev->tm_map = dev_get_regmap(tmdev->dev, NULL);
+ if (!tmdev->tm_map)
return -ENODEV;
/*
@@ -193,14 +184,14 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl = SW_RST;
- ret = regmap_update_bits(tmdev->map, CNTL_ADDR, SW_RST, reg_cntl);
+ ret = regmap_update_bits(tmdev->tm_map, CNTL_ADDR, SW_RST, reg_cntl);
if (ret)
return ret;
if (tmdev->num_sensors > 1) {
reg_cntl |= SLP_CLK_ENA | (MEASURE_PERIOD << 18);
reg_cntl &= ~SW_RST;
- ret = regmap_update_bits(tmdev->map, CONFIG_ADDR,
+ ret = regmap_update_bits(tmdev->tm_map, CONFIG_ADDR,
CONFIG_MASK, CONFIG);
} else {
reg_cntl |= SLP_CLK_ENA_8660 | (MEASURE_PERIOD << 16);
@@ -209,12 +200,12 @@ static int init_8960(struct tsens_device *tmdev)
}
reg_cntl |= GENMASK(tmdev->num_sensors - 1, 0) << SENSOR0_SHIFT;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
reg_cntl |= EN;
- ret = regmap_write(tmdev->map, CNTL_ADDR, reg_cntl);
+ ret = regmap_write(tmdev->tm_map, CNTL_ADDR, reg_cntl);
if (ret)
return ret;
@@ -261,12 +252,12 @@ static int get_temp_8960(struct tsens_device *tmdev, int id, int *temp)
timeout = jiffies + usecs_to_jiffies(TIMEOUT_US);
do {
- ret = regmap_read(tmdev->map, INT_STATUS_ADDR, &trdy);
+ ret = regmap_read(tmdev->tm_map, INT_STATUS_ADDR, &trdy);
if (ret)
return ret;
if (!(trdy & TRDY_MASK))
continue;
- ret = regmap_read(tmdev->map, s->status, &code);
+ ret = regmap_read(tmdev->tm_map, s->status, &code);
if (ret)
return ret;
*temp = code_to_mdegC(code, s);
diff --git a/drivers/thermal/qcom/tsens-8974.c b/drivers/thermal/qcom/tsens-8974.c
index 9baf77e8cbe3..3d3fda3d731b 100644
--- a/drivers/thermal/qcom/tsens-8974.c
+++ b/drivers/thermal/qcom/tsens-8974.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/platform_device.h>
@@ -241,4 +232,5 @@ static const struct tsens_ops ops_8974 = {
const struct tsens_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index 6207d8d92351..3be4be2e0465 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -21,7 +12,11 @@
#include <linux/regmap.h>
#include "tsens.h"
-#define S0_ST_ADDR 0x1030
+/* SROT */
+#define TSENS_EN BIT(0)
+
+/* TM */
+#define STATUS_OFFSET 0x30
#define SN_ADDR_OFFSET 0x4
#define SN_ST_TEMP_MASK 0x3ff
#define CAL_DEGC_PT1 30
@@ -107,8 +102,8 @@ int get_temp_common(struct tsens_device *tmdev, int id, int *temp)
unsigned int status_reg;
int last_temp = 0, ret;
- status_reg = S0_ST_ADDR + s->hw_id * SN_ADDR_OFFSET;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * SN_ADDR_OFFSET;
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & SN_ST_TEMP_MASK;
@@ -126,29 +121,52 @@ static const struct regmap_config tsens_config = {
int __init init_common(struct tsens_device *tmdev)
{
- void __iomem *base;
+ void __iomem *tm_base, *srot_base;
struct resource *res;
+ u32 code;
+ int ret;
struct platform_device *op = of_find_device_by_node(tmdev->dev->of_node);
+ u16 ctrl_offset = tmdev->reg_offsets[SROT_CTRL_OFFSET];
if (!op)
return -EINVAL;
- /* The driver only uses the TM register address space for now */
if (op->num_resources > 1) {
+ /* DT with separate SROT and TM address space */
tmdev->tm_offset = 0;
+ res = platform_get_resource(op, IORESOURCE_MEM, 1);
+ srot_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(srot_base))
+ return PTR_ERR(srot_base);
+
+ tmdev->srot_map = devm_regmap_init_mmio(tmdev->dev,
+ srot_base, &tsens_config);
+ if (IS_ERR(tmdev->srot_map))
+ return PTR_ERR(tmdev->srot_map);
+
} else {
/* old DTs where SROT and TM were in a contiguous 2K block */
tmdev->tm_offset = 0x1000;
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&op->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- tmdev->map = devm_regmap_init_mmio(tmdev->dev, base, &tsens_config);
- if (IS_ERR(tmdev->map))
- return PTR_ERR(tmdev->map);
+ tm_base = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(tm_base))
+ return PTR_ERR(tm_base);
+
+ tmdev->tm_map = devm_regmap_init_mmio(tmdev->dev, tm_base, &tsens_config);
+ if (IS_ERR(tmdev->tm_map))
+ return PTR_ERR(tmdev->tm_map);
+
+ if (tmdev->srot_map) {
+ ret = regmap_read(tmdev->srot_map, ctrl_offset, &code);
+ if (ret)
+ return ret;
+ if (!(code & TSENS_EN)) {
+ dev_err(tmdev->dev, "tsens device is not enabled\n");
+ return -ENODEV;
+ }
+ }
return 0;
}
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index 44da02f594ac..381a212872bf 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -21,7 +21,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
int ret;
status_reg = tmdev->tm_offset + STATUS_OFFSET + s->hw_id * 4;
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
last_temp = code & LAST_TEMP_MASK;
@@ -29,7 +29,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
goto done;
/* Try a second time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -40,7 +40,7 @@ static int get_temp_tsens_v2(struct tsens_device *tmdev, int id, int *temp)
}
/* Try a third/last time */
- ret = regmap_read(tmdev->map, status_reg, &code);
+ ret = regmap_read(tmdev->tm_map, status_reg, &code);
if (ret)
return ret;
if (code & STATUS_VALID_BIT) {
@@ -68,10 +68,12 @@ static const struct tsens_ops ops_generic_v2 = {
const struct tsens_data data_tsens_v2 = {
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
/* Kept around for backward compatibility with old msm8996.dtsi */
const struct tsens_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
+ .reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index a2c9bfae3d86..f1ec9bbe4717 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -1,15 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#include <linux/err.h>
@@ -89,11 +80,6 @@ static int tsens_register(struct tsens_device *tmdev)
{
int i;
struct thermal_zone_device *tzd;
- u32 *hw_id, n = tmdev->num_sensors;
-
- hw_id = devm_kcalloc(tmdev->dev, n, sizeof(u32), GFP_KERNEL);
- if (!hw_id)
- return -ENOMEM;
for (i = 0; i < tmdev->num_sensors; i++) {
tmdev->sensor[i].tmdev = tmdev;
@@ -158,6 +144,9 @@ static int tsens_probe(struct platform_device *pdev)
else
tmdev->sensor[i].hw_id = i;
}
+ for (i = 0; i < REG_ARRAY_SIZE; i++) {
+ tmdev->reg_offsets[i] = data->reg_offsets[i];
+ }
if (!tmdev->ops || !tmdev->ops->init || !tmdev->ops->get_temp)
return -EINVAL;
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index 14331eb45a86..7b7feee5dc46 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -1,15 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
+
#ifndef __QCOM_TSENS_H__
#define __QCOM_TSENS_H__
@@ -55,15 +48,23 @@ struct tsens_ops {
int (*get_trend)(struct tsens_device *, int, enum thermal_trend *);
};
+enum reg_list {
+ SROT_CTRL_OFFSET,
+
+ REG_ARRAY_SIZE,
+};
+
/**
* struct tsens_data - tsens instance specific data
* @num_sensors: Max number of sensors supported by platform
* @ops: operations the tsens instance supports
* @hw_ids: Subset of sensors ids supported by platform, if not the first n
+ * @reg_offsets: Register offsets for commonly used registers
*/
struct tsens_data {
const u32 num_sensors;
const struct tsens_ops *ops;
+ const u16 reg_offsets[REG_ARRAY_SIZE];
unsigned int *hw_ids;
};
@@ -76,8 +77,10 @@ struct tsens_context {
struct tsens_device {
struct device *dev;
u32 num_sensors;
- struct regmap *map;
+ struct regmap *tm_map;
+ struct regmap *srot_map;
u32 tm_offset;
+ u16 reg_offsets[REG_ARRAY_SIZE];
struct tsens_context ctx;
const struct tsens_ops *ops;
struct tsens_sensor sensor[0];
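
The tsens rework keys register locations by an enum into a per-SoC reg_offsets table, so common code such as init_common() can read SROT_CTRL without hard-coding addresses. A small sketch of that enum-indexed offset-table pattern (offset values taken from the hunks above, the rest illustrative):

#include <stdint.h>
#include <stdio.h>

enum reg_list {
	SROT_CTRL_OFFSET,
	REG_ARRAY_SIZE,
};

struct tsens_data {
	const char *name;
	uint16_t reg_offsets[REG_ARRAY_SIZE];
};

static const struct tsens_data data_8916 = {
	.name = "8916",
	.reg_offsets = { [SROT_CTRL_OFFSET] = 0x0 },
};

static const struct tsens_data data_v2 = {
	.name = "v2",
	.reg_offsets = { [SROT_CTRL_OFFSET] = 0x4 },
};

static void show_ctrl(const struct tsens_data *d)
{
	/* Common code indexes by name, not by hard-coded address. */
	printf("%s: SROT ctrl at +0x%x\n", d->name,
	       d->reg_offsets[SROT_CTRL_OFFSET]);
}

int main(void)
{
	show_ctrl(&data_8916);
	show_ctrl(&data_v2);
	return 0;
}
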
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 450ed66edf58..18c711b19514 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -119,8 +119,8 @@ static int qoriq_tmu_get_sensor_id(void)
if (sensor_specs.args_count >= 1) {
id = sensor_specs.args[0];
WARN(sensor_specs.args_count > 1,
- "%s: too many cells in sensor specifier %d\n",
- sensor_specs.np->name, sensor_specs.args_count);
+ "%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
} else {
id = 0;
}
@@ -294,6 +294,7 @@ static SIMPLE_DEV_PM_OPS(qoriq_tmu_pm_ops,
static const struct of_device_id qoriq_tmu_match[] = {
{ .compatible = "fsl,qoriq-tmu", },
+ { .compatible = "fsl,imx8mq-tmu", },
{},
};
MODULE_DEVICE_TABLE(of, qoriq_tmu_match);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 7aed5337bdd3..75786cc8e2f9 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -318,9 +318,11 @@ static void rcar_gen3_thermal_init(struct rcar_gen3_thermal_tsc *tsc)
}
static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
+ { .compatible = "renesas,r8a774a1-thermal", },
{ .compatible = "renesas,r8a7795-thermal", },
{ .compatible = "renesas,r8a7796-thermal", },
{ .compatible = "renesas,r8a77965-thermal", },
+ { .compatible = "renesas,r8a77980-thermal", },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 78f932822d38..8014a207d8d9 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -113,6 +113,10 @@ static const struct of_device_id rcar_thermal_dt_ids[] = {
.data = &rcar_gen2_thermal,
},
{
+ .compatible = "renesas,thermal-r8a77970",
+ .data = &rcar_gen3_thermal,
+ },
+ {
.compatible = "renesas,thermal-r8a77995",
.data = &rcar_gen3_thermal,
},
@@ -434,8 +438,8 @@ static irqreturn_t rcar_thermal_irq(int irq, void *data)
rcar_thermal_for_each_priv(priv, common) {
if (rcar_thermal_had_changed(priv, status)) {
rcar_thermal_irq_disable(priv);
- schedule_delayed_work(&priv->work,
- msecs_to_jiffies(300));
+ queue_delayed_work(system_freezable_wq, &priv->work,
+ msecs_to_jiffies(300));
}
}
@@ -453,6 +457,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
rcar_thermal_for_each_priv(priv, common) {
rcar_thermal_irq_disable(priv);
+ cancel_delayed_work_sync(&priv->work);
if (priv->chip->use_of_thermal)
thermal_remove_hwmon_sysfs(priv->zone);
else
@@ -492,7 +497,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
pm_runtime_get_sync(dev);
for (i = 0; i < chip->nirqs; i++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, i);
if (!irq)
continue;
if (!common->base) {
diff --git a/drivers/thermal/st/Kconfig b/drivers/thermal/st/Kconfig
index 490fdbe22eea..b80f9a9e4f8f 100644
--- a/drivers/thermal/st/Kconfig
+++ b/drivers/thermal/st/Kconfig
@@ -1,3 +1,7 @@
+#
+# STMicroelectronics thermal drivers configuration
+#
+
config ST_THERMAL
tristate "Thermal sensors on STMicroelectronics STi series of SoCs"
help
@@ -10,3 +14,13 @@ config ST_THERMAL_SYSCFG
config ST_THERMAL_MEMMAP
select ST_THERMAL
tristate "STi series memory mapped access based thermal sensors"
+
+config STM32_THERMAL
+ tristate "Thermal framework support on STMicroelectronics STM32 series of SoCs"
+ depends on MACH_STM32MP157
+ default y
+ help
+ Support for the thermal framework on STMicroelectronics STM32 series of
+ SoCs. This thermal driver provides access to the generic thermal framework
+ functionality and to the SoC sensor functionality. This configuration is
+ fully dependent on MACH_STM32MP157.
diff --git a/drivers/thermal/st/Makefile b/drivers/thermal/st/Makefile
index b38878977bd8..b2b9e9b96296 100644
--- a/drivers/thermal/st/Makefile
+++ b/drivers/thermal/st/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_ST_THERMAL) := st_thermal.o
obj-$(CONFIG_ST_THERMAL_SYSCFG) += st_thermal_syscfg.o
obj-$(CONFIG_ST_THERMAL_MEMMAP) += st_thermal_memmap.o
+obj-$(CONFIG_STM32_THERMAL) := stm_thermal.o
\ No newline at end of file
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
new file mode 100644
index 000000000000..47623da0f91b
--- /dev/null
+++ b/drivers/thermal/st/stm_thermal.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics 2018 - All Rights Reserved
+ * Author: David Hernandez Sanchez <david.hernandezsanchez@st.com> for
+ * STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "../thermal_core.h"
+#include "../thermal_hwmon.h"
+
+/* DTS register offsets */
+#define DTS_CFGR1_OFFSET 0x0
+#define DTS_T0VALR1_OFFSET 0x8
+#define DTS_RAMPVALR_OFFSET 0x10
+#define DTS_ITR1_OFFSET 0x14
+#define DTS_DR_OFFSET 0x1C
+#define DTS_SR_OFFSET 0x20
+#define DTS_ITENR_OFFSET 0x24
+#define DTS_CIFR_OFFSET 0x28
+
+/* DTS_CFGR1 register mask definitions */
+#define HSREF_CLK_DIV_MASK GENMASK(30, 24)
+#define TS1_SMP_TIME_MASK GENMASK(19, 16)
+#define TS1_INTRIG_SEL_MASK GENMASK(11, 8)
+
+/* DTS_T0VALR1 register mask definitions */
+#define TS1_T0_MASK GENMASK(17, 16)
+#define TS1_FMT0_MASK GENMASK(15, 0)
+
+/* DTS_RAMPVALR register mask definitions */
+#define TS1_RAMP_COEFF_MASK GENMASK(15, 0)
+
+/* DTS_ITR1 register mask definitions */
+#define TS1_HITTHD_MASK GENMASK(31, 16)
+#define TS1_LITTHD_MASK GENMASK(15, 0)
+
+/* DTS_DR register mask definitions */
+#define TS1_MFREQ_MASK GENMASK(15, 0)
+
+/* Least significant bit position definitions */
+#define TS1_T0_POS 16
+#define TS1_SMP_TIME_POS 16
+#define TS1_HITTHD_POS 16
+#define HSREF_CLK_DIV_POS 24
+
+/* DTS_CFGR1 bit definitions */
+#define TS1_EN BIT(0)
+#define TS1_START BIT(4)
+#define REFCLK_SEL BIT(20)
+#define REFCLK_LSE REFCLK_SEL
+#define Q_MEAS_OPT BIT(21)
+#define CALIBRATION_CONTROL Q_MEAS_OPT
+
+/* DTS_SR bit definitions */
+#define TS_RDY BIT(15)
+/* Bit definitions below are common for DTS_SR, DTS_ITENR and DTS_CIFR */
+#define HIGH_THRESHOLD BIT(2)
+#define LOW_THRESHOLD BIT(1)
+
+/* Constants */
+#define ADJUST 100
+#define ONE_MHZ 1000000
+#define POLL_TIMEOUT 5000
+#define STARTUP_TIME 40
+#define TS1_T0_VAL0 30
+#define TS1_T0_VAL1 130
+#define NO_HW_TRIG 0
+
+/* The thermal framework expects millidegrees Celsius */
+#define mcelsius(temp) ((temp) * 1000)
+
+/* The sensor expects degrees Celsius */
+#define celsius(temp) ((temp) / 1000)
+
+struct stm_thermal_sensor {
+ struct device *dev;
+ struct thermal_zone_device *th_dev;
+ enum thermal_device_mode mode;
+ struct clk *clk;
+ int high_temp;
+ int low_temp;
+ int temp_critical;
+ int temp_passive;
+ unsigned int low_temp_enabled;
+ int num_trips;
+ int irq;
+ unsigned int irq_enabled;
+ void __iomem *base;
+ int t0, fmt0, ramp_coeff;
+};
+
+static irqreturn_t stm_thermal_alarm_irq(int irq, void *sdata)
+{
+ struct stm_thermal_sensor *sensor = sdata;
+
+ disable_irq_nosync(irq);
+ sensor->irq_enabled = false;
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t stm_thermal_alarm_irq_thread(int irq, void *sdata)
+{
+ u32 value;
+ struct stm_thermal_sensor *sensor = sdata;
+
+ /* read IT reason in SR and clear flags */
+ value = readl_relaxed(sensor->base + DTS_SR_OFFSET);
+
+ if ((value & LOW_THRESHOLD) == LOW_THRESHOLD)
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ if ((value & HIGH_THRESHOLD) == HIGH_THRESHOLD)
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ thermal_zone_device_update(sensor->th_dev, THERMAL_EVENT_UNSPECIFIED);
+
+ return IRQ_HANDLED;
+}
+
+static int stm_sensor_power_on(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ u32 value;
+
+ /* Enable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value |= TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /*
+ * The DTS block can be enabled by setting TSx_EN bit in
+ * DTS_CFGRx register. It requires a startup time of
+ * 40μs. Use 5 ms as an arbitrary timeout.
+ */
+ ret = readl_poll_timeout(sensor->base + DTS_SR_OFFSET,
+ value, (value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Start continuous measuring */
+ value = readl_relaxed(sensor->base +
+ DTS_CFGR1_OFFSET);
+ value |= TS1_START;
+ writel_relaxed(value, sensor->base +
+ DTS_CFGR1_OFFSET);
+
+ return 0;
+}
+
+static int stm_sensor_power_off(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Stop measuring */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_START;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure stop is taken into account */
+ usleep_range(STARTUP_TIME, POLL_TIMEOUT);
+
+ /* Disable sensor */
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+ value &= ~TS1_EN;
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Ensure disable is taken into account */
+ return readl_poll_timeout(sensor->base + DTS_SR_OFFSET, value,
+ !(value & TS_RDY),
+ STARTUP_TIME, POLL_TIMEOUT);
+}
+
+static int stm_thermal_calibration(struct stm_thermal_sensor *sensor)
+{
+ u32 value, clk_freq;
+ u32 prescaler;
+
+ /* Figure out prescaler value for PCLK during calibration */
+ clk_freq = clk_get_rate(sensor->clk);
+ if (!clk_freq)
+ return -EINVAL;
+
+ prescaler = 0;
+ clk_freq /= ONE_MHZ;
+ if (clk_freq) {
+ while (prescaler <= clk_freq)
+ prescaler++;
+ }
+
+ value = readl_relaxed(sensor->base + DTS_CFGR1_OFFSET);
+
+ /* Clear prescaler */
+ value &= ~HSREF_CLK_DIV_MASK;
+
+ /* Set prescaler. pclk_freq/prescaler < 1MHz */
+ value |= (prescaler << HSREF_CLK_DIV_POS);
+
+ /* Select PCLK as reference clock */
+ value &= ~REFCLK_SEL;
+
+ /* Set maximal sampling time for better precision */
+ value |= TS1_SMP_TIME_MASK;
+
+ /* Measure with calibration */
+ value &= ~CALIBRATION_CONTROL;
+
+ /* select trigger */
+ value &= ~TS1_INTRIG_SEL_MASK;
+ value |= NO_HW_TRIG;
+
+ writel_relaxed(value, sensor->base + DTS_CFGR1_OFFSET);
+
+ return 0;
+}
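The prescaler loop above selects the smallest divider that brings the reference clock below 1 MHz: once clk_freq has been reduced to whole MHz, the loop leaves prescaler = clk_freq + 1. As a worked example with illustrative numbers, a 24 MHz pclk gives clk_freq = 24, the loop exits with prescaler = 25, and 24 MHz / 25 = 960 kHz, satisfying pclk/prescaler < 1 MHz.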
+
+/* Fill in DTS structure with factory sensor values */
+static int stm_thermal_read_factory_settings(struct stm_thermal_sensor *sensor)
+{
+ /* Retrieve engineering calibration temperature */
+ sensor->t0 = readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET) &
+ TS1_T0_MASK;
+ if (!sensor->t0)
+ sensor->t0 = TS1_T0_VAL0;
+ else
+ sensor->t0 = TS1_T0_VAL1;
+
+ /* Retrieve fmt0 and convert it to Hz */
+ sensor->fmt0 = ADJUST * (readl_relaxed(sensor->base + DTS_T0VALR1_OFFSET)
+ & TS1_FMT0_MASK);
+
+ /* Retrieve ramp coefficient */
+ sensor->ramp_coeff = readl_relaxed(sensor->base + DTS_RAMPVALR_OFFSET) &
+ TS1_RAMP_COEFF_MASK;
+
+ if (!sensor->fmt0 || !sensor->ramp_coeff) {
+ dev_err(sensor->dev, "%s: wrong setting\n", __func__);
+ return -EINVAL;
+ }
+
+ dev_dbg(sensor->dev, "%s: T0 = %doC, FMT0 = %dHz, RAMP_COEFF = %dHz/oC",
+ __func__, sensor->t0, sensor->fmt0, sensor->ramp_coeff);
+
+ return 0;
+}
+
+static int stm_thermal_calculate_threshold(struct stm_thermal_sensor *sensor,
+ int temp, u32 *th)
+{
+ int freqM;
+ u32 sampling_time;
+
+ /* Retrieve the number of periods to sample */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the CLK_PTAT frequency for a given temperature */
+ freqM = ((temp - sensor->t0) * sensor->ramp_coeff)
+ + sensor->fmt0;
+
+ dev_dbg(sensor->dev, "%s: freqM for threshold = %d Hz",
+ __func__, freqM);
+
+ /* Figure out the threshold sample number */
+ *th = clk_get_rate(sensor->clk);
+ if (!*th)
+ return -EINVAL;
+
+ *th = *th / freqM;
+
+ *th *= sampling_time;
+
+ return 0;
+}
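The calculation above evaluates the sensor's linear model f(T) = fmt0 + ramp_coeff * (T - t0) and converts the resulting frequency into a sample count. A worked example with illustrative numbers (not taken from any datasheet): with t0 = 30, ramp_coeff = 200 Hz per degree, fmt0 = 10000 Hz and a 100 degree threshold, freqM = (100 - 30) * 200 + 10000 = 24000 Hz; with pclk at 24 MHz and a sampling time of 15 periods, th = (24000000 / 24000) * 15 = 15000 samples.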
+
+static int stm_thermal_set_threshold(struct stm_thermal_sensor *sensor)
+{
+ u32 value, th;
+ int ret;
+
+ value = readl_relaxed(sensor->base + DTS_ITR1_OFFSET);
+
+ /* Erase threshold content */
+ value &= ~(TS1_LITTHD_MASK | TS1_HITTHD_MASK);
+
+ /* Retrieve the sample threshold number th for a given temperature */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->high_temp, &th);
+ if (ret)
+ return ret;
+
+ value |= th & TS1_LITTHD_MASK;
+
+ if (sensor->low_temp_enabled) {
+ /* Retrieve the sample threshold */
+ ret = stm_thermal_calculate_threshold(sensor, sensor->low_temp,
+ &th);
+ if (ret)
+ return ret;
+
+ value |= (TS1_HITTHD_MASK & (th << TS1_HITTHD_POS));
+ }
+
+ /* Write both thresholds to the interrupt threshold register */
+ writel_relaxed(value, sensor->base + DTS_ITR1_OFFSET);
+
+ return 0;
+}
+
+/* Disable temperature interrupt */
+static int stm_disable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /* Disable IT generation for low and high thresholds */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ writel_relaxed(value & ~(LOW_THRESHOLD | HIGH_THRESHOLD),
+ sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT disabled on sensor side", __func__);
+
+ return 0;
+}
+
+/* Enable temperature interrupt */
+static int stm_enable_irq(struct stm_thermal_sensor *sensor)
+{
+ u32 value;
+
+ /*
+ * The sample count decreases as the temperature rises, so the high
+ * temperature alarm uses the low sample-count threshold and the low
+ * temperature alarm, when enabled, the high one
+ */
+
+ /* Make sure LOW_THRESHOLD IT is clear before enabling */
+ writel_relaxed(LOW_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for low threshold */
+ value = readl_relaxed(sensor->base + DTS_ITENR_OFFSET);
+ value |= LOW_THRESHOLD;
+
+ /* Enable the low temperature threshold if needed */
+ if (sensor->low_temp_enabled) {
+ /* Make sure HIGH_THRESHOLD IT is clear before enabling */
+ writel_relaxed(HIGH_THRESHOLD, sensor->base + DTS_CIFR_OFFSET);
+
+ /* Enable IT generation for high threshold */
+ value |= HIGH_THRESHOLD;
+ }
+
+ /* Enable thresholds */
+ writel_relaxed(value, sensor->base + DTS_ITENR_OFFSET);
+
+ dev_dbg(sensor->dev, "%s: IT enabled on sensor side", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_update_threshold(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_disable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ return ret;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+
+/* Callback to get temperature from HW */
+static int stm_thermal_get_temp(void *data, int *temp)
+{
+ struct stm_thermal_sensor *sensor = data;
+ u32 sampling_time;
+ int freqM, ret;
+
+ if (sensor->mode != THERMAL_DEVICE_ENABLED)
+ return -EAGAIN;
+
+ /* Retrieve the number of samples */
+ ret = readl_poll_timeout(sensor->base + DTS_DR_OFFSET, freqM,
+ (freqM & TS1_MFREQ_MASK), STARTUP_TIME,
+ POLL_TIMEOUT);
+
+ if (ret)
+ return ret;
+
+ if (!freqM)
+ return -ENODATA;
+
+ /* Retrieve the number of periods sampled */
+ sampling_time = (readl_relaxed(sensor->base + DTS_CFGR1_OFFSET) &
+ TS1_SMP_TIME_MASK) >> TS1_SMP_TIME_POS;
+
+ /* Figure out the number of samples per period */
+ freqM /= sampling_time;
+
+ /* Figure out the CLK_PTAT frequency */
+ freqM = clk_get_rate(sensor->clk) / freqM;
+ if (!freqM)
+ return -EINVAL;
+
+ dev_dbg(sensor->dev, "%s: freqM=%d\n", __func__, freqM);
+
+ /* Figure out the temperature in millidegrees Celsius */
+ *temp = mcelsius(sensor->t0 + ((freqM - sensor->fmt0) /
+ sensor->ramp_coeff));
+
+ dev_dbg(sensor->dev, "%s: temperature = %d millicelsius",
+ __func__, *temp);
+
+ /* Update thresholds */
+ if (sensor->num_trips > 1) {
+ /* Update alarm threshold value to next higher trip point */
+ if (sensor->high_temp == sensor->temp_passive &&
+ celsius(*temp) >= sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_critical;
+ sensor->low_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = true;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+ if (sensor->high_temp == sensor->temp_critical &&
+ celsius(*temp) < sensor->temp_passive) {
+ sensor->high_temp = sensor->temp_passive;
+ sensor->low_temp_enabled = false;
+ ret = stm_thermal_update_threshold(sensor);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Re-enable alarm IRQ if temperature below critical
+ * temperature
+ */
+ if (!sensor->irq_enabled &&
+ (celsius(*temp) < sensor->temp_critical)) {
+ sensor->irq_enabled = true;
+ enable_irq(sensor->irq);
+ }
+ }
+
+ return 0;
+}
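Stripped of the register plumbing, the conversion above is just the inverse of the threshold model. A minimal sketch (the helper name is illustrative):

static int model_freq_to_mcelsius(int freq, int t0, int fmt0, int ramp_coeff)
{
	/* f = fmt0 + ramp_coeff * (T - t0)  =>  T = t0 + (f - fmt0) / ramp_coeff */
	return 1000 * (t0 + (freq - fmt0) / ramp_coeff);
}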
+
+/* Register the DTS IRQ with the interrupt controller */
+static int stm_register_irq(struct stm_thermal_sensor *sensor)
+{
+ struct device *dev = sensor->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ int ret;
+
+ sensor->irq = platform_get_irq(pdev, 0);
+ if (sensor->irq < 0) {
+ dev_err(dev, "%s: Unable to find IRQ\n", __func__);
+ return sensor->irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, sensor->irq,
+ stm_thermal_alarm_irq,
+ stm_thermal_alarm_irq_thread,
+ IRQF_ONESHOT,
+ dev->driver->name, sensor);
+ if (ret) {
+ dev_err(dev, "%s: Failed to register IRQ %d\n", __func__,
+ sensor->irq);
+ return ret;
+ }
+
+ sensor->irq_enabled = true;
+
+ dev_dbg(dev, "%s: thermal IRQ registered", __func__);
+
+ return 0;
+}
+
+static int stm_thermal_sensor_off(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+
+ ret = stm_sensor_power_off(sensor);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(sensor->clk);
+
+ return 0;
+}
+
+static int stm_thermal_prepare(struct stm_thermal_sensor *sensor)
+{
+ int ret;
+ struct device *dev = sensor->dev;
+
+ ret = clk_prepare_enable(sensor->clk);
+ if (ret)
+ return ret;
+
+ ret = stm_thermal_calibration(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ /* Set threshold(s) for IRQ */
+ ret = stm_thermal_set_threshold(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_enable_irq(sensor);
+ if (ret)
+ goto thermal_unprepare;
+
+ ret = stm_sensor_power_on(sensor);
+ if (ret) {
+ dev_err(dev, "%s: failed to power on sensor\n", __func__);
+ goto irq_disable;
+ }
+
+ return 0;
+
+irq_disable:
+ stm_disable_irq(sensor);
+
+thermal_unprepare:
+ clk_disable_unprepare(sensor->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm_thermal_suspend(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_sensor_off(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_DISABLED;
+
+ return 0;
+}
+
+static int stm_thermal_resume(struct device *dev)
+{
+ int ret;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ ret = stm_thermal_prepare(sensor);
+ if (ret)
+ return ret;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+
+static const struct thermal_zone_of_device_ops stm_tz_ops = {
+ .get_temp = stm_thermal_get_temp,
+};
+
+static const struct of_device_id stm_thermal_of_match[] = {
+ { .compatible = "st,stm32-thermal"},
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, stm_thermal_of_match);
+
+static int stm_thermal_probe(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor;
+ struct resource *res;
+ const struct thermal_trip *trip;
+ void __iomem *base;
+ int ret, i;
+
+ if (!pdev->dev.of_node) {
+ dev_err(&pdev->dev, "%s: device tree node not found\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+ if (!sensor)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, sensor);
+
+ sensor->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* Populate sensor */
+ sensor->base = base;
+
+ ret = stm_thermal_read_factory_settings(sensor);
+ if (ret)
+ return ret;
+
+ sensor->clk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(sensor->clk)) {
+ dev_err(&pdev->dev, "%s: failed to fetch PCLK clock\n",
+ __func__);
+ return PTR_ERR(sensor->clk);
+ }
+
+ /* Register the IRQ with the interrupt controller */
+ ret = stm_register_irq(sensor);
+ if (ret)
+ return ret;
+
+ sensor->th_dev = devm_thermal_zone_of_sensor_register(&pdev->dev, 0,
+ sensor,
+ &stm_tz_ops);
+
+ if (IS_ERR(sensor->th_dev)) {
+ dev_err(&pdev->dev, "%s: failed to register thermal zone sensor\n",
+ __func__);
+ ret = PTR_ERR(sensor->th_dev);
+ return ret;
+ }
+
+ if (!sensor->th_dev->ops->get_crit_temp) {
+ /* Critical point must be provided */
+ ret = -EINVAL;
+ goto err_tz;
+ }
+
+ ret = sensor->th_dev->ops->get_crit_temp(sensor->th_dev,
+ &sensor->temp_critical);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to read critical_temp: %d\n", ret);
+ goto err_tz;
+ }
+
+ sensor->temp_critical = celsius(sensor->temp_critical);
+
+ /* Set thresholds for IRQ */
+ sensor->high_temp = sensor->temp_critical;
+
+ trip = of_thermal_get_trip_points(sensor->th_dev);
+ sensor->num_trips = of_thermal_get_ntrips(sensor->th_dev);
+
+ /* Find out passive temperature if it exists */
+ for (i = (sensor->num_trips - 1); i >= 0; i--) {
+ if (trip[i].type == THERMAL_TRIP_PASSIVE) {
+ sensor->temp_passive = celsius(trip[i].temperature);
+ /* Update high temperature threshold */
+ sensor->high_temp = sensor->temp_passive;
+ }
+ }
+
+ /*
+ * Ensure the low_temp_enabled flag is cleared. With it cleared, the
+ * low threshold interrupt is neither configured nor enabled; it is
+ * not needed, since the high threshold is set on the lowest
+ * temperature trip point after probe.
+ */
+ sensor->low_temp_enabled = false;
+
+ /* Configure and enable HW sensor */
+ ret = stm_thermal_prepare(sensor);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Not able to enable sensor: %d\n", ret);
+ goto err_tz;
+ }
+
+ /*
+ * The thermal zone does not enable hwmon by default,
+ * so enable it here
+ */
+ sensor->th_dev->tzp->no_hwmon = false;
+ ret = thermal_add_hwmon_sysfs(sensor->th_dev);
+ if (ret)
+ goto err_tz;
+
+ sensor->mode = THERMAL_DEVICE_ENABLED;
+
+ dev_info(&pdev->dev, "%s: Driver initialized successfully\n",
+ __func__);
+
+ return 0;
+
+err_tz:
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+ return ret;
+}
+
+static int stm_thermal_remove(struct platform_device *pdev)
+{
+ struct stm_thermal_sensor *sensor = platform_get_drvdata(pdev);
+
+ stm_thermal_sensor_off(sensor);
+ thermal_remove_hwmon_sysfs(sensor->th_dev);
+ thermal_zone_of_sensor_unregister(&pdev->dev, sensor->th_dev);
+
+ return 0;
+}
+
+static struct platform_driver stm_thermal_driver = {
+ .driver = {
+ .name = "stm_thermal",
+ .pm = &stm_thermal_pm_ops,
+ .of_match_table = stm_thermal_of_match,
+ },
+ .probe = stm_thermal_probe,
+ .remove = stm_thermal_remove,
+};
+module_platform_driver(stm_thermal_driver);
+
+MODULE_DESCRIPTION("STMicroelectronics STM32 Thermal Sensor Driver");
+MODULE_AUTHOR("David Hernandez Sanchez <david.hernandezsanchez@st.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:stm_thermal");
diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c
index c2277b8ee88d..9553305c63ea 100644
--- a/drivers/thunderbolt/cap.c
+++ b/drivers/thunderbolt/cap.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - capabilities lookup
+ * Thunderbolt driver - capabilities lookup
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/slab.h>
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 37a7f4c735d0..73b386de4d15 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
@@ -631,7 +632,7 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data)
ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
}
- tb_ctl_info(ctl, "control channel created\n");
+ tb_ctl_dbg(ctl, "control channel created\n");
return ctl;
err:
tb_ctl_free(ctl);
@@ -662,8 +663,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
tb_ctl_pkg_free(ctl->rx_packets[i]);
- if (ctl->frame_pool)
- dma_pool_destroy(ctl->frame_pool);
+ dma_pool_destroy(ctl->frame_pool);
kfree(ctl);
}
@@ -673,7 +673,7 @@ void tb_ctl_free(struct tb_ctl *ctl)
void tb_ctl_start(struct tb_ctl *ctl)
{
int i;
- tb_ctl_info(ctl, "control channel starting...\n");
+ tb_ctl_dbg(ctl, "control channel starting...\n");
tb_ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
tb_ring_start(ctl->rx);
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
@@ -702,7 +702,7 @@ void tb_ctl_stop(struct tb_ctl *ctl)
if (!list_empty(&ctl->request_queue))
tb_ctl_WARN(ctl, "dangling request in request_queue\n");
INIT_LIST_HEAD(&ctl->request_queue);
- tb_ctl_info(ctl, "control channel stopped\n");
+ tb_ctl_dbg(ctl, "control channel stopped\n");
}
/* public interface, commands */
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index 3062e0b5f71e..2f1a1e111110 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - control channel and configuration commands
+ * Thunderbolt driver - control channel and configuration commands
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef _TB_CFG
diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c
index f2701194f810..847dd07a7b17 100644
--- a/drivers/thunderbolt/dma_port.c
+++ b/drivers/thunderbolt/dma_port.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/thunderbolt/dma_port.h b/drivers/thunderbolt/dma_port.h
index c4a69e0fbff7..7deadd97ce31 100644
--- a/drivers/thunderbolt/dma_port.h
+++ b/drivers/thunderbolt/dma_port.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt DMA configuration based mailbox support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef DMA_PORT_H_
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 092381e2accf..93e562f18d40 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -1,12 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt bus support
*
* Copyright (C) 2017, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
#include <linux/device.h>
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 3e8caf22c294..81e8ac4c5805 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - eeprom access
+ * Thunderbolt driver - eeprom access
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/crc32.h>
@@ -540,7 +541,7 @@ int tb_drom_read(struct tb_switch *sw)
return res;
size &= 0x3ff;
size += TB_DROM_DATA_START;
- tb_sw_info(sw, "reading drom (length: %#x)\n", size);
+ tb_sw_dbg(sw, "reading drom (length: %#x)\n", size);
if (size < sizeof(*header)) {
tb_sw_warn(sw, "drom too small, aborting\n");
return -EIO;
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 28fc4ce75edb..e3fc920af682 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Internal Thunderbolt Connection Manager. This is a firmware running on
* the Thunderbolt host controller performing most of the low-level
@@ -6,10 +7,6 @@
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/delay.h>
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index 5cd6bdfa068f..9aa44f9762a3 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -1,10 +1,11 @@
/*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
*
* The NHI (native host interface) is the pci device that allows us to send and
* receive frames from the thunderbolt bus.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/pm_runtime.h>
@@ -95,9 +96,9 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
else
new = old & ~mask;
- dev_info(&ring->nhi->pdev->dev,
- "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ dev_dbg(&ring->nhi->pdev->dev,
+ "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
+ active ? "enabling" : "disabling", reg, bit, old, new);
if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
@@ -476,8 +477,9 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
void *poll_data)
{
struct tb_ring *ring = NULL;
- dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
- transmit ? "TX" : "RX", hop, size);
+
+ dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
+ transmit ? "TX" : "RX", hop, size);
/* Tx Ring 2 is reserved for E2E workaround */
if (transmit && hop == RING_E2E_UNUSED_HOPID)
@@ -585,8 +587,8 @@ void tb_ring_start(struct tb_ring *ring)
dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
goto err;
}
- dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
- RING_TYPE(ring), ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
+ RING_TYPE(ring), ring->hop);
if (ring->flags & RING_FLAG_FRAME) {
/* Means 4096 */
@@ -647,8 +649,8 @@ void tb_ring_stop(struct tb_ring *ring)
{
spin_lock_irq(&ring->nhi->lock);
spin_lock(&ring->lock);
- dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
- RING_TYPE(ring), ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
+ RING_TYPE(ring), ring->hop);
if (ring->nhi->going_away)
goto err;
if (!ring->running) {
@@ -716,10 +718,8 @@ void tb_ring_free(struct tb_ring *ring)
ring->descriptors_dma = 0;
- dev_info(&ring->nhi->pdev->dev,
- "freeing %s %d\n",
- RING_TYPE(ring),
- ring->hop);
+ dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
+ ring->hop);
/**
* ring->work can no longer be scheduled (it is scheduled only
@@ -931,7 +931,8 @@ static int nhi_runtime_resume(struct device *dev)
static void nhi_shutdown(struct tb_nhi *nhi)
{
int i;
- dev_info(&nhi->pdev->dev, "shutdown\n");
+
+ dev_dbg(&nhi->pdev->dev, "shutdown\n");
for (i = 0; i < nhi->hop_count; i++) {
if (nhi->tx_rings[i])
@@ -1059,7 +1060,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
+ dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
res = tb_domain_add(tb);
if (res) {
diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
index 1696a4560948..1b5d47ecd3ed 100644
--- a/drivers/thunderbolt/nhi.h
+++ b/drivers/thunderbolt/nhi.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - NHI driver
+ * Thunderbolt driver - NHI driver
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef DSL3510_H_
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index b3e49d19c01e..a60bd98c1d04 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -3,6 +3,7 @@
* Thunderbolt driver - NHI registers
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef NHI_REGS_H_
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ff49ad880bfd..a11956522bac 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -13,19 +13,19 @@
static void tb_dump_hop(struct tb_port *port, struct tb_regs_hop *hop)
{
- tb_port_info(port, " Hop through port %d to hop %d (%s)\n",
- hop->out_port, hop->next_hop,
- hop->enable ? "enabled" : "disabled");
- tb_port_info(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
- hop->weight, hop->priority,
- hop->initial_credits, hop->drop_packages);
- tb_port_info(port, " Counter enabled: %d Counter index: %d\n",
- hop->counter_enable, hop->counter);
- tb_port_info(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
- hop->ingress_fc, hop->egress_fc,
- hop->ingress_shared_buffer, hop->egress_shared_buffer);
- tb_port_info(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
- hop->unknown1, hop->unknown2, hop->unknown3);
+ tb_port_dbg(port, " Hop through port %d to hop %d (%s)\n",
+ hop->out_port, hop->next_hop,
+ hop->enable ? "enabled" : "disabled");
+ tb_port_dbg(port, " Weight: %d Priority: %d Credits: %d Drop: %d\n",
+ hop->weight, hop->priority,
+ hop->initial_credits, hop->drop_packages);
+ tb_port_dbg(port, " Counter enabled: %d Counter index: %d\n",
+ hop->counter_enable, hop->counter);
+ tb_port_dbg(port, " Flow Control (In/Eg): %d/%d Shared Buffer (In/Eg): %d/%d\n",
+ hop->ingress_fc, hop->egress_fc,
+ hop->ingress_shared_buffer, hop->egress_shared_buffer);
+ tb_port_dbg(port, " Unknown1: %#x Unknown2: %#x Unknown3: %#x\n",
+ hop->unknown1, hop->unknown2, hop->unknown3);
}
/**
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index 8fe913a95b4a..b2f0d6386cee 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain property support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/err.h>
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 7442bc4c6433..52ff854f0d6c 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Thunderbolt Cactus Ridge driver - switch/port utility functions
+ * Thunderbolt driver - switch/port utility functions
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#include <linux/delay.h>
@@ -436,15 +437,15 @@ static const char *tb_port_type(struct tb_regs_port_header *port)
static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port)
{
- tb_info(tb,
- " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
- port->port_number, port->vendor_id, port->device_id,
- port->revision, port->thunderbolt_version, tb_port_type(port),
- port->type);
- tb_info(tb, " Max hop id (in/out): %d/%d\n",
- port->max_in_hop_id, port->max_out_hop_id);
- tb_info(tb, " Max counters: %d\n", port->max_counters);
- tb_info(tb, " NFC Credits: %#x\n", port->nfc_credits);
+ tb_dbg(tb,
+ " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n",
+ port->port_number, port->vendor_id, port->device_id,
+ port->revision, port->thunderbolt_version, tb_port_type(port),
+ port->type);
+ tb_dbg(tb, " Max hop id (in/out): %d/%d\n",
+ port->max_in_hop_id, port->max_out_hop_id);
+ tb_dbg(tb, " Max counters: %d\n", port->max_counters);
+ tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits);
}
/**
@@ -605,20 +606,18 @@ static int tb_init_port(struct tb_port *port)
static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw)
{
- tb_info(tb,
- " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
- sw->vendor_id, sw->device_id, sw->revision,
- sw->thunderbolt_version);
- tb_info(tb, " Max Port Number: %d\n", sw->max_port_number);
- tb_info(tb, " Config:\n");
- tb_info(tb,
+ tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n",
+ sw->vendor_id, sw->device_id, sw->revision,
+ sw->thunderbolt_version);
+ tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number);
+ tb_dbg(tb, " Config:\n");
+ tb_dbg(tb,
" Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n",
- sw->upstream_port_number, sw->depth,
- (((u64) sw->route_hi) << 32) | sw->route_lo,
- sw->enabled, sw->plug_events_delay);
- tb_info(tb,
- " unknown1: %#x unknown4: %#x\n",
- sw->__unknown1, sw->__unknown4);
+ sw->upstream_port_number, sw->depth,
+ (((u64) sw->route_hi) << 32) | sw->route_lo,
+ sw->enabled, sw->plug_events_delay);
+ tb_dbg(tb, " unknown1: %#x unknown4: %#x\n",
+ sw->__unknown1, sw->__unknown4);
}
/**
@@ -634,7 +633,7 @@ int tb_switch_reset(struct tb *tb, u64 route)
header.route_lo = route,
header.enabled = true,
};
- tb_info(tb, "resetting switch at %llx\n", route);
+ tb_dbg(tb, "resetting switch at %llx\n", route);
res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route,
0, 2, 2, 2);
if (res.err)
@@ -1139,7 +1138,7 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5))
goto err_free_sw_ports;
- tb_info(tb, "current switch config:\n");
+ tb_dbg(tb, "current switch config:\n");
tb_dump_switch(tb, &sw->config);
/* configure switch */
@@ -1246,9 +1245,8 @@ int tb_switch_configure(struct tb_switch *sw)
int ret;
route = tb_route(sw);
- tb_info(tb,
- "initializing Switch at %#llx (depth: %d, up port: %d)\n",
- route, tb_route_length(route), sw->config.upstream_port_number);
+ tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n",
+ route, tb_route_length(route), sw->config.upstream_port_number);
if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL)
tb_sw_warn(sw, "unknown switch vendor id %#x\n",
@@ -1386,13 +1384,13 @@ int tb_switch_add(struct tb_switch *sw)
tb_sw_warn(sw, "tb_eeprom_read_rom failed\n");
return ret;
}
- tb_sw_info(sw, "uid: %#llx\n", sw->uid);
+ tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);
tb_switch_set_uuid(sw);
for (i = 0; i <= sw->config.max_port_number; i++) {
if (sw->ports[i].disabled) {
- tb_port_info(&sw->ports[i], "disabled by eeprom\n");
+ tb_port_dbg(&sw->ports[i], "disabled by eeprom\n");
continue;
}
ret = tb_init_port(&sw->ports[i]);
@@ -1405,6 +1403,14 @@ int tb_switch_add(struct tb_switch *sw)
if (ret)
return ret;
+ if (tb_route(sw)) {
+ dev_info(&sw->dev, "new device found, vendor=%#x device=%#x\n",
+ sw->vendor, sw->device);
+ if (sw->vendor_name && sw->device_name)
+ dev_info(&sw->dev, "%s %s\n", sw->vendor_name,
+ sw->device_name);
+ }
+
ret = tb_switch_nvm_add(sw);
if (ret) {
device_del(&sw->dev);
@@ -1456,6 +1462,9 @@ void tb_switch_remove(struct tb_switch *sw)
tb_plug_events_active(sw, false);
tb_switch_nvm_remove(sw);
+
+ if (tb_route(sw))
+ dev_info(&sw->dev, "device disconnected\n");
device_unregister(&sw->dev);
}
@@ -1483,7 +1492,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
int tb_switch_resume(struct tb_switch *sw)
{
int i, err;
- tb_sw_info(sw, "resuming switch\n");
+ tb_sw_dbg(sw, "resuming switch\n");
/*
* Check for UID of the connected switches except for root
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 1424581fd9af..30e02c716f6c 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -404,10 +404,10 @@ static int tb_suspend_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
- tb_info(tb, "suspending...\n");
+ tb_dbg(tb, "suspending...\n");
tb_switch_suspend(tb->root_switch);
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
- tb_info(tb, "suspend finished\n");
+ tb_dbg(tb, "suspend finished\n");
return 0;
}
@@ -417,7 +417,7 @@ static int tb_resume_noirq(struct tb *tb)
struct tb_cm *tcm = tb_priv(tb);
struct tb_pci_tunnel *tunnel, *n;
- tb_info(tb, "resuming...\n");
+ tb_dbg(tb, "resuming...\n");
/* remove any pci devices the firmware might have setup */
tb_switch_reset(tb, 0);
@@ -432,12 +432,12 @@ static int tb_resume_noirq(struct tb *tb)
* the pcie links need some time to get going.
* 100ms works for me...
*/
- tb_info(tb, "tunnels restarted, sleeping for 100ms\n");
+ tb_dbg(tb, "tunnels restarted, sleeping for 100ms\n");
msleep(100);
}
/* Allow tb_handle_hotplug to progress events */
tcm->hotplug_active = true;
- tb_info(tb, "resume finished\n");
+ tb_dbg(tb, "resume finished\n");
return 0;
}
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 5067d69d0501..52584c4003e3 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - bus logic (NHI independent)
+ * Thunderbolt driver - bus logic (NHI independent)
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef TB_H_
@@ -327,7 +328,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
-
+#define tb_dbg(tb, fmt, arg...) dev_dbg(&(tb)->nhi->pdev->dev, fmt, ## arg)
#define __TB_SW_PRINT(level, sw, fmt, arg...) \
do { \
@@ -338,7 +339,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
#define tb_sw_WARN(sw, fmt, arg...) __TB_SW_PRINT(tb_WARN, sw, fmt, ##arg)
#define tb_sw_warn(sw, fmt, arg...) __TB_SW_PRINT(tb_warn, sw, fmt, ##arg)
#define tb_sw_info(sw, fmt, arg...) __TB_SW_PRINT(tb_info, sw, fmt, ##arg)
-
+#define tb_sw_dbg(sw, fmt, arg...) __TB_SW_PRINT(tb_dbg, sw, fmt, ##arg)
#define __TB_PORT_PRINT(level, _port, fmt, arg...) \
do { \
@@ -352,6 +353,8 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
__TB_PORT_PRINT(tb_warn, port, fmt, ##arg)
#define tb_port_info(port, fmt, arg...) \
__TB_PORT_PRINT(tb_info, port, fmt, ##arg)
+#define tb_port_dbg(port, fmt, arg...) \
+ __TB_PORT_PRINT(tb_dbg, port, fmt, ##arg)
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
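Because the new tb_dbg()/tb_sw_dbg()/tb_port_dbg() wrappers route through dev_dbg(), the messages demoted from info level throughout this series remain reachable at runtime: with CONFIG_DYNAMIC_DEBUG enabled they can be switched on with, for example, echo 'module thunderbolt +p' > /sys/kernel/debug/dynamic_debug/control.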
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index 2487e162c885..02c84aa3d018 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt control channel messages
*
* Copyright (C) 2014 Andreas Noever <andreas.noever@gmail.com>
* Copyright (C) 2017, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _TB_MSGS
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 693b0353c3fe..6f1ff04ee195 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Thunderbolt Cactus Ridge driver - Port/Switch config area registers
+ * Thunderbolt driver - Port/Switch config area registers
*
* Every thunderbolt device consists (logically) of a switch with multiple
* ports. Every port contains up to four config regions (HOPS, PORT, SWITCH,
* COUNTERS) which are used to configure the device.
*
* Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
+ * Copyright (C) 2018, Intel Corporation
*/
#ifndef _TB_REGS
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index db8bece63327..e27dd8beb94b 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1,13 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Thunderbolt XDomain discovery protocol support
*
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/device.h>
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index df8bd0c7b97d..32886c304641 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -118,6 +118,7 @@ config SERIAL_ATMEL
depends on ARCH_AT91 || COMPILE_TEST
select SERIAL_CORE
select SERIAL_MCTRL_GPIO if GPIOLIB
+ select MFD_AT91_USART
help
This enables the driver for the on-chip UARTs of the Atmel
AT91 processors.
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 8e4428725848..267d4d1de3f8 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -193,8 +193,7 @@ static struct console atmel_console;
#if defined(CONFIG_OF)
static const struct of_device_id atmel_serial_dt_ids[] = {
- { .compatible = "atmel,at91rm9200-usart" },
- { .compatible = "atmel,at91sam9260-usart" },
+ { .compatible = "atmel,at91rm9200-usart-serial" },
{ /* sentinel */ }
};
#endif
@@ -915,6 +914,7 @@ static void atmel_tx_dma(struct uart_port *port)
static int atmel_prepare_tx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
dma_cap_mask_t mask;
struct dma_slave_config config;
int ret, nent;
@@ -922,7 +922,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
+ atmel_port->chan_tx = dma_request_slave_channel(mfd_dev, "tx");
if (atmel_port->chan_tx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for tx DMA transfers\n",
@@ -1093,6 +1093,7 @@ static void atmel_rx_from_dma(struct uart_port *port)
static int atmel_prepare_rx_dma(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct device *mfd_dev = port->dev->parent;
struct dma_async_tx_descriptor *desc;
dma_cap_mask_t mask;
struct dma_slave_config config;
@@ -1104,7 +1105,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
dma_cap_zero(mask);
dma_cap_set(DMA_CYCLIC, mask);
- atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
+ atmel_port->chan_rx = dma_request_slave_channel(mfd_dev, "rx");
if (atmel_port->chan_rx == NULL)
goto chan_err;
dev_info(port->dev, "using %s for rx DMA transfers\n",
@@ -2222,8 +2223,8 @@ static const char *atmel_type(struct uart_port *port)
*/
static void atmel_release_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
release_mem_region(port->mapbase, size);
@@ -2238,8 +2239,8 @@ static void atmel_release_port(struct uart_port *port)
*/
static int atmel_request_port(struct uart_port *port)
{
- struct platform_device *pdev = to_platform_device(port->dev);
- int size = pdev->resource[0].end - pdev->resource[0].start + 1;
+ struct platform_device *mpdev = to_platform_device(port->dev->parent);
+ int size = resource_size(mpdev->resource);
if (!request_mem_region(port->mapbase, size, "atmel_serial"))
return -EBUSY;
@@ -2341,27 +2342,28 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
{
int ret;
struct uart_port *port = &atmel_port->uart;
+ struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
atmel_init_property(atmel_port, pdev);
atmel_set_ops(port);
- uart_get_rs485_mode(&pdev->dev, &port->rs485);
+ uart_get_rs485_mode(&mpdev->dev, &port->rs485);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP;
port->ops = &atmel_pops;
port->fifosize = 1;
port->dev = &pdev->dev;
- port->mapbase = pdev->resource[0].start;
- port->irq = pdev->resource[1].start;
+ port->mapbase = mpdev->resource[0].start;
+ port->irq = mpdev->resource[1].start;
port->rs485_config = atmel_config_rs485;
- port->membase = NULL;
+ port->membase = NULL;
memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring));
/* for console, the clock could already be configured */
if (!atmel_port->clk) {
- atmel_port->clk = clk_get(&pdev->dev, "usart");
+ atmel_port->clk = clk_get(&mpdev->dev, "usart");
if (IS_ERR(atmel_port->clk)) {
ret = PTR_ERR(atmel_port->clk);
atmel_port->clk = NULL;
@@ -2694,13 +2696,22 @@ static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
static int atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *atmel_port;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.parent->of_node;
void *data;
int ret = -ENODEV;
bool rs485_enabled;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
+ /*
+ * The device tree has no node with "atmel,at91rm9200-usart-serial"
+ * as its compatible string. This driver is probed by the at91-usart
+ * MFD driver, which wraps the atmel_serial and spi-at91-usart
+ * drivers. All attributes needed by this driver are found in the
+ * of_node of the parent.
+ */
+ pdev->dev.of_node = np;
+
ret = of_alias_get_id(np, "serial");
if (ret < 0)
/* port id not found in platform data nor device-tree aliases:
@@ -2836,6 +2847,7 @@ static int atmel_serial_remove(struct platform_device *pdev)
clk_put(atmel_port->clk);
atmel_port->clk = NULL;
+ pdev->dev.of_node = NULL;
return ret;
}
@@ -2846,7 +2858,7 @@ static struct platform_driver atmel_serial_driver = {
.suspend = atmel_serial_suspend,
.resume = atmel_serial_resume,
.driver = {
- .name = "atmel_usart",
+ .name = "atmel_usart_serial",
.of_match_table = of_match_ptr(atmel_serial_dt_ids),
},
};
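With the serial port now a child of the at91-usart MFD device, the registers, IRQ, clock and DT properties all live on the parent, and the lookups in this patch reduce to the following pattern (a sketch, not a complete probe path):

	struct platform_device *mpdev = to_platform_device(pdev->dev.parent);
	int size = resource_size(mpdev->resource);	/* first (MEM) resource of the parent */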
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 70a7981b94b3..85644669fbe7 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -274,6 +274,8 @@ static struct class uio_class = {
.dev_groups = uio_groups,
};
+static bool uio_class_registered;
+
/*
* device functions
*/
@@ -668,7 +670,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
- int ret = 0;
+ vm_fault_t ret = 0;
int mi;
mutex_lock(&idev->info_lock);
@@ -736,7 +738,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
return -EINVAL;
vma->vm_ops = &uio_physical_vm_ops;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (idev->info->mem[mi].memtype == UIO_MEM_PHYS)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
/*
* We cannot use the vm_iomap_memory() helper here,
@@ -793,18 +796,19 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
}
switch (idev->info->mem[mi].memtype) {
- case UIO_MEM_PHYS:
- ret = uio_mmap_physical(vma);
- break;
- case UIO_MEM_LOGICAL:
- case UIO_MEM_VIRTUAL:
- ret = uio_mmap_logical(vma);
- break;
- default:
- ret = -EINVAL;
+ case UIO_MEM_IOVA:
+ case UIO_MEM_PHYS:
+ ret = uio_mmap_physical(vma);
+ break;
+ case UIO_MEM_LOGICAL:
+ case UIO_MEM_VIRTUAL:
+ ret = uio_mmap_logical(vma);
+ break;
+ default:
+ ret = -EINVAL;
}
-out:
+ out:
mutex_unlock(&idev->info_lock);
return ret;
}
@@ -876,6 +880,9 @@ static int init_uio_class(void)
printk(KERN_ERR "class_register failed for uio\n");
goto err_class_register;
}
+
+ uio_class_registered = true;
+
return 0;
err_class_register:
@@ -886,6 +893,7 @@ exit:
static void release_uio_class(void)
{
+ uio_class_registered = false;
class_unregister(&uio_class);
uio_major_cleanup();
}
@@ -912,6 +920,9 @@ int __uio_register_device(struct module *owner,
struct uio_device *idev;
int ret = 0;
+ if (!uio_class_registered)
+ return -EPROBE_DEFER;
+
if (!parent || !info || !info->name || !info->version)
return -EINVAL;
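Returning -EPROBE_DEFER closes the race where a UIO driver could register before init_uio_class() has run: the driver core simply retries the caller's probe later. From the caller's side the pattern looks like this (a sketch; the probe function is illustrative):

static int example_probe(struct platform_device *pdev)
{
	struct uio_info *info = dev_get_platdata(&pdev->dev);

	/* returns -EPROBE_DEFER until the uio class exists; the core retries */
	return uio_register_device(&pdev->dev, info);
}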
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index e1134a4d97f3..003badaef5f3 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -163,7 +163,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
goto bad2;
}
- uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+ pdev->dev.of_node);
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
diff --git a/drivers/uio/uio_fsl_elbc_gpcm.c b/drivers/uio/uio_fsl_elbc_gpcm.c
index bbc17effae5e..9cc37fe07d35 100644
--- a/drivers/uio/uio_fsl_elbc_gpcm.c
+++ b/drivers/uio/uio_fsl_elbc_gpcm.c
@@ -382,8 +382,7 @@ static int uio_fsl_elbc_gpcm_probe(struct platform_device *pdev)
}
/* set all UIO data */
- if (node->name)
- info->mem[0].name = kstrdup(node->name, GFP_KERNEL);
+ info->mem[0].name = kasprintf(GFP_KERNEL, "%pOFn", node);
info->mem[0].addr = res.start;
info->mem[0].size = resource_size(&res);
info->mem[0].memtype = UIO_MEM_PHYS;
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index e401be8321ab..c2493d011225 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -17,7 +17,6 @@
* # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
* > /sys/bus/vmbus/drivers/uio_hv_generic/bind
*/
-#define DEBUG 1
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
@@ -33,13 +32,13 @@
#include "../hv/hyperv_vmbus.h"
-#define DRIVER_VERSION "0.02.0"
+#define DRIVER_VERSION "0.02.1"
#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
#define DRIVER_DESC "Generic UIO driver for VMBus devices"
#define HV_RING_SIZE 512 /* pages */
-#define SEND_BUFFER_SIZE (15 * 1024 * 1024)
-#define RECV_BUFFER_SIZE (15 * 1024 * 1024)
+#define SEND_BUFFER_SIZE (16 * 1024 * 1024)
+#define RECV_BUFFER_SIZE (31 * 1024 * 1024)
/*
* List of resources to be mapped to user space
@@ -56,6 +55,7 @@ enum hv_uio_map {
struct hv_uio_private_data {
struct uio_info info;
struct hv_device *device;
+ atomic_t refcnt;
void *recv_buf;
u32 recv_gpadl;
@@ -129,13 +129,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
{
struct vmbus_channel *channel
= container_of(kobj, struct vmbus_channel, kobj);
- struct hv_device *dev = channel->primary_channel->device_obj;
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
+ void *ring_buffer = page_address(channel->ringbuffer_page);
- dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
- q_idx, vma_pages(vma), vma->vm_pgoff);
+ if (channel->state != CHANNEL_OPENED_STATE)
+ return -ENODEV;
- return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+ return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
channel->ringbuffer_pagecount << PAGE_SHIFT);
}
@@ -176,58 +175,104 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
}
}
+/* free the reserved buffers for send and receive */
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
- if (pdata->send_gpadl)
+ if (pdata->send_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
- vfree(pdata->send_buf);
+ pdata->send_gpadl = 0;
+ vfree(pdata->send_buf);
+ }
- if (pdata->recv_gpadl)
+ if (pdata->recv_gpadl) {
vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
- vfree(pdata->recv_buf);
+ pdata->recv_gpadl = 0;
+ vfree(pdata->recv_buf);
+ }
+}
+
+/* VMBus primary channel is opened on first use */
+static int
+hv_uio_open(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret;
+
+ if (atomic_inc_return(&pdata->refcnt) != 1)
+ return 0;
+
+ ret = vmbus_connect_ring(dev->channel,
+ hv_uio_channel_cb, dev->channel);
+
+ if (ret == 0)
+ dev->channel->inbound.ring_buffer->interrupt_mask = 1;
+ else
+ atomic_dec(&pdata->refcnt);
+
+ return ret;
+}
+
+/* VMBus primary channel is closed on last close */
+static int
+hv_uio_release(struct uio_info *info, struct inode *inode)
+{
+ struct hv_uio_private_data *pdata
+ = container_of(info, struct hv_uio_private_data, info);
+ struct hv_device *dev = pdata->device;
+ int ret = 0;
+
+ if (atomic_dec_and_test(&pdata->refcnt))
+ ret = vmbus_disconnect_ring(dev->channel);
+
+ return ret;
}
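The open/release pair above implements a first-open/last-close discipline on the primary channel: only the first open of the device connects the ring and only the last close tears it down. Reduced to its core (helper names are illustrative):

static int example_open(struct hv_uio_private_data *pdata)
{
	if (atomic_inc_return(&pdata->refcnt) != 1)
		return 0;		/* ring already connected */
	return connect_ring(pdata);	/* first opener connects; on error
					 * the reference is dropped again */
}

static void example_close(struct hv_uio_private_data *pdata)
{
	if (atomic_dec_and_test(&pdata->refcnt))
		disconnect_ring(pdata);	/* last closer disconnects */
}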
static int
hv_uio_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
+ struct vmbus_channel *channel = dev->channel;
struct hv_uio_private_data *pdata;
+ void *ring_buffer;
int ret;
+ /* Communicating with host has to be via shared memory not hypercall */
+ if (!channel->offermsg.monitor_allocated) {
+ dev_err(&dev->device, "vmbus channel requires hypercall\n");
+ return -ENOTSUPP;
+ }
+
pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
- HV_RING_SIZE * PAGE_SIZE, NULL, 0,
- hv_uio_channel_cb, dev->channel);
+ ret = vmbus_alloc_ring(channel, HV_RING_SIZE * PAGE_SIZE,
+ HV_RING_SIZE * PAGE_SIZE);
if (ret)
goto fail;
- /* Communicating with host has to be via shared memory not hypercall */
- if (!dev->channel->offermsg.monitor_allocated) {
- dev_err(&dev->device, "vmbus channel requires hypercall\n");
- ret = -ENOTSUPP;
- goto fail_close;
- }
-
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- set_channel_read_mode(dev->channel, HV_CALL_ISR);
+ set_channel_read_mode(channel, HV_CALL_ISR);
/* Fill general uio info */
pdata->info.name = "uio_hv_generic";
pdata->info.version = DRIVER_VERSION;
pdata->info.irqcontrol = hv_uio_irqcontrol;
+ pdata->info.open = hv_uio_open;
+ pdata->info.release = hv_uio_release;
pdata->info.irq = UIO_IRQ_CUSTOM;
+ atomic_set(&pdata->refcnt, 0);
/* mem resources */
pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
+ ring_buffer = page_address(channel->ringbuffer_page);
pdata->info.mem[TXRX_RING_MAP].addr
- = (uintptr_t)dev->channel->ringbuffer_pages;
+ = (uintptr_t)virt_to_phys(ring_buffer);
pdata->info.mem[TXRX_RING_MAP].size
- = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
- pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
+ = channel->ringbuffer_pagecount << PAGE_SHIFT;
+ pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_IOVA;
pdata->info.mem[INT_PAGE_MAP].name = "int_page";
pdata->info.mem[INT_PAGE_MAP].addr
@@ -247,7 +292,7 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->recv_buf,
RECV_BUFFER_SIZE, &pdata->recv_gpadl);
if (ret)
goto fail_close;
@@ -261,14 +306,13 @@ hv_uio_probe(struct hv_device *dev,
pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;
-
pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
if (pdata->send_buf == NULL) {
ret = -ENOMEM;
goto fail_close;
}
- ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
+ ret = vmbus_establish_gpadl(channel, pdata->send_buf,
SEND_BUFFER_SIZE, &pdata->send_gpadl);
if (ret)
goto fail_close;
@@ -290,10 +334,10 @@ hv_uio_probe(struct hv_device *dev,
goto fail_close;
}
- vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
- vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);
+ vmbus_set_chn_rescind_callback(channel, hv_uio_rescind);
+ vmbus_set_sc_create_callback(channel, hv_uio_new_channel);
- ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
+ ret = sysfs_create_bin_file(&channel->kobj, &ring_buffer_bin_attr);
if (ret)
dev_notice(&dev->device,
"sysfs create ring bin file failed; %d\n", ret);
@@ -304,7 +348,6 @@ hv_uio_probe(struct hv_device *dev,
fail_close:
hv_uio_cleanup(dev, pdata);
- vmbus_close(dev->channel);
fail:
kfree(pdata);
@@ -322,7 +365,8 @@ hv_uio_remove(struct hv_device *dev)
uio_unregister_device(&pdata->info);
hv_uio_cleanup(dev, pdata);
hv_set_drvdata(dev, NULL);
- vmbus_close(dev->channel);
+
+ vmbus_free_ring(dev->channel);
kfree(pdata);
return 0;
}
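
The hv_uio rework above splits channel setup in two: vmbus_alloc_ring() now runs once at probe, while the ring is only connected on first open and disconnected on last close, with an atomic refcount arbitrating between openers. A minimal sketch of that first-open/last-close pattern, with my_dev, my_connect() and my_disconnect() as hypothetical stand-ins:

static int my_open(struct my_dev *dev)
{
	int ret = 0;

	/* only the first opener connects the ring */
	if (atomic_inc_return(&dev->refcnt) == 1) {
		ret = my_connect(dev);
		if (ret)
			atomic_dec(&dev->refcnt);	/* undo on failure */
	}
	return ret;
}

static void my_release(struct my_dev *dev)
{
	/* only the last closer tears the connection down */
	if (atomic_dec_and_test(&dev->refcnt))
		my_disconnect(dev);
}

Note that the cleanup path now also zeroes send_gpadl/recv_gpadl after teardown, so hv_uio_cleanup() stays safe when reached from both the probe error path and remove.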
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index f598ecddc8a7..6c759934bff3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -118,7 +118,8 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "unable to kmalloc\n");
return -ENOMEM;
}
- uioinfo->name = pdev->dev.of_node->name;
+ uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
+ pdev->dev.of_node);
uioinfo->version = "devicetree";
/* Multiple IRQs are not supported */
}
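
The uio_pdrv_genirq hunk replaces a borrowed pointer to the OF node name with a devm-managed copy: %pOFn is the printk format for printing a device-tree node's name, and devm_kasprintf() ties the string's lifetime to the device. A hedged sketch of the same idiom, with an explicit out-of-memory check added for illustration (the patch itself does not add one):

/* format the OF node name into a string freed automatically on unbind */
uioinfo->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%pOFn",
			       pdev->dev.of_node);
if (!uioinfo->name)
	return -ENOMEM;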
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 19f5f5f2a48a..09b37c0d075d 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -364,8 +364,7 @@ static void ci_hdrc_imx_shutdown(struct platform_device *pdev)
ci_hdrc_imx_remove(pdev);
}
-#ifdef CONFIG_PM
-static int imx_controller_suspend(struct device *dev)
+static int __maybe_unused imx_controller_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
@@ -377,7 +376,7 @@ static int imx_controller_suspend(struct device *dev)
return 0;
}
-static int imx_controller_resume(struct device *dev)
+static int __maybe_unused imx_controller_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret = 0;
@@ -408,8 +407,7 @@ clk_disable:
return ret;
}
-#ifdef CONFIG_PM_SLEEP
-static int ci_hdrc_imx_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev)
{
int ret;
@@ -431,7 +429,7 @@ static int ci_hdrc_imx_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_resume(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -445,9 +443,8 @@ static int ci_hdrc_imx_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM_SLEEP */
-static int ci_hdrc_imx_runtime_suspend(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_suspend(struct device *dev)
{
struct ci_hdrc_imx_data *data = dev_get_drvdata(dev);
int ret;
@@ -466,13 +463,11 @@ static int ci_hdrc_imx_runtime_suspend(struct device *dev)
return imx_controller_suspend(dev);
}
-static int ci_hdrc_imx_runtime_resume(struct device *dev)
+static int __maybe_unused ci_hdrc_imx_runtime_resume(struct device *dev)
{
return imx_controller_resume(dev);
}
-#endif /* CONFIG_PM */
-
static const struct dev_pm_ops ci_hdrc_imx_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ci_hdrc_imx_suspend, ci_hdrc_imx_resume)
SET_RUNTIME_PM_OPS(ci_hdrc_imx_runtime_suspend,
@@ -492,7 +487,7 @@ static struct platform_driver ci_hdrc_imx_driver = {
module_platform_driver(ci_hdrc_imx_driver);
MODULE_ALIAS("platform:imx-usb");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CI HDRC i.MX USB binding");
MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 85fc6db48e44..7bfcbb23c2a4 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -53,6 +53,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
@@ -723,6 +724,24 @@ static int ci_get_platdata(struct device *dev,
else
cable->connected = false;
}
+
+ platdata->pctl = devm_pinctrl_get(dev);
+ if (!IS_ERR(platdata->pctl)) {
+ struct pinctrl_state *p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "default");
+ if (!IS_ERR(p))
+ platdata->pins_default = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "host");
+ if (!IS_ERR(p))
+ platdata->pins_host = p;
+
+ p = pinctrl_lookup_state(platdata->pctl, "device");
+ if (!IS_ERR(p))
+ platdata->pins_device = p;
+ }
+
return 0;
}
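
The pinctrl lookup added to ci_get_platdata() is deliberately optional: devm_pinctrl_get() and pinctrl_lookup_state() return ERR_PTR values when the device tree carries no matching entries, and the IS_ERR() checks simply leave the state pointers NULL, so existing boards are unaffected. The host.c and udc.c hunks below consume these states on role switch; condensed, the select/restore half looks like this (field names as above, pointers possibly NULL):

/* entering a role: switch to its pin state if the DT provides one */
if (platdata->pins_host)
	pinctrl_select_state(platdata->pctl, platdata->pins_host);

/* leaving the role: restore "default" only when both states exist */
if (platdata->pins_host && platdata->pins_default)
	pinctrl_select_state(platdata->pctl, platdata->pins_default);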
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 4638d9b066be..d858a82c4f44 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -13,6 +13,7 @@
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
#include <linux/regulator/consumer.h>
+#include <linux/pinctrl/consumer.h>
#include "../host/ehci.h"
@@ -153,6 +154,10 @@ static int host_start(struct ci_hdrc *ci)
}
}
+ if (ci->platdata->pins_host)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_host);
+
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
goto disable_reg;
@@ -197,6 +202,10 @@ static void host_stop(struct ci_hdrc *ci)
}
ci->hcd = NULL;
ci->otg.host = NULL;
+
+ if (ci->platdata->pins_host && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index db4ceffcf2a6..f25d4827fd49 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -203,14 +203,17 @@ static void ci_otg_work(struct work_struct *work)
}
pm_runtime_get_sync(ci->dev);
+
if (ci->id_event) {
ci->id_event = false;
ci_handle_id_switch(ci);
- } else if (ci->b_sess_valid_event) {
+ }
+
+ if (ci->b_sess_valid_event) {
ci->b_sess_valid_event = false;
ci_handle_vbus_change(ci);
- } else
- dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
+ }
+
pm_runtime_put_sync(ci->dev);
enable_irq(ci->irq);
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
index 7e7428e48bfa..4f8b8179ec96 100644
--- a/drivers/usb/chipidea/otg.h
+++ b/drivers/usb/chipidea/otg.h
@@ -17,7 +17,8 @@ void ci_handle_vbus_change(struct ci_hdrc *ci);
static inline void ci_otg_queue_work(struct ci_hdrc *ci)
{
disable_irq_nosync(ci->irq);
- queue_work(ci->wq, &ci->work);
+ if (queue_work(ci->wq, &ci->work) == false)
+ enable_irq(ci->irq);
}
#endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
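
The one-line otg.h change closes an IRQ balance hole: ci_otg_queue_work() disables the interrupt and relies on ci_otg_work() to re-enable it, but queue_work() returns false when the work item is already pending, in which case no additional handler run, and hence no enable_irq(), would follow. The invariant, spelled out:

disable_irq_nosync(ci->irq);
if (queue_work(ci->wq, &ci->work) == false) {
	/* already queued: the pending run balances the previous
	 * disable, so this rejected submission must undo its own
	 */
	enable_irq(ci->irq);
}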
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 9852ec5e6e01..829e947cabf5 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg-fsm.h>
@@ -1965,6 +1966,10 @@ void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
static int udc_id_switch_for_device(struct ci_hdrc *ci)
{
+ if (ci->platdata->pins_device)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_device);
+
if (ci->is_otg)
/* Clear and enable BSV irq */
hw_write_otgsc(ci, OTGSC_BSVIS | OTGSC_BSVIE,
@@ -1983,6 +1988,10 @@ static void udc_id_switch_for_host(struct ci_hdrc *ci)
hw_write_otgsc(ci, OTGSC_BSVIE | OTGSC_BSVIS, OTGSC_BSVIS);
ci->vbus_active = 0;
+
+ if (ci->platdata->pins_device && ci->platdata->pins_default)
+ pinctrl_select_state(ci->platdata->pctl,
+ ci->platdata->pins_default);
}
/**
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index 34ad5bf8acd8..def80ff547e4 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -343,6 +343,8 @@ static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
} else if (data->oc_polarity == 1) {
/* High active */
reg &= ~(MX6_BM_OVER_CUR_DIS | MX6_BM_OVER_CUR_POLARITY);
+ } else {
+ reg &= ~(MX6_BM_OVER_CUR_DIS);
}
writel(reg, usbmisc->base + data->index * 4);
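
The new else branch covers device trees that specify neither over-current disable nor an explicit polarity: it clears only MX6_BM_OVER_CUR_DIS, so detection is enabled while the polarity bit keeps whatever value the register already holds.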
@@ -633,6 +635,6 @@ static struct platform_driver usbmisc_imx_driver = {
module_platform_driver(usbmisc_imx_driver);
MODULE_ALIAS("platform:usbmisc-imx");
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("driver for imx usb non-core registers");
MODULE_AUTHOR("Richard Zhao <richard.zhao@freescale.com>");
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 83ffa5a14c3d..4942122b2346 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -5,6 +5,7 @@
* Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -21,21 +22,24 @@
#include <linux/compat.h>
#include <linux/usb/tmc.h>
+/* Increment API VERSION when changing tmc.h with new flags or ioctls
+ * or when significantly changing the behavior of the driver.
+ */
+#define USBTMC_API_VERSION (2)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
-/*
- * Size of driver internal IO buffer. Must be multiple of 4 and at least as
- * large as wMaxPacketSize (which is usually 512 bytes).
- */
-#define USBTMC_SIZE_IOBUFFER 2048
-
/* Minimum USB timeout (in milliseconds) */
#define USBTMC_MIN_TIMEOUT 100
/* Default USB timeout (in milliseconds) */
#define USBTMC_TIMEOUT 5000
+/* Max number of urbs used in write transfers */
+#define MAX_URBS_IN_FLIGHT 16
+/* I/O buffer size used in generic read/write functions */
+#define USBTMC_BUFSIZE (4096)
+
/*
* Maximum number of read cycles to empty bulk in endpoint during CLEAR and
* ABORT_BULK_IN requests. Ends the loop if (for whatever reason) a short
@@ -79,6 +83,9 @@ struct usbtmc_device_data {
u8 bTag_last_write; /* needed for abort */
u8 bTag_last_read; /* needed for abort */
+ /* max packet size of the Bulk-IN endpoint */
+ u16 wMaxPacketSize;
+
/* data for interrupt in endpoint handling */
u8 bNotify1;
u8 bNotify2;
@@ -95,11 +102,6 @@ struct usbtmc_device_data {
/* coalesced usb488_caps from usbtmc_dev_capabilities */
__u8 usb488_caps;
- /* attributes from the USB TMC spec for this device */
- u8 TermChar;
- bool TermCharEnabled;
- bool auto_abort;
-
bool zombie; /* fd of disconnected device */
struct usbtmc_dev_capabilities capabilities;
@@ -121,13 +123,34 @@ struct usbtmc_file_data {
u32 timeout;
u8 srq_byte;
atomic_t srq_asserted;
+ atomic_t closing;
+ u8 bmTransferAttributes; /* member of DEV_DEP_MSG_IN */
+
u8 eom_val;
u8 term_char;
bool term_char_enabled;
+ bool auto_abort;
+
+ spinlock_t err_lock; /* lock for errors */
+
+ struct usb_anchor submitted;
+
+ /* data for generic_write */
+ struct semaphore limit_write_sem;
+ u32 out_transfer_size;
+ int out_status;
+
+ /* data for generic_read */
+ u32 in_transfer_size;
+ int in_status;
+ int in_urbs_used;
+ struct usb_anchor in_anchor;
+ wait_queue_head_t wait_bulk_in;
};
/* Forward declarations */
static struct usb_driver usbtmc_driver;
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data);
static void usbtmc_delete(struct kref *kref)
{
@@ -153,6 +176,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
if (!file_data)
return -ENOMEM;
+ spin_lock_init(&file_data->err_lock);
+ sema_init(&file_data->limit_write_sem, MAX_URBS_IN_FLIGHT);
+ init_usb_anchor(&file_data->submitted);
+ init_usb_anchor(&file_data->in_anchor);
+ init_waitqueue_head(&file_data->wait_bulk_in);
+
data = usb_get_intfdata(intf);
/* Protect reference to data from file structure until release */
kref_get(&data->kref);
@@ -160,10 +189,12 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
mutex_lock(&data->io_mutex);
file_data->data = data;
- /* copy default values from device settings */
+ atomic_set(&file_data->closing, 0);
+
file_data->timeout = USBTMC_TIMEOUT;
- file_data->term_char = data->TermChar;
- file_data->term_char_enabled = data->TermCharEnabled;
+ file_data->term_char = '\n';
+ file_data->term_char_enabled = 0;
+ file_data->auto_abort = 0;
file_data->eom_val = 1;
INIT_LIST_HEAD(&file_data->file_elem);
@@ -178,6 +209,40 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
return 0;
}
+/*
+ * usbtmc_flush - called before file handle is closed
+ */
+static int usbtmc_flush(struct file *file, fl_owner_t id)
+{
+ struct usbtmc_file_data *file_data;
+ struct usbtmc_device_data *data;
+
+ file_data = file->private_data;
+ if (file_data == NULL)
+ return -ENODEV;
+
+ atomic_set(&file_data->closing, 1);
+ data = file_data->data;
+
+ /* wait for io to stop */
+ mutex_lock(&data->io_mutex);
+
+ usbtmc_draw_down(file_data);
+
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->in_urbs_used = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ wake_up_interruptible_all(&data->waitq);
+ mutex_unlock(&data->io_mutex);
+
+ return 0;
+}
+
static int usbtmc_release(struct inode *inode, struct file *file)
{
struct usbtmc_file_data *file_data = file->private_data;
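
The fields added to usbtmc_file_data above carry the new asynchronous I/O engine: submitted anchors urbs still owned by the host controller, in_anchor parks completed Bulk-IN urbs until user space collects them, limit_write_sem caps in-flight urbs at MAX_URBS_IN_FLIGHT, and err_lock guards the transfer counters and sticky status codes. The urb lifecycle, sketched with those names:

/* submit side: the urb sits in "submitted" while the HCD owns it */
usb_anchor_urb(urb, &file_data->submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret)
	usb_unanchor_urb(urb);

/* IN completion: the core unanchors automatically; park the urb
 * in "in_anchor" for later copy-out and wake any waiting reader
 */
usb_anchor_urb(urb, &file_data->in_anchor);
wake_up_interruptible(&file_data->wait_bulk_in);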
@@ -197,18 +262,17 @@ static int usbtmc_release(struct inode *inode, struct file *file)
return 0;
}
-static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_in_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
u8 *buffer;
struct device *dev;
int rv;
int n;
int actual;
- struct usb_host_interface *current_setting;
- int max_size;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -216,86 +280,88 @@ static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_IN,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_read, data->bulk_in,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_in,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x with tag %02x\n",
+ buffer[0], buffer[1]);
if (buffer[0] == USBTMC_STATUS_FAILED) {
+ /* No transfer in progress and the Bulk-OUT FIFO is empty. */
rv = 0;
goto exit;
}
- if (buffer[0] != USBTMC_STATUS_SUCCESS) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
- buffer[0]);
- rv = -EPERM;
+ if (buffer[0] == USBTMC_STATUS_TRANSFER_NOT_IN_PROGRESS) {
+ /* The device returns this status if either:
+ * - There is a transfer in progress, but the specified bTag
+ * does not match.
+ * - There is no transfer in progress, but the Bulk-OUT FIFO
+ * is not empty.
+ */
+ rv = -ENOMSG;
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++)
- if (current_setting->endpoint[n].desc.bEndpointAddress ==
- data->bulk_in)
- max_size = usb_endpoint_maxp(&current_setting->endpoint[n].desc);
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
+ if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+ dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n",
+ buffer[0]);
rv = -EPERM;
goto exit;
}
- dev_dbg(&data->intf->dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
+usbtmc_abort_bulk_in_status:
+ dev_dbg(dev, "Reading from bulk in EP\n");
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ /* Data must be present, so use a short timeout of 300 ms */
+ actual = 0;
+ rv = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, USBTMC_BUFSIZE,
+ &actual, 300);
- n++;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ buffer, actual, true);
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ n++;
+
+ if (rv < 0) {
+ dev_err(dev, "usb_bulk_msg returned %d\n", rv);
+ if (rv != -ETIMEDOUT)
goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ }
- if (actual == max_size) {
+ if (actual == USBTMC_BUFSIZE)
+ goto usbtmc_abort_bulk_in_status;
+
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
goto exit;
}
- n = 0;
-
-usbtmc_abort_bulk_in_status:
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_IN_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_in, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
}
- dev_dbg(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_dbg(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
if (buffer[0] == USBTMC_STATUS_SUCCESS) {
rv = 0;
@@ -303,46 +369,30 @@ usbtmc_abort_bulk_in_status:
}
if (buffer[0] != USBTMC_STATUS_PENDING) {
- dev_err(dev, "INITIATE_ABORT_BULK_IN returned %x\n", buffer[0]);
+ dev_err(dev, "CHECK_ABORT_BULK_IN returned %x\n", buffer[0]);
rv = -EPERM;
goto exit;
}
- if (buffer[1] == 1)
- do {
- dev_dbg(dev, "Reading from bulk in EP\n");
-
- rv = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
-
- n++;
-
- if (rv < 0) {
- dev_err(dev, "usb_bulk_msg returned %d\n", rv);
- goto exit;
- }
- } while ((actual == max_size) &&
- (n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
-
- if (actual == max_size) {
- dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
- USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
- rv = -EPERM;
- goto exit;
+ if ((buffer[1] & 1) > 0) {
+ /* The device has 1 or more queued packets the Host can read */
+ goto usbtmc_abort_bulk_in_status;
}
- goto usbtmc_abort_bulk_in_status;
-
+ /* The Host must send CHECK_ABORT_BULK_IN_STATUS at a later time. */
+ rv = -EAGAIN;
exit:
kfree(buffer);
return rv;
+}
+static int usbtmc_ioctl_abort_bulk_in(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_in_tag(data, data->bTag_last_read);
}
-static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+static int usbtmc_ioctl_abort_bulk_out_tag(struct usbtmc_device_data *data,
+ u8 tag)
{
struct device *dev;
u8 *buffer;
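
Both abort paths are now split into *_tag() helpers so a specific transfer can be aborted, while the existing ioctls keep targeting the most recent bTag; note that the reworked Bulk-IN variant may return -ENOMSG (wrong bTag or non-empty FIFO) or -EAGAIN (abort still pending) where the old code forced -EPERM. A hedged user-space sketch of triggering such an abort (the device path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

int main(void)
{
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0)
		return 1;

	/* abort whatever Bulk-IN transfer the last read started */
	if (ioctl(fd, USBTMC_IOCTL_ABORT_BULK_IN) < 0)
		perror("USBTMC_IOCTL_ABORT_BULK_IN");
	return 0;
}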
@@ -359,8 +409,8 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_ABORT_BULK_OUT,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
- data->bTag_last_write, data->bulk_out,
- buffer, 2, USBTMC_TIMEOUT);
+ tag, data->bulk_out,
+ buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -379,12 +429,14 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
n = 0;
usbtmc_abort_bulk_out_check_status:
+ /* do not stress device with subsequent requests */
+ msleep(50);
rv = usb_control_msg(data->usb_dev,
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_ABORT_BULK_OUT_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT,
0, data->bulk_out, buffer, 0x08,
- USBTMC_TIMEOUT);
+ USB_CTRL_GET_TIMEOUT);
n++;
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -418,6 +470,11 @@ exit:
return rv;
}
+static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
+{
+ return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
+}
+
static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
void __user *arg)
{
@@ -457,7 +514,7 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
data->iin_bTag,
data->ifnum,
- buffer, 0x03, USBTMC_TIMEOUT);
+ buffer, 0x03, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "stb usb_control_msg returned %d\n", rv);
goto exit;
@@ -510,6 +567,54 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
return rv;
}
+static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
+ __u32 __user *arg)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ int rv;
+ u32 timeout;
+ unsigned long expire;
+
+ if (!data->iin_ep_present) {
+ dev_dbg(dev, "no interrupt endpoint present\n");
+ return -EFAULT;
+ }
+
+ if (get_user(timeout, arg))
+ return -EFAULT;
+
+ expire = msecs_to_jiffies(timeout);
+
+ mutex_unlock(&data->io_mutex);
+
+ rv = wait_event_interruptible_timeout(
+ data->waitq,
+ atomic_read(&file_data->srq_asserted) != 0 ||
+ atomic_read(&file_data->closing),
+ expire);
+
+ mutex_lock(&data->io_mutex);
+
+ /* Note! disconnect or close could be called in the meantime */
+ if (atomic_read(&file_data->closing) || data->zombie)
+ rv = -ENODEV;
+
+ if (rv < 0) {
+ /* dev can be invalid now! */
+ pr_debug("%s - wait interrupted %d\n", __func__, rv);
+ return rv;
+ }
+
+ if (rv == 0) {
+ dev_dbg(dev, "%s - wait timed out\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ dev_dbg(dev, "%s - srq asserted\n", __func__);
+ return 0;
+}
+
static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
void __user *arg, unsigned int cmd)
{
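
usbtmc488_ioctl_wait_srq() drops io_mutex for the whole sleep, so a long SRQ wait cannot starve reads, writes or other ioctls on the same device, and it re-checks closing/zombie after relocking because a disconnect may race with the wakeup. From user space, assuming the patched linux/usb/tmc.h and an open descriptor fd:

__u32 timeout_ms = 5000;	/* argument is the timeout in milliseconds */

if (ioctl(fd, USBTMC488_IOCTL_WAIT_SRQ, &timeout_ms) < 0 &&
    errno == ETIMEDOUT)
	fprintf(stderr, "no SRQ within %u ms\n", timeout_ms);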
@@ -543,7 +648,7 @@ static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
wValue,
data->ifnum,
- buffer, 0x01, USBTMC_TIMEOUT);
+ buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "simple usb_control_msg failed %d\n", rv);
goto exit;
@@ -610,6 +715,559 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
return 0;
}
+static struct urb *usbtmc_create_urb(void)
+{
+ const size_t bufsize = USBTMC_BUFSIZE;
+ u8 *dmabuf = NULL;
+ struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
+
+ if (!urb)
+ return NULL;
+
+ dmabuf = kmalloc(bufsize, GFP_KERNEL);
+ if (!dmabuf) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->transfer_buffer = dmabuf;
+ urb->transfer_buffer_length = bufsize;
+ urb->transfer_flags |= URB_FREE_BUFFER;
+ return urb;
+}
+
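/* Ownership note: URB_FREE_BUFFER hands the kmalloc'ed transfer buffer to
 * the USB core, which frees it together with the urb when the last urb
 * reference is dropped. Callers of usbtmc_create_urb() therefore manage only
 * the urb refcount: a single usb_free_urb() after a successful submit (or on
 * the error path) is the complete cleanup, with no separate kfree().
 */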
+static void usbtmc_read_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int status = urb->status;
+ unsigned long flags;
+
+ /* sync/async unlink faults aren't errors */
+ if (status) {
+ if (!(/* status == -ENOENT || */
+ status == -ECONNRESET ||
+ status == -EREMOTEIO || /* Short packet */
+ status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero read bulk status received: %d\n",
+ __func__, status);
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ if (!file_data->in_status)
+ file_data->in_status = status;
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ }
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->in_transfer_size += urb->actual_length;
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - total size: %u current: %d status: %d\n",
+ __func__, file_data->in_transfer_size,
+ urb->actual_length, status);
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+ usb_anchor_urb(urb, &file_data->in_anchor);
+
+ wake_up_interruptible(&file_data->wait_bulk_in);
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static inline bool usbtmc_do_transfer(struct usbtmc_file_data *file_data)
+{
+ bool data_or_error;
+
+ spin_lock_irq(&file_data->err_lock);
+ data_or_error = !usb_anchor_empty(&file_data->in_anchor)
+ || file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ dev_dbg(&file_data->data->intf->dev, "%s: returns %d\n", __func__,
+ data_or_error);
+ return data_or_error;
+}
+
+static ssize_t usbtmc_generic_read(struct usbtmc_file_data *file_data,
+ void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ u32 done = 0;
+ u32 remaining;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ int retval = 0;
+ u32 max_transfer_size;
+ unsigned long expire;
+ int bufcount = 1;
+ int again = 0;
+
+ /* mutex already locked */
+
+ *transferred = done;
+
+ max_transfer_size = transfer_size;
+
+ if (flags & USBTMC_FLAG_IGNORE_TRAILER) {
+ /* The device may send extra alignment bytes (up to
+ * wMaxPacketSize - 1) to avoid sending a zero-length
+ * packet
+ */
+ remaining = transfer_size;
+ if ((max_transfer_size % data->wMaxPacketSize) == 0)
+ max_transfer_size += (data->wMaxPacketSize - 1);
+ } else {
+ /* round down to a multiple of bufsize so no truncated data is left over */
+ if (max_transfer_size > bufsize) {
+ max_transfer_size =
+ roundup(max_transfer_size + 1 - bufsize,
+ bufsize);
+ }
+ remaining = max_transfer_size;
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+
+ if (file_data->in_status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ goto error;
+ }
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (usb_anchor_empty(&file_data->in_anchor))
+ again = 1;
+
+ if (file_data->in_urbs_used == 0) {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+ } else {
+ file_data->in_transfer_size = 0;
+ file_data->in_status = 0;
+ }
+
+ if (max_transfer_size == 0) {
+ bufcount = 0;
+ } else {
+ bufcount = roundup(max_transfer_size, bufsize) / bufsize;
+ if (bufcount > file_data->in_urbs_used)
+ bufcount -= file_data->in_urbs_used;
+ else
+ bufcount = 0;
+
+ if (bufcount + file_data->in_urbs_used > MAX_URBS_IN_FLIGHT) {
+ bufcount = MAX_URBS_IN_FLIGHT -
+ file_data->in_urbs_used;
+ }
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(dev, "%s: requested=%u flags=0x%X size=%u bufs=%d used=%d\n",
+ __func__, transfer_size, flags,
+ max_transfer_size, bufcount, file_data->in_urbs_used);
+
+ while (bufcount > 0) {
+ u8 *dmabuf = NULL;
+ struct urb *urb = usbtmc_create_urb();
+
+ if (!urb) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dmabuf = urb->transfer_buffer;
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev, data->bulk_in),
+ dmabuf, bufsize,
+ usbtmc_read_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ /* urb is anchored. We can release our reference. */
+ usb_free_urb(urb);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ bufcount--;
+ }
+
+ if (again) {
+ dev_dbg(dev, "%s: ret=again\n", __func__);
+ return -EAGAIN;
+ }
+
+ if (user_buffer == NULL)
+ return -EINVAL;
+
+ expire = msecs_to_jiffies(file_data->timeout);
+
+ while (max_transfer_size > 0) {
+ u32 this_part;
+ struct urb *urb = NULL;
+
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ dev_dbg(dev, "%s: before wait time %lu\n",
+ __func__, expire);
+ retval = wait_event_interruptible_timeout(
+ file_data->wait_bulk_in,
+ usbtmc_do_transfer(file_data),
+ expire);
+
+ dev_dbg(dev, "%s: wait returned %d\n",
+ __func__, retval);
+
+ if (retval <= 0) {
+ if (retval == 0)
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ urb = usb_get_from_anchor(&file_data->in_anchor);
+ if (!urb) {
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ /* synchronous case: must not happen */
+ retval = -EFAULT;
+ goto error;
+ }
+
+ /* asynchronous case: ready, do not block or wait */
+ *transferred = done;
+ dev_dbg(dev, "%s: (async) done=%u ret=0\n",
+ __func__, done);
+ return 0;
+ }
+
+ file_data->in_urbs_used--;
+
+ if (max_transfer_size > urb->actual_length)
+ max_transfer_size -= urb->actual_length;
+ else
+ max_transfer_size = 0;
+
+ if (remaining > urb->actual_length)
+ this_part = urb->actual_length;
+ else
+ this_part = remaining;
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE, 16, 1,
+ urb->transfer_buffer, urb->actual_length, true);
+
+ if (copy_to_user(user_buffer + done,
+ urb->transfer_buffer, this_part)) {
+ usb_free_urb(urb);
+ retval = -EFAULT;
+ goto error;
+ }
+
+ remaining -= this_part;
+ done += this_part;
+
+ spin_lock_irq(&file_data->err_lock);
+ if (urb->status) {
+ /* return the very first error */
+ retval = file_data->in_status;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_free_urb(urb);
+ goto error;
+ }
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (urb->actual_length < bufsize) {
+ /* short packet or ZLP received => ready */
+ usb_free_urb(urb);
+ retval = 1;
+ break;
+ }
+
+ if (!(flags & USBTMC_FLAG_ASYNC) &&
+ max_transfer_size > (bufsize * file_data->in_urbs_used)) {
+ /* resubmit, since other buffers still not enough */
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
+ goto error;
+ }
+ file_data->in_urbs_used++;
+ }
+ usb_free_urb(urb);
+ retval = 0;
+ }
+
+error:
+ *transferred = done;
+
+ dev_dbg(dev, "%s: before kill\n", __func__);
+ /* Attention: killing urbs can take a long time (2 ms) */
+ usb_kill_anchored_urbs(&file_data->submitted);
+ dev_dbg(dev, "%s: after kill\n", __func__);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ file_data->in_urbs_used = 0;
+ file_data->in_status = 0; /* no spinlock needed here */
+ dev_dbg(dev, "%s: done=%u ret=%d\n", __func__, done, retval);
+
+ return retval;
+}
+
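/* Contract of usbtmc_generic_read() with USBTMC_FLAG_ASYNC: a first call
 * finding in_anchor empty only fills "submitted" with up to
 * MAX_URBS_IN_FLIGHT urbs and returns -EAGAIN; once completions have landed
 * in in_anchor, a later call copies them out without blocking. Without the
 * flag, the function sleeps on wait_bulk_in (bounded by file_data->timeout)
 * and resubmits urbs until the request or a short packet ends the transfer.
 */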
+static ssize_t usbtmc_ioctl_generic_read(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_read(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+static void usbtmc_write_bulk_cb(struct urb *urb)
+{
+ struct usbtmc_file_data *file_data = urb->context;
+ int wakeup = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&file_data->err_lock, flags);
+ file_data->out_transfer_size += urb->actual_length;
+
+ /* sync/async unlink faults aren't errors */
+ if (urb->status) {
+ if (!(urb->status == -ENOENT ||
+ urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN))
+ dev_err(&file_data->data->intf->dev,
+ "%s - nonzero write bulk status received: %d\n",
+ __func__, urb->status);
+
+ if (!file_data->out_status) {
+ file_data->out_status = urb->status;
+ wakeup = 1;
+ }
+ }
+ spin_unlock_irqrestore(&file_data->err_lock, flags);
+
+ dev_dbg(&file_data->data->intf->dev,
+ "%s - write bulk total size: %u\n",
+ __func__, file_data->out_transfer_size);
+
+ up(&file_data->limit_write_sem);
+ if (usb_anchor_empty(&file_data->submitted) || wakeup)
+ wake_up_interruptible(&file_data->data->waitq);
+}
+
+static ssize_t usbtmc_generic_write(struct usbtmc_file_data *file_data,
+ const void __user *user_buffer,
+ u32 transfer_size,
+ u32 *transferred,
+ u32 flags)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev;
+ u32 done = 0;
+ u32 remaining;
+ unsigned long expire;
+ const u32 bufsize = USBTMC_BUFSIZE;
+ struct urb *urb = NULL;
+ int retval = 0;
+ u32 timeout;
+
+ *transferred = 0;
+
+ /* Get pointer to private data structure */
+ dev = &data->intf->dev;
+
+ dev_dbg(dev, "%s: size=%u flags=0x%X sema=%u\n",
+ __func__, transfer_size, flags,
+ file_data->limit_write_sem.count);
+
+ if (flags & USBTMC_FLAG_APPEND) {
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0)
+ return retval;
+ } else {
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
+ }
+
+ remaining = transfer_size;
+ if (remaining > INT_MAX)
+ remaining = INT_MAX;
+
+ timeout = file_data->timeout;
+ expire = msecs_to_jiffies(timeout);
+
+ while (remaining > 0) {
+ u32 this_part, aligned;
+ u8 *buffer = NULL;
+
+ if (flags & USBTMC_FLAG_ASYNC) {
+ if (down_trylock(&file_data->limit_write_sem)) {
+ retval = done ? 0 : -EAGAIN;
+ goto exit;
+ }
+ } else {
+ retval = down_timeout(&file_data->limit_write_sem,
+ expire);
+ if (retval < 0) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ spin_lock_irq(&file_data->err_lock);
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+ if (retval < 0) {
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ /* prepare next urb to send */
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+ buffer = urb->transfer_buffer;
+
+ if (remaining > bufsize)
+ this_part = bufsize;
+ else
+ this_part = remaining;
+
+ if (copy_from_user(buffer, user_buffer + done, this_part)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, this_part, true);
+
+ /* fill bulk with 32 bit alignment to meet USBTMC specification
+ * ((size + 3) & ~3) rounds up and simplifies user code
+ */
+ aligned = (this_part + 3) & ~3;
+ dev_dbg(dev, "write(size:%u align:%u done:%u)\n",
+ (unsigned int)this_part,
+ (unsigned int)aligned,
+ (unsigned int)done);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto error;
+ }
+
+ usb_free_urb(urb);
+ urb = NULL; /* urb will be finally released by usb driver */
+
+ remaining -= this_part;
+ done += this_part;
+ }
+
+ /* All urbs are on the fly */
+ if (!(flags & USBTMC_FLAG_ASYNC)) {
+ if (!usb_wait_anchor_empty_timeout(&file_data->submitted,
+ timeout)) {
+ retval = -ETIMEDOUT;
+ goto error;
+ }
+ }
+
+ retval = 0;
+ goto exit;
+
+error:
+ usb_kill_anchored_urbs(&file_data->submitted);
+exit:
+ usb_free_urb(urb);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (!(flags & USBTMC_FLAG_ASYNC))
+ done = file_data->out_transfer_size;
+ if (!retval && file_data->out_status)
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ *transferred = done;
+
+ dev_dbg(dev, "%s: done=%u, retval=%d, urbstat=%d\n",
+ __func__, done, retval, file_data->out_status);
+
+ return retval;
+}
+
+static ssize_t usbtmc_ioctl_generic_write(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_message msg;
+ ssize_t retval = 0;
+
+ /* mutex already locked */
+
+ if (copy_from_user(&msg, arg, sizeof(struct usbtmc_message)))
+ return -EFAULT;
+
+ retval = usbtmc_generic_write(file_data, msg.message,
+ msg.transfer_size, &msg.transferred,
+ msg.flags);
+
+ if (put_user(msg.transferred,
+ &((struct usbtmc_message __user *)arg)->transferred))
+ return -EFAULT;
+
+ return retval;
+}
+
+/*
+ * Get the generic write result
+ */
+static ssize_t usbtmc_ioctl_write_result(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ u32 transferred;
+ int retval;
+
+ spin_lock_irq(&file_data->err_lock);
+ transferred = file_data->out_transfer_size;
+ retval = file_data->out_status;
+ spin_unlock_irq(&file_data->err_lock);
+
+ if (put_user(transferred, (__u32 __user *)arg))
+ return -EFAULT;
+
+ return retval;
+}
+
/*
* Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-OUT endpoint.
* @transfer_size: number of bytes to request from the device.
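
Taken together, the generic helpers give user space a raw, non-blocking bulk engine; they move opaque bytes, so any USBTMC message headers must be built by the caller. A hedged end-to-end sketch (patched linux/usb/tmc.h assumed, fd open, payload sizes and the 5 s poll timeout illustrative):

#include <errno.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

static int async_roundtrip(int fd, void *out, __u32 outlen,
			   void *in, __u32 inlen)
{
	struct usbtmc_message wmsg = {
		.transfer_size = outlen,
		.flags = USBTMC_FLAG_ASYNC,
		.message = out,
	};
	struct usbtmc_message rmsg = {
		.transfer_size = inlen,
		.flags = USBTMC_FLAG_ASYNC,
		.message = in,
	};
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	__u32 written = 0;

	/* queue the OUT data; urbs drain in the background */
	if (ioctl(fd, USBTMC_IOCTL_WRITE, &wmsg) < 0)
		return -errno;

	poll(&pfd, 1, 5000);	/* EPOLLOUT: submitted anchor is empty */
	if (ioctl(fd, USBTMC_IOCTL_WRITE_RESULT, &written) < 0)
		return -errno;

	/* the first READ only arms IN urbs and fails with EAGAIN */
	if (ioctl(fd, USBTMC_IOCTL_READ, &rmsg) < 0 && errno != EAGAIN)
		return -errno;

	pfd.events = POLLIN;
	poll(&pfd, 1, 5000);	/* EPOLLIN: completed urbs in in_anchor */

	if (ioctl(fd, USBTMC_IOCTL_READ, &rmsg) < 0)
		return -errno;
	return (int)rmsg.transferred;
}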
@@ -619,7 +1277,7 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
* Also updates bTag_last_write.
*/
static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
- size_t transfer_size)
+ u32 transfer_size)
{
struct usbtmc_device_data *data = file_data->data;
int retval;
@@ -662,12 +1320,11 @@ static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
data->bTag++;
kfree(buffer);
- if (retval < 0) {
- dev_err(&data->intf->dev, "usb_bulk_msg in send_request_dev_dep_msg_in() returned %d\n", retval);
- return retval;
- }
+ if (retval < 0)
+ dev_err(&data->intf->dev, "%s returned %d\n",
+ __func__, retval);
- return 0;
+ return retval;
}
static ssize_t usbtmc_read(struct file *filp, char __user *buf,
@@ -676,20 +1333,20 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
struct device *dev;
+ const u32 bufsize = USBTMC_BUFSIZE;
u32 n_characters;
u8 *buffer;
int actual;
- size_t done;
- size_t remaining;
+ u32 done = 0;
+ u32 remaining;
int retval;
- size_t this_part;
/* Get pointer to private data structure */
file_data = filp->private_data;
data = file_data->data;
dev = &data->intf->dev;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(bufsize, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -699,124 +1356,116 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
goto exit;
}
- dev_dbg(dev, "usb_bulk_msg_in: count(%zu)\n", count);
+ if (count > INT_MAX)
+ count = INT_MAX;
+
+ dev_dbg(dev, "%s(count:%zu)\n", __func__, count);
retval = send_request_dev_dep_msg_in(file_data, count);
if (retval < 0) {
- if (data->auto_abort)
+ if (file_data->auto_abort)
usbtmc_ioctl_abort_bulk_out(data);
goto exit;
}
/* Loop until we have fetched everything we requested */
remaining = count;
- this_part = remaining;
- done = 0;
+ actual = 0;
- while (remaining > 0) {
- /* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
- usb_rcvbulkpipe(data->usb_dev,
- data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER, &actual,
- file_data->timeout);
-
- dev_dbg(dev, "usb_bulk_msg: retval(%u), done(%zu), remaining(%zu), actual(%d)\n", retval, done, remaining, actual);
+ /* Send bulk URB */
+ retval = usb_bulk_msg(data->usb_dev,
+ usb_rcvbulkpipe(data->usb_dev,
+ data->bulk_in),
+ buffer, bufsize, &actual,
+ file_data->timeout);
- /* Store bTag (in case we need to abort) */
- data->bTag_last_read = data->bTag;
+ dev_dbg(dev, "%s: bulk_msg retval(%u), actual(%d)\n",
+ __func__, retval, actual);
- if (retval < 0) {
- dev_dbg(dev, "Unable to read data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
-
- /* Parse header in first packet */
- if (done == 0) {
- /* Sanity checks for the header */
- if (actual < USBTMC_HEADER_SIZE) {
- dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Store bTag (in case we need to abort) */
+ data->bTag_last_read = data->bTag;
- if (buffer[0] != 2) {
- dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n", buffer[0]);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (retval < 0) {
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (buffer[1] != data->bTag_last_write) {
- dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n", buffer[1], data->bTag_last_write);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ /* Sanity checks for the header */
+ if (actual < USBTMC_HEADER_SIZE) {
+ dev_err(dev, "Device sent too small first packet: %u < %u\n",
+ actual, USBTMC_HEADER_SIZE);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* How many characters did the instrument send? */
- n_characters = buffer[4] +
- (buffer[5] << 8) +
- (buffer[6] << 16) +
- (buffer[7] << 24);
+ if (buffer[0] != 2) {
+ dev_err(dev, "Device sent reply with wrong MsgID: %u != 2\n",
+ buffer[0]);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- if (n_characters > this_part) {
- dev_err(dev, "Device wants to return more data than requested: %u > %zu\n", n_characters, count);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_in(data);
- goto exit;
- }
+ if (buffer[1] != data->bTag_last_write) {
+ dev_err(dev, "Device sent reply with wrong bTag: %u != %u\n",
+ buffer[1], data->bTag_last_write);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Remove the USBTMC header */
- actual -= USBTMC_HEADER_SIZE;
+ /* How many characters did the instrument send? */
+ n_characters = buffer[4] +
+ (buffer[5] << 8) +
+ (buffer[6] << 16) +
+ (buffer[7] << 24);
- /* Check if the message is smaller than requested */
- if (remaining > n_characters)
- remaining = n_characters;
- /* Remove padding if it exists */
- if (actual > remaining)
- actual = remaining;
+ file_data->bmTransferAttributes = buffer[8];
- dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n", n_characters, buffer[8]);
+ dev_dbg(dev, "Bulk-IN header: N_characters(%u), bTransAttr(%u)\n",
+ n_characters, buffer[8]);
- remaining -= actual;
+ if (n_characters > remaining) {
+ dev_err(dev, "Device wants to return more data than requested: %u > %zu\n",
+ n_characters, count);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_in(data);
+ goto exit;
+ }
- /* Terminate if end-of-message bit received from device */
- if ((buffer[8] & 0x01) && (actual >= n_characters))
- remaining = 0;
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
- dev_dbg(dev, "Bulk-IN header: remaining(%zu), buf(%p), buffer(%p) done(%zu)\n", remaining,buf,buffer,done);
+ remaining = n_characters;
+ /* Remove the USBTMC header */
+ actual -= USBTMC_HEADER_SIZE;
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, &buffer[USBTMC_HEADER_SIZE], actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
- else {
- if (actual > remaining)
- actual = remaining;
+ /* Remove padding if it exists */
+ if (actual > remaining)
+ actual = remaining;
- remaining -= actual;
+ remaining -= actual;
- dev_dbg(dev, "Bulk-IN header cont: actual(%u), done(%zu), remaining(%zu), buf(%p), buffer(%p)\n", actual, done, remaining,buf,buffer);
+ /* Copy buffer to user space */
+ if (copy_to_user(buf, &buffer[USBTMC_HEADER_SIZE], actual)) {
+ /* There must have been an addressing problem */
+ retval = -EFAULT;
+ goto exit;
+ }
- /* Copy buffer to user space */
- if (copy_to_user(buf + done, buffer, actual)) {
- /* There must have been an addressing problem */
- retval = -EFAULT;
- goto exit;
- }
- done += actual;
- }
+ if ((actual + USBTMC_HEADER_SIZE) == bufsize) {
+ retval = usbtmc_generic_read(file_data, buf + actual,
+ remaining,
+ &done,
+ USBTMC_FLAG_IGNORE_TRAILER);
+ if (retval < 0)
+ goto exit;
}
+ done += actual;
/* Update file position value */
*f_pos = *f_pos + done;
@@ -833,113 +1482,152 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
{
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
+ struct urb *urb = NULL;
+ ssize_t retval = 0;
u8 *buffer;
- int retval;
- int actual;
- unsigned long int n_bytes;
- int remaining;
- int done;
- int this_part;
+ u32 remaining, done;
+ u32 transfersize, aligned, buflen;
file_data = filp->private_data;
data = file_data->data;
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
mutex_lock(&data->io_mutex);
+
if (data->zombie) {
retval = -ENODEV;
goto exit;
}
- remaining = count;
done = 0;
- while (remaining > 0) {
- if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE) {
- this_part = USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE;
- buffer[8] = 0;
- } else {
- this_part = remaining;
- buffer[8] = file_data->eom_val;
- }
+ spin_lock_irq(&file_data->err_lock);
+ file_data->out_transfer_size = 0;
+ file_data->out_status = 0;
+ spin_unlock_irq(&file_data->err_lock);
- /* Setup IO buffer for DEV_DEP_MSG_OUT message */
- buffer[0] = 1;
- buffer[1] = data->bTag;
- buffer[2] = ~data->bTag;
- buffer[3] = 0; /* Reserved */
- buffer[4] = this_part >> 0;
- buffer[5] = this_part >> 8;
- buffer[6] = this_part >> 16;
- buffer[7] = this_part >> 24;
- /* buffer[8] is set above... */
- buffer[9] = 0; /* Reserved */
- buffer[10] = 0; /* Reserved */
- buffer[11] = 0; /* Reserved */
-
- if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf + done, this_part)) {
- retval = -EFAULT;
- goto exit;
- }
+ if (!count)
+ goto exit;
- n_bytes = roundup(USBTMC_HEADER_SIZE + this_part, 4);
- memset(buffer + USBTMC_HEADER_SIZE + this_part, 0, n_bytes - (USBTMC_HEADER_SIZE + this_part));
+ if (down_trylock(&file_data->limit_write_sem)) {
+ /* previous calls were async */
+ retval = -EBUSY;
+ goto exit;
+ }
- do {
- retval = usb_bulk_msg(data->usb_dev,
- usb_sndbulkpipe(data->usb_dev,
- data->bulk_out),
- buffer, n_bytes,
- &actual, file_data->timeout);
- if (retval != 0)
- break;
- n_bytes -= actual;
- } while (n_bytes);
-
- data->bTag_last_write = data->bTag;
+ urb = usbtmc_create_urb();
+ if (!urb) {
+ retval = -ENOMEM;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ buffer = urb->transfer_buffer;
+ buflen = urb->transfer_buffer_length;
+
+ if (count > INT_MAX) {
+ transfersize = INT_MAX;
+ buffer[8] = 0;
+ } else {
+ transfersize = count;
+ buffer[8] = file_data->eom_val;
+ }
+
+ /* Setup IO buffer for DEV_DEP_MSG_OUT message */
+ buffer[0] = 1;
+ buffer[1] = data->bTag;
+ buffer[2] = ~data->bTag;
+ buffer[3] = 0; /* Reserved */
+ buffer[4] = transfersize >> 0;
+ buffer[5] = transfersize >> 8;
+ buffer[6] = transfersize >> 16;
+ buffer[7] = transfersize >> 24;
+ /* buffer[8] is set above... */
+ buffer[9] = 0; /* Reserved */
+ buffer[10] = 0; /* Reserved */
+ buffer[11] = 0; /* Reserved */
+
+ remaining = transfersize;
+
+ if (transfersize + USBTMC_HEADER_SIZE > buflen) {
+ transfersize = buflen - USBTMC_HEADER_SIZE;
+ aligned = buflen;
+ } else {
+ aligned = (transfersize + (USBTMC_HEADER_SIZE + 3)) & ~3;
+ }
+
+ if (copy_from_user(&buffer[USBTMC_HEADER_SIZE], buf, transfersize)) {
+ retval = -EFAULT;
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ dev_dbg(&data->intf->dev, "%s(size:%u align:%u)\n", __func__,
+ (unsigned int)transfersize, (unsigned int)aligned);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, aligned, true);
+
+ usb_fill_bulk_urb(urb, data->usb_dev,
+ usb_sndbulkpipe(data->usb_dev, data->bulk_out),
+ urb->transfer_buffer, aligned,
+ usbtmc_write_bulk_cb, file_data);
+
+ usb_anchor_urb(urb, &file_data->submitted);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(retval)) {
+ usb_unanchor_urb(urb);
+ up(&file_data->limit_write_sem);
+ goto exit;
+ }
+
+ remaining -= transfersize;
+
+ data->bTag_last_write = data->bTag;
+ data->bTag++;
+
+ if (!data->bTag)
data->bTag++;
- if (!data->bTag)
- data->bTag++;
+ /* call generic_write even when remaining = 0 */
+ retval = usbtmc_generic_write(file_data, buf + transfersize, remaining,
+ &done, USBTMC_FLAG_APPEND);
+ /* truncate alignment bytes */
+ if (done > remaining)
+ done = remaining;
- if (retval < 0) {
- dev_err(&data->intf->dev,
- "Unable to send data, error %d\n", retval);
- if (data->auto_abort)
- usbtmc_ioctl_abort_bulk_out(data);
- goto exit;
- }
+ /* add size of first urb */
+ done += transfersize;
- remaining -= this_part;
- done += this_part;
+ if (retval < 0) {
+ usb_kill_anchored_urbs(&file_data->submitted);
+
+ dev_err(&data->intf->dev,
+ "Unable to send data, error %d\n", (int)retval);
+ if (file_data->auto_abort)
+ usbtmc_ioctl_abort_bulk_out(data);
+ goto exit;
}
- retval = count;
+ retval = done;
exit:
+ usb_free_urb(urb);
mutex_unlock(&data->io_mutex);
- kfree(buffer);
return retval;
}
static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
{
- struct usb_host_interface *current_setting;
- struct usb_endpoint_descriptor *desc;
struct device *dev;
u8 *buffer;
int rv;
int n;
int actual = 0;
- int max_size;
dev = &data->intf->dev;
dev_dbg(dev, "Sending INITIATE_CLEAR request\n");
- buffer = kmalloc(USBTMC_SIZE_IOBUFFER, GFP_KERNEL);
+ buffer = kmalloc(USBTMC_BUFSIZE, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -947,7 +1635,7 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INITIATE_CLEAR,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 1, USBTMC_TIMEOUT);
+ 0, 0, buffer, 1, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -961,22 +1649,6 @@ static int usbtmc_ioctl_clear(struct usbtmc_device_data *data)
goto exit;
}
- max_size = 0;
- current_setting = data->intf->cur_altsetting;
- for (n = 0; n < current_setting->desc.bNumEndpoints; n++) {
- desc = &current_setting->endpoint[n].desc;
- if (desc->bEndpointAddress == data->bulk_in)
- max_size = usb_endpoint_maxp(desc);
- }
-
- if (max_size == 0) {
- dev_err(dev, "Couldn't get wMaxPacketSize\n");
- rv = -EPERM;
- goto exit;
- }
-
- dev_dbg(dev, "wMaxPacketSize is %d\n", max_size);
-
n = 0;
usbtmc_clear_check_status:
@@ -987,7 +1659,7 @@ usbtmc_clear_check_status:
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_CHECK_CLEAR_STATUS,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 2, USBTMC_TIMEOUT);
+ 0, 0, buffer, 2, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto exit;
@@ -1004,15 +1676,20 @@ usbtmc_clear_check_status:
goto exit;
}
- if (buffer[1] == 1)
+ if ((buffer[1] & 1) != 0) {
do {
dev_dbg(dev, "Reading from bulk in EP\n");
+ actual = 0;
rv = usb_bulk_msg(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
- buffer, USBTMC_SIZE_IOBUFFER,
- &actual, USBTMC_TIMEOUT);
+ buffer, USBTMC_BUFSIZE,
+ &actual, USB_CTRL_GET_TIMEOUT);
+
+ print_hex_dump_debug("usbtmc ", DUMP_PREFIX_NONE,
+ 16, 1, buffer, actual, true);
+
n++;
if (rv < 0) {
@@ -1020,10 +1697,15 @@ usbtmc_clear_check_status:
rv);
goto exit;
}
- } while ((actual == max_size) &&
+ } while ((actual == USBTMC_BUFSIZE) &&
(n < USBTMC_MAX_READS_TO_CLEAR_BULK_IN));
+ } else {
+ /* do not stress device with subsequent requests */
+ msleep(50);
+ n++;
+ }
- if (actual == max_size) {
+ if (n >= USBTMC_MAX_READS_TO_CLEAR_BULK_IN) {
dev_err(dev, "Couldn't clear device buffer within %d cycles\n",
USBTMC_MAX_READS_TO_CLEAR_BULK_IN);
rv = -EPERM;
@@ -1037,7 +1719,7 @@ usbtmc_clear_bulk_out_halt:
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
if (rv < 0) {
- dev_err(dev, "usb_control_msg returned %d\n", rv);
+ dev_err(dev, "usb_clear_halt returned %d\n", rv);
goto exit;
}
rv = 0;
@@ -1054,12 +1736,9 @@ static int usbtmc_ioctl_clear_out_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_sndbulkpipe(data->usb_dev, data->bulk_out));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
- return 0;
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
}
static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
@@ -1069,11 +1748,33 @@ static int usbtmc_ioctl_clear_in_halt(struct usbtmc_device_data *data)
rv = usb_clear_halt(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev, data->bulk_in));
- if (rv < 0) {
- dev_err(&data->usb_dev->dev, "usb_control_msg returned %d\n",
- rv);
- return rv;
- }
+ if (rv < 0)
+ dev_err(&data->usb_dev->dev, "%s returned %d\n", __func__, rv);
+ return rv;
+}
+
+static int usbtmc_ioctl_cancel_io(struct usbtmc_file_data *file_data)
+{
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = -ECANCELED;
+ file_data->out_status = -ECANCELED;
+ spin_unlock_irq(&file_data->err_lock);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ return 0;
+}
+
+static int usbtmc_ioctl_cleanup_io(struct usbtmc_file_data *file_data)
+{
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ spin_lock_irq(&file_data->err_lock);
+ file_data->in_status = 0;
+ file_data->in_transfer_size = 0;
+ file_data->out_status = 0;
+ file_data->out_transfer_size = 0;
+ spin_unlock_irq(&file_data->err_lock);
+
+ file_data->in_urbs_used = 0;
return 0;
}
@@ -1090,7 +1791,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
rv = usb_control_msg(data->usb_dev, usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_GET_CAPABILITIES,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x18, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x18, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
goto err_out;
@@ -1147,72 +1848,6 @@ static const struct attribute_group capability_attr_grp = {
.attrs = capability_attrs,
};
-static ssize_t TermChar_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- return sprintf(buf, "%c\n", data->TermChar);
-}
-
-static ssize_t TermChar_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbtmc_device_data *data = usb_get_intfdata(intf);
-
- if (count < 1)
- return -EINVAL;
- data->TermChar = buf[0];
- return count;
-}
-static DEVICE_ATTR_RW(TermChar);
-
-#define data_attribute(name) \
-static ssize_t name##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- \
- return sprintf(buf, "%d\n", data->name); \
-} \
-static ssize_t name##_store(struct device *dev, \
- struct device_attribute *attr, \
- const char *buf, size_t count) \
-{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbtmc_device_data *data = usb_get_intfdata(intf); \
- ssize_t result; \
- unsigned val; \
- \
- result = sscanf(buf, "%u\n", &val); \
- if (result != 1) \
- result = -EINVAL; \
- data->name = val; \
- if (result < 0) \
- return result; \
- else \
- return count; \
-} \
-static DEVICE_ATTR_RW(name)
-
-data_attribute(TermCharEnabled);
-data_attribute(auto_abort);
-
-static struct attribute *data_attrs[] = {
- &dev_attr_TermChar.attr,
- &dev_attr_TermCharEnabled.attr,
- &dev_attr_auto_abort.attr,
- NULL,
-};
-
-static const struct attribute_group data_attr_grp = {
- .attrs = data_attrs,
-};
-
static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
{
struct device *dev;
@@ -1229,7 +1864,7 @@ static int usbtmc_ioctl_indicator_pulse(struct usbtmc_device_data *data)
usb_rcvctrlpipe(data->usb_dev, 0),
USBTMC_REQUEST_INDICATOR_PULSE,
USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
- 0, 0, buffer, 0x01, USBTMC_TIMEOUT);
+ 0, 0, buffer, 0x01, USB_CTRL_GET_TIMEOUT);
if (rv < 0) {
dev_err(dev, "usb_control_msg returned %d\n", rv);
@@ -1250,6 +1885,63 @@ exit:
return rv;
}
+static int usbtmc_ioctl_request(struct usbtmc_device_data *data,
+ void __user *arg)
+{
+ struct device *dev = &data->intf->dev;
+ struct usbtmc_ctrlrequest request;
+ u8 *buffer = NULL;
+ int rv;
+ unsigned long res;
+
+ res = copy_from_user(&request, arg, sizeof(struct usbtmc_ctrlrequest));
+ if (res)
+ return -EFAULT;
+
+ if (request.req.wLength > USBTMC_BUFSIZE)
+ return -EMSGSIZE;
+
+ if (request.req.wLength) {
+ buffer = kmalloc(request.req.wLength, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ if ((request.req.bRequestType & USB_DIR_IN) == 0) {
+ /* Send control data to device */
+ res = copy_from_user(buffer, request.data,
+ request.req.wLength);
+ if (res) {
+ rv = -EFAULT;
+ goto exit;
+ }
+ }
+ }
+
+ rv = usb_control_msg(data->usb_dev,
+ usb_rcvctrlpipe(data->usb_dev, 0),
+ request.req.bRequest,
+ request.req.bRequestType,
+ request.req.wValue,
+ request.req.wIndex,
+ buffer, request.req.wLength, USB_CTRL_GET_TIMEOUT);
+
+ if (rv < 0) {
+ dev_err(dev, "%s failed %d\n", __func__, rv);
+ goto exit;
+ }
+
+ if (rv && (request.req.bRequestType & USB_DIR_IN)) {
+ /* Read control data from device */
+ res = copy_to_user(request.data, buffer, rv);
+ if (res)
+ rv = -EFAULT;
+ }
+
+ exit:
+ kfree(buffer);
+ return rv;
+}
+
/*
* Get the usb timeout value
*/
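
usbtmc_ioctl_request() exposes bounded class/vendor control transfers (wLength capped at USBTMC_BUFSIZE), copying data in or out according to the direction bit in bRequestType. A hedged sketch of fetching the capability registers through it (constants from the patched linux/usb/tmc.h plus linux/usb/ch9.h; the 0x18 length mirrors get_capabilities() above, and wIndex is assumed to be interface 0):

unsigned char caps[0x18] = { 0 };
struct usbtmc_ctrlrequest req = {
	.req = {
		.bRequestType = USB_DIR_IN | USB_TYPE_CLASS |
				USB_RECIP_INTERFACE,
		.bRequest = USBTMC_REQUEST_GET_CAPABILITIES,
		.wValue = 0,
		.wIndex = 0,
		.wLength = sizeof(caps),
	},
	.data = caps,
};

if (ioctl(fd, USBTMC_IOCTL_CTRL_REQUEST, &req) < 0)
	perror("USBTMC_IOCTL_CTRL_REQUEST");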
@@ -1331,6 +2023,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct usbtmc_file_data *file_data;
struct usbtmc_device_data *data;
int retval = -EBADRQC;
+ __u8 tmp_byte;
file_data = file->private_data;
data = file_data->data;
@@ -1366,6 +2059,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
retval = usbtmc_ioctl_abort_bulk_in(data);
break;
+ case USBTMC_IOCTL_CTRL_REQUEST:
+ retval = usbtmc_ioctl_request(data, (void __user *)arg);
+ break;
+
case USBTMC_IOCTL_GET_TIMEOUT:
retval = usbtmc_ioctl_get_timeout(file_data,
(void __user *)arg);
@@ -1386,12 +2083,29 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
(void __user *)arg);
break;
+ case USBTMC_IOCTL_WRITE:
+ retval = usbtmc_ioctl_generic_write(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_READ:
+ retval = usbtmc_ioctl_generic_read(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_WRITE_RESULT:
+ retval = usbtmc_ioctl_write_result(file_data,
+ (void __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_API_VERSION:
+ retval = put_user(USBTMC_API_VERSION,
+ (__u32 __user *)arg);
+ break;
+
case USBTMC488_IOCTL_GET_CAPS:
- retval = copy_to_user((void __user *)arg,
- &data->usb488_caps,
- sizeof(data->usb488_caps));
- if (retval)
- retval = -EFAULT;
+ retval = put_user(data->usb488_caps,
+ (unsigned char __user *)arg);
break;
case USBTMC488_IOCTL_READ_STB:
@@ -1417,6 +2131,30 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case USBTMC488_IOCTL_TRIGGER:
retval = usbtmc488_ioctl_trigger(file_data);
break;
+
+ case USBTMC488_IOCTL_WAIT_SRQ:
+ retval = usbtmc488_ioctl_wait_srq(file_data,
+ (__u32 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_MSG_IN_ATTR:
+ retval = put_user(file_data->bmTransferAttributes,
+ (__u8 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_AUTO_ABORT:
+ retval = get_user(tmp_byte, (unsigned char __user *)arg);
+ if (retval == 0)
+ file_data->auto_abort = !!tmp_byte;
+ break;
+
+ case USBTMC_IOCTL_CANCEL_IO:
+ retval = usbtmc_ioctl_cancel_io(file_data);
+ break;
+
+ case USBTMC_IOCTL_CLEANUP_IO:
+ retval = usbtmc_ioctl_cleanup_io(file_data);
+ break;
}
skip_io_on_zombie:
@@ -1446,7 +2184,28 @@ static __poll_t usbtmc_poll(struct file *file, poll_table *wait)
poll_wait(file, &data->waitq, wait);
- mask = (atomic_read(&file_data->srq_asserted)) ? EPOLLPRI : 0;
+ /* Note that EPOLLPRI is now assigned to SRQ, and
+ * EPOLLIN|EPOLLRDNORM to normal read data.
+ */
+ mask = 0;
+ if (atomic_read(&file_data->srq_asserted))
+ mask |= EPOLLPRI;
+
+ /* Note that the 'submitted' anchor holds the URBs for both BULK IN
+ * and OUT, so EPOLLOUT is signaled when BULK OUT is empty and all
+ * BULK IN URBs have completed and moved to in_anchor.
+ */
+ if (usb_anchor_empty(&file_data->submitted))
+ mask |= (EPOLLOUT | EPOLLWRNORM);
+ if (!usb_anchor_empty(&file_data->in_anchor))
+ mask |= (EPOLLIN | EPOLLRDNORM);
+
+ spin_lock_irq(&file_data->err_lock);
+ if (file_data->in_status || file_data->out_status)
+ mask |= EPOLLERR;
+ spin_unlock_irq(&file_data->err_lock);
+
+ dev_dbg(&data->intf->dev, "poll mask = %x\n", mask);
no_poll:
mutex_unlock(&data->io_mutex);
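Taken together, the reworked mask lets a user-space event loop tell SRQ
notifications apart from completed asynchronous transfers. A minimal sketch,
assuming a /dev/usbtmc0 file descriptor with transfers queued through the
asynchronous ioctls added in this series; handle_srq() and drain_reads() are
hypothetical application hooks:

    #include <poll.h>

    extern void handle_srq(int fd);         /* hypothetical: read status byte */
    extern void drain_reads(int fd);        /* hypothetical: USBTMC_IOCTL_READ */

    static int wait_for_events(int fd)
    {
            struct pollfd pfd = {
                    .fd = fd,
                    .events = POLLIN | POLLOUT | POLLPRI,
            };

            for (;;) {
                    if (poll(&pfd, 1, 5000) <= 0)
                            return -1;              /* timeout or error */
                    if (pfd.revents & POLLPRI)
                            handle_srq(fd);         /* SRQ asserted */
                    if (pfd.revents & POLLIN)
                            drain_reads(fd);        /* BULK IN data ready */
                    if (pfd.revents & POLLERR)
                            return -1;              /* in_status/out_status set */
                    if (pfd.revents & POLLOUT)
                            return 0;               /* all queued URBs completed */
            }
    }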
@@ -1459,6 +2218,7 @@ static const struct file_operations fops = {
.write = usbtmc_write,
.open = usbtmc_open,
.release = usbtmc_release,
+ .flush = usbtmc_flush,
.unlocked_ioctl = usbtmc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = usbtmc_ioctl,
@@ -1552,7 +2312,9 @@ static void usbtmc_free_int(struct usbtmc_device_data *data)
return;
usb_kill_urb(data->iin_urb);
kfree(data->iin_buffer);
+ data->iin_buffer = NULL;
usb_free_urb(data->iin_urb);
+ data->iin_urb = NULL;
kref_put(&data->kref, usbtmc_delete);
}
@@ -1585,8 +2347,6 @@ static int usbtmc_probe(struct usb_interface *intf,
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
- data->TermCharEnabled = 0;
- data->TermChar = '\n';
/* 2 <= bTag <= 127 USBTMC-USB488 subclass specification 4.3.1 */
data->iin_bTag = 2;
@@ -1603,6 +2363,7 @@ static int usbtmc_probe(struct usb_interface *intf,
}
data->bulk_in = bulk_in->bEndpointAddress;
+ data->wMaxPacketSize = usb_endpoint_maxp(bulk_in);
dev_dbg(&intf->dev, "Found bulk in endpoint at %u\n", data->bulk_in);
data->bulk_out = bulk_out->bEndpointAddress;
@@ -1659,12 +2420,10 @@ static int usbtmc_probe(struct usb_interface *intf,
}
}
- retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
-
retcode = usb_register_dev(intf, &usbtmc_class);
if (retcode) {
- dev_err(&intf->dev, "Not able to get a minor"
- " (base %u, slice default): %d\n", USBTMC_MINOR_BASE,
+ dev_err(&intf->dev, "Not able to get a minor (base %u, slice default): %d\n",
+ USBTMC_MINOR_BASE,
retcode);
goto error_register;
}
@@ -1674,7 +2433,6 @@ static int usbtmc_probe(struct usb_interface *intf,
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
usbtmc_free_int(data);
err_put:
kref_put(&data->kref, usbtmc_delete);
@@ -1684,26 +2442,103 @@ err_put:
static void usbtmc_disconnect(struct usb_interface *intf)
{
struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
- sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
wake_up_interruptible_all(&data->waitq);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+ }
mutex_unlock(&data->io_mutex);
usbtmc_free_int(data);
kref_put(&data->kref, usbtmc_delete);
}
+static void usbtmc_draw_down(struct usbtmc_file_data *file_data)
+{
+ int time;
+
+ time = usb_wait_anchor_empty_timeout(&file_data->submitted, 1000);
+ if (!time)
+ usb_kill_anchored_urbs(&file_data->submitted);
+ usb_scuttle_anchored_urbs(&file_data->in_anchor);
+}
+
static int usbtmc_suspend(struct usb_interface *intf, pm_message_t message)
{
- /* this driver does not have pending URBs */
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_draw_down(file_data);
+ }
+
+ if (data->iin_ep_present && data->iin_urb)
+ usb_kill_urb(data->iin_urb);
+
+ mutex_unlock(&data->io_mutex);
return 0;
}
static int usbtmc_resume(struct usb_interface *intf)
{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ int retcode = 0;
+
+ if (data->iin_ep_present && data->iin_urb)
+ retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
+ if (retcode)
+ dev_err(&intf->dev, "Failed to submit iin_urb\n");
+
+ return retcode;
+}
+
+static int usbtmc_pre_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+ struct list_head *elem;
+
+ if (!data)
+ return 0;
+
+ mutex_lock(&data->io_mutex);
+
+ list_for_each(elem, &data->file_list) {
+ struct usbtmc_file_data *file_data;
+
+ file_data = list_entry(elem,
+ struct usbtmc_file_data,
+ file_elem);
+ usbtmc_ioctl_cancel_io(file_data);
+ }
+
+ return 0;
+}
+
+static int usbtmc_post_reset(struct usb_interface *intf)
+{
+ struct usbtmc_device_data *data = usb_get_intfdata(intf);
+
+ mutex_unlock(&data->io_mutex);
+
return 0;
}
@@ -1714,6 +2549,8 @@ static struct usb_driver usbtmc_driver = {
.disconnect = usbtmc_disconnect,
.suspend = usbtmc_suspend,
.resume = usbtmc_resume,
+ .pre_reset = usbtmc_pre_reset,
+ .post_reset = usbtmc_post_reset,
};
module_usb_driver(usbtmc_driver);
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 77eef8acff94..f641342cdec0 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -101,12 +101,8 @@ void hcd_buffer_destroy(struct usb_hcd *hcd)
return;
for (i = 0; i < HCD_BUFFER_POOLS; i++) {
- struct dma_pool *pool = hcd->pool[i];
-
- if (pool) {
- dma_pool_destroy(pool);
- hcd->pool[i] = NULL;
- }
+ dma_pool_destroy(hcd->pool[i]);
+ hcd->pool[i] = NULL;
}
}
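The removed guard was redundant: like kfree(), dma_pool_destroy() is a no-op
when passed NULL, so the unconditional call plus pointer reset is equivalent
and shorter.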
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index a1f225f077cd..53564386ed57 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -510,7 +510,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
struct device *dev;
- struct usb_device *udev;
int retval = 0;
if (!iface)
@@ -524,8 +523,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
if (!iface->authorized)
return -ENODEV;
- udev = interface_to_usbdev(iface);
-
dev->driver = &driver->drvwrap.driver;
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index bc8242bc4564..356b05c82dbc 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -21,6 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <uapi/linux/usb/audio.h>
#include "usb.h"
static inline const char *plural(int n)
@@ -42,6 +43,16 @@ static int is_activesync(struct usb_interface_descriptor *desc)
&& desc->bInterfaceProtocol == 1;
}
+static bool is_audio(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceClass == USB_CLASS_AUDIO;
+}
+
+static bool is_uac3_config(struct usb_interface_descriptor *desc)
+{
+ return desc->bInterfaceProtocol == UAC_VERSION_3;
+}
+
int usb_choose_configuration(struct usb_device *udev)
{
int i;
@@ -121,6 +132,22 @@ int usb_choose_configuration(struct usb_device *udev)
#endif
}
+ /*
+ * Select the first configuration as the default for audio
+ * devices, so that devices that don't comply with the UAC3
+ * protocol are still supported, but keep iterating through
+ * the other configurations and pick a UAC3-compliant config
+ * if one is present.
+ */
+ if (i == 0 && num_configs > 1 && desc && is_audio(desc)) {
+ best = c;
+ continue;
+ }
+
+ if (i > 0 && desc && is_audio(desc) && is_uac3_config(desc)) {
+ best = c;
+ break;
+ }
+
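As a worked example of the selection above: for a headset exposing config #1
(UAC2, so bInterfaceProtocol != UAC_VERSION_3) and config #2 (UAC3), the first
iteration records config #1 as the fallback best choice and continues, and the
second iteration matches is_uac3_config() and breaks with config #2 selected.
A single-configuration UAC2 device never enters either branch (num_configs > 1
fails) and falls through to the existing selection logic.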
/* From the remaining configs, choose the first one whose
* first interface is for a non-vendor-specific class.
* Reason: Linux is more likely to have a class driver
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 1c21955fe7c0..487025d31d44 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1738,7 +1738,6 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
struct usb_anchor *anchor = urb->anchor;
int status = urb->unlinked;
- unsigned long flags;
urb->hcpriv = NULL;
if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
@@ -1755,20 +1754,7 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
-
- /*
- * We disable local IRQs here avoid possible deadlock because
- * drivers may call spin_lock() to hold lock which might be
- * acquired in one hard interrupt handler.
- *
- * The local_irq_save()/local_irq_restore() around complete()
- * will be removed if current USB drivers have been cleaned up
- * and no one may trigger the above deadlock situation when
- * running complete() in tasklet.
- */
- local_irq_save(flags);
urb->complete(urb);
- local_irq_restore(flags);
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 462ce49f683a..c6077d582d29 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -28,6 +28,7 @@
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/pm_qos.h>
+#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>
@@ -2660,11 +2661,13 @@ static bool use_new_scheme(struct usb_device *udev, int retry,
{
int old_scheme_first_port =
port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME;
+ int quick_enumeration = (udev->speed == USB_SPEED_HIGH);
if (udev->speed >= USB_SPEED_SUPER)
return false;
- return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first);
+ return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first
+ || quick_enumeration);
}
/* Is a USB 3.0 port in the Inactive or Compliance Mode state?
@@ -5147,6 +5150,42 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
usb_lock_port(port_dev);
}
+/* Handle notifying userspace about hub over-current events */
+static void port_over_current_notify(struct usb_port *port_dev)
+{
+ static char *envp[] = { NULL, NULL, NULL };
+ struct device *hub_dev;
+ char *port_dev_path;
+
+ sysfs_notify(&port_dev->dev.kobj, NULL, "over_current_count");
+
+ hub_dev = port_dev->dev.parent;
+
+ if (!hub_dev)
+ return;
+
+ port_dev_path = kobject_get_path(&port_dev->dev.kobj, GFP_KERNEL);
+ if (!port_dev_path)
+ return;
+
+ envp[0] = kasprintf(GFP_KERNEL, "OVER_CURRENT_PORT=%s", port_dev_path);
+ if (!envp[0])
+ goto exit_path;
+
+ envp[1] = kasprintf(GFP_KERNEL, "OVER_CURRENT_COUNT=%u",
+ port_dev->over_current_count);
+ if (!envp[1])
+ goto exit;
+
+ kobject_uevent_env(&hub_dev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(envp[1]);
+exit:
+ kfree(envp[0]);
+exit_path:
+ kfree(port_dev_path);
+}
+
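User space can consume these KOBJ_CHANGE events with a netlink uevent
listener. A minimal libudev sketch, assuming libudev is available; it keys
only on the OVER_CURRENT_* properties added above, since the event is emitted
on the hub device rather than the port:

    #include <stdio.h>
    #include <poll.h>
    #include <libudev.h>

    int main(void)
    {
            struct udev *udev = udev_new();
            struct udev_monitor *mon =
                    udev_monitor_new_from_netlink(udev, "kernel");
            struct pollfd pfd;

            udev_monitor_enable_receiving(mon);
            pfd.fd = udev_monitor_get_fd(mon);
            pfd.events = POLLIN;

            for (;;) {
                    struct udev_device *dev;
                    const char *port, *count;

                    if (poll(&pfd, 1, -1) <= 0)
                            continue;
                    dev = udev_monitor_receive_device(mon);
                    if (!dev)
                            continue;
                    port = udev_device_get_property_value(dev,
                                    "OVER_CURRENT_PORT");
                    count = udev_device_get_property_value(dev,
                                    "OVER_CURRENT_COUNT");
                    if (port && count)
                            fprintf(stderr, "over-current #%s on %s\n",
                                    count, port);
                    udev_device_unref(dev);
            }
    }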
static void port_event(struct usb_hub *hub, int port1)
__must_hold(&port_dev->status_lock)
{
@@ -5189,6 +5228,7 @@ static void port_event(struct usb_hub *hub, int port1)
if (portchange & USB_PORT_STAT_C_OVERCURRENT) {
u16 status = 0, unused;
port_dev->over_current_count++;
+ port_over_current_notify(port_dev);
dev_dbg(&port_dev->dev, "over-current change #%u\n",
port_dev->over_current_count);
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index 9879767452a2..38b2c776c4b4 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -23,10 +23,11 @@ static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
struct usb_phy_roothub *roothub_entry;
- struct phy *phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ struct phy *phy;
- if (IS_ERR_OR_NULL(phy)) {
- if (!phy || PTR_ERR(phy) == -ENODEV)
+ phy = devm_of_phy_get_by_index(dev, dev->of_node, index);
+ if (IS_ERR(phy)) {
+ if (PTR_ERR(phy) == -ENODEV)
return 0;
else
return PTR_ERR(phy);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4a2143195395..1a06a4b5fbb1 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -16,6 +16,15 @@ static int usb_port_block_power_off;
static const struct attribute_group *port_dev_group[];
+static ssize_t location_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usb_port *port_dev = to_usb_port(dev);
+
+ return sprintf(buf, "0x%08x\n", port_dev->location);
+}
+static DEVICE_ATTR_RO(location);
+
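The new read-only attribute exposes the firmware-described port location word.
A small user-space sketch that parses it; the sysfs path is a placeholder for
a real hub port directory (ports appear as usbX-portY under the hub
interface):

    #include <stdio.h>

    static int read_port_location(const char *path, unsigned int *loc)
    {
            /* path example (assumed):
             * /sys/bus/usb/devices/1-0:1.0/usb1-port1/location */
            FILE *f = fopen(path, "r");
            int ret;

            if (!f)
                    return -1;
            ret = fscanf(f, "0x%x", loc);   /* printed as 0x%08x above */
            fclose(f);
            return ret == 1 ? 0 : -1;
    }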
static ssize_t connect_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -140,6 +149,7 @@ static DEVICE_ATTR_RW(usb3_lpm_permit);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
+ &dev_attr_location.attr,
&dev_attr_quirks.attr,
&dev_attr_over_current_count.attr,
NULL,
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index cc9c93affa14..30bab8463c96 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -393,6 +393,20 @@ enum dwc2_ep0_state {
* 0 - No
* 1 - Yes
* @hird_threshold: Value of BESL or HIRD Threshold.
+ * @ref_clk_per: Indicates the period of ref_clk in picoseconds.
+ * 62500 - 16MHz
+ * 58823 - 17MHz
+ * 52083 - 19.2MHz
+ * 50000 - 20MHz
+ * 41666 - 24MHz
+ * 33333 - 30MHz (default)
+ * 25000 - 40MHz
+ * @sof_cnt_wkup_alert: Number of SOFs after which the controller should
+ * generate an interrupt if the device has stayed in
+ * L1 state for that long. Software uses this to
+ * initiate remote wakeup in the controller so as to
+ * resynchronize to the (micro)frame number from the
+ * host.
* @activate_stm_fs_transceiver: Activate internal transceiver using GGPIO
* register.
* 0 - Deactivate the transceiver (default)
@@ -416,6 +430,9 @@ enum dwc2_ep0_state {
* back to DWC2_SPEED_PARAM_HIGH while device is gone.
* 0 - No (default)
* 1 - Yes
+ * @service_interval: Enable service interval based scheduling.
+ * 0 - No
+ * 1 - Yes
*
* The following parameters may be specified when starting the module. These
* parameters define how the DWC_otg controller should be configured. A
@@ -461,6 +478,7 @@ struct dwc2_core_params {
bool lpm_clock_gating;
bool besl;
bool hird_threshold_en;
+ bool service_interval;
u8 hird_threshold;
bool activate_stm_fs_transceiver;
bool ipg_isoc_en;
@@ -468,6 +486,10 @@ struct dwc2_core_params {
u32 max_transfer_size;
u32 ahbcfg;
+ /* GREFCLK parameters */
+ u32 ref_clk_per;
+ u16 sof_cnt_wkup_alert;
+
/* Host parameters */
bool host_dma;
bool dma_desc_enable;
@@ -605,6 +627,10 @@ struct dwc2_core_params {
* FIFO sizing is enabled 16 to 32768
* Actual maximum value is autodetected and also
* the default.
+ * @service_interval_mode: For enabling service interval based scheduling in the
+ * controller.
+ * 0 - Disable
+ * 1 - Enable
*/
struct dwc2_hw_params {
unsigned op_mode:3;
@@ -635,6 +661,7 @@ struct dwc2_hw_params {
unsigned utmi_phy_data_width:2;
unsigned lpm_mode:1;
unsigned ipg_isoc_en:1;
+ unsigned service_interval_mode:1;
u32 snpsid;
u32 dev_ep_dirs;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
@@ -1354,6 +1381,7 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg);
int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg);
void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg);
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg);
#else
static inline int dwc2_hsotg_remove(struct dwc2_hsotg *dwc2)
{ return 0; }
@@ -1388,6 +1416,7 @@ static inline int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
static inline int dwc2_hsotg_tx_fifo_average_depth(struct dwc2_hsotg *hsotg)
{ return 0; }
static inline void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg) {}
+static inline void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg) {}
#endif
#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index 22d015b0424f..7f62f4cdc265 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -701,6 +701,7 @@ static int params_show(struct seq_file *seq, void *v)
print_param(seq, p, besl);
print_param(seq, p, hird_threshold_en);
print_param(seq, p, hird_threshold);
+ print_param(seq, p, service_interval);
print_param(seq, p, host_dma);
print_param(seq, p, g_dma);
print_param(seq, p, g_dma_desc);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 220c0f9b89b0..2d6d2c8244de 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -123,6 +123,24 @@ static inline void dwc2_gadget_incr_frame_num(struct dwc2_hsotg_ep *hs_ep)
}
/**
+ * dwc2_gadget_dec_frame_num_by_one - Decrements the targeted frame number
+ * by one.
+ * @hs_ep: The endpoint.
+ *
+ * This function is used in the service interval based scheduling flow to
+ * calculate the descriptor frame number field value. In service interval
+ * mode the frame number in the descriptor should point to the last
+ * (u)frame of the interval.
+ *
+ */
+static inline void dwc2_gadget_dec_frame_num_by_one(struct dwc2_hsotg_ep *hs_ep)
+{
+ if (hs_ep->target_frame)
+ hs_ep->target_frame -= 1;
+ else
+ hs_ep->target_frame = DSTS_SOFFN_LIMIT;
+}
+
+/**
* dwc2_hsotg_en_gsint - enable one or more of the general interrupt
* @hsotg: The device state
* @ints: A bitmask of the interrupts to enable
@@ -228,6 +246,27 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg)
}
/**
+ * dwc2_gadget_wkup_alert_handler - Handler for WKUP_ALERT interrupt
+ *
+ * @hsotg: Programming view of the DWC_otg controller
+ *
+ */
+static void dwc2_gadget_wkup_alert_handler(struct dwc2_hsotg *hsotg)
+{
+ u32 gintsts2;
+ u32 gintmsk2;
+
+ gintsts2 = dwc2_readl(hsotg, GINTSTS2);
+ gintmsk2 = dwc2_readl(hsotg, GINTMSK2);
+
+ if (gintsts2 & GINTSTS2_WKUP_ALERT_INT) {
+ dev_dbg(hsotg->dev, "%s: Wkup_Alert_Int\n", __func__);
+ dwc2_clear_bit(hsotg, GINTSTS2, GINTSTS2_WKUP_ALERT_INT);
+ dwc2_set_bit(hsotg, DCTL, DCTL_RMTWKUPSIG);
+ }
+}
+
+/**
* dwc2_hsotg_tx_fifo_average_depth - returns average depth of device mode
* TX FIFOs
*
@@ -2812,6 +2851,23 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
if (using_desc_dma(hsotg)) {
hs_ep->target_frame = hsotg->frame_number;
dwc2_gadget_incr_frame_num(hs_ep);
+
+ /* In service interval mode, target_frame must be
+ * set to the last (u)frame of the service interval.
+ */
+ if (hsotg->params.service_interval) {
+ /* Set target_frame to the first (u)frame of
+ * the service interval
+ */
+ hs_ep->target_frame &= ~hs_ep->interval + 1;
+
+ /* Set target_frame to the last (u)frame of
+ * the service interval
+ */
+ dwc2_gadget_incr_frame_num(hs_ep);
+ dwc2_gadget_dec_frame_num_by_one(hs_ep);
+ }
+
dwc2_gadget_start_isoc_ddma(hs_ep);
return;
}
@@ -3109,6 +3165,8 @@ static void kill_all_requests(struct dwc2_hsotg *hsotg,
dwc2_hsotg_txfifo_flush(hsotg, ep->fifo_index);
}
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
+
/**
* dwc2_hsotg_disconnect - disconnect service
* @hsotg: The device state.
@@ -3127,13 +3185,12 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
hsotg->connected = 0;
hsotg->test_mode = 0;
+ /* all endpoints should be shutdown */
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
- kill_all_requests(hsotg, hsotg->eps_in[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
- kill_all_requests(hsotg, hsotg->eps_out[ep],
- -ESHUTDOWN);
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
}
call_gadget(hsotg, disconnect);
@@ -3191,13 +3248,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 val;
u32 usbcfg;
u32 dcfg = 0;
+ int ep;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
- if (!is_usb_reset)
+ if (!is_usb_reset) {
if (dwc2_core_reset(hsotg, true))
return;
+ } else {
+ /* all endpoints should be shutdown */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
+ }
/*
* we must now enable ep0 ready for host detection and then
@@ -3312,6 +3379,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_set_bit(hsotg, DIEPMSK, DIEPMSK_BNAININTRMSK);
}
+ /* Enable Service Interval mode if supported */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, DCTL, DCTL_SERVICE_INTERVAL_SUPPORTED);
+
dwc2_writel(hsotg, 0, DAINTMSK);
dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
@@ -3368,6 +3439,10 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* configure the core to support LPM */
dwc2_gadget_init_lpm(hsotg);
+ /* program GREFCLK register if needed */
+ if (using_desc_dma(hsotg) && hsotg->params.service_interval)
+ dwc2_gadget_program_ref_clk(hsotg);
+
/* must be at-least 3ms to allow bus to see disconnect */
mdelay(3);
@@ -3676,6 +3751,10 @@ irq_retry:
if (gintsts & IRQ_RETRY_MASK && --retry_count > 0)
goto irq_retry;
+ /* Check WKUP_ALERT interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_gadget_wkup_alert_handler(hsotg);
+
spin_unlock(&hsotg->lock);
return IRQ_HANDLED;
@@ -3993,6 +4072,7 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
unsigned long flags;
u32 epctrl_reg;
u32 ctrl;
+ int locked;
dev_dbg(hsotg->dev, "%s(ep %p)\n", __func__, ep);
@@ -4008,7 +4088,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
- spin_lock_irqsave(&hsotg->lock, flags);
+ locked = spin_is_locked(&hsotg->lock);
+ if (!locked)
+ spin_lock_irqsave(&hsotg->lock, flags);
ctrl = dwc2_readl(hsotg, epctrl_reg);
@@ -4032,7 +4114,9 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
hs_ep->fifo_index = 0;
hs_ep->fifo_size = 0;
- spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (!locked)
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
return 0;
}
@@ -4944,6 +5028,29 @@ void dwc2_gadget_init_lpm(struct dwc2_hsotg *hsotg)
val |= hsotg->params.besl ? GLPMCFG_ENBESL : 0;
dwc2_writel(hsotg, val, GLPMCFG);
dev_dbg(hsotg->dev, "GLPMCFG=0x%08x\n", dwc2_readl(hsotg, GLPMCFG));
+
+ /* Unmask WKUP_ALERT Interrupt */
+ if (hsotg->params.service_interval)
+ dwc2_set_bit(hsotg, GINTMSK2, GINTMSK2_WKUP_ALERT_INT_MSK);
+}
+
+/**
+ * dwc2_gadget_program_ref_clk - Program GREFCLK register in device mode
+ *
+ * @hsotg: Programming view of DWC_otg controller
+ *
+ */
+void dwc2_gadget_program_ref_clk(struct dwc2_hsotg *hsotg)
+{
+ u32 val = 0;
+
+ val |= GREFCLK_REF_CLK_MODE;
+ val |= hsotg->params.ref_clk_per << GREFCLK_REFCLKPER_SHIFT;
+ val |= hsotg->params.sof_cnt_wkup_alert <<
+ GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT;
+
+ dwc2_writel(hsotg, val, GREFCLK);
+ dev_dbg(hsotg->dev, "GREFCLK=0x%08x\n", dwc2_readl(hsotg, GREFCLK));
}
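As a quick sanity check on the arithmetic, with the defaults added in params.c
below (ref_clk_per = 33333 ps, i.e. a 30 MHz reference clock, and
sof_cnt_wkup_alert = 100) and the GREFCLK field definitions from hw.h, the
value this helper programs works out to:

    /* GREFCLK_REF_CLK_MODE            = BIT(14)     = 0x00004000 */
    /* 33333 << GREFCLK_REFCLKPER_SHIFT (15)         = 0x411a8000 */
    /* 100 << GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT (0)   = 0x00000064 */
    /* val  = 0x00004000 | 0x411a8000 | 0x00000064   = 0x411ac064 */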
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 2bd6e6bfc241..dd82fa516f3f 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -358,16 +358,10 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
static int dwc2_vbus_supply_init(struct dwc2_hsotg *hsotg)
{
- int ret;
-
- hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
- if (IS_ERR(hsotg->vbus_supply)) {
- ret = PTR_ERR(hsotg->vbus_supply);
- hsotg->vbus_supply = NULL;
- return ret == -ENODEV ? 0 : ret;
- }
+ if (hsotg->vbus_supply)
+ return regulator_enable(hsotg->vbus_supply);
- return regulator_enable(hsotg->vbus_supply);
+ return 0;
}
static int dwc2_vbus_supply_exit(struct dwc2_hsotg *hsotg)
@@ -1328,14 +1322,11 @@ static void dwc2_hc_write_packet(struct dwc2_hsotg *hsotg,
u32 remaining_count;
u32 byte_count;
u32 dword_count;
- u32 __iomem *data_fifo;
u32 *data_buf = (u32 *)chan->xfer_buf;
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
- data_fifo = (u32 __iomem *)(hsotg->regs + HCFIFO(chan->hc_num));
-
remaining_count = chan->xfer_len - chan->xfer_count;
if (remaining_count > chan->max_packet)
byte_count = chan->max_packet;
@@ -3564,6 +3555,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
u32 port_status;
u32 speed;
u32 pcgctl;
+ u32 pwr;
switch (typereq) {
case ClearHubFeature:
@@ -3612,8 +3604,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (pwr)
+ dwc2_vbus_supply_exit(hsotg);
break;
case USB_PORT_FEAT_INDICATOR:
@@ -3823,8 +3818,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"SetPortFeature - USB_PORT_FEAT_POWER\n");
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
hprt0 |= HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
break;
case USB_PORT_FEAT_RESET:
@@ -3841,6 +3839,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dwc2_writel(hsotg, 0, PCGCTL);
hprt0 = dwc2_read_hprt0(hsotg);
+ pwr = hprt0 & HPRT0_PWR;
/* Clear suspend bit if resetting from suspend state */
hprt0 &= ~HPRT0_SUSP;
@@ -3854,6 +3853,8 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"In host mode, hprt0=%08x\n", hprt0);
dwc2_writel(hsotg, hprt0, HPRT0);
+ if (!pwr)
+ dwc2_vbus_supply_init(hsotg);
}
/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
@@ -4393,6 +4394,8 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
struct usb_bus *bus = hcd_to_bus(hcd);
unsigned long flags;
+ u32 hprt0;
+ int ret;
dev_dbg(hsotg->dev, "DWC OTG HCD START\n");
@@ -4408,6 +4411,17 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
dwc2_hcd_reinit(hsotg);
+ hprt0 = dwc2_read_hprt0(hsotg);
+ /* Has vbus power been turned on in dwc2_core_host_init? */
+ if (hprt0 & HPRT0_PWR) {
+ /* Enable external vbus supply before resuming root hub */
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ ret = dwc2_vbus_supply_init(hsotg);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&hsotg->lock, flags);
+ }
+
/* Initialize and connect root hub if one is not already attached */
if (bus->root_hub) {
dev_dbg(hsotg->dev, "DWC OTG HCD Has Root Hub\n");
@@ -4417,7 +4431,7 @@ static int _dwc2_hcd_start(struct usb_hcd *hcd)
spin_unlock_irqrestore(&hsotg->lock, flags);
- return dwc2_vbus_supply_init(hsotg);
+ return 0;
}
/*
@@ -4428,6 +4442,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
unsigned long flags;
+ u32 hprt0;
/* Turn off all host-specific interrupts */
dwc2_disable_host_interrupts(hsotg);
@@ -4436,6 +4451,7 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
synchronize_irq(hcd->irq);
spin_lock_irqsave(&hsotg->lock, flags);
+ hprt0 = dwc2_read_hprt0(hsotg);
/* Ensure hcd is disconnected */
dwc2_hcd_disconnect(hsotg, true);
dwc2_hcd_stop(hsotg);
@@ -4444,7 +4460,9 @@ static void _dwc2_hcd_stop(struct usb_hcd *hcd)
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
spin_unlock_irqrestore(&hsotg->lock, flags);
- dwc2_vbus_supply_exit(hsotg);
+ /* keep balanced supply init/exit by checking HPRT0_PWR */
+ if (hprt0 & HPRT0_PWR)
+ dwc2_vbus_supply_exit(hsotg);
usleep_range(1000, 3000);
}
@@ -4482,7 +4500,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
hprt0 |= HPRT0_SUSP;
hprt0 &= ~HPRT0_PWR;
dwc2_writel(hsotg, hprt0, HPRT0);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_vbus_supply_exit(hsotg);
+ spin_lock_irqsave(&hsotg->lock, flags);
}
/* Enter partial_power_down */
diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h
index 0ca8e7bc7aaf..2b1ea441b7d4 100644
--- a/drivers/usb/dwc2/hw.h
+++ b/drivers/usb/dwc2/hw.h
@@ -312,6 +312,7 @@
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
#define GHWCFG4_ACG_SUPPORTED BIT(12)
#define GHWCFG4_IPG_ISOC_SUPPORTED BIT(11)
+#define GHWCFG4_SERVICE_INTERVAL_SUPPORTED BIT(10)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
@@ -404,6 +405,19 @@
#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0)
#define ADPCTL_PRB_DSCHRG_SHIFT 0
+#define GREFCLK HSOTG_REG(0x0064)
+#define GREFCLK_REFCLKPER_MASK (0x1ffff << 15)
+#define GREFCLK_REFCLKPER_SHIFT 15
+#define GREFCLK_REF_CLK_MODE BIT(14)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_MASK (0x3ff)
+#define GREFCLK_SOF_CNT_WKUP_ALERT_SHIFT 0
+
+#define GINTMSK2 HSOTG_REG(0x0068)
+#define GINTMSK2_WKUP_ALERT_INT_MSK BIT(0)
+
+#define GINTSTS2 HSOTG_REG(0x006c)
+#define GINTSTS2_WKUP_ALERT_INT BIT(0)
+
#define HPTXFSIZ HSOTG_REG(0x100)
/* Use FIFOSIZE_* constants to access this register */
@@ -443,6 +457,7 @@
#define DCFG_DEVSPD_FS48 3
#define DCTL HSOTG_REG(0x804)
+#define DCTL_SERVICE_INTERVAL_SUPPORTED BIT(19)
#define DCTL_PWRONPRGDONE BIT(11)
#define DCTL_CGOUTNAK BIT(10)
#define DCTL_SGOUTNAK BIT(9)
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index bf7052e037d6..7c1b6938f212 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -81,6 +81,7 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
p->host_perio_tx_fifo_size = 256;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 <<
GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = 0;
}
static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
@@ -299,9 +300,12 @@ static void dwc2_set_default_params(struct dwc2_hsotg *hsotg)
p->hird_threshold_en = true;
p->hird_threshold = 4;
p->ipg_isoc_en = false;
+ p->service_interval = false;
p->max_packet_count = hw->max_packet_count;
p->max_transfer_size = hw->max_transfer_size;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR << GAHBCFG_HBSTLEN_SHIFT;
+ p->ref_clk_per = 33333;
+ p->sof_cnt_wkup_alert = 100;
if ((hsotg->dr_mode == USB_DR_MODE_HOST) ||
(hsotg->dr_mode == USB_DR_MODE_OTG)) {
@@ -592,6 +596,7 @@ static void dwc2_check_params(struct dwc2_hsotg *hsotg)
CHECK_BOOL(besl, (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a));
CHECK_BOOL(hird_threshold_en, hsotg->params.lpm);
CHECK_RANGE(hird_threshold, 0, hsotg->params.besl ? 12 : 7, 0);
+ CHECK_BOOL(service_interval, hw->service_interval_mode);
CHECK_RANGE(max_packet_count,
15, hw->max_packet_count,
hw->max_packet_count);
@@ -780,6 +785,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
hw->acg_enable = !!(hwcfg4 & GHWCFG4_ACG_SUPPORTED);
hw->ipg_isoc_en = !!(hwcfg4 & GHWCFG4_IPG_ISOC_SUPPORTED);
+ hw->service_interval_mode = !!(hwcfg4 &
+ GHWCFG4_SERVICE_INTERVAL_SUPPORTED);
/* fifo sizes */
hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 577642895b57..c0b64d483552 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -432,6 +432,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (retval)
return retval;
+ hsotg->vbus_supply = devm_regulator_get_optional(hsotg->dev, "vbus");
+ if (IS_ERR(hsotg->vbus_supply)) {
+ retval = PTR_ERR(hsotg->vbus_supply);
+ hsotg->vbus_supply = NULL;
+ if (retval != -ENODEV)
+ return retval;
+ }
+
retval = dwc2_lowlevel_hw_enable(hsotg);
if (retval)
return retval;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 518ead12458d..1a0404fda596 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -113,7 +113,7 @@ config USB_DWC3_ST
config USB_DWC3_QCOM
tristate "Qualcomm Platform"
- depends on ARCH_QCOM || COMPILE_TEST
+ depends on EXTCON && (ARCH_QCOM || COMPILE_TEST)
depends on OF
default USB_DWC3
help
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 88c80fcc39f5..becfbb87f791 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -756,7 +756,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
/* check if current dwc3 is on simulation board */
if (dwc->hwparams.hwparams6 & DWC3_GHWPARAMS6_EN_FPGA) {
- dev_info(dwc->dev, "Running with FPGA optmizations\n");
+ dev_info(dwc->dev, "Running with FPGA optimizations\n");
dwc->is_fpga = true;
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index a94fb1ba8f2c..cb7fcd7c0ad8 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -13,80 +13,30 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/usb_phy_generic.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regulator/consumer.h>
+#define DWC3_EXYNOS_MAX_CLOCKS 4
+
+struct dwc3_exynos_driverdata {
+ const char *clk_names[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
+};
+
struct dwc3_exynos {
- struct platform_device *usb2_phy;
- struct platform_device *usb3_phy;
struct device *dev;
- struct clk *clk;
- struct clk *susp_clk;
- struct clk *axius_clk;
+ const char **clk_names;
+ struct clk *clks[DWC3_EXYNOS_MAX_CLOCKS];
+ int num_clks;
+ int suspend_clk_idx;
struct regulator *vdd33;
struct regulator *vdd10;
};
-static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
-{
- struct usb_phy_generic_platform_data pdata;
- struct platform_device *pdev;
- int ret;
-
- memset(&pdata, 0x00, sizeof(pdata));
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev)
- return -ENOMEM;
-
- exynos->usb2_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB2;
- pdata.gpio_reset = -1;
-
- ret = platform_device_add_data(exynos->usb2_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err1;
-
- pdev = platform_device_alloc("usb_phy_generic", PLATFORM_DEVID_AUTO);
- if (!pdev) {
- ret = -ENOMEM;
- goto err1;
- }
-
- exynos->usb3_phy = pdev;
- pdata.type = USB_PHY_TYPE_USB3;
-
- ret = platform_device_add_data(exynos->usb3_phy, &pdata, sizeof(pdata));
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb2_phy);
- if (ret)
- goto err2;
-
- ret = platform_device_add(exynos->usb3_phy);
- if (ret)
- goto err3;
-
- return 0;
-
-err3:
- platform_device_del(exynos->usb2_phy);
-
-err2:
- platform_device_put(exynos->usb3_phy);
-
-err1:
- platform_device_put(exynos->usb2_phy);
-
- return ret;
-}
-
static int dwc3_exynos_remove_child(struct device *dev, void *unused)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -101,47 +51,42 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
struct dwc3_exynos *exynos;
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
-
- int ret;
+ const struct dwc3_exynos_driverdata *driver_data;
+ int i, ret;
exynos = devm_kzalloc(dev, sizeof(*exynos), GFP_KERNEL);
if (!exynos)
return -ENOMEM;
- platform_set_drvdata(pdev, exynos);
+ driver_data = of_device_get_match_data(dev);
+ exynos->dev = dev;
+ exynos->num_clks = driver_data->num_clks;
+ exynos->clk_names = (const char **)driver_data->clk_names;
+ exynos->suspend_clk_idx = driver_data->suspend_clk_idx;
- exynos->dev = dev;
+ platform_set_drvdata(pdev, exynos);
- exynos->clk = devm_clk_get(dev, "usbdrd30");
- if (IS_ERR(exynos->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return -EINVAL;
+ for (i = 0; i < exynos->num_clks; i++) {
+ exynos->clks[i] = devm_clk_get(dev, exynos->clk_names[i]);
+ if (IS_ERR(exynos->clks[i])) {
+ dev_err(dev, "failed to get clock: %s\n",
+ exynos->clk_names[i]);
+ return PTR_ERR(exynos->clks[i]);
+ }
}
- ret = clk_prepare_enable(exynos->clk);
- if (ret)
- return ret;
- exynos->susp_clk = devm_clk_get(dev, "usbdrd30_susp_clk");
- if (IS_ERR(exynos->susp_clk))
- exynos->susp_clk = NULL;
- ret = clk_prepare_enable(exynos->susp_clk);
- if (ret)
- goto susp_clk_err;
-
- if (of_device_is_compatible(node, "samsung,exynos7-dwusb3")) {
- exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
- if (IS_ERR(exynos->axius_clk)) {
- dev_err(dev, "no AXI UpScaler clk specified\n");
- ret = -ENODEV;
- goto axius_clk_err;
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
}
- ret = clk_prepare_enable(exynos->axius_clk);
- if (ret)
- goto axius_clk_err;
- } else {
- exynos->axius_clk = NULL;
}
+ if (exynos->suspend_clk_idx >= 0)
+ clk_prepare_enable(exynos->clks[exynos->suspend_clk_idx]);
+
exynos->vdd33 = devm_regulator_get(dev, "vdd33");
if (IS_ERR(exynos->vdd33)) {
ret = PTR_ERR(exynos->vdd33);
@@ -164,12 +109,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
goto vdd10_err;
}
- ret = dwc3_exynos_register_phys(exynos);
- if (ret) {
- dev_err(dev, "couldn't register PHYs\n");
- goto phys_err;
- }
-
if (node) {
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
@@ -185,32 +124,31 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
return 0;
populate_err:
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
-phys_err:
regulator_disable(exynos->vdd10);
vdd10_err:
regulator_disable(exynos->vdd33);
vdd33_err:
- clk_disable_unprepare(exynos->axius_clk);
-axius_clk_err:
- clk_disable_unprepare(exynos->susp_clk);
-susp_clk_err:
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
+
return ret;
}
static int dwc3_exynos_remove(struct platform_device *pdev)
{
struct dwc3_exynos *exynos = platform_get_drvdata(pdev);
+ int i;
device_for_each_child(&pdev->dev, NULL, dwc3_exynos_remove_child);
- platform_device_unregister(exynos->usb2_phy);
- platform_device_unregister(exynos->usb3_phy);
- clk_disable_unprepare(exynos->axius_clk);
- clk_disable_unprepare(exynos->susp_clk);
- clk_disable_unprepare(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
+
+ if (exynos->suspend_clk_idx >= 0)
+ clk_disable_unprepare(exynos->clks[exynos->suspend_clk_idx]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -218,10 +156,36 @@ static int dwc3_exynos_remove(struct platform_device *pdev)
return 0;
}
+static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
+ .clk_names = { "usbdrd30" },
+ .num_clks = 1,
+ .suspend_clk_idx = -1,
+};
+
+static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
+ .clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
+ .num_clks = 4,
+ .suspend_clk_idx = 1,
+};
+
+static const struct dwc3_exynos_driverdata exynos7_drvdata = {
+ .clk_names = { "usbdrd30", "usbdrd30_susp_clk", "usbdrd30_axius_clk" },
+ .num_clks = 3,
+ .suspend_clk_idx = 1,
+};
+
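Note the reference counting implied by suspend_clk_idx: the suspend clock is
also listed in clk_names, so probe takes two enable counts on it (one in the
loop, one explicitly). dwc3_exynos_suspend() drops only the loop's reference,
which appears intended to keep the suspend clock running across system sleep,
while the remove and probe-error paths drop both.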
static const struct of_device_id exynos_dwc3_match[] = {
- { .compatible = "samsung,exynos5250-dwusb3" },
- { .compatible = "samsung,exynos7-dwusb3" },
- {},
+ {
+ .compatible = "samsung,exynos5250-dwusb3",
+ .data = &exynos5250_drvdata,
+ }, {
+ .compatible = "samsung,exynos5433-dwusb3",
+ .data = &exynos5433_drvdata,
+ }, {
+ .compatible = "samsung,exynos7-dwusb3",
+ .data = &exynos7_drvdata,
+ }, {
+ }
};
MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
@@ -229,9 +193,10 @@ MODULE_DEVICE_TABLE(of, exynos_dwc3_match);
static int dwc3_exynos_suspend(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
+ int i;
- clk_disable(exynos->axius_clk);
- clk_disable(exynos->clk);
+ for (i = exynos->num_clks - 1; i >= 0; i--)
+ clk_disable_unprepare(exynos->clks[i]);
regulator_disable(exynos->vdd33);
regulator_disable(exynos->vdd10);
@@ -242,7 +207,7 @@ static int dwc3_exynos_suspend(struct device *dev)
static int dwc3_exynos_resume(struct device *dev)
{
struct dwc3_exynos *exynos = dev_get_drvdata(dev);
- int ret;
+ int i, ret;
ret = regulator_enable(exynos->vdd33);
if (ret) {
@@ -255,13 +220,14 @@ static int dwc3_exynos_resume(struct device *dev)
return ret;
}
- clk_enable(exynos->clk);
- clk_enable(exynos->axius_clk);
-
- /* runtime set active to reflect active state. */
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
+ for (i = 0; i < exynos->num_clks; i++) {
+ ret = clk_prepare_enable(exynos->clks[i]);
+ if (ret) {
+ while (i-- > 0)
+ clk_disable_unprepare(exynos->clks[i]);
+ return ret;
+ }
+ }
return 0;
}
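The open-coded get/enable/disable loops above could also be written with the
clk bulk helpers. A hedged sketch of the probe-side equivalent, not part of
this patch, assuming the same clock name table and a clk_bulk_data array in
place of the plain struct clk array:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int dwc3_exynos_get_clks(struct dwc3_exynos *exynos,
                                    struct clk_bulk_data **out)
    {
            struct clk_bulk_data *clks;
            int i, ret;

            clks = devm_kcalloc(exynos->dev, exynos->num_clks,
                                sizeof(*clks), GFP_KERNEL);
            if (!clks)
                    return -ENOMEM;
            for (i = 0; i < exynos->num_clks; i++)
                    clks[i].id = exynos->clk_names[i];

            ret = devm_clk_bulk_get(exynos->dev, exynos->num_clks, clks);
            if (ret)
                    return ret;
            /* enables all clocks, unwinding automatically on failure */
            ret = clk_bulk_prepare_enable(exynos->num_clks, clks);
            if (!ret)
                    *out = clks;
            return ret;
    }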
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2b53194081ba..679c12e14522 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -270,27 +270,36 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
struct dwc3 *dwc = dep->dwc;
u32 timeout = 1000;
+ u32 saved_config = 0;
u32 reg;
int cmd_status = 0;
- int susphy = false;
int ret = -EINVAL;
/*
- * Synopsys Databook 2.60a states, on section 6.3.2.5.[1-8], that if
- * we're issuing an endpoint command, we must check if
- * GUSB2PHYCFG.SUSPHY bit is set. If it is, then we need to clear it.
+ * When operating in USB 2.0 speeds (HS/FS), if GUSB2PHYCFG.ENBLSLPM or
+ * GUSB2PHYCFG.SUSPHY is set, it must be cleared before issuing an
+ * endpoint command.
*
- * We will also set SUSPHY bit to what it was before returning as stated
- * by the same section on Synopsys databook.
+ * Save and clear both GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY
+ * settings. Restore them after the command is completed.
+ *
+ * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2
*/
if (dwc->gadget.speed <= USB_SPEED_HIGH) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) {
- susphy = true;
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
@@ -389,9 +398,9 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
}
}
- if (unlikely(susphy)) {
+ if (saved_config) {
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg |= saved_config;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
}
diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c
index e15e896f356c..165653a5e45d 100644
--- a/drivers/usb/early/xhci-dbc.c
+++ b/drivers/usb/early/xhci-dbc.c
@@ -717,17 +717,14 @@ static void xdbc_handle_port_status(struct xdbc_trb *evt_trb)
static void xdbc_handle_tx_event(struct xdbc_trb *evt_trb)
{
- size_t remain_length;
u32 comp_code;
int ep_id;
comp_code = GET_COMP_CODE(le32_to_cpu(evt_trb->field[2]));
- remain_length = EVENT_TRB_LEN(le32_to_cpu(evt_trb->field[2]));
ep_id = TRB_TO_EP_ID(le32_to_cpu(evt_trb->field[3]));
switch (comp_code) {
case COMP_SUCCESS:
- remain_length = 0;
case COMP_SHORT_PACKET:
break;
case COMP_TRB_ERROR:
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d582921f7257..db2d4980cb35 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -22,12 +22,8 @@
* controlled by two clock sources :
* CLK_5 := c_srate, and CLK_6 := p_srate
*/
-#define USB_OUT_IT_ID 1
-#define IO_IN_IT_ID 2
-#define IO_OUT_OT_ID 3
-#define USB_IN_OT_ID 4
-#define USB_OUT_CLK_ID 5
-#define USB_IN_CLK_ID 6
+#define USB_OUT_CLK_ID (out_clk_src_desc.bClockID)
+#define USB_IN_CLK_ID (in_clk_src_desc.bClockID)
#define CONTROL_ABSENT 0
#define CONTROL_RDONLY 1
@@ -43,6 +39,9 @@
#define UNFLW_CTRL 8
#define OVFLW_CTRL 10
+#define EPIN_EN(_opts) ((_opts)->p_chmask != 0)
+#define EPOUT_EN(_opts) ((_opts)->c_chmask != 0)
+
struct f_uac2 {
struct g_audio g_audio;
u8 ac_intf, as_in_intf, as_out_intf;
@@ -135,7 +134,7 @@ static struct uac_clock_source_descriptor in_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_IN_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -147,7 +146,7 @@ static struct uac_clock_source_descriptor out_clk_src_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC2_CLOCK_SOURCE,
- .bClockID = USB_OUT_CLK_ID,
+ /* .bClockID = DYNAMIC */
.bmAttributes = UAC_CLOCK_SOURCE_TYPE_INT_FIXED,
.bmControls = (CONTROL_RDONLY << CLK_FREQ_CTRL),
.bAssocTerminal = 0,
@@ -159,10 +158,10 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = USB_OUT_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -173,10 +172,10 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
- .bTerminalID = IO_IN_IT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -187,11 +186,11 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = USB_IN_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING),
.bAssocTerminal = 0,
- .bSourceID = IO_IN_IT_ID,
- .bCSourceID = USB_IN_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -201,11 +200,11 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
- .bTerminalID = IO_OUT_OT_ID,
+ /* .bTerminalID = DYNAMIC */
.wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
.bAssocTerminal = 0,
- .bSourceID = USB_OUT_IT_ID,
- .bCSourceID = USB_OUT_CLK_ID,
+ /* .bSourceID = DYNAMIC */
+ /* .bCSourceID = DYNAMIC */
.bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL),
};
@@ -253,7 +252,7 @@ static struct uac2_as_header_descriptor as_out_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_OUT_IT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -330,7 +329,7 @@ static struct uac2_as_header_descriptor as_in_hdr_desc = {
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_AS_GENERAL,
- .bTerminalLink = USB_IN_OT_ID,
+ /* .bTerminalLink = DYNAMIC */
.bmControls = 0,
.bFormatType = UAC_FORMAT_TYPE_I,
.bmFormats = cpu_to_le32(UAC_FORMAT_TYPE_I_PCM),
@@ -471,6 +470,125 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
le16_to_cpu(ep_desc->wMaxPacketSize)));
}
+/* Use macro to overcome line length limitation */
+#define USBDHDR(p) (struct usb_descriptor_header *)(p)
+
+static void setup_descriptor(struct f_uac2_opts *opts)
+{
+ /* patch descriptors */
+ int i = 1; /* IDs start with 1 */
+
+ if (EPOUT_EN(opts))
+ usb_out_it_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ io_in_it_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ io_out_ot_desc.bTerminalID = i++;
+ if (EPIN_EN(opts))
+ usb_in_ot_desc.bTerminalID = i++;
+ if (EPOUT_EN(opts))
+ out_clk_src_desc.bClockID = i++;
+ if (EPIN_EN(opts))
+ in_clk_src_desc.bClockID = i++;
+
+ usb_out_it_desc.bCSourceID = out_clk_src_desc.bClockID;
+ usb_in_ot_desc.bSourceID = io_in_it_desc.bTerminalID;
+ usb_in_ot_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_in_it_desc.bCSourceID = in_clk_src_desc.bClockID;
+ io_out_ot_desc.bCSourceID = out_clk_src_desc.bClockID;
+ io_out_ot_desc.bSourceID = usb_out_it_desc.bTerminalID;
+ as_out_hdr_desc.bTerminalLink = usb_out_it_desc.bTerminalID;
+ as_in_hdr_desc.bTerminalLink = usb_in_ot_desc.bTerminalID;
+
+ iad_desc.bInterfaceCount = 1;
+ ac_hdr_desc.wTotalLength = 0;
+
+ if (EPIN_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(in_clk_src_desc);
+ len += sizeof(usb_in_ot_desc);
+ len += sizeof(io_in_it_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+ if (EPOUT_EN(opts)) {
+ u16 len = le16_to_cpu(ac_hdr_desc.wTotalLength);
+
+ len += sizeof(out_clk_src_desc);
+ len += sizeof(usb_out_it_desc);
+ len += sizeof(io_out_ot_desc);
+ ac_hdr_desc.wTotalLength = cpu_to_le16(len);
+ iad_desc.bInterfaceCount++;
+ }
+
+ i = 0;
+ fs_audio_desc[i++] = USBDHDR(&iad_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ fs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ fs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ fs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epout_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ fs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ fs_audio_desc[i++] = USBDHDR(&fs_epin_desc);
+ fs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ fs_audio_desc[i] = NULL;
+
+ i = 0;
+ hs_audio_desc[i++] = USBDHDR(&iad_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_ac_if_desc);
+ hs_audio_desc[i++] = USBDHDR(&ac_hdr_desc);
+ if (EPIN_EN(opts))
+ hs_audio_desc[i++] = USBDHDR(&in_clk_src_desc);
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&out_clk_src_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_out_it_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_in_it_desc);
+ hs_audio_desc[i++] = USBDHDR(&usb_in_ot_desc);
+ }
+ if (EPOUT_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&io_out_ot_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_out_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_out_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epout_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_out_desc);
+ }
+ if (EPIN_EN(opts)) {
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if0_desc);
+ hs_audio_desc[i++] = USBDHDR(&std_as_in_if1_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_hdr_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_in_fmt1_desc);
+ hs_audio_desc[i++] = USBDHDR(&hs_epin_desc);
+ hs_audio_desc[i++] = USBDHDR(&as_iso_in_desc);
+ }
+ hs_audio_desc[i] = NULL;
+}
+
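With both directions enabled, the allocator above reproduces exactly the
static numbering it replaces, so descriptors stay stable for existing
two-direction configurations; for reference:

    /* both EPOUT and EPIN enabled -> same IDs as the old constants:
     *   usb_out_it_desc.bTerminalID = 1   (was USB_OUT_IT_ID)
     *   io_in_it_desc.bTerminalID   = 2   (was IO_IN_IT_ID)
     *   io_out_ot_desc.bTerminalID  = 3   (was IO_OUT_OT_ID)
     *   usb_in_ot_desc.bTerminalID  = 4   (was USB_IN_OT_ID)
     *   out_clk_src_desc.bClockID   = 5   (was USB_OUT_CLK_ID)
     *   in_clk_src_desc.bClockID    = 6   (was USB_IN_CLK_ID)
     * a capture-only build (only c_chmask != 0) collapses to IDs 1..3.
     */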
static int
afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
{
@@ -530,25 +648,29 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
uac2->ac_intf = ret;
uac2->ac_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPOUT_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_out_if0_desc.bInterfaceNumber = ret;
+ std_as_out_if1_desc.bInterfaceNumber = ret;
+ uac2->as_out_intf = ret;
+ uac2->as_out_alt = 0;
}
- std_as_out_if0_desc.bInterfaceNumber = ret;
- std_as_out_if1_desc.bInterfaceNumber = ret;
- uac2->as_out_intf = ret;
- uac2->as_out_alt = 0;
- ret = usb_interface_id(cfg, fn);
- if (ret < 0) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ if (EPIN_EN(uac2_opts)) {
+ ret = usb_interface_id(cfg, fn);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+ std_as_in_if0_desc.bInterfaceNumber = ret;
+ std_as_in_if1_desc.bInterfaceNumber = ret;
+ uac2->as_in_intf = ret;
+ uac2->as_in_alt = 0;
}
- std_as_in_if0_desc.bInterfaceNumber = ret;
- std_as_in_if1_desc.bInterfaceNumber = ret;
- uac2->as_in_intf = ret;
- uac2->as_in_alt = 0;
/* Calculate wMaxPacketSize according to audio bandwidth */
set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
@@ -556,16 +678,20 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
- agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
- if (!agdev->out_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPOUT_EN(uac2_opts)) {
+ agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
+ if (!agdev->out_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
- agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
- if (!agdev->in_ep) {
- dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return -ENODEV;
+ if (EPIN_EN(uac2_opts)) {
+ agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
+ if (!agdev->in_ep) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return -ENODEV;
+ }
}
agdev->in_ep_maxpsize = max_t(u16,
@@ -578,6 +704,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress;
hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress;
+ setup_descriptor(uac2_opts);
+
ret = usb_assign_descriptors(fn, fs_audio_desc, hs_audio_desc, NULL,
NULL);
if (ret)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index d8ce7868fe22..8c99392df593 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -197,12 +197,6 @@ static const struct usb_descriptor_header * const uvc_ss_streaming[] = {
NULL,
};
-void uvc_set_trace_param(unsigned int trace)
-{
- uvc_gadget_trace_param = trace;
-}
-EXPORT_SYMBOL(uvc_set_trace_param);
-
/* --------------------------------------------------------------------------
* Control requests
*/
@@ -232,13 +226,8 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
struct v4l2_event v4l2_event;
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
- /* printk(KERN_INFO "setup request %02x %02x value %04x index %04x %04x\n",
- * ctrl->bRequestType, ctrl->bRequest, le16_to_cpu(ctrl->wValue),
- * le16_to_cpu(ctrl->wIndex), le16_to_cpu(ctrl->wLength));
- */
-
if ((ctrl->bRequestType & USB_TYPE_MASK) != USB_TYPE_CLASS) {
- INFO(f->config->cdev, "invalid request type\n");
+ uvcg_info(f, "invalid request type\n");
return -EINVAL;
}
@@ -272,7 +261,7 @@ uvc_function_get_alt(struct usb_function *f, unsigned interface)
{
struct uvc_device *uvc = to_uvc(f);
- INFO(f->config->cdev, "uvc_function_get_alt(%u)\n", interface);
+ uvcg_info(f, "%s(%u)\n", __func__, interface);
if (interface == uvc->control_intf)
return 0;
@@ -291,13 +280,13 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
int ret;
- INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
+ uvcg_info(f, "%s(%u, %u)\n", __func__, interface, alt);
if (interface == uvc->control_intf) {
if (alt)
return -EINVAL;
- INFO(cdev, "reset UVC Control\n");
+ uvcg_info(f, "reset UVC Control\n");
usb_ep_disable(uvc->control_ep);
if (!uvc->control_ep->desc)
@@ -348,7 +337,7 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
if (!uvc->video.ep)
return -EINVAL;
- INFO(cdev, "reset UVC\n");
+ uvcg_info(f, "reset UVC\n");
usb_ep_disable(uvc->video.ep);
ret = config_ep_by_speed(f->config->cdev->gadget,
@@ -373,7 +362,7 @@ uvc_function_disable(struct usb_function *f)
struct uvc_device *uvc = to_uvc(f);
struct v4l2_event v4l2_event;
- INFO(f->config->cdev, "uvc_function_disable\n");
+ uvcg_info(f, "%s()\n", __func__);
memset(&v4l2_event, 0, sizeof(v4l2_event));
v4l2_event.type = UVC_EVENT_DISCONNECT;
@@ -392,21 +381,19 @@ uvc_function_disable(struct usb_function *f)
void
uvc_function_connect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_activate(&uvc->func)) < 0)
- INFO(cdev, "UVC connect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC connect failed with %d\n", ret);
}
void
uvc_function_disconnect(struct uvc_device *uvc)
{
- struct usb_composite_dev *cdev = uvc->func.config->cdev;
int ret;
if ((ret = usb_function_deactivate(&uvc->func)) < 0)
- INFO(cdev, "UVC disconnect failed with %d\n", ret);
+ uvcg_info(&uvc->func, "UVC disconnect failed with %d\n", ret);
}
/* --------------------------------------------------------------------------
@@ -605,7 +592,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
struct f_uvc_opts *opts;
int ret = -EINVAL;
- INFO(cdev, "uvc_function_bind\n");
+ uvcg_info(f, "%s()\n", __func__);
opts = fi_to_f_uvc_opts(f->fi);
/* Sanity check the streaming endpoint module parameters.
@@ -618,8 +605,8 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
if (opts->streaming_maxburst &&
(opts->streaming_maxpacket % 1024) != 0) {
opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024);
- INFO(cdev, "overriding streaming_maxpacket to %d\n",
- opts->streaming_maxpacket);
+ uvcg_info(f, "overriding streaming_maxpacket to %d\n",
+ opts->streaming_maxpacket);
}
/* Fill in the FS/HS/SS Video Streaming specific descriptors from the
@@ -658,7 +645,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate control EP\n");
+ uvcg_info(f, "Unable to allocate control EP\n");
goto error;
}
uvc->control_ep = ep;
@@ -672,7 +659,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
ep = usb_ep_autoconfig(cdev->gadget, &uvc_fs_streaming_ep);
if (!ep) {
- INFO(cdev, "Unable to allocate streaming EP\n");
+ uvcg_info(f, "Unable to allocate streaming EP\n");
goto error;
}
uvc->video.ep = ep;
@@ -699,12 +686,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc_iad.bFirstInterface = ret;
uvc_control_intf.bInterfaceNumber = ret;
uvc->control_intf = ret;
+ opts->control_interface = ret;
if ((ret = usb_interface_id(c, f)) < 0)
goto error;
uvc_streaming_intf_alt0.bInterfaceNumber = ret;
uvc_streaming_intf_alt1.bInterfaceNumber = ret;
uvc->streaming_intf = ret;
+ opts->streaming_interface = ret;
/* Copy descriptors */
f->fs_descriptors = uvc_copy_descriptors(uvc, USB_SPEED_FULL);
@@ -743,19 +732,19 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f)
uvc->control_req->context = uvc;
if (v4l2_device_register(&cdev->gadget->dev, &uvc->v4l2_dev)) {
- printk(KERN_INFO "v4l2_device_register failed\n");
+ uvcg_err(f, "failed to register V4L2 device\n");
goto error;
}
/* Initialise video. */
- ret = uvcg_video_init(&uvc->video);
+ ret = uvcg_video_init(&uvc->video, uvc);
if (ret < 0)
goto error;
/* Register a V4L2 device. */
ret = uvc_register_video(uvc);
if (ret < 0) {
- printk(KERN_INFO "Unable to register video device\n");
+ uvcg_err(f, "failed to register video device\n");
goto error;
}
@@ -792,6 +781,7 @@ static struct usb_function_instance *uvc_alloc_inst(void)
struct uvc_output_terminal_descriptor *od;
struct uvc_color_matching_descriptor *md;
struct uvc_descriptor_header **ctl_cls;
+ int ret;
opts = kzalloc(sizeof(*opts), GFP_KERNEL);
if (!opts)
@@ -868,7 +858,12 @@ static struct usb_function_instance *uvc_alloc_inst(void)
opts->streaming_interval = 1;
opts->streaming_maxpacket = 1024;
- uvcg_attach_configfs(opts);
+ ret = uvcg_attach_configfs(opts);
+ if (ret < 0) {
+ kfree(opts);
+ return ERR_PTR(ret);
+ }
+
return &opts->func_inst;
}
@@ -886,7 +881,7 @@ static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
- INFO(cdev, "%s\n", __func__);
+ uvcg_info(f, "%s\n", __func__);
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
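
Note that with the uvc_alloc_inst() hunk above, a uvcg_attach_configfs()
failure is now propagated as an ERR_PTR() instead of being ignored. The
function is invoked through the usual function-instance framework, so its
caller follows the standard kernel convention; a minimal sketch of that
calling pattern:

    struct usb_function_instance *fi;

    fi = uvc_alloc_inst();      /* may now return ERR_PTR(-ENOMEM), ... */
    if (IS_ERR(fi))
            return PTR_ERR(fi); /* propagate the configfs attach error */
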
diff --git a/drivers/usb/gadget/function/u_uvc.h b/drivers/usb/gadget/function/u_uvc.h
index 2ed292e94fbc..5242d489e20a 100644
--- a/drivers/usb/gadget/function/u_uvc.h
+++ b/drivers/usb/gadget/function/u_uvc.h
@@ -25,6 +25,9 @@ struct f_uvc_opts {
unsigned int streaming_maxpacket;
unsigned int streaming_maxburst;
+ unsigned int control_interface;
+ unsigned int streaming_interface;
+
/*
* Control descriptors array pointers for full-/high-speed and
* super-speed. They point by default to the uvc_fs_control_cls and
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 93cf78b420fe..099d650082e5 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -24,6 +24,7 @@
struct usb_ep;
struct usb_request;
struct uvc_descriptor_header;
+struct uvc_device;
/* ------------------------------------------------------------------------
* Debugging, printing and logging
@@ -51,14 +52,12 @@ extern unsigned int uvc_gadget_trace_param;
printk(KERN_DEBUG "uvcvideo: " msg); \
} while (0)
-#define uvc_warn_once(dev, warn, msg...) \
- do { \
- if (!test_and_set_bit(warn, &dev->warnings)) \
- printk(KERN_INFO "uvcvideo: " msg); \
- } while (0)
-
-#define uvc_printk(level, msg...) \
- printk(level "uvcvideo: " msg)
+#define uvcg_dbg(f, fmt, args...) \
+ dev_dbg(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_info(f, fmt, args...) \
+ dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
+#define uvcg_err(f, fmt, args...) \
+ dev_err(&(f)->config->cdev->gadget->dev, "%s: " fmt, (f)->name, ##args)
/* ------------------------------------------------------------------------
* Driver specific constants
@@ -73,6 +72,7 @@ extern unsigned int uvc_gadget_trace_param;
*/
struct uvc_video {
+ struct uvc_device *uvc;
struct usb_ep *ep;
/* Frame parameters */
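
The uvcg_dbg/uvcg_info/uvcg_err macros introduced above route messages
through the gadget's struct device and prefix them with the function
instance name, which is what all of the INFO()/printk() call sites
converted in f_uvc.c now rely on. As an illustration of the expansion
performed by the definition above:

    uvcg_info(f, "reset UVC Control\n");
    /* ...expands to... */
    dev_info(&(f)->config->cdev->gadget->dev, "%s: " "reset UVC Control\n",
             (f)->name);

so each log line identifies both the gadget device and the UVC function
instance that emitted it.
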
diff --git a/drivers/usb/gadget/function/uvc_configfs.c b/drivers/usb/gadget/function/uvc_configfs.c
index b51f0d278826..bc1e2af566c3 100644
--- a/drivers/usb/gadget/function/uvc_configfs.c
+++ b/drivers/usb/gadget/function/uvc_configfs.c
@@ -9,9 +9,16 @@
*
* Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
*/
+
+#include <linux/sort.h>
+
#include "u_uvc.h"
#include "uvc_configfs.h"
+/* -----------------------------------------------------------------------------
+ * Global Utility Structures and Macros
+ */
+
#define UVCG_STREAMING_CONTROL_SIZE 1
#define UVC_ATTR(prefix, cname, aname) \
@@ -31,13 +38,93 @@ static struct configfs_attribute prefix##attr_##cname = { \
.show = prefix##cname##_show, \
}
+#define le8_to_cpu(x) (x)
+#define cpu_to_le8(x) (x)
+
+static int uvcg_config_compare_u32(const void *l, const void *r)
+{
+ u32 li = *(const u32 *)l;
+ u32 ri = *(const u32 *)r;
+
+ return li < ri ? -1 : li == ri ? 0 : 1;
+}
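
uvcg_config_compare_u32() is the comparator later handed to sort() in
uvcg_frame_dw_frame_interval_store() (see below), so user-supplied frame
intervals are stored in ascending order, as the UVC frame descriptor
layout expects. A minimal sketch of that call:

    /* sketch: same sort() invocation as the dwFrameInterval store path */
    u32 intervals[] = { 666666, 333333, 1000000 };

    sort(intervals, ARRAY_SIZE(intervals), sizeof(*intervals),
         uvcg_config_compare_u32, NULL);
    /* intervals[] is now { 333333, 666666, 1000000 } */
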
+
static inline struct f_uvc_opts *to_f_uvc_opts(struct config_item *item)
{
return container_of(to_config_group(item), struct f_uvc_opts,
func_inst.group);
}
-/* control/header/<NAME> */
+struct uvcg_config_group_type {
+ struct config_item_type type;
+ const char *name;
+ const struct uvcg_config_group_type **children;
+ int (*create_children)(struct config_group *group);
+};
+
+static void uvcg_config_item_release(struct config_item *item)
+{
+ struct config_group *group = to_config_group(item);
+
+ kfree(group);
+}
+
+static struct configfs_item_operations uvcg_config_item_ops = {
+ .release = uvcg_config_item_release,
+};
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type);
+
+static int uvcg_config_create_children(struct config_group *group,
+ const struct uvcg_config_group_type *type)
+{
+ const struct uvcg_config_group_type **child;
+ int ret;
+
+ if (type->create_children)
+ return type->create_children(group);
+
+ for (child = type->children; child && *child; ++child) {
+ ret = uvcg_config_create_group(group, *child);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int uvcg_config_create_group(struct config_group *parent,
+ const struct uvcg_config_group_type *type)
+{
+ struct config_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ config_group_init_type_name(group, type->name, &type->type);
+ configfs_add_default_group(group, parent);
+
+ return uvcg_config_create_children(group, type);
+}
+
+static void uvcg_config_remove_children(struct config_group *group)
+{
+ struct config_group *child, *n;
+
+ list_for_each_entry_safe(child, n, &group->default_groups, group_entry) {
+ list_del(&child->group_entry);
+ uvcg_config_remove_children(child);
+ config_item_put(&child->cg_item);
+ }
+}
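
These helpers replace the hand-rolled singleton group structs removed
throughout the rest of this file: each configfs directory is described by
a uvcg_config_group_type carrying its name plus either a NULL-terminated
children array or a create_children() hook, and uvcg_config_create_group()
instantiates the whole subtree recursively. A minimal sketch of the
pattern (the example_* names are purely illustrative, not part of the
patch):

    /* sketch: a directory "example" containing a single child "default" */
    static const struct uvcg_config_group_type example_default_type = {
            .type = {
                    .ct_item_ops = &uvcg_config_item_ops,
                    .ct_owner    = THIS_MODULE,
            },
            .name = "default",
    };

    static const struct uvcg_config_group_type example_grp_type = {
            .type = {
                    .ct_item_ops = &uvcg_config_item_ops,
                    .ct_owner    = THIS_MODULE,
            },
            .name = "example",
            .children = (const struct uvcg_config_group_type *[]) {
                    &example_default_type,
                    NULL,
            },
    };

    /* creates example/ and example/default/ under the parent group:
     * ret = uvcg_config_create_group(parent, &example_grp_type);
     */
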
+
+/* -----------------------------------------------------------------------------
+ * control/header/<NAME>
+ * control/header
+ */
+
DECLARE_UVC_HEADER_DESCRIPTOR(1);
struct uvcg_control_header {
@@ -51,9 +138,9 @@ static struct uvcg_control_header *to_uvcg_control_header(struct config_item *it
return container_of(item, struct uvcg_control_header, item);
}
-#define UVCG_CTRL_HDR_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_CTRL_HDR_ATTR(cname, aname, bits, limit) \
static ssize_t uvcg_control_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_control_header *ch = to_uvcg_control_header(item); \
struct f_uvc_opts *opts; \
@@ -67,7 +154,7 @@ static ssize_t uvcg_control_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(ch->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(ch->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -83,7 +170,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct mutex *su_mutex = &ch->item.ci_group->cg_subsys->su_mutex;\
int ret; \
- uxx num; \
+ u##bits num; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
@@ -96,7 +183,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtou##bits(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -104,7 +191,7 @@ uvcg_control_header_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- ch->desc.aname = vnoc(num); \
+ ch->desc.aname = cpu_to_le##bits(num); \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -114,11 +201,9 @@ end: \
\
UVC_ATTR(uvcg_control_header_, cname, aname)
-UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, le16_to_cpu, kstrtou16, u16, cpu_to_le16,
- 0xffff);
+UVCG_CTRL_HDR_ATTR(bcd_uvc, bcdUVC, 16, 0xffff);
-UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, le32_to_cpu, kstrtou32,
- u32, cpu_to_le32, 0x7fffffff);
+UVCG_CTRL_HDR_ATTR(dw_clock_frequency, dwClockFrequency, 32, 0x7fffffff);
#undef UVCG_CTRL_HDR_ATTR
@@ -129,6 +214,7 @@ static struct configfs_attribute *uvcg_control_header_attrs[] = {
};
static const struct config_item_type uvcg_control_header_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_control_header_attrs,
.ct_owner = THIS_MODULE,
};
@@ -153,60 +239,42 @@ static struct config_item *uvcg_control_header_make(struct config_group *group,
return &h->item;
}
-static void uvcg_control_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_control_header *h = to_uvcg_control_header(item);
-
- kfree(h);
-}
-
-/* control/header */
-static struct uvcg_control_header_grp {
- struct config_group group;
-} uvcg_control_header_grp;
-
static struct configfs_group_operations uvcg_control_header_grp_ops = {
.make_item = uvcg_control_header_make,
- .drop_item = uvcg_control_header_drop,
};
-static const struct config_item_type uvcg_control_header_grp_type = {
- .ct_group_ops = &uvcg_control_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_control_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_control_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* control/processing/default */
-static struct uvcg_default_processing {
- struct config_group group;
-} uvcg_default_processing;
-
-static inline struct uvcg_default_processing
-*to_uvcg_default_processing(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_processing, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/processing/default
+ */
-#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_PROCESSING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_processing_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_processing_unit_descriptor *pd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
pd = &opts->uvc_processing; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(pd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(pd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -215,37 +283,33 @@ static ssize_t uvcg_default_processing_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_processing_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, le16_to_cpu);
-UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_PROCESSING_ATTR(b_unit_id, bUnitID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_PROCESSING_ATTR(w_max_multiplier, wMaxMultiplier, 16);
+UVCG_DEFAULT_PROCESSING_ATTR(i_processing, iProcessing, 8);
#undef UVCG_DEFAULT_PROCESSING_ATTR
static ssize_t uvcg_default_processing_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_processing *dp = to_uvcg_default_processing(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dp->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_processing_unit_descriptor *pd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dp->group.cg_item.ci_parent->ci_parent->ci_parent;
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
pd = &opts->uvc_processing;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < pd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", pd->bmControls[i]);
+ result += sprintf(pg, "%u\n", pd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -266,54 +330,55 @@ static struct configfs_attribute *uvcg_default_processing_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_processing_type = {
- .ct_attrs = uvcg_default_processing_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_processing_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_processing_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_processing {}; */
-
-/* control/processing */
-static struct uvcg_processing_grp {
- struct config_group group;
-} uvcg_processing_grp;
+/* -----------------------------------------------------------------------------
+ * control/processing
+ */
-static const struct config_item_type uvcg_processing_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_processing_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "processing",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_processing_type,
+ NULL,
+ },
};
-/* control/terminal/camera/default */
-static struct uvcg_default_camera {
- struct config_group group;
-} uvcg_default_camera;
-
-static inline struct uvcg_default_camera
-*to_uvcg_default_camera(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_camera, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera/default
+ */
-#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_CAMERA_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_camera_##cname##_show( \
struct config_item *item, char *page) \
{ \
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_camera_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent-> \
ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_camera_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -323,44 +388,40 @@ static ssize_t uvcg_default_camera_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_camera_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, identity_conv);
+UVCG_DEFAULT_CAMERA_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_CAMERA_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_CAMERA_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_CAMERA_ATTR(i_terminal, iTerminal, 8);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_min, wObjectiveFocalLengthMin,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_objective_focal_length_max, wObjectiveFocalLengthMax,
- le16_to_cpu);
+ 16);
UVCG_DEFAULT_CAMERA_ATTR(w_ocular_focal_length, wOcularFocalLength,
- le16_to_cpu);
-
-#undef identity_conv
+ 16);
#undef UVCG_DEFAULT_CAMERA_ATTR
static ssize_t uvcg_default_camera_bm_controls_show(
struct config_item *item, char *page)
{
- struct uvcg_default_camera *dc = to_uvcg_default_camera(item);
+ struct config_group *group = to_config_group(item);
struct f_uvc_opts *opts;
struct config_item *opts_item;
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex;
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
struct uvc_camera_terminal_descriptor *cd;
int result, i;
char *pg = page;
mutex_lock(su_mutex); /* for navigating configfs hierarchy */
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent->
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent->
ci_parent;
opts = to_f_uvc_opts(opts_item);
cd = &opts->uvc_camera_terminal;
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < cd->bControlSize; ++i) {
- result += sprintf(pg, "%d\n", cd->bmControls[i]);
+ result += sprintf(pg, "%u\n", cd->bmControls[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -383,54 +444,55 @@ static struct configfs_attribute *uvcg_default_camera_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_camera_type = {
- .ct_attrs = uvcg_default_camera_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_camera_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_camera_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_camera {}; */
-
-/* control/terminal/camera */
-static struct uvcg_camera_grp {
- struct config_group group;
-} uvcg_camera_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/camera
+ */
-static const struct config_item_type uvcg_camera_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_camera_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "camera",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_camera_type,
+ NULL,
+ },
};
-/* control/terminal/output/default */
-static struct uvcg_default_output {
- struct config_group group;
-} uvcg_default_output;
-
-static inline struct uvcg_default_output
-*to_uvcg_default_output(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_output, group);
-}
+/* -----------------------------------------------------------------------------
+ * control/terminal/output/default
+ */
-#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_OUTPUT_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_output_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_output *dout = to_uvcg_default_output(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dout->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_output_terminal_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dout->group.cg_item.ci_parent->ci_parent-> \
+ opts_item = group->cg_item.ci_parent->ci_parent-> \
ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_output_terminal; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -440,15 +502,11 @@ static ssize_t uvcg_default_output_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_output_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, le16_to_cpu);
-UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, identity_conv);
-UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, identity_conv);
-
-#undef identity_conv
+UVCG_DEFAULT_OUTPUT_ATTR(b_terminal_id, bTerminalID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(w_terminal_type, wTerminalType, 16);
+UVCG_DEFAULT_OUTPUT_ATTR(b_assoc_terminal, bAssocTerminal, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(b_source_id, bSourceID, 8);
+UVCG_DEFAULT_OUTPUT_ATTR(i_terminal, iTerminal, 8);
#undef UVCG_DEFAULT_OUTPUT_ATTR
@@ -461,47 +519,68 @@ static struct configfs_attribute *uvcg_default_output_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_output_type = {
- .ct_attrs = uvcg_default_output_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_output_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_output_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_output {}; */
-
-/* control/terminal/output */
-static struct uvcg_output_grp {
- struct config_group group;
-} uvcg_output_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal/output
+ */
-static const struct config_item_type uvcg_output_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_output_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "output",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_output_type,
+ NULL,
+ },
};
-/* control/terminal */
-static struct uvcg_terminal_grp {
- struct config_group group;
-} uvcg_terminal_grp;
+/* -----------------------------------------------------------------------------
+ * control/terminal
+ */
-static const struct config_item_type uvcg_terminal_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_terminal_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "terminal",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_camera_grp_type,
+ &uvcg_output_grp_type,
+ NULL,
+ },
};
-/* control/class/{fs} */
-static struct uvcg_control_class {
- struct config_group group;
-} uvcg_control_class_fs, uvcg_control_class_ss;
+/* -----------------------------------------------------------------------------
+ * control/class/{fs|ss}
+ */
+struct uvcg_control_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
**uvcg_get_ctl_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_control_class *cl = container_of(to_config_group(i),
- struct uvcg_control_class, group);
+ struct uvcg_control_class_group *group =
+ container_of(i, struct uvcg_control_class_group,
+ group.cg_item);
- if (cl == &uvcg_control_class_fs)
+ if (!strcmp(group->name, "fs"))
return o->uvc_fs_control_cls;
- if (cl == &uvcg_control_class_ss)
+ if (!strcmp(group->name, "ss"))
return o->uvc_ss_control_cls;
return NULL;
@@ -544,6 +623,7 @@ static int uvcg_control_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -579,10 +659,12 @@ static void uvcg_control_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_control_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_control_class_allow_link,
.drop_link = uvcg_control_class_drop_link,
};
@@ -592,37 +674,99 @@ static const struct config_item_type uvcg_control_class_type = {
.ct_owner = THIS_MODULE,
};
-/* control/class */
-static struct uvcg_control_class_grp {
- struct config_group group;
-} uvcg_control_class_grp;
+/* -----------------------------------------------------------------------------
+ * control/class
+ */
+
+static int uvcg_control_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "ss" };
+ unsigned int i;
-static const struct config_item_type uvcg_control_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_control_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
+
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_control_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_control_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_control_class_create_children,
};
-/* control */
-static struct uvcg_control_grp {
- struct config_group group;
-} uvcg_control_grp;
+/* -----------------------------------------------------------------------------
+ * control
+ */
+
+static ssize_t uvcg_default_control_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->control_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_control_, b_interface_number, bInterfaceNumber);
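
The new read-only bInterfaceNumber attribute exposes the interface number
that uvc_function_bind() records in opts->control_interface (see the
f_uvc.c hunk above), so userspace can discover the allocated interface
once the gadget is bound. A sketch of reading it; the configfs mount
point, gadget name "g1" and instance name "uvc.0" are assumptions, not
taken from this patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[16] = "";
            /* path is illustrative: configfs at /sys/kernel/config,
             * gadget "g1", function instance "uvc.0"
             */
            int fd = open("/sys/kernel/config/usb_gadget/g1/functions/"
                          "uvc.0/control/bInterfaceNumber", O_RDONLY);

            if (fd < 0)
                    return 1;
            if (read(fd, buf, sizeof(buf) - 1) > 0)
                    printf("control interface: %s", buf);
            close(fd);
            return 0;
    }
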
-static const struct config_item_type uvcg_control_grp_type = {
- .ct_owner = THIS_MODULE,
+static struct configfs_attribute *uvcg_default_control_attrs[] = {
+ &uvcg_default_control_attr_b_interface_number,
+ NULL,
};
-/* streaming/uncompressed */
-static struct uvcg_uncompressed_grp {
- struct config_group group;
-} uvcg_uncompressed_grp;
+static const struct uvcg_config_group_type uvcg_control_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_control_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "control",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_header_grp_type,
+ &uvcg_processing_grp_type,
+ &uvcg_terminal_grp_type,
+ &uvcg_control_class_grp_type,
+ NULL,
+ },
+};
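
Taken together, the group types above give a declarative description of
the whole control/ subtree; instantiating uvcg_control_grp_type produces
the following layout (derived from the .name/.children fields above, with
fs/ and ss/ created by uvcg_control_class_create_children()):

    control/
    ├── bInterfaceNumber
    ├── header/              (user-created header items appear here)
    ├── processing/
    │   └── default/
    ├── terminal/
    │   ├── camera/
    │   │   └── default/
    │   └── output/
    │       └── default/
    └── class/
        ├── fs/
        └── ss/
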
-/* streaming/mjpeg */
-static struct uvcg_mjpeg_grp {
- struct config_group group;
-} uvcg_mjpeg_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed
+ * streaming/mjpeg
+ */
-static struct config_item *fmt_parent[] = {
- &uvcg_uncompressed_grp.group.cg_item,
- &uvcg_mjpeg_grp.group.cg_item,
+static const char * const uvcg_format_names[] = {
+ "uncompressed",
+ "mjpeg",
};
enum uvcg_format_type {
@@ -706,7 +850,11 @@ struct uvcg_format_ptr {
struct list_head entry;
};
-/* streaming/header/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/header/<NAME>
+ * streaming/header
+ */
+
struct uvcg_streaming_header {
struct config_item item;
struct uvc_input_header_descriptor desc;
@@ -720,6 +868,8 @@ static struct uvcg_streaming_header *to_uvcg_streaming_header(struct config_item
return container_of(item, struct uvcg_streaming_header, item);
}
+static void uvcg_format_set_indices(struct config_group *fmt);
+
static int uvcg_streaming_header_allow_link(struct config_item *src,
struct config_item *target)
{
@@ -744,10 +894,22 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
goto out;
}
- for (i = 0; i < ARRAY_SIZE(fmt_parent); ++i)
- if (target->ci_parent == fmt_parent[i])
+ /*
+ * Linking is only allowed to direct children of the format nodes
+ * (streaming/uncompressed or streaming/mjpeg nodes). First check that
+ * the grandparent of the target matches the grandparent of the source
+ * (both are the streaming node), and then verify that the target's
+ * parent is a format node.
+ */
+ if (src->ci_parent->ci_parent != target->ci_parent->ci_parent)
+ goto out;
+
+ for (i = 0; i < ARRAY_SIZE(uvcg_format_names); ++i) {
+ if (!strcmp(target->ci_parent->ci_name, uvcg_format_names[i]))
break;
- if (i == ARRAY_SIZE(fmt_parent))
+ }
+
+ if (i == ARRAY_SIZE(uvcg_format_names))
goto out;
target_fmt = container_of(to_config_group(target), struct uvcg_format,
@@ -755,6 +917,8 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
if (!target_fmt)
goto out;
+ uvcg_format_set_indices(to_config_group(target));
+
format_ptr = kzalloc(sizeof(*format_ptr), GFP_KERNEL);
if (!format_ptr) {
ret = -ENOMEM;
@@ -764,6 +928,7 @@ static int uvcg_streaming_header_allow_link(struct config_item *src,
format_ptr->fmt = target_fmt;
list_add_tail(&format_ptr->entry, &src_hdr->formats);
++src_hdr->num_fmt;
+ ++target_fmt->linked;
out:
mutex_unlock(&opts->lock);
@@ -801,19 +966,22 @@ static void uvcg_streaming_header_drop_link(struct config_item *src,
break;
}
+ --target_fmt->linked;
+
out:
mutex_unlock(&opts->lock);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_header_item_ops = {
- .allow_link = uvcg_streaming_header_allow_link,
- .drop_link = uvcg_streaming_header_drop_link,
+ .release = uvcg_config_item_release,
+ .allow_link = uvcg_streaming_header_allow_link,
+ .drop_link = uvcg_streaming_header_drop_link,
};
-#define UVCG_STREAMING_HEADER_ATTR(cname, aname, conv) \
+#define UVCG_STREAMING_HEADER_ATTR(cname, aname, bits) \
static ssize_t uvcg_streaming_header_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
struct uvcg_streaming_header *sh = to_uvcg_streaming_header(item); \
struct f_uvc_opts *opts; \
@@ -827,7 +995,7 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(sh->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(sh->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -836,16 +1004,11 @@ static ssize_t uvcg_streaming_header_##cname##_show( \
\
UVC_ATTR_RO(uvcg_streaming_header_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod,
- identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, identity_conv);
-UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, identity_conv);
-
-#undef identity_conv
+UVCG_STREAMING_HEADER_ATTR(bm_info, bmInfo, 8);
+UVCG_STREAMING_HEADER_ATTR(b_terminal_link, bTerminalLink, 8);
+UVCG_STREAMING_HEADER_ATTR(b_still_capture_method, bStillCaptureMethod, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_support, bTriggerSupport, 8);
+UVCG_STREAMING_HEADER_ATTR(b_trigger_usage, bTriggerUsage, 8);
#undef UVCG_STREAMING_HEADER_ATTR
@@ -884,31 +1047,26 @@ static struct config_item
return &h->item;
}
-static void uvcg_streaming_header_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_streaming_header *h = to_uvcg_streaming_header(item);
-
- kfree(h);
-}
-
-/* streaming/header */
-static struct uvcg_streaming_header_grp {
- struct config_group group;
-} uvcg_streaming_header_grp;
-
static struct configfs_group_operations uvcg_streaming_header_grp_ops = {
.make_item = uvcg_streaming_header_make,
- .drop_item = uvcg_streaming_header_drop,
};
-static const struct config_item_type uvcg_streaming_header_grp_type = {
- .ct_group_ops = &uvcg_streaming_header_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_streaming_header_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_streaming_header_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "header",
};
-/* streaming/<mode>/<format>/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/<mode>/<format>/<NAME>
+ */
+
struct uvcg_frame {
+ struct config_item item;
+ enum uvcg_format_type fmt_type;
struct {
u8 b_length;
u8 b_descriptor_type;
@@ -924,8 +1082,6 @@ struct uvcg_frame {
u8 b_frame_interval_type;
} __attribute__((packed)) frame;
u32 *dw_frame_interval;
- enum uvcg_format_type fmt_type;
- struct config_item item;
};
static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
@@ -933,7 +1089,7 @@ static struct uvcg_frame *to_uvcg_frame(struct config_item *item)
return container_of(item, struct uvcg_frame, item);
}
-#define UVCG_FRAME_ATTR(cname, aname, to_cpu_endian, to_little_endian, bits) \
+#define UVCG_FRAME_ATTR(cname, aname, bits) \
static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_frame *f = to_uvcg_frame(item); \
@@ -948,7 +1104,7 @@ static ssize_t uvcg_frame_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", to_cpu_endian(f->frame.cname)); \
+ result = sprintf(page, "%u\n", f->frame.cname); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -963,8 +1119,8 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
struct config_item *opts_item; \
struct uvcg_format *fmt; \
struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;\
+ typeof(f->frame.cname) num; \
int ret; \
- u##bits num; \
\
ret = kstrtou##bits(page, 0, &num); \
if (ret) \
@@ -982,7 +1138,7 @@ static ssize_t uvcg_frame_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- f->frame.cname = to_little_endian(num); \
+ f->frame.cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -992,20 +1148,48 @@ end: \
\
UVC_ATTR(uvcg_frame_, cname, aname);
-#define noop_conversion(x) (x)
+static ssize_t uvcg_frame_b_frame_index_show(struct config_item *item,
+ char *page)
+{
+ struct uvcg_frame *f = to_uvcg_frame(item);
+ struct uvcg_format *fmt;
+ struct f_uvc_opts *opts;
+ struct config_item *opts_item;
+ struct config_item *fmt_item;
+ struct mutex *su_mutex = &f->item.ci_group->cg_subsys->su_mutex;
+ int result;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ fmt_item = f->item.ci_parent;
+ fmt = to_uvcg_format(fmt_item);
-UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, noop_conversion,
- noop_conversion, 8);
-UVCG_FRAME_ATTR(w_width, wWidth, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(w_height, wHeight, le16_to_cpu, cpu_to_le16, 16);
-UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize,
- le32_to_cpu, cpu_to_le32, 32);
-UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval,
- le32_to_cpu, cpu_to_le32, 32);
+ if (!fmt->linked) {
+ result = -EBUSY;
+ goto out;
+ }
-#undef noop_conversion
+ opts_item = fmt_item->ci_parent->ci_parent->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result = sprintf(page, "%u\n", f->frame.b_frame_index);
+ mutex_unlock(&opts->lock);
+
+out:
+ mutex_unlock(su_mutex);
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_frame_, b_frame_index, bFrameIndex);
+
+UVCG_FRAME_ATTR(bm_capabilities, bmCapabilities, 8);
+UVCG_FRAME_ATTR(w_width, wWidth, 16);
+UVCG_FRAME_ATTR(w_height, wHeight, 16);
+UVCG_FRAME_ATTR(dw_min_bit_rate, dwMinBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_bit_rate, dwMaxBitRate, 32);
+UVCG_FRAME_ATTR(dw_max_video_frame_buffer_size, dwMaxVideoFrameBufferSize, 32);
+UVCG_FRAME_ATTR(dw_default_frame_interval, dwDefaultFrameInterval, 32);
#undef UVCG_FRAME_ATTR
@@ -1026,8 +1210,7 @@ static ssize_t uvcg_frame_dw_frame_interval_show(struct config_item *item,
mutex_lock(&opts->lock);
for (result = 0, i = 0; i < frm->frame.b_frame_interval_type; ++i) {
- result += sprintf(pg, "%d\n",
- le32_to_cpu(frm->dw_frame_interval[i]));
+ result += sprintf(pg, "%u\n", frm->dw_frame_interval[i]);
pg = page + result;
}
mutex_unlock(&opts->lock);
@@ -1052,7 +1235,7 @@ static inline int __uvcg_fill_frm_intrv(char *buf, void *priv)
return ret;
interv = priv;
- **interv = cpu_to_le32(num);
+ **interv = num;
++*interv;
return 0;
@@ -1129,6 +1312,8 @@ static ssize_t uvcg_frame_dw_frame_interval_store(struct config_item *item,
kfree(ch->dw_frame_interval);
ch->dw_frame_interval = frm_intrv;
ch->frame.b_frame_interval_type = n;
+ sort(ch->dw_frame_interval, n, sizeof(*ch->dw_frame_interval),
+ uvcg_config_compare_u32, NULL);
ret = len;
end:
@@ -1140,6 +1325,7 @@ end:
UVC_ATTR(uvcg_frame_, dw_frame_interval, dwFrameInterval);
static struct configfs_attribute *uvcg_frame_attrs[] = {
+ &uvcg_frame_attr_b_frame_index,
&uvcg_frame_attr_bm_capabilities,
&uvcg_frame_attr_w_width,
&uvcg_frame_attr_w_height,
@@ -1152,6 +1338,7 @@ static struct configfs_attribute *uvcg_frame_attrs[] = {
};
static const struct config_item_type uvcg_frame_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_attrs = uvcg_frame_attrs,
.ct_owner = THIS_MODULE,
};
@@ -1170,12 +1357,12 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
h->frame.b_descriptor_type = USB_DT_CS_INTERFACE;
h->frame.b_frame_index = 1;
- h->frame.w_width = cpu_to_le16(640);
- h->frame.w_height = cpu_to_le16(360);
- h->frame.dw_min_bit_rate = cpu_to_le32(18432000);
- h->frame.dw_max_bit_rate = cpu_to_le32(55296000);
- h->frame.dw_max_video_frame_buffer_size = cpu_to_le32(460800);
- h->frame.dw_default_frame_interval = cpu_to_le32(666666);
+ h->frame.w_width = 640;
+ h->frame.w_height = 360;
+ h->frame.dw_min_bit_rate = 18432000;
+ h->frame.dw_max_bit_rate = 55296000;
+ h->frame.dw_max_video_frame_buffer_size = 460800;
+ h->frame.dw_default_frame_interval = 666666;
opts_item = group->cg_item.ci_parent->ci_parent->ci_parent;
opts = to_f_uvc_opts(opts_item);
@@ -1203,7 +1390,6 @@ static struct config_item *uvcg_frame_make(struct config_group *group,
static void uvcg_frame_drop(struct config_group *group, struct config_item *item)
{
- struct uvcg_frame *h = to_uvcg_frame(item);
struct uvcg_format *fmt;
struct f_uvc_opts *opts;
struct config_item *opts_item;
@@ -1214,11 +1400,31 @@ static void uvcg_frame_drop(struct config_group *group, struct config_item *item
mutex_lock(&opts->lock);
fmt = to_uvcg_format(&group->cg_item);
--fmt->num_frames;
- kfree(h);
mutex_unlock(&opts->lock);
+
+ config_item_put(item);
+}
+
+static void uvcg_format_set_indices(struct config_group *fmt)
+{
+ struct config_item *ci;
+ unsigned int i = 1;
+
+ list_for_each_entry(ci, &fmt->cg_children, ci_entry) {
+ struct uvcg_frame *frm;
+
+ if (ci->ci_type != &uvcg_frame_type)
+ continue;
+
+ frm = to_uvcg_frame(ci);
+ frm->frame.b_frame_index = i++;
+ }
}
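
uvcg_format_set_indices() renumbers a format's frames in configfs child
order each time the format is linked into a streaming header (see
uvcg_streaming_header_allow_link() above), keeping bFrameIndex values
contiguous and 1-based regardless of the order in which frames were
created or dropped. Combined with the b_frame_index attribute added
earlier, which returns -EBUSY while the format is unlinked, the observable
behaviour is roughly as follows:

    /* sketch of the resulting behaviour, directory names illustrative:
     *
     *   cat streaming/uncompressed/u/720p/bFrameIndex -> -EBUSY (unlinked)
     *   ln -s streaming/uncompressed/u streaming/header/h/u
     *   cat streaming/uncompressed/u/720p/bFrameIndex -> 1
     *   cat streaming/uncompressed/u/360p/bFrameIndex -> 2  (child order)
     */
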
-/* streaming/uncompressed/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/uncompressed/<NAME>
+ */
+
struct uvcg_uncompressed {
struct uvcg_format fmt;
struct uvc_format_uncompressed desc;
@@ -1290,7 +1496,7 @@ end:
UVC_ATTR(uvcg_uncompressed_, guid_format, guidFormat);
-#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1306,7 +1512,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1315,7 +1521,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
\
UVC_ATTR_RO(uvcg_uncompressed_, cname, aname);
-#define UVCG_UNCOMPRESSED_ATTR(cname, aname, conv) \
+#define UVCG_UNCOMPRESSED_ATTR(cname, aname, bits) \
static ssize_t uvcg_uncompressed_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -1331,7 +1537,7 @@ static ssize_t uvcg_uncompressed_##cname##_show( \
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1378,16 +1584,12 @@ end: \
\
UVC_ATTR(uvcg_uncompressed_, cname, aname);
-#define identity_conv(x) (x)
-
-UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, identity_conv);
-UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_UNCOMPRESSED_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_UNCOMPRESSED_ATTR(b_bits_per_pixel, bBitsPerPixel, 8);
+UVCG_UNCOMPRESSED_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_UNCOMPRESSED_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_UNCOMPRESSED_ATTR
#undef UVCG_UNCOMPRESSED_ATTR_RO
@@ -1410,6 +1612,7 @@ uvcg_uncompressed_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_uncompressed_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
+ &uvcg_uncompressed_attr_b_format_index,
&uvcg_uncompressed_attr_guid_format,
&uvcg_uncompressed_attr_b_bits_per_pixel,
&uvcg_uncompressed_attr_b_default_frame_index,
@@ -1421,6 +1624,7 @@ static struct configfs_attribute *uvcg_uncompressed_attrs[] = {
};
static const struct config_item_type uvcg_uncompressed_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_uncompressed_group_ops,
.ct_attrs = uvcg_uncompressed_attrs,
.ct_owner = THIS_MODULE,
@@ -1457,25 +1661,23 @@ static struct config_group *uvcg_uncompressed_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_uncompressed_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_uncompressed *h = to_uvcg_uncompressed(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_uncompressed_grp_ops = {
.make_group = uvcg_uncompressed_make,
- .drop_item = uvcg_uncompressed_drop,
};
-static const struct config_item_type uvcg_uncompressed_grp_type = {
- .ct_group_ops = &uvcg_uncompressed_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_uncompressed_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_uncompressed_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "uncompressed",
};
-/* streaming/mjpeg/<NAME> */
+/* -----------------------------------------------------------------------------
+ * streaming/mjpeg/<NAME>
+ */
+
struct uvcg_mjpeg {
struct uvcg_format fmt;
struct uvc_format_mjpeg desc;
@@ -1493,7 +1695,7 @@ static struct configfs_group_operations uvcg_mjpeg_group_ops = {
.drop_item = uvcg_frame_drop,
};
-#define UVCG_MJPEG_ATTR_RO(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR_RO(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1508,7 +1710,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1517,7 +1719,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
\
UVC_ATTR_RO(uvcg_mjpeg_, cname, aname)
-#define UVCG_MJPEG_ATTR(cname, aname, conv) \
+#define UVCG_MJPEG_ATTR(cname, aname, bits) \
static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
{ \
struct uvcg_mjpeg *u = to_uvcg_mjpeg(item); \
@@ -1532,7 +1734,7 @@ static ssize_t uvcg_mjpeg_##cname##_show(struct config_item *item, char *page)\
opts = to_f_uvc_opts(opts_item); \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(u->desc.aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(u->desc.aname));\
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1579,16 +1781,12 @@ end: \
\
UVC_ATTR(uvcg_mjpeg_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex,
- identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, identity_conv);
-UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, identity_conv);
-UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, identity_conv);
-
-#undef identity_conv
+UVCG_MJPEG_ATTR_RO(b_format_index, bFormatIndex, 8);
+UVCG_MJPEG_ATTR(b_default_frame_index, bDefaultFrameIndex, 8);
+UVCG_MJPEG_ATTR_RO(bm_flags, bmFlags, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_x, bAspectRatioX, 8);
+UVCG_MJPEG_ATTR_RO(b_aspect_ratio_y, bAspectRatioY, 8);
+UVCG_MJPEG_ATTR_RO(bm_interface_flags, bmInterfaceFlags, 8);
#undef UVCG_MJPEG_ATTR
#undef UVCG_MJPEG_ATTR_RO
@@ -1611,6 +1809,7 @@ uvcg_mjpeg_bma_controls_store(struct config_item *item,
UVC_ATTR(uvcg_mjpeg_, bma_controls, bmaControls);
static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
+ &uvcg_mjpeg_attr_b_format_index,
&uvcg_mjpeg_attr_b_default_frame_index,
&uvcg_mjpeg_attr_bm_flags,
&uvcg_mjpeg_attr_b_aspect_ratio_x,
@@ -1621,6 +1820,7 @@ static struct configfs_attribute *uvcg_mjpeg_attrs[] = {
};
static const struct config_item_type uvcg_mjpeg_type = {
+ .ct_item_ops = &uvcg_config_item_ops,
.ct_group_ops = &uvcg_mjpeg_group_ops,
.ct_attrs = uvcg_mjpeg_attrs,
.ct_owner = THIS_MODULE,
@@ -1651,56 +1851,42 @@ static struct config_group *uvcg_mjpeg_make(struct config_group *group,
return &h->fmt.group;
}
-static void uvcg_mjpeg_drop(struct config_group *group,
- struct config_item *item)
-{
- struct uvcg_mjpeg *h = to_uvcg_mjpeg(item);
-
- kfree(h);
-}
-
static struct configfs_group_operations uvcg_mjpeg_grp_ops = {
.make_group = uvcg_mjpeg_make,
- .drop_item = uvcg_mjpeg_drop,
};
-static const struct config_item_type uvcg_mjpeg_grp_type = {
- .ct_group_ops = &uvcg_mjpeg_grp_ops,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_mjpeg_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_group_ops = &uvcg_mjpeg_grp_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "mjpeg",
};
-/* streaming/color_matching/default */
-static struct uvcg_default_color_matching {
- struct config_group group;
-} uvcg_default_color_matching;
-
-static inline struct uvcg_default_color_matching
-*to_uvcg_default_color_matching(struct config_item *item)
-{
- return container_of(to_config_group(item),
- struct uvcg_default_color_matching, group);
-}
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching/default
+ */
-#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, conv) \
+#define UVCG_DEFAULT_COLOR_MATCHING_ATTR(cname, aname, bits) \
static ssize_t uvcg_default_color_matching_##cname##_show( \
- struct config_item *item, char *page) \
+ struct config_item *item, char *page) \
{ \
- struct uvcg_default_color_matching *dc = \
- to_uvcg_default_color_matching(item); \
+ struct config_group *group = to_config_group(item); \
struct f_uvc_opts *opts; \
struct config_item *opts_item; \
- struct mutex *su_mutex = &dc->group.cg_subsys->su_mutex; \
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex; \
struct uvc_color_matching_descriptor *cd; \
int result; \
\
mutex_lock(su_mutex); /* for navigating configfs hierarchy */ \
\
- opts_item = dc->group.cg_item.ci_parent->ci_parent->ci_parent; \
+ opts_item = group->cg_item.ci_parent->ci_parent->ci_parent; \
opts = to_f_uvc_opts(opts_item); \
cd = &opts->uvc_color_matching; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(cd->aname)); \
+ result = sprintf(page, "%u\n", le##bits##_to_cpu(cd->aname)); \
mutex_unlock(&opts->lock); \
\
mutex_unlock(su_mutex); \
@@ -1709,16 +1895,10 @@ static ssize_t uvcg_default_color_matching_##cname##_show( \
\
UVC_ATTR_RO(uvcg_default_color_matching_, cname, aname)
-#define identity_conv(x) (x)
-
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries,
- identity_conv);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_color_primaries, bColorPrimaries, 8);
UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_transfer_characteristics,
- bTransferCharacteristics, identity_conv);
-UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients,
- identity_conv);
-
-#undef identity_conv
+ bTransferCharacteristics, 8);
+UVCG_DEFAULT_COLOR_MATCHING_ATTR(b_matrix_coefficients, bMatrixCoefficients, 8);
#undef UVCG_DEFAULT_COLOR_MATCHING_ATTR
@@ -1729,41 +1909,54 @@ static struct configfs_attribute *uvcg_default_color_matching_attrs[] = {
NULL,
};
-static const struct config_item_type uvcg_default_color_matching_type = {
- .ct_attrs = uvcg_default_color_matching_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_default_color_matching_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_color_matching_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "default",
};
-/* struct uvcg_color_matching {}; */
-
-/* streaming/color_matching */
-static struct uvcg_color_matching_grp {
- struct config_group group;
-} uvcg_color_matching_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/color_matching
+ */
-static const struct config_item_type uvcg_color_matching_grp_type = {
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvcg_color_matching_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "color_matching",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_default_color_matching_type,
+ NULL,
+ },
};
-/* streaming/class/{fs|hs|ss} */
-static struct uvcg_streaming_class {
- struct config_group group;
-} uvcg_streaming_class_fs, uvcg_streaming_class_hs, uvcg_streaming_class_ss;
+/* -----------------------------------------------------------------------------
+ * streaming/class/{fs|hs|ss}
+ */
+struct uvcg_streaming_class_group {
+ struct config_group group;
+ const char *name;
+};
static inline struct uvc_descriptor_header
***__uvcg_get_stream_class_arr(struct config_item *i, struct f_uvc_opts *o)
{
- struct uvcg_streaming_class *cl = container_of(to_config_group(i),
- struct uvcg_streaming_class, group);
+ struct uvcg_streaming_class_group *group =
+ container_of(i, struct uvcg_streaming_class_group,
+ group.cg_item);
- if (cl == &uvcg_streaming_class_fs)
+ if (!strcmp(group->name, "fs"))
return &o->uvc_fs_streaming_cls;
- if (cl == &uvcg_streaming_class_hs)
+ if (!strcmp(group->name, "hs"))
return &o->uvc_hs_streaming_cls;
- if (cl == &uvcg_streaming_class_ss)
+ if (!strcmp(group->name, "ss"))
return &o->uvc_ss_streaming_cls;
return NULL;
@@ -1922,24 +2115,22 @@ static int __uvcg_fill_strm(void *priv1, void *priv2, void *priv3, int n,
struct uvcg_format *fmt = priv1;
if (fmt->type == UVCG_UNCOMPRESSED) {
- struct uvc_format_uncompressed *unc = *dest;
struct uvcg_uncompressed *u =
container_of(fmt, struct uvcg_uncompressed,
fmt);
+ u->desc.bFormatIndex = n + 1;
+ u->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &u->desc, sizeof(u->desc));
*dest += sizeof(u->desc);
- unc->bNumFrameDescriptors = fmt->num_frames;
- unc->bFormatIndex = n + 1;
} else if (fmt->type == UVCG_MJPEG) {
- struct uvc_format_mjpeg *mjp = *dest;
struct uvcg_mjpeg *m =
container_of(fmt, struct uvcg_mjpeg, fmt);
+ m->desc.bFormatIndex = n + 1;
+ m->desc.bNumFrameDescriptors = fmt->num_frames;
memcpy(*dest, &m->desc, sizeof(m->desc));
*dest += sizeof(m->desc);
- mjp->bNumFrameDescriptors = fmt->num_frames;
- mjp->bFormatIndex = n + 1;
} else {
return -EINVAL;
}
@@ -2038,6 +2229,7 @@ static int uvcg_streaming_class_allow_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
return ret;
}
@@ -2078,10 +2270,12 @@ static void uvcg_streaming_class_drop_link(struct config_item *src,
unlock:
mutex_unlock(&opts->lock);
out:
+ config_item_put(header);
mutex_unlock(su_mutex);
}
static struct configfs_item_operations uvcg_streaming_class_item_ops = {
+ .release = uvcg_config_item_release,
.allow_link = uvcg_streaming_class_allow_link,
.drop_link = uvcg_streaming_class_drop_link,
};
@@ -2091,36 +2285,109 @@ static const struct config_item_type uvcg_streaming_class_type = {
.ct_owner = THIS_MODULE,
};
-/* streaming/class */
-static struct uvcg_streaming_class_grp {
- struct config_group group;
-} uvcg_streaming_class_grp;
+/* -----------------------------------------------------------------------------
+ * streaming/class
+ */
+
+static int uvcg_streaming_class_create_children(struct config_group *parent)
+{
+ static const char * const names[] = { "fs", "hs", "ss" };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(names); ++i) {
+ struct uvcg_streaming_class_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return -ENOMEM;
-static const struct config_item_type uvcg_streaming_class_grp_type = {
- .ct_owner = THIS_MODULE,
+ group->name = names[i];
+
+ config_group_init_type_name(&group->group, group->name,
+ &uvcg_streaming_class_type);
+ configfs_add_default_group(&group->group, parent);
+ }
+
+ return 0;
+}
+
+static const struct uvcg_config_group_type uvcg_streaming_class_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "class",
+ .create_children = uvcg_streaming_class_create_children,
};
-/* streaming */
-static struct uvcg_streaming_grp {
- struct config_group group;
-} uvcg_streaming_grp;
+/* -----------------------------------------------------------------------------
+ * streaming
+ */
-static const struct config_item_type uvcg_streaming_grp_type = {
- .ct_owner = THIS_MODULE,
+static ssize_t uvcg_default_streaming_b_interface_number_show(
+ struct config_item *item, char *page)
+{
+ struct config_group *group = to_config_group(item);
+ struct mutex *su_mutex = &group->cg_subsys->su_mutex;
+ struct config_item *opts_item;
+ struct f_uvc_opts *opts;
+ int result = 0;
+
+ mutex_lock(su_mutex); /* for navigating configfs hierarchy */
+
+ opts_item = item->ci_parent;
+ opts = to_f_uvc_opts(opts_item);
+
+ mutex_lock(&opts->lock);
+ result += sprintf(page, "%u\n", opts->streaming_interface);
+ mutex_unlock(&opts->lock);
+
+ mutex_unlock(su_mutex);
+
+ return result;
+}
+
+UVC_ATTR_RO(uvcg_default_streaming_, b_interface_number, bInterfaceNumber);
+
+static struct configfs_attribute *uvcg_default_streaming_attrs[] = {
+ &uvcg_default_streaming_attr_b_interface_number,
+ NULL,
+};
+
+static const struct uvcg_config_group_type uvcg_streaming_grp_type = {
+ .type = {
+ .ct_item_ops = &uvcg_config_item_ops,
+ .ct_attrs = uvcg_default_streaming_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "streaming",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_streaming_header_grp_type,
+ &uvcg_uncompressed_grp_type,
+ &uvcg_mjpeg_grp_type,
+ &uvcg_color_matching_grp_type,
+ &uvcg_streaming_class_grp_type,
+ NULL,
+ },
};
-static void uvc_attr_release(struct config_item *item)
+/* -----------------------------------------------------------------------------
+ * UVC function
+ */
+
+static void uvc_func_item_release(struct config_item *item)
{
struct f_uvc_opts *opts = to_f_uvc_opts(item);
+ uvcg_config_remove_children(to_config_group(item));
usb_put_function_instance(&opts->func_inst);
}
-static struct configfs_item_operations uvc_item_ops = {
- .release = uvc_attr_release,
+static struct configfs_item_operations uvc_func_item_ops = {
+ .release = uvc_func_item_release,
};
-#define UVCG_OPTS_ATTR(cname, aname, conv, str2u, uxx, vnoc, limit) \
+#define UVCG_OPTS_ATTR(cname, aname, limit) \
static ssize_t f_uvc_opts_##cname##_show( \
struct config_item *item, char *page) \
{ \
@@ -2128,7 +2395,7 @@ static ssize_t f_uvc_opts_##cname##_show( \
int result; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", conv(opts->cname)); \
+ result = sprintf(page, "%u\n", opts->cname); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -2139,8 +2406,8 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
const char *page, size_t len) \
{ \
struct f_uvc_opts *opts = to_f_uvc_opts(item); \
+ unsigned int num; \
int ret; \
- uxx num; \
\
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
@@ -2148,7 +2415,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
goto end; \
} \
\
- ret = str2u(page, 0, &num); \
+ ret = kstrtouint(page, 0, &num); \
if (ret) \
goto end; \
\
@@ -2156,7 +2423,7 @@ f_uvc_opts_##cname##_store(struct config_item *item, \
ret = -EINVAL; \
goto end; \
} \
- opts->cname = vnoc(num); \
+ opts->cname = num; \
ret = len; \
end: \
mutex_unlock(&opts->lock); \
@@ -2165,16 +2432,9 @@ end: \
\
UVC_ATTR(f_uvc_opts_, cname, cname)
-#define identity_conv(x) (x)
-
-UVCG_OPTS_ATTR(streaming_interval, streaming_interval, identity_conv,
- kstrtou8, u8, identity_conv, 16);
-UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, le16_to_cpu,
- kstrtou16, u16, le16_to_cpu, 3072);
-UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, identity_conv,
- kstrtou8, u8, identity_conv, 15);
-
-#undef identity_conv
+UVCG_OPTS_ATTR(streaming_interval, streaming_interval, 16);
+UVCG_OPTS_ATTR(streaming_maxpacket, streaming_maxpacket, 3072);
+UVCG_OPTS_ATTR(streaming_maxburst, streaming_maxburst, 15);
#undef UVCG_OPTS_ATTR
@@ -2185,123 +2445,31 @@ static struct configfs_attribute *uvc_attrs[] = {
NULL,
};
-static const struct config_item_type uvc_func_type = {
- .ct_item_ops = &uvc_item_ops,
- .ct_attrs = uvc_attrs,
- .ct_owner = THIS_MODULE,
+static const struct uvcg_config_group_type uvc_func_type = {
+ .type = {
+ .ct_item_ops = &uvc_func_item_ops,
+ .ct_attrs = uvc_attrs,
+ .ct_owner = THIS_MODULE,
+ },
+ .name = "",
+ .children = (const struct uvcg_config_group_type*[]) {
+ &uvcg_control_grp_type,
+ &uvcg_streaming_grp_type,
+ NULL,
+ },
};
int uvcg_attach_configfs(struct f_uvc_opts *opts)
{
- config_group_init_type_name(&uvcg_control_header_grp.group,
- "header",
- &uvcg_control_header_grp_type);
-
- config_group_init_type_name(&uvcg_default_processing.group,
- "default", &uvcg_default_processing_type);
- config_group_init_type_name(&uvcg_processing_grp.group,
- "processing", &uvcg_processing_grp_type);
- configfs_add_default_group(&uvcg_default_processing.group,
- &uvcg_processing_grp.group);
-
- config_group_init_type_name(&uvcg_default_camera.group,
- "default", &uvcg_default_camera_type);
- config_group_init_type_name(&uvcg_camera_grp.group,
- "camera", &uvcg_camera_grp_type);
- configfs_add_default_group(&uvcg_default_camera.group,
- &uvcg_camera_grp.group);
-
- config_group_init_type_name(&uvcg_default_output.group,
- "default", &uvcg_default_output_type);
- config_group_init_type_name(&uvcg_output_grp.group,
- "output", &uvcg_output_grp_type);
- configfs_add_default_group(&uvcg_default_output.group,
- &uvcg_output_grp.group);
-
- config_group_init_type_name(&uvcg_terminal_grp.group,
- "terminal", &uvcg_terminal_grp_type);
- configfs_add_default_group(&uvcg_camera_grp.group,
- &uvcg_terminal_grp.group);
- configfs_add_default_group(&uvcg_output_grp.group,
- &uvcg_terminal_grp.group);
-
- config_group_init_type_name(&uvcg_control_class_fs.group,
- "fs", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_ss.group,
- "ss", &uvcg_control_class_type);
- config_group_init_type_name(&uvcg_control_class_grp.group,
- "class",
- &uvcg_control_class_grp_type);
- configfs_add_default_group(&uvcg_control_class_fs.group,
- &uvcg_control_class_grp.group);
- configfs_add_default_group(&uvcg_control_class_ss.group,
- &uvcg_control_class_grp.group);
-
- config_group_init_type_name(&uvcg_control_grp.group,
- "control",
- &uvcg_control_grp_type);
- configfs_add_default_group(&uvcg_control_header_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_processing_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_terminal_grp.group,
- &uvcg_control_grp.group);
- configfs_add_default_group(&uvcg_control_class_grp.group,
- &uvcg_control_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_header_grp.group,
- "header",
- &uvcg_streaming_header_grp_type);
- config_group_init_type_name(&uvcg_uncompressed_grp.group,
- "uncompressed",
- &uvcg_uncompressed_grp_type);
- config_group_init_type_name(&uvcg_mjpeg_grp.group,
- "mjpeg",
- &uvcg_mjpeg_grp_type);
- config_group_init_type_name(&uvcg_default_color_matching.group,
- "default",
- &uvcg_default_color_matching_type);
- config_group_init_type_name(&uvcg_color_matching_grp.group,
- "color_matching",
- &uvcg_color_matching_grp_type);
- configfs_add_default_group(&uvcg_default_color_matching.group,
- &uvcg_color_matching_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_class_fs.group,
- "fs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_hs.group,
- "hs", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_ss.group,
- "ss", &uvcg_streaming_class_type);
- config_group_init_type_name(&uvcg_streaming_class_grp.group,
- "class", &uvcg_streaming_class_grp_type);
- configfs_add_default_group(&uvcg_streaming_class_fs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_hs.group,
- &uvcg_streaming_class_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_ss.group,
- &uvcg_streaming_class_grp.group);
-
- config_group_init_type_name(&uvcg_streaming_grp.group,
- "streaming", &uvcg_streaming_grp_type);
- configfs_add_default_group(&uvcg_streaming_header_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_uncompressed_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_mjpeg_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_color_matching_grp.group,
- &uvcg_streaming_grp.group);
- configfs_add_default_group(&uvcg_streaming_class_grp.group,
- &uvcg_streaming_grp.group);
-
- config_group_init_type_name(&opts->func_inst.group,
- "",
- &uvc_func_type);
- configfs_add_default_group(&uvcg_control_grp.group,
- &opts->func_inst.group);
- configfs_add_default_group(&uvcg_streaming_grp.group,
- &opts->func_inst.group);
+ int ret;
- return 0;
+ config_group_init_type_name(&opts->func_inst.group, uvc_func_type.name,
+ &uvc_func_type.type);
+
+ ret = uvcg_config_create_children(&opts->func_inst.group,
+ &uvc_func_type);
+ if (ret < 0)
+ config_group_put(&opts->func_inst.group);
+
+ return ret;
}
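
The data-driven tree above replaces the long hand-written configfs setup removed in this hunk: each uvcg_config_group_type names itself and lists its children, and a recursive helper instantiates the whole hierarchy. The helper pair (uvcg_config_create_children()/uvcg_config_remove_children()) is defined earlier in this patch; what follows is only a minimal sketch of the shape implied by its call sites, not the verbatim implementation:

        static int uvcg_config_create_group(struct config_group *parent,
                                            const struct uvcg_config_group_type *type);

        /* Sketch only: the real helper lives earlier in this diff. */
        static int uvcg_config_create_children(struct config_group *group,
                                               const struct uvcg_config_group_type *type)
        {
                const struct uvcg_config_group_type **child;
                int ret;

                /* Types with a hook (e.g. streaming/class) build their own children. */
                if (type->create_children)
                        return type->create_children(group);

                for (child = type->children; child && *child; ++child) {
                        ret = uvcg_config_create_group(group, *child);
                        if (ret < 0)
                                return ret;
                }

                return 0;
        }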
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 7f1ca3b57823..a1183eccee22 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -115,8 +115,8 @@ uvc_v4l2_set_format(struct file *file, void *fh, struct v4l2_format *fmt)
}
if (i == ARRAY_SIZE(uvc_formats)) {
- printk(KERN_INFO "Unsupported format 0x%08x.\n",
- fmt->fmt.pix.pixelformat);
+ uvcg_info(&uvc->func, "Unsupported format 0x%08x.\n",
+ fmt->fmt.pix.pixelformat);
return -EINVAL;
}
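
uvcg_info() and its uvcg_dbg()/uvcg_err() siblings are introduced elsewhere in this series; from the call sites they are dev_* wrappers that prefix each message with the UVC function's name. A plausible definition, stated as an assumption rather than the verbatim one:

        #define uvcg_info(f, fmt, args...) \
                dev_info(&(f)->config->cdev->gadget->dev, "%s: " fmt, \
                         (f)->name, ##args)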
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index d3567b90343a..5c042f380708 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -125,6 +125,23 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
* Request handling
*/
+static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
+{
+ int ret;
+
+ ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ if (ret < 0) {
+ uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
+ ret);
+
+ /* Isochronous endpoints can't be halted. */
+ if (usb_endpoint_xfer_bulk(video->ep->desc))
+ usb_ep_set_halt(video->ep);
+ }
+
+ return ret;
+}
+
/*
* I somehow feel that synchronisation won't be easy to achieve here. We have
* three events that control USB requests submission:
@@ -169,13 +186,14 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
break;
case -ESHUTDOWN: /* disconnect from host. */
- printk(KERN_DEBUG "VS request cancelled.\n");
+ uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
uvcg_queue_cancel(queue, 1);
goto requeue;
default:
- printk(KERN_INFO "VS request completed with status %d.\n",
- req->status);
+ uvcg_info(&video->uvc->func,
+ "VS request completed with status %d.\n",
+ req->status);
uvcg_queue_cancel(queue, 0);
goto requeue;
}
@@ -189,14 +207,13 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
video->encode(req, video, buf);
- if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) {
- printk(KERN_INFO "Failed to queue request (%d).\n", ret);
- usb_ep_set_halt(ep);
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
+ if (ret < 0) {
uvcg_queue_cancel(queue, 0);
goto requeue;
}
- spin_unlock_irqrestore(&video->queue.irqlock, flags);
return;
@@ -316,15 +333,13 @@ int uvcg_video_pump(struct uvc_video *video)
video->encode(req, video, buf);
/* Queue the USB request */
- ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
+ ret = uvcg_video_ep_queue(video, req);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+
if (ret < 0) {
- printk(KERN_INFO "Failed to queue request (%d)\n", ret);
- usb_ep_set_halt(video->ep);
- spin_unlock_irqrestore(&queue->irqlock, flags);
uvcg_queue_cancel(queue, 0);
break;
}
- spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
@@ -342,8 +357,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
int ret;
if (video->ep == NULL) {
- printk(KERN_INFO "Video enable failed, device is "
- "uninitialized.\n");
+ uvcg_info(&video->uvc->func,
+ "Video enable failed, device is uninitialized.\n");
return -ENODEV;
}
@@ -375,11 +390,12 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
/*
* Initialize the UVC video stream.
*/
-int uvcg_video_init(struct uvc_video *video)
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);
+ video->uvc = uvc;
video->fcc = V4L2_PIX_FMT_YUYV;
video->bpp = 16;
video->width = 320;
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 7d77122b0ff9..278dc52c7604 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -18,6 +18,6 @@ int uvcg_video_pump(struct uvc_video *video);
int uvcg_video_enable(struct uvc_video *video, int enable);
-int uvcg_video_init(struct uvc_video *video);
+int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc);
#endif /* __UVC_VIDEO_H__ */
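
The extra uvcg_video_init() parameter exists so uvc_video can reach the owning uvc_device (and thus &uvc->func) for the uvcg_* messages above. A hypothetical call site in the f_uvc bind path, names per this diff:

        /* sketch: the real call sits in f_uvc.c's bind code */
        ret = uvcg_video_init(&uvc->video, uvc);
        if (ret < 0)
                goto error;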
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 5939eb1e97f2..4a28e3fbeb0b 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -353,7 +353,7 @@ static int ast_vhub_epn_queue(struct usb_ep* u_ep, struct usb_request *u_req,
/* Endpoint enabled ? */
if (!ep->epn.enabled || !u_ep->desc || !ep->dev || !ep->d_idx ||
!ep->dev->enabled || ep->dev->suspended) {
- EPDBG(ep,"Enqueing request on wrong or disabled EP\n");
+ EPDBG(ep, "Enqueuing request on wrong or disabled EP\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 17147b8c771e..11247322d587 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2004,7 +2004,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
struct usba_udc *udc)
{
u32 val;
- const char *name;
struct device_node *np = pdev->dev.of_node;
const struct of_device_id *match;
struct device_node *pp;
@@ -2018,6 +2017,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
udc->errata = match->data;
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc");
if (IS_ERR(udc->pmc))
+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc");
+ if (IS_ERR(udc->pmc))
udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc");
if (udc->errata && IS_ERR(udc->pmc))
return ERR_CAST(udc->pmc);
@@ -2094,11 +2095,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->can_dma = of_property_read_bool(pp, "atmel,can-dma");
ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc");
- ret = of_property_read_string(pp, "name", &name);
- if (ret) {
- dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
- goto err;
- }
sprintf(ep->name, "ep%d", ep->index);
ep->ep.name = ep->name;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index af88b48c1cea..87d6b12779f2 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -690,6 +690,9 @@ EXPORT_SYMBOL_GPL(usb_gadget_connect);
* as a disconnect (when a VBUS session is active). Not all systems
* support software pullup controls.
*
+ * Following a successful disconnect, invoke the ->disconnect() callback
+ * for the current gadget driver so that UDC drivers don't need to.
+ *
* Returns zero on success, else negative errno.
*/
int usb_gadget_disconnect(struct usb_gadget *gadget)
@@ -711,8 +714,10 @@ int usb_gadget_disconnect(struct usb_gadget *gadget)
}
ret = gadget->ops->pullup(gadget, 0);
- if (!ret)
+ if (!ret) {
gadget->connected = 0;
+ gadget->udc->driver->disconnect(gadget);
+ }
out:
trace_usb_gadget_disconnect(gadget, ret);
@@ -1281,7 +1286,6 @@ static void usb_gadget_remove_driver(struct usb_udc *udc)
kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE);
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
udc->driver->unbind(udc->gadget);
usb_gadget_udc_stop(udc);
@@ -1471,7 +1475,6 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_connect(udc->gadget);
} else if (sysfs_streq(buf, "disconnect")) {
usb_gadget_disconnect(udc->gadget);
- udc->driver->disconnect(udc->gadget);
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
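
Taken together, the core.c hunks move the ->disconnect() invocation into usb_gadget_disconnect() itself, so the two callers above (driver removal and the sysfs soft_connect store) drop their explicit calls, and UDC drivers such as net2280 below stop open-coding it in ->pullup(). Gadget drivers still only implement the callback; a hypothetical example:

        /* hypothetical function driver callback (sketch) */
        static void my_disconnect(struct usb_gadget *gadget)
        {
                struct my_dev *dev = get_gadget_data(gadget);

                /* runs after the pullup was successfully removed */
                my_reset_config(dev);
        }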
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 587c5037ff07..bc6abaea907d 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -741,7 +741,7 @@ static void fotg210_get_status(struct fotg210_udc *fotg210,
fotg210->ep0_req->length = 2;
spin_unlock(&fotg210->lock);
- fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_KERNEL);
+ fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
spin_lock(&fotg210->lock);
}
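
The flag change carries no comment, but the context explains it: fotg210_get_status() runs on the ep0/interrupt path, and dropping fotg210->lock does not leave atomic context, so the queue call must not sleep. The rule in isolation (sketch):

        /* reachable from IRQ context: a sleeping GFP_KERNEL allocation
         * here is forbidden, hence GFP_ATOMIC */
        spin_unlock(&fotg210->lock);
        fotg210_ep_queue(fotg210->gadget.ep0, fotg210->ep0_req, GFP_ATOMIC);
        spin_lock(&fotg210->lock);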
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index be59309e848c..20141c3096f6 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -2234,8 +2234,10 @@ static void fsl_udc_release(struct device *dev)
Internal structure setup functions
*******************************************************************/
/*------------------------------------------------------------------
- * init resource for globle controller
- * Return the udc handle on success or NULL on failure
+ * init resources for the global controller, called by fsl_udc_probe()
+ * On success the udc handle is initialized; on failure everything
+ * allocated here is rolled back and the handle is left unchanged.
+ * Return 0 on success and -1 on allocation failure
------------------------------------------------------------------*/
static int struct_udc_setup(struct fsl_udc *udc,
struct platform_device *pdev)
@@ -2247,8 +2249,10 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->phy_mode = pdata->phy_mode;
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
- if (!udc->eps)
- return -1;
+ if (!udc->eps) {
+ ERR("kmalloc udc endpoint status failed\n");
+ goto eps_alloc_failed;
+ }
/* initialized QHs, take care of alignment */
size = udc->max_ep * sizeof(struct ep_queue_head);
@@ -2262,8 +2266,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
ERR("malloc QHs for udc failed\n");
- kfree(udc->eps);
- return -1;
+ goto ep_queue_alloc_failed;
}
udc->ep_qh_size = size;
@@ -2272,8 +2275,17 @@ static int struct_udc_setup(struct fsl_udc *udc,
/* FIXME: fsl_alloc_request() ignores ep argument */
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
+ if (!udc->status_req) {
+ ERR("kzalloc for udc status request failed\n");
+ goto udc_status_alloc_failed;
+ }
+
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
+ if (!udc->status_req->req.buf) {
+ ERR("kzalloc for udc request buffer failed\n");
+ goto udc_req_buf_alloc_failed;
+ }
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2281,6 +2293,18 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->remote_wakeup = 0; /* default to 0 on reset */
return 0;
+
+udc_req_buf_alloc_failed:
+ kfree(udc->status_req);
+udc_status_alloc_failed:
+ kfree(udc->ep_qh);
+ udc->ep_qh_size = 0;
+ep_queue_alloc_failed:
+ kfree(udc->eps);
+eps_alloc_failed:
+ udc->phy_mode = 0;
+ return -1;
+
}
/*----------------------------------------------------------------
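
The struct_udc_setup() rework above replaces early returns with the kernel's usual reverse-order unwind ladder: each successful allocation adds a label at the tail, and a failure jumps to the label that frees everything acquired so far. The idiom in the abstract, with hypothetical names:

        static int setup(struct ctx *c)
        {
                c->a = alloc_a();
                if (!c->a)
                        goto err_a;

                c->b = alloc_b();
                if (!c->b)
                        goto err_b;

                return 0;

        err_b:                  /* unwind in reverse order of acquisition */
                free_a(c->a);
        err_a:
                return -1;
        }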
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 95f52232493b..cafde053788b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -185,7 +185,7 @@ static int process_ep_req(struct mv_udc *udc, int index,
else
bit_pos = 1 << (16 + curr_req->ep->ep_num);
- while ((curr_dqh->curr_dtd_ptr == curr_dtd->td_dma)) {
+ while (curr_dqh->curr_dtd_ptr == curr_dtd->td_dma) {
if (curr_dtd->dtd_next == EP_QUEUE_HEAD_NEXT_TERMINATE) {
while (readl(&udc->op_regs->epstatus) & bit_pos)
udelay(1);
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index b02ab2a8d927..e7dae5379e04 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1550,9 +1550,6 @@ static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
spin_unlock_irqrestore(&dev->lock, flags);
- if (!is_on && dev->driver)
- dev->driver->disconnect(&dev->gadget);
-
return 0;
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index e1656f361e08..cdffbd1e0316 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2437,6 +2437,9 @@ static ssize_t renesas_usb3_b_device_write(struct file *file,
else
usb3->forced_b_device = false;
+ if (usb3->workaround_for_vbus)
+ usb3_disconnect(usb3);
+
/* Let this driver call usb3_connect() anyway */
usb3_check_id(usb3);
@@ -2600,6 +2603,13 @@ static const struct renesas_usb3_priv renesas_usb3_priv_gen3 = {
.ramsize_per_pipe = SZ_4K,
};
+static const struct renesas_usb3_priv renesas_usb3_priv_r8a77990 = {
+ .ramsize_per_ramif = SZ_16K,
+ .num_ramif = 4,
+ .ramsize_per_pipe = SZ_4K,
+ .workaround_for_vbus = true,
+};
+
static const struct of_device_id usb3_of_match[] = {
{
.compatible = "renesas,r8a7795-usb3-peri",
@@ -2618,6 +2628,10 @@ static const struct soc_device_attribute renesas_usb3_quirks_match[] = {
.soc_id = "r8a7795", .revision = "ES1.*",
.data = &renesas_usb3_priv_r8a7795_es1,
},
+ {
+ .soc_id = "r8a77990",
+ .data = &renesas_usb3_priv_r8a77990,
+ },
{ /* sentinel */ },
};
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 6407e433bc78..b1f4104d1283 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1078,7 +1078,7 @@ static int xudc_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
unsigned long flags;
if (!ep->desc) {
- dev_dbg(udc->dev, "%s:queing request to disabled %s\n",
+ dev_dbg(udc->dev, "%s: queuing request to disabled %s\n",
__func__, ep->name);
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 1a4ea98cac2a..16758b12a5e9 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -276,7 +276,7 @@ config USB_EHCI_EXYNOS
Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
config USB_EHCI_MV
- bool "EHCI support for Marvell PXA/MMP USB controller"
+ tristate "EHCI support for Marvell PXA/MMP USB controller"
depends on (ARCH_PXA || ARCH_MMP)
select USB_EHCI_ROOT_HUB_TT
---help---
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index e6235269c151..84514f71ae44 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -87,6 +87,7 @@ obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
obj-$(CONFIG_USB_FSL_USB2) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += fsl-mph-dr-of.o
obj-$(CONFIG_USB_EHCI_FSL) += ehci-fsl.o
+obj-$(CONFIG_USB_EHCI_MV) += ehci-mv.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 8608ac513fb7..cdafa97f632d 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -730,9 +730,9 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
- COUNT (ehci->stats.normal);
+ INCR(ehci->stats.normal);
else
- COUNT (ehci->stats.error);
+ INCR(ehci->stats.error);
bh = 1;
}
@@ -756,7 +756,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
ehci_dbg(ehci, "IAA with IAAD still set?\n");
if (ehci->iaa_in_progress)
- COUNT(ehci->stats.iaa);
+ INCR(ehci->stats.iaa);
end_iaa_cycle(ehci);
}
@@ -1286,11 +1286,6 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_grlib_driver
#endif
-#ifdef CONFIG_USB_EHCI_MV
-#include "ehci-mv.c"
-#define PLATFORM_DRIVER ehci_mv_driver
-#endif
-
static int __init ehci_hcd_init(void)
{
int retval = 0;
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index de764459e05a..f26109eafdbf 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -12,24 +12,33 @@
#include <linux/err.h>
#include <linux/usb/otg.h>
#include <linux/platform_data/mv_usb.h>
+#include <linux/io.h>
+
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+
+/* registers */
+#define U2x_CAPREGS_OFFSET 0x100
#define CAPLENGTH_MASK (0xff)
-struct ehci_hcd_mv {
- struct usb_hcd *hcd;
+#define hcd_to_ehci_hcd_mv(h) ((struct ehci_hcd_mv *)hcd_to_ehci(h)->priv)
+struct ehci_hcd_mv {
/* Which mode does this ehci running OTG/Host ? */
int mode;
- void __iomem *phy_regs;
+ void __iomem *base;
void __iomem *cap_regs;
void __iomem *op_regs;
struct usb_phy *otg;
+ struct clk *clk;
- struct mv_usb_platform_data *pdata;
+ struct phy *phy;
- struct clk *clk;
+ int (*set_vbus)(unsigned int vbus);
};
static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
@@ -44,29 +53,20 @@ static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
{
- int retval;
-
ehci_clock_enable(ehci_mv);
- if (ehci_mv->pdata->phy_init) {
- retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
- if (retval)
- return retval;
- }
-
- return 0;
+ return phy_init(ehci_mv->phy);
}
static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
{
- if (ehci_mv->pdata->phy_deinit)
- ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ phy_exit(ehci_mv->phy);
ehci_clock_disable(ehci_mv);
}
static int mv_ehci_reset(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
int retval;
if (ehci_mv == NULL) {
@@ -83,46 +83,11 @@ static int mv_ehci_reset(struct usb_hcd *hcd)
return retval;
}
-static const struct hc_driver mv_ehci_hc_driver = {
- .description = hcd_name,
- .product_desc = "Marvell EHCI",
- .hcd_priv_size = sizeof(struct ehci_hcd),
-
- /*
- * generic hardware linkage
- */
- .irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
-
- /*
- * basic lifecycle operations
- */
- .reset = mv_ehci_reset,
- .start = ehci_run,
- .stop = ehci_stop,
- .shutdown = ehci_shutdown,
-
- /*
- * managing i/o requests and associated device resources
- */
- .urb_enqueue = ehci_urb_enqueue,
- .urb_dequeue = ehci_urb_dequeue,
- .endpoint_disable = ehci_endpoint_disable,
- .endpoint_reset = ehci_endpoint_reset,
- .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
-
- /*
- * scheduling support
- */
- .get_frame_number = ehci_get_frame,
-
- /*
- * root hub support
- */
- .hub_status_data = ehci_hub_status_data,
- .hub_control = ehci_hub_control,
- .bus_suspend = ehci_bus_suspend,
- .bus_resume = ehci_bus_resume,
+static struct hc_driver __read_mostly ehci_platform_hc_driver;
+
+static const struct ehci_driver_overrides platform_overrides __initconst = {
+ .reset = mv_ehci_reset,
+ .extra_priv_size = sizeof(struct ehci_hcd_mv),
};
static int mv_ehci_probe(struct platform_device *pdev)
@@ -135,27 +100,29 @@ static int mv_ehci_probe(struct platform_device *pdev)
int retval = -ENODEV;
u32 offset;
- if (!pdata) {
- dev_err(&pdev->dev, "missing platform_data\n");
- return -ENODEV;
- }
-
if (usb_disabled())
return -ENODEV;
- hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ hcd = usb_create_hcd(&ehci_platform_hc_driver, &pdev->dev, "mv ehci");
if (!hcd)
return -ENOMEM;
- ehci_mv = devm_kzalloc(&pdev->dev, sizeof(*ehci_mv), GFP_KERNEL);
- if (ehci_mv == NULL) {
- retval = -ENOMEM;
- goto err_put_hcd;
+ platform_set_drvdata(pdev, hcd);
+ ehci_mv = hcd_to_ehci_hcd_mv(hcd);
+
+ ehci_mv->mode = MV_USB_MODE_HOST;
+ if (pdata) {
+ ehci_mv->mode = pdata->mode;
+ ehci_mv->set_vbus = pdata->set_vbus;
}
- platform_set_drvdata(pdev, ehci_mv);
- ehci_mv->pdata = pdata;
- ehci_mv->hcd = hcd;
+ ehci_mv->phy = devm_phy_get(&pdev->dev, "usb");
+ if (IS_ERR(ehci_mv->phy)) {
+ retval = PTR_ERR(ehci_mv->phy);
+ if (retval != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get phy.\n");
+ goto err_put_hcd;
+ }
ehci_mv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ehci_mv->clk)) {
@@ -164,17 +131,12 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
- ehci_mv->phy_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->phy_regs)) {
- retval = PTR_ERR(ehci_mv->phy_regs);
- goto err_put_hcd;
- }
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
- ehci_mv->cap_regs = devm_ioremap_resource(&pdev->dev, r);
- if (IS_ERR(ehci_mv->cap_regs)) {
- retval = PTR_ERR(ehci_mv->cap_regs);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ehci_mv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(ehci_mv->base)) {
+ retval = PTR_ERR(ehci_mv->base);
goto err_put_hcd;
}
@@ -184,6 +146,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
goto err_put_hcd;
}
+ ehci_mv->cap_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->base + U2x_CAPREGS_OFFSET);
offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
ehci_mv->op_regs =
(void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
@@ -202,7 +166,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
- ehci_mv->mode = pdata->mode;
if (ehci_mv->mode == MV_USB_MODE_OTG) {
ehci_mv->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
if (IS_ERR(ehci_mv->otg)) {
@@ -227,8 +190,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
/* otg will enable clock before use as host */
mv_ehci_disable(ehci_mv);
} else {
- if (pdata->set_vbus)
- pdata->set_vbus(1);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(1);
retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
if (retval) {
@@ -239,9 +202,6 @@ static int mv_ehci_probe(struct platform_device *pdev)
device_wakeup_enable(hcd->self.controller);
}
- if (pdata->private_init)
- pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
-
dev_info(&pdev->dev,
"successful find EHCI device with regs 0x%p irq %d"
" working in %s mode\n", hcd->regs, hcd->irq,
@@ -250,8 +210,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
return 0;
err_set_vbus:
- if (pdata->set_vbus)
- pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
err_disable_clk:
mv_ehci_disable(ehci_mv);
err_put_hcd:
@@ -262,8 +222,8 @@ err_put_hcd:
static int mv_ehci_remove(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);
if (hcd->rh_registered)
usb_remove_hcd(hcd);
@@ -272,8 +232,8 @@ static int mv_ehci_remove(struct platform_device *pdev)
otg_set_host(ehci_mv->otg->otg, NULL);
if (ehci_mv->mode == MV_USB_MODE_HOST) {
- if (ehci_mv->pdata->set_vbus)
- ehci_mv->pdata->set_vbus(0);
+ if (ehci_mv->set_vbus)
+ ehci_mv->set_vbus(0);
mv_ehci_disable(ehci_mv);
}
@@ -295,8 +255,7 @@ static const struct platform_device_id ehci_id_table[] = {
static void mv_ehci_shutdown(struct platform_device *pdev)
{
- struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
- struct usb_hcd *hcd = ehci_mv->hcd;
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
if (!hcd->rh_registered)
return;
@@ -305,13 +264,41 @@ static void mv_ehci_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static const struct of_device_id ehci_mv_dt_ids[] = {
+ { .compatible = "marvell,pxau2o-ehci", },
+ {},
+};
+
static struct platform_driver ehci_mv_driver = {
.probe = mv_ehci_probe,
.remove = mv_ehci_remove,
.shutdown = mv_ehci_shutdown,
.driver = {
- .name = "mv-ehci",
- .bus = &platform_bus_type,
- },
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ .of_match_table = ehci_mv_dt_ids,
+ },
.id_table = ehci_id_table,
};
+
+static int __init ehci_platform_init(void)
+{
+ if (usb_disabled())
+ return -ENODEV;
+
+ ehci_init_driver(&ehci_platform_hc_driver, &platform_overrides);
+ return platform_driver_register(&ehci_mv_driver);
+}
+module_init(ehci_platform_init);
+
+static void __exit ehci_platform_cleanup(void)
+{
+ platform_driver_unregister(&ehci_mv_driver);
+}
+module_exit(ehci_platform_cleanup);
+
+MODULE_DESCRIPTION("Marvell EHCI driver");
+MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
+MODULE_AUTHOR("Neil Zhang <zhangwm@marvell.com>");
+MODULE_ALIAS("mv-ehci");
+MODULE_LICENSE("GPL");
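
Two things happen in this rewrite: ehci-mv becomes a real module built around ehci_init_driver() plus overrides, and its glue state moves from a separately allocated struct into the HCD allocation via .extra_priv_size. A rough sketch of the resulting layout, which is what hcd_to_ehci_hcd_mv() relies on:

        /*
         * usb_create_hcd() allocates, in one block (approximately):
         *
         *     [ struct usb_hcd | struct ehci_hcd | struct ehci_hcd_mv ]
         *                                          ^ hcd_to_ehci(hcd)->priv
         *
         * so the old devm_kzalloc() and the ehci_mv->hcd back-pointer
         * both disappear.
         */
        struct ehci_hcd_mv *ehci_mv = hcd_to_ehci_hcd_mv(hcd);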
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 327630405695..aa2f77f1506d 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -245,12 +245,12 @@ ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
}
if (unlikely(urb->unlinked)) {
- COUNT(ehci->stats.unlink);
+ INCR(ehci->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(ehci->stats.complete);
+ INCR(ehci->stats.complete);
}
#ifdef EHCI_URB_TRACE
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 4fcebda4b79d..a79c8ac0a55f 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -347,7 +347,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
*/
status = ehci_readl(ehci, &ehci->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(ehci->stats.lost_iaa);
+ INCR(ehci->stats.lost_iaa);
ehci_writel(ehci, STS_IAA, &ehci->regs->status);
}
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index c8e9a48e1d51..ac5e967907d1 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -235,9 +235,9 @@ struct ehci_hcd { /* one per controller */
/* irq statistics */
#ifdef EHCI_STATS
struct ehci_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
/* debug files */
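
The rename to INCR() also tightens the stats-disabled variant: a macro that expands to nothing leaves a bare `;` behind and can trip empty-body warnings, whereas do {} while (0) always behaves as exactly one statement. Sketch of why both variants parse identically at the call sites above:

        #ifdef EHCI_STATS
        # define INCR(x)        ((x)++)
        #else
        # define INCR(x)        do {} while (0) /* still one statement */
        #endif

                if (likely(!(status & STS_ERR)))
                        INCR(ehci->stats.normal);
                else
                        INCR(ehci->stats.error);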
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index e64eb47770c8..0da68df259c8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -31,6 +31,7 @@
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
@@ -1285,7 +1286,7 @@ static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
*/
status = fotg210_readl(fotg210, &fotg210->regs->status);
if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
- COUNT(fotg210->stats.lost_iaa);
+ INCR(fotg210->stats.lost_iaa);
fotg210_writel(fotg210, STS_IAA,
&fotg210->regs->status);
}
@@ -2204,12 +2205,12 @@ __acquires(fotg210->lock)
}
if (unlikely(urb->unlinked)) {
- COUNT(fotg210->stats.unlink);
+ INCR(fotg210->stats.unlink);
} else {
/* report non-error and short read status as zero */
if (status == -EINPROGRESS || status == -EREMOTEIO)
status = 0;
- COUNT(fotg210->stats.complete);
+ INCR(fotg210->stats.complete);
}
#ifdef FOTG210_URB_TRACE
@@ -5153,9 +5154,9 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely((status & (STS_INT|STS_ERR)) != 0)) {
if (likely((status & STS_ERR) == 0))
- COUNT(fotg210->stats.normal);
+ INCR(fotg210->stats.normal);
else
- COUNT(fotg210->stats.error);
+ INCR(fotg210->stats.error);
bh = 1;
}
@@ -5180,7 +5181,7 @@ static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
if (cmd & CMD_IAAD)
fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
if (fotg210->async_iaa) {
- COUNT(fotg210->stats.iaa);
+ INCR(fotg210->stats.iaa);
end_unlink_async(fotg210);
} else
fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
@@ -5596,7 +5597,7 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
hcd->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hcd->regs)) {
retval = PTR_ERR(hcd->regs);
- goto failed;
+ goto failed_put_hcd;
}
hcd->rsrc_start = res->start;
@@ -5606,22 +5607,43 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
fotg210->caps = hcd->regs;
+ /* It's OK not to supply this clock */
+ fotg210->pclk = clk_get(dev, "PCLK");
+ if (!IS_ERR(fotg210->pclk)) {
+ retval = clk_prepare_enable(fotg210->pclk);
+ if (retval) {
+ dev_err(dev, "failed to enable PCLK\n");
+ goto failed_put_hcd;
+ }
+ } else if (PTR_ERR(fotg210->pclk) == -EPROBE_DEFER) {
+ /*
+ * Percolate deferrals, for anything else,
+ * just live without the clocking.
+ */
+ retval = PTR_ERR(fotg210->pclk);
+ goto failed_dis_clk;
+ }
+
retval = fotg210_setup(hcd);
if (retval)
- goto failed;
+ goto failed_dis_clk;
fotg210_init(fotg210);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval) {
dev_err(dev, "failed to add hcd with err %d\n", retval);
- goto failed;
+ goto failed_dis_clk;
}
device_wakeup_enable(hcd->self.controller);
+ platform_set_drvdata(pdev, hcd);
return retval;
-failed:
+failed_dis_clk:
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
+failed_put_hcd:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
@@ -5635,11 +5657,11 @@ fail_create_hcd:
*/
static int fotg210_hcd_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
- if (!hcd)
- return 0;
+ if (!IS_ERR(fotg210->pclk))
+ clk_disable_unprepare(fotg210->pclk);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
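
The PCLK handling follows the optional-resource pattern: only -EPROBE_DEFER propagates (the clock may still appear later), any other error means running unclocked, and the failure labels only disable what was actually enabled. The same pattern in isolation; the clk consumer API treats a NULL clock as a no-op, so the miss can also be normalised to NULL:

        clk = clk_get(dev, "PCLK");
        if (IS_ERR(clk)) {
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;   /* clock may appear later */
                clk = NULL;                     /* absent: run without it */
        }
        ret = clk_prepare_enable(clk);          /* no-op for NULL */
        if (ret)
                return ret;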
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
index 7fcd785c7bc8..1b4db95e5c43 100644
--- a/drivers/usb/host/fotg210.h
+++ b/drivers/usb/host/fotg210.h
@@ -177,11 +177,14 @@ struct fotg210_hcd { /* one per controller */
/* irq statistics */
#ifdef FOTG210_STATS
struct fotg210_stats stats;
-# define COUNT(x) ((x)++)
+# define INCR(x) ((x)++)
#else
-# define COUNT(x)
+# define INCR(x) do {} while (0)
#endif
+ /* silicon clock */
+ struct clk *pclk;
+
/* debug files */
struct dentry *debug_dir;
};
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index e98673954020..ec6739ef3129 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -551,6 +551,8 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
pdata->overcurrent_pin[i] =
devm_gpiod_get_index_optional(&pdev->dev, "atmel,oc",
i, GPIOD_IN);
+ if (!pdata->overcurrent_pin[i])
+ continue;
if (IS_ERR(pdata->overcurrent_pin[i])) {
err = PTR_ERR(pdata->overcurrent_pin[i]);
dev_err(&pdev->dev, "unable to claim gpio \"overcurrent\": %d\n", err);
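
devm_gpiod_get_index_optional() returns NULL, not an error pointer, when the line simply is not described, so the NULL test must come before IS_ERR(), exactly as added above. The pattern in isolation:

        gpiod = devm_gpiod_get_index_optional(dev, "atmel,oc", i, GPIOD_IN);
        if (!gpiod)
                continue;               /* pin not wired up: fine */
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);  /* real error, incl. -EPROBE_DEFER */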
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3625a5c1a41b..3ce71cbfbb58 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -783,15 +783,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
/* disable interrupts */
writel((u32) ~0, base + OHCI_INTRDISABLE);
- /* Reset the USB bus, if the controller isn't already in RESET */
- if (control & OHCI_HCFS) {
- /* Go into RESET, preserving RWC (and possibly IR) */
- writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
- readl(base + OHCI_CONTROL);
-
- /* drive bus reset for at least 50 ms (7.1.7.5) */
- msleep(50);
- }
+ /* Go into the USB_RESET state, preserving RWC (and possibly IR) */
+ writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL);
+ readl(base + OHCI_CONTROL);
/* software reset of the controller, preserving HcFmInterval */
if (!no_fminterval)
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7e2a531ba321..12eea73d9f20 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -900,6 +900,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
set_bit(wIndex, &bus_state->resuming_ports);
bus_state->resume_done[wIndex] = timeout;
mod_timer(&hcd->rh_timer, timeout);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
}
/* Has resume been signalled for USB_RESUME_TIME yet? */
} else if (time_after_eq(jiffies,
@@ -940,6 +941,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
clear_bit(wIndex, &bus_state->rexit_ports);
}
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
bus_state->port_c_suspend |= 1 << wIndex;
bus_state->suspended_ports &= ~(1 << wIndex);
} else {
@@ -962,6 +964,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
(raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) {
bus_state->resume_done[wIndex] = 0;
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
@@ -1337,6 +1340,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
set_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
xhci_set_link_state(xhci, ports[wIndex],
XDEV_RESUME);
spin_unlock_irqrestore(&xhci->lock, flags);
@@ -1345,6 +1349,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
xhci_set_link_state(xhci, ports[wIndex],
XDEV_U0);
clear_bit(wIndex, &bus_state->resuming_ports);
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
}
bus_state->port_c_suspend |= 1 << wIndex;
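
usb_hcd_start_port_resume() and usb_hcd_end_port_resume() already exist in the HCD core; the xHCI hunks above only add the missing pairing, so every resume that is started (timer-armed or via a USB_PORT_FEAT_SUSPEND clear) is ended once the port reaches U0 or the resume is abandoned. From memory, and so stated as an assumption, the core helper records the port and pins the root hub against runtime suspend for the duration:

        void usb_hcd_start_port_resume(struct usb_bus *bus, int portnum)
        {
                unsigned bit = 1 << portnum;

                if (!(bus->resuming_ports & bit)) {
                        bus->resuming_ports |= bit;
                        pm_runtime_get_noresume(&bus->root_hub->dev);
                }
        }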
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index fa33d6e5b1cb..fea555570ad4 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -13,14 +13,20 @@
#include "xhci.h"
#include "xhci-mtk.h"
+#define SSP_BW_BOUNDARY 130000
#define SS_BW_BOUNDARY 51000
/* table 5-5. High-speed Isoc Transaction Limits in usb_20 spec */
#define HS_BW_BOUNDARY 6144
/* usb2 spec section11.18.1: at most 188 FS bytes per microframe */
#define FS_PAYLOAD_MAX 188
+/*
+ * max number of microframes for split transfer,
+ * for a full-speed isoc IN: 1 SS + 1 idle + 7 CS
+ */
+#define TT_MICROFRAMES_MAX 9
/* mtk scheduler bitmasks */
-#define EP_BPKTS(p) ((p) & 0x3f)
+#define EP_BPKTS(p) ((p) & 0x7f)
#define EP_BCSCOUNT(p) (((p) & 0x7) << 8)
#define EP_BBM(p) ((p) << 11)
#define EP_BOFFSET(p) ((p) & 0x3fff)
@@ -51,7 +57,7 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
virt_dev = xhci->devs[udev->slot_id];
- if (udev->speed == USB_SPEED_SUPER) {
+ if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
bw_index = (virt_dev->real_port - 1) * 2;
else
@@ -64,25 +70,167 @@ static int get_bw_index(struct xhci_hcd *xhci, struct usb_device *udev,
return bw_index;
}
+static u32 get_esit(struct xhci_ep_ctx *ep_ctx)
+{
+ u32 esit;
+
+ esit = 1 << CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
+ if (esit > XHCI_MTK_MAX_ESIT)
+ esit = XHCI_MTK_MAX_ESIT;
+
+ return esit;
+}
+
+static struct mu3h_sch_tt *find_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ unsigned int port;
+ bool allocated_index = false;
+
+ if (!utt)
+ return NULL; /* Not below a TT */
+
+ /*
+ * Find/create our data structure.
+ * For hubs with a single TT, we get it directly.
+ * For hubs with multiple TTs, there's an extra level of pointers.
+ */
+ tt_index = NULL;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ if (!tt_index) { /* Create the index array */
+ tt_index = kcalloc(utt->hub->maxchild,
+ sizeof(*tt_index), GFP_KERNEL);
+ if (!tt_index)
+ return ERR_PTR(-ENOMEM);
+ utt->hcpriv = tt_index;
+ allocated_index = true;
+ }
+ port = udev->ttport - 1;
+ ptt = &tt_index[port];
+ } else {
+ port = 0;
+ ptt = (struct mu3h_sch_tt **) &utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt) { /* Create the mu3h_sch_tt */
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt) {
+ if (allocated_index) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+ return ERR_PTR(-ENOMEM);
+ }
+ INIT_LIST_HEAD(&tt->ep_list);
+ tt->usb_tt = utt;
+ tt->tt_port = port;
+ *ptt = tt;
+ }
+
+ return tt;
+}
+
+/* Release the TT above udev, if it's not in use */
+static void drop_tt(struct usb_device *udev)
+{
+ struct usb_tt *utt = udev->tt;
+ struct mu3h_sch_tt *tt, **tt_index, **ptt;
+ int i, cnt;
+
+ if (!utt || !utt->hcpriv)
+ return; /* Not below a TT, or never allocated */
+
+ cnt = 0;
+ if (utt->multi) {
+ tt_index = utt->hcpriv;
+ ptt = &tt_index[udev->ttport - 1];
+ /* How many entries are left in tt_index? */
+ for (i = 0; i < utt->hub->maxchild; ++i)
+ cnt += !!tt_index[i];
+ } else {
+ tt_index = NULL;
+ ptt = (struct mu3h_sch_tt **)&utt->hcpriv;
+ }
+
+ tt = *ptt;
+ if (!tt || !list_empty(&tt->ep_list))
+ return; /* never allocated, or still in use */
+
+ *ptt = NULL;
+ kfree(tt);
+
+ if (cnt == 1) {
+ utt->hcpriv = NULL;
+ kfree(tt_index);
+ }
+}
+
+static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
+ struct usb_host_endpoint *ep, struct xhci_ep_ctx *ep_ctx)
+{
+ struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_tt *tt = NULL;
+ u32 len_bw_budget_table;
+ size_t mem_size;
+
+ if (is_fs_or_ls(udev->speed))
+ len_bw_budget_table = TT_MICROFRAMES_MAX;
+ else if ((udev->speed >= USB_SPEED_SUPER)
+ && usb_endpoint_xfer_isoc(&ep->desc))
+ len_bw_budget_table = get_esit(ep_ctx);
+ else
+ len_bw_budget_table = 1;
+
+ mem_size = sizeof(struct mu3h_sch_ep_info) +
+ len_bw_budget_table * sizeof(u32);
+ sch_ep = kzalloc(mem_size, GFP_KERNEL);
+ if (!sch_ep)
+ return ERR_PTR(-ENOMEM);
+
+ if (is_fs_or_ls(udev->speed)) {
+ tt = find_tt(udev);
+ if (IS_ERR(tt)) {
+ kfree(sch_ep);
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ sch_ep->sch_tt = tt;
+ sch_ep->ep = ep;
+
+ return sch_ep;
+}
+
static void setup_sch_info(struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx, struct mu3h_sch_ep_info *sch_ep)
{
u32 ep_type;
- u32 ep_interval;
- u32 max_packet_size;
+ u32 maxpkt;
u32 max_burst;
u32 mult;
u32 esit_pkts;
+ u32 max_esit_payload;
+ u32 *bwb_table = sch_ep->bw_budget_table;
+ int i;
ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2));
- ep_interval = CTX_TO_EP_INTERVAL(le32_to_cpu(ep_ctx->ep_info));
- max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
+ maxpkt = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_burst = CTX_TO_MAX_BURST(le32_to_cpu(ep_ctx->ep_info2));
mult = CTX_TO_EP_MULT(le32_to_cpu(ep_ctx->ep_info));
-
- sch_ep->esit = 1 << ep_interval;
+ max_esit_payload =
+ (CTX_TO_MAX_ESIT_PAYLOAD_HI(
+ le32_to_cpu(ep_ctx->ep_info)) << 16) |
+ CTX_TO_MAX_ESIT_PAYLOAD(le32_to_cpu(ep_ctx->tx_info));
+
+ sch_ep->esit = get_esit(ep_ctx);
+ sch_ep->ep_type = ep_type;
+ sch_ep->maxpkt = maxpkt;
sch_ep->offset = 0;
sch_ep->burst_mode = 0;
+ sch_ep->repeat = 0;
if (udev->speed == USB_SPEED_HIGH) {
sch_ep->cs_count = 0;
@@ -93,7 +241,6 @@ static void setup_sch_info(struct usb_device *udev,
* in a interval
*/
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
/*
* xHCI spec section6.2.3.4
@@ -101,19 +248,33 @@ static void setup_sch_info(struct usb_device *udev,
* opportunities per microframe
*/
sch_ep->pkts = max_burst + 1;
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
- } else if (udev->speed == USB_SPEED_SUPER) {
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else if (udev->speed >= USB_SPEED_SUPER) {
/* usb3_r1 spec section4.4.7 & 4.4.8 */
sch_ep->cs_count = 0;
- esit_pkts = (mult + 1) * (max_burst + 1);
+ sch_ep->burst_mode = 1;
+ /*
+ * some device's (d)wBytesPerInterval is set as 0,
+ * then max_esit_payload is 0, so evaluate esit_pkts from
+ * mult and burst
+ */
+ esit_pkts = DIV_ROUND_UP(max_esit_payload, maxpkt);
+ if (esit_pkts == 0)
+ esit_pkts = (mult + 1) * (max_burst + 1);
+
if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
sch_ep->pkts = esit_pkts;
sch_ep->num_budget_microframes = 1;
- sch_ep->repeat = 0;
+ bwb_table[0] = maxpkt * sch_ep->pkts;
}
if (ep_type == ISOC_IN_EP || ep_type == ISOC_OUT_EP) {
- if (esit_pkts <= sch_ep->esit)
+ u32 remainder;
+
+ if (sch_ep->esit == 1)
+ sch_ep->pkts = esit_pkts;
+ else if (esit_pkts <= sch_ep->esit)
sch_ep->pkts = 1;
else
sch_ep->pkts = roundup_pow_of_two(esit_pkts)
@@ -122,43 +283,48 @@ static void setup_sch_info(struct usb_device *udev,
sch_ep->num_budget_microframes =
DIV_ROUND_UP(esit_pkts, sch_ep->pkts);
- if (sch_ep->num_budget_microframes > 1)
- sch_ep->repeat = 1;
- else
- sch_ep->repeat = 0;
+ sch_ep->repeat = !!(sch_ep->num_budget_microframes > 1);
+ sch_ep->bw_cost_per_microframe = maxpkt * sch_ep->pkts;
+
+ remainder = sch_ep->bw_cost_per_microframe;
+ remainder *= sch_ep->num_budget_microframes;
+ remainder -= (maxpkt * esit_pkts);
+ for (i = 0; i < sch_ep->num_budget_microframes - 1; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+
+ /* last one <= bw_cost_per_microframe */
+ bwb_table[i] = remainder;
}
- sch_ep->bw_cost_per_microframe = max_packet_size * sch_ep->pkts;
} else if (is_fs_or_ls(udev->speed)) {
+ sch_ep->pkts = 1; /* at most one packet for each microframe */
/*
- * usb_20 spec section11.18.4
- * assume worst cases
+ * num_budget_microframes and cs_count will be updated when
+ * check TT for INT_OUT_EP, ISOC/INT_IN_EP type
*/
- sch_ep->repeat = 0;
- sch_ep->pkts = 1; /* at most one packet for each microframe */
- if (ep_type == INT_IN_EP || ep_type == INT_OUT_EP) {
- sch_ep->cs_count = 3; /* at most need 3 CS*/
- /* one for SS and one for budgeted transaction */
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = max_packet_size;
- }
- if (ep_type == ISOC_OUT_EP) {
+ sch_ep->cs_count = DIV_ROUND_UP(maxpkt, FS_PAYLOAD_MAX);
+ sch_ep->num_budget_microframes = sch_ep->cs_count;
+ sch_ep->bw_cost_per_microframe =
+ (maxpkt < FS_PAYLOAD_MAX) ? maxpkt : FS_PAYLOAD_MAX;
+ /* init budget table */
+ if (ep_type == ISOC_OUT_EP) {
+ for (i = 0; i < sch_ep->num_budget_microframes; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
+ } else if (ep_type == INT_OUT_EP) {
+ /* only the first microframe consumes bandwidth; the rest are zero */
+ bwb_table[0] = sch_ep->bw_cost_per_microframe;
+ } else { /* INT_IN_EP or ISOC_IN_EP */
+ bwb_table[0] = 0; /* start split */
+ bwb_table[1] = 0; /* idle */
/*
- * the best case FS budget assumes that 188 FS bytes
- * occur in each microframe
+ * because cs_count will be updated according to the CS
+ * position, assign @bw_cost_per_microframe to all of the
+ * remaining budget array elements; only the first
+ * @num_budget_microframes of them will be used later
*/
- sch_ep->num_budget_microframes = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX);
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
- sch_ep->cs_count = sch_ep->num_budget_microframes;
- }
- if (ep_type == ISOC_IN_EP) {
- /* at most need additional two CS. */
- sch_ep->cs_count = DIV_ROUND_UP(
- max_packet_size, FS_PAYLOAD_MAX) + 2;
- sch_ep->num_budget_microframes = sch_ep->cs_count + 2;
- sch_ep->bw_cost_per_microframe = FS_PAYLOAD_MAX;
+ for (i = 2; i < TT_MICROFRAMES_MAX; i++)
+ bwb_table[i] = sch_ep->bw_cost_per_microframe;
}
}
}
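
A worked example of the new budget table, using a hypothetical full-speed isoc IN endpoint with wMaxPacketSize 500 and following the code above:

        /*
         * cs_count               = DIV_ROUND_UP(500, 188) = 3
         * bw_cost_per_microframe = min(500, 188)          = 188
         * bw_budget_table[9]     = { 0, 0, 188, 188, 188, 188, 188, 188, 188 }
         *
         * The SS and idle microframes cost nothing; each CS window may
         * carry at most FS_PAYLOAD_MAX bytes. cs_count and
         * num_budget_microframes are refined later by check_sch_tt().
         */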
@@ -169,6 +335,7 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
{
u32 num_esit;
u32 max_bw = 0;
+ u32 bw;
int i;
int j;
@@ -177,15 +344,17 @@ static u32 get_max_bw(struct mu3h_sch_bw_info *sch_bw,
u32 base = offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++) {
- if (sch_bw->bus_bw[base + j] > max_bw)
- max_bw = sch_bw->bus_bw[base + j];
+ bw = sch_bw->bus_bw[base + j] +
+ sch_ep->bw_budget_table[j];
+ if (bw > max_bw)
+ max_bw = bw;
}
}
return max_bw;
}
static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
- struct mu3h_sch_ep_info *sch_ep, int bw_cost)
+ struct mu3h_sch_ep_info *sch_ep, bool used)
{
u32 num_esit;
u32 base;
@@ -195,9 +364,105 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
for (i = 0; i < num_esit; i++) {
base = sch_ep->offset + i * sch_ep->esit;
+ for (j = 0; j < sch_ep->num_budget_microframes; j++) {
+ if (used)
+ sch_bw->bus_bw[base + j] +=
+ sch_ep->bw_budget_table[j];
+ else
+ sch_bw->bus_bw[base + j] -=
+ sch_ep->bw_budget_table[j];
+ }
+ }
+}
+
+static int check_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep, u32 offset)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 extra_cs_count;
+ u32 fs_budget_start;
+ u32 start_ss, last_ss;
+ u32 start_cs, last_cs;
+ int i;
+
+ start_ss = offset % 8;
+ fs_budget_start = (start_ss + 1) % 8;
+
+ if (sch_ep->ep_type == ISOC_OUT_EP) {
+ last_ss = start_ss + sch_ep->cs_count - 1;
+
+ /*
+ * usb_20 spec section11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (!(start_ss == 7 || last_ss < 6))
+ return -ERANGE;
+
+ for (i = 0; i < sch_ep->cs_count; i++)
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+
+ } else {
+ u32 cs_count = DIV_ROUND_UP(sch_ep->maxpkt, FS_PAYLOAD_MAX);
+
+ /*
+ * usb_20 spec section11.18:
+ * must never schedule Start-Split in Y6
+ */
+ if (start_ss == 6)
+ return -ERANGE;
+
+ /* one uframe for ss + one uframe for idle */
+ start_cs = (start_ss + 2) % 8;
+ last_cs = start_cs + cs_count - 1;
+
+ if (last_cs > 7)
+ return -ERANGE;
+
+ if (sch_ep->ep_type == ISOC_IN_EP)
+ extra_cs_count = (last_cs == 7) ? 1 : 2;
+ else /* ep_type : INTR IN / INTR OUT */
+ extra_cs_count = (fs_budget_start == 6) ? 1 : 2;
+
+ cs_count += extra_cs_count;
+ if (cs_count > 7)
+ cs_count = 7; /* HW limit */
+
+ for (i = 0; i < cs_count + 2; i++) {
+ if (test_bit(offset + i, tt->split_bit_map))
+ return -ERANGE;
+ }
+
+ sch_ep->cs_count = cs_count;
+ /* one for ss, the other for idle */
+ sch_ep->num_budget_microframes = cs_count + 2;
+
+ /*
+ * if interval == 1 and maxpkt > 752, num_budget_microframes
+ * is larger than sch_ep->esit and would overstep the boundary
+ */
+ if (sch_ep->num_budget_microframes > sch_ep->esit)
+ sch_ep->num_budget_microframes = sch_ep->esit;
+ }
+
+ return 0;
+}
+
+static void update_sch_tt(struct usb_device *udev,
+ struct mu3h_sch_ep_info *sch_ep)
+{
+ struct mu3h_sch_tt *tt = sch_ep->sch_tt;
+ u32 base, num_esit;
+ int i, j;
+
+ num_esit = XHCI_MTK_MAX_ESIT / sch_ep->esit;
+ for (i = 0; i < num_esit; i++) {
+ base = sch_ep->offset + i * sch_ep->esit;
for (j = 0; j < sch_ep->num_budget_microframes; j++)
- sch_bw->bus_bw[base + j] += bw_cost;
+ set_bit(base + j, tt->split_bit_map);
}
+
+ list_add_tail(&sch_ep->tt_endpoint, &tt->ep_list);
}
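
The Y6 restriction enforced in check_sch_tt() comes from usb_20 section 11.18: a Start-Split must never be scheduled in microframe Y6. Distilled into a predicate with a worked value (sketch):

        static bool isoc_out_ss_ok(u32 start_ss, u32 cs_count)
        {
                u32 last_ss = start_ss + cs_count - 1;

                /* allowed iff the SS burst stays clear of Y6 */
                return start_ss == 7 || last_ss < 6;
        }
        /* e.g. isoc_out_ss_ok(5, 3) == false: the burst would span Y5..Y7 */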
static int check_sch_bw(struct usb_device *udev,
@@ -205,17 +470,16 @@ static int check_sch_bw(struct usb_device *udev,
{
u32 offset;
u32 esit;
- u32 num_budget_microframes;
u32 min_bw;
u32 min_index;
u32 worst_bw;
u32 bw_boundary;
-
- if (sch_ep->esit > XHCI_MTK_MAX_ESIT)
- sch_ep->esit = XHCI_MTK_MAX_ESIT;
+ u32 min_num_budget;
+ u32 min_cs_count;
+ bool tt_offset_ok = false;
+ int ret;
esit = sch_ep->esit;
- num_budget_microframes = sch_ep->num_budget_microframes;
/*
* Search through all possible schedule microframes.
@@ -223,36 +487,56 @@ static int check_sch_bw(struct usb_device *udev,
*/
min_bw = ~0;
min_index = 0;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
for (offset = 0; offset < esit; offset++) {
- if ((offset + num_budget_microframes) > sch_ep->esit)
- break;
+ if (is_fs_or_ls(udev->speed)) {
+ ret = check_sch_tt(udev, sch_ep, offset);
+ if (ret)
+ continue;
+ else
+ tt_offset_ok = true;
+ }
- /*
- * usb_20 spec section11.18:
- * must never schedule Start-Split in Y6
- */
- if (is_fs_or_ls(udev->speed) && (offset % 8 == 6))
- continue;
+ if ((offset + sch_ep->num_budget_microframes) > sch_ep->esit)
+ break;
worst_bw = get_max_bw(sch_bw, sch_ep, offset);
if (min_bw > worst_bw) {
min_bw = worst_bw;
min_index = offset;
+ min_cs_count = sch_ep->cs_count;
+ min_num_budget = sch_ep->num_budget_microframes;
}
if (min_bw == 0)
break;
}
- sch_ep->offset = min_index;
- bw_boundary = (udev->speed == USB_SPEED_SUPER)
- ? SS_BW_BOUNDARY : HS_BW_BOUNDARY;
+ if (udev->speed == USB_SPEED_SUPER_PLUS)
+ bw_boundary = SSP_BW_BOUNDARY;
+ else if (udev->speed == USB_SPEED_SUPER)
+ bw_boundary = SS_BW_BOUNDARY;
+ else
+ bw_boundary = HS_BW_BOUNDARY;
/* check bandwidth */
- if (min_bw + sch_ep->bw_cost_per_microframe > bw_boundary)
+ if (min_bw > bw_boundary)
return -ERANGE;
+ sch_ep->offset = min_index;
+ sch_ep->cs_count = min_cs_count;
+ sch_ep->num_budget_microframes = min_num_budget;
+
+ if (is_fs_or_ls(udev->speed)) {
+ /* no offset is available for this TT */
+ if (!tt_offset_ok)
+ return -ERANGE;
+
+ update_sch_tt(udev, sch_ep);
+ }
+
/* update bus bandwidth info */
- update_bus_bw(sch_bw, sch_ep, sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 1);
return 0;
}
@@ -347,8 +631,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- sch_ep = kzalloc(sizeof(struct mu3h_sch_ep_info), GFP_NOIO);
- if (!sch_ep)
+ sch_ep = create_sch_ep(udev, ep, ep_ctx);
+ if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
@@ -356,12 +640,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
ret = check_sch_bw(udev, sch_bw, sch_ep);
if (ret) {
xhci_err(xhci, "Not enough bandwidth!\n");
+ if (is_fs_or_ls(udev->speed))
+ drop_tt(udev);
+
kfree(sch_ep);
return -ENOSPC;
}
list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
- sch_ep->ep = ep;
ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
| EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
@@ -406,9 +692,12 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep,
- -sch_ep->bw_cost_per_microframe);
+ update_bus_bw(sch_bw, sch_ep, 0);
list_del(&sch_ep->endpoint);
+ if (is_fs_or_ls(udev->speed)) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
kfree(sch_ep);
break;
}
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index cc59d80b663b..8be8c5f7ff62 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -20,6 +20,19 @@
#define XHCI_MTK_MAX_ESIT 64
/**
+ * struct mu3h_sch_tt: schedule information for TT
+ *
+ * @split_bit_map: used to avoid overlapping split microframes
+ * @ep_list: Endpoints using this TT
+ * @usb_tt: usb TT related
+ * @tt_port: TT port number
+ */
+struct mu3h_sch_tt {
+ DECLARE_BITMAP(split_bit_map, XHCI_MTK_MAX_ESIT);
+ struct list_head ep_list;
+ struct usb_tt *usb_tt;
+ int tt_port;
+};
+
+/**
* struct mu3h_sch_bw_info: schedule information for bandwidth domain
*
* @bus_bw: array to keep track of bandwidth already used at each uframes
@@ -41,6 +54,10 @@ struct mu3h_sch_bw_info {
* (@repeat==1) scheduled within the interval
* @bw_cost_per_microframe: bandwidth cost per microframe
* @endpoint: linked into bandwidth domain which it belongs to
+ * @tt_endpoint: linked into the ep_list of the mu3h_sch_tt it belongs to
+ * @sch_tt: the mu3h_sch_tt this endpoint is linked into
+ * @ep_type: endpoint type
+ * @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
@@ -57,12 +74,17 @@ struct mu3h_sch_bw_info {
* times; 1: distribute the (bMaxBurst+1)*(Mult+1) packets
* according to @pkts and @repeat. normal mode is used by
* default
+ * @bw_budget_table: table to record bandwidth budget per microframe
*/
struct mu3h_sch_ep_info {
u32 esit;
u32 num_budget_microframes;
u32 bw_cost_per_microframe;
struct list_head endpoint;
+ struct list_head tt_endpoint;
+ struct mu3h_sch_tt *sch_tt;
+ u32 ep_type;
+ u32 maxpkt;
void *ep;
/*
* mtk xHCI scheduling information put into reserved DWs
@@ -73,6 +95,7 @@ struct mu3h_sch_ep_info {
u32 pkts;
u32 cs_count;
u32 burst_mode;
+ u32 bw_budget_table[0];
};
#define MU3C_U3_PORT_MAX 4
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 51dd8e00c4f8..01c57055c0c5 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -41,6 +41,13 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI 0x15b5
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI 0x15b6
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI 0x15db
+#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI 0x15d4
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI 0x15e9
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
+#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -193,6 +200,16 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_EJ168) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -336,6 +353,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
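+
+ /*
+ * Hosts flagged with XHCI_DEFAULT_PM_RUNTIME_ALLOW (the Alpine and
+ * Titan Ridge Thunderbolt controllers added above) opt in to runtime
+ * suspend by default; xhci_pci_remove() balances this with
+ * pm_runtime_forbid().
+ */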
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_allow(&dev->dev);
+
return 0;
put_usb3_hcd:
@@ -353,6 +373,10 @@ static void xhci_pci_remove(struct pci_dev *dev)
xhci = hcd_to_xhci(pci_get_drvdata(dev));
xhci->xhc_state |= XHCI_STATE_REMOVING;
+
+ if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
+ pm_runtime_forbid(&dev->dev);
+
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 94e939249b2b..32b5574ad5c5 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -18,6 +18,7 @@
#include <linux/usb/phy.h>
#include <linux/slab.h>
#include <linux/acpi.h>
+#include <linux/usb/of.h>
#include "xhci.h"
#include "xhci-plat.h"
@@ -305,6 +306,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->skip_phy_initialization = 1;
}
+ hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
+ xhci->shared_hcd->tpl_support = hcd->tpl_support;
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto disable_usb_phy;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index f0a99aa0ac58..a8d92c90fb58 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1155,6 +1155,10 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
/* Clear our internal halted state */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
}
+
+ /* if this was a soft reset, then restart */
+ if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
@@ -1602,6 +1606,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[hcd_portnum]);
+ usb_hcd_start_port_resume(&hcd->self, hcd_portnum);
bogus_port_status = true;
}
}
@@ -2132,10 +2137,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
struct xhci_virt_ep *ep, int *status)
{
+ struct xhci_slot_ctx *slot_ctx;
struct xhci_ring *ep_ring;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
+ unsigned int slot_id;
+ int ep_index;
+ slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
+ ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2144,6 +2155,7 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
switch (trb_comp_code) {
case COMP_SUCCESS:
+ ep_ring->err_count = 0;
/* handle success with untransferred data as short packet */
if (ep_trb != td->last_trb || remaining) {
xhci_warn(xhci, "WARN Successful completion on short TX\n");
@@ -2167,6 +2179,14 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_trb_len = 0;
remaining = 0;
break;
+ case COMP_USB_TRANSACTION_ERROR:
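+ /*
+ * Soft retry: transparently retry the TD with an endpoint
+ * soft reset instead of completing it with an error, up to
+ * MAX_SOFT_RETRY times; devices behind a TT are excluded.
+ */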
+ if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
+ le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
+ break;
+ *status = 0;
+ xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
+ ep_ring->stream_id, td, EP_SOFT_RESET);
+ return 0;
default:
/* do nothing */
break;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 4b463e5202a4..6b5db344de30 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -18,6 +18,7 @@
#include <linux/phy/tegra/xusb.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -107,35 +108,35 @@
#define IMEM_BLOCK_SIZE 256
struct tegra_xusb_fw_header {
- u32 boot_loadaddr_in_imem;
- u32 boot_codedfi_offset;
- u32 boot_codetag;
- u32 boot_codesize;
- u32 phys_memaddr;
- u16 reqphys_memsize;
- u16 alloc_phys_memsize;
- u32 rodata_img_offset;
- u32 rodata_section_start;
- u32 rodata_section_end;
- u32 main_fnaddr;
- u32 fwimg_cksum;
- u32 fwimg_created_time;
- u32 imem_resident_start;
- u32 imem_resident_end;
- u32 idirect_start;
- u32 idirect_end;
- u32 l2_imem_start;
- u32 l2_imem_end;
- u32 version_id;
+ __le32 boot_loadaddr_in_imem;
+ __le32 boot_codedfi_offset;
+ __le32 boot_codetag;
+ __le32 boot_codesize;
+ __le32 phys_memaddr;
+ __le16 reqphys_memsize;
+ __le16 alloc_phys_memsize;
+ __le32 rodata_img_offset;
+ __le32 rodata_section_start;
+ __le32 rodata_section_end;
+ __le32 main_fnaddr;
+ __le32 fwimg_cksum;
+ __le32 fwimg_created_time;
+ __le32 imem_resident_start;
+ __le32 imem_resident_end;
+ __le32 idirect_start;
+ __le32 idirect_end;
+ __le32 l2_imem_start;
+ __le32 l2_imem_end;
+ __le32 version_id;
u8 init_ddirect;
u8 reserved[3];
- u32 phys_addr_log_buffer;
- u32 total_log_entries;
- u32 dequeue_ptr;
- u32 dummy_var[2];
- u32 fwimg_len;
+ __le32 phys_addr_log_buffer;
+ __le32 total_log_entries;
+ __le32 dequeue_ptr;
+ __le32 dummy_var[2];
+ __le32 fwimg_len;
u8 magic[8];
- u32 ss_low_power_entry_timeout;
+ __le32 ss_low_power_entry_timeout;
u8 num_hsic_port;
u8 padding[139]; /* Pad to 256 bytes */
};
@@ -194,6 +195,11 @@ struct tegra_xusb {
struct reset_control *host_rst;
struct reset_control *ss_rst;
+ struct device *genpd_dev_host;
+ struct device *genpd_dev_ss;
+ struct device_link *genpd_dl_host;
+ struct device_link *genpd_dl_ss;
+
struct phy **phys;
unsigned int num_phys;
@@ -928,6 +934,57 @@ static int tegra_xusb_load_firmware(struct tegra_xusb *tegra)
return 0;
}
+static void tegra_xusb_powerdomain_remove(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ if (tegra->genpd_dl_ss)
+ device_link_del(tegra->genpd_dl_ss);
+ if (tegra->genpd_dl_host)
+ device_link_del(tegra->genpd_dl_host);
+ if (tegra->genpd_dev_ss)
+ dev_pm_domain_detach(tegra->genpd_dev_ss, true);
+ if (tegra->genpd_dev_host)
+ dev_pm_domain_detach(tegra->genpd_dev_host, true);
+}
+
+static int tegra_xusb_powerdomain_init(struct device *dev,
+ struct tegra_xusb *tegra)
+{
+ int err;
+
+ tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host");
+ if (IS_ERR(tegra->genpd_dev_host)) {
+ err = PTR_ERR(tegra->genpd_dev_host);
+ dev_err(dev, "failed to get host pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss");
+ if (IS_ERR(tegra->genpd_dev_ss)) {
+ err = PTR_ERR(tegra->genpd_dev_ss);
+ dev_err(dev, "failed to get superspeed pm-domain: %d\n", err);
+ return err;
+ }
+
+ tegra->genpd_dl_host = device_link_add(dev, tegra->genpd_dev_host,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_host) {
+ dev_err(dev, "adding host device link failed!\n");
+ return -ENODEV;
+ }
+
+ tegra->genpd_dl_ss = device_link_add(dev, tegra->genpd_dev_ss,
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS);
+ if (!tegra->genpd_dl_ss) {
+ dev_err(dev, "adding superspeed device link failed!\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
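+
+/*
+ * Device links are deleted before the virtual genpd devices are
+ * detached (see tegra_xusb_powerdomain_remove() above); the probe
+ * error paths below reuse the same helper via the put_powerdomains
+ * label.
+ */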
+
static int tegra_xusb_probe(struct platform_device *pdev)
{
struct tegra_xusb_mbox_msg msg;
@@ -1038,7 +1095,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
goto put_padctl;
}
- if (!pdev->dev.pm_domain) {
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra->host_rst = devm_reset_control_get(&pdev->dev,
"xusb_host");
if (IS_ERR(tegra->host_rst)) {
@@ -1069,17 +1126,22 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->host_clk,
tegra->host_rst);
if (err) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
dev_err(&pdev->dev,
"failed to enable XUSBC domain: %d\n", err);
- goto disable_xusba;
+ goto put_padctl;
}
+ } else {
+ err = tegra_xusb_powerdomain_init(&pdev->dev, tegra);
+ if (err)
+ goto put_powerdomains;
}
tegra->supplies = devm_kcalloc(&pdev->dev, tegra->soc->num_supplies,
sizeof(*tegra->supplies), GFP_KERNEL);
if (!tegra->supplies) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_supplies; i++)
@@ -1089,7 +1151,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
tegra->supplies);
if (err) {
dev_err(&pdev->dev, "failed to get regulators: %d\n", err);
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0; i < tegra->soc->num_types; i++)
@@ -1099,7 +1161,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
sizeof(*tegra->phys), GFP_KERNEL);
if (!tegra->phys) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
for (i = 0, k = 0; i < tegra->soc->num_types; i++) {
@@ -1115,7 +1177,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
"failed to get PHY %s: %ld\n", prop,
PTR_ERR(phy));
err = PTR_ERR(phy);
- goto disable_xusbc;
+ goto put_powerdomains;
}
tegra->phys[k++] = phy;
@@ -1126,7 +1188,7 @@ static int tegra_xusb_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
if (!tegra->hcd) {
err = -ENOMEM;
- goto disable_xusbc;
+ goto put_powerdomains;
}
/*
@@ -1222,12 +1284,13 @@ put_rpm:
disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
-disable_xusbc:
- if (!pdev->dev.pm_domain)
+put_powerdomains:
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
-disable_xusba:
- if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
return err;
@@ -1249,6 +1312,13 @@ static int tegra_xusb_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ if (!of_property_read_bool(pdev->dev.of_node, "power-domains")) {
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
+ tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
+ } else {
+ tegra_xusb_powerdomain_remove(&pdev->dev, tegra);
+ }
+
tegra_xusb_padctl_put(tegra->padctl);
return 0;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6230a578324c..bf0b3692dc9a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1496,6 +1496,7 @@ static inline const char *xhci_trb_type_string(u8 type)
/* How much data is left before the 64KB boundary? */
#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
(addr & (TRB_MAX_BUFF_SIZE - 1)))
+#define MAX_SOFT_RETRY 3
struct xhci_segment {
union xhci_trb *trbs;
@@ -1583,6 +1584,7 @@ struct xhci_ring {
* if we own the TRB (if we are the consumer). See section 4.9.1.
*/
u32 cycle_state;
+ unsigned int err_count;
unsigned int stream_id;
unsigned int num_segs;
unsigned int num_trbs_free;
@@ -1846,6 +1848,7 @@ struct xhci_hcd {
#define XHCI_SUSPEND_DELAY BIT_ULL(30)
#define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31)
#define XHCI_ZERO_64B_REGS BIT_ULL(32)
+#define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33)
unsigned int num_active_eps;
unsigned int limit_active_eps;
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index d746c26a8055..bd539f3058bc 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -146,8 +146,11 @@ static int appledisplay_bl_update_status(struct backlight_device *bd)
pdata->msgdata, 2,
ACD_USB_TIMEOUT);
mutex_unlock(&pdata->sysfslock);
-
- return retval;
+
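+ /*
+ * usb_control_msg() returns the number of bytes transferred on
+ * success; update_status() callbacks are expected to return 0 instead.
+ */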
+ if (retval < 0)
+ return retval;
+ else
+ return 0;
}
static int appledisplay_bl_get_brightness(struct backlight_device *bd)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8a65ce..ba05dd80a020 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -808,8 +808,8 @@ static int iowarrior_probe(struct usb_interface *interface,
dev->int_in_endpoint->bInterval);
/* create an internal buffer for interrupt data from the device */
dev->read_queue =
- kmalloc(((dev->report_size + 1) * MAX_INTERRUPT_BUFFER),
- GFP_KERNEL);
+ kmalloc_array(dev->report_size + 1, MAX_INTERRUPT_BUFFER,
+ GFP_KERNEL);
if (!dev->read_queue)
goto error;
/* Get the serial-number of the chip */
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index b3e1f553954a..ac357ce2d1a6 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -46,7 +46,9 @@ static ssize_t speed_store(struct device *dev, struct device_attribute *attr,
struct trancevibrator *tv = usb_get_intfdata(intf);
int temp, retval, old;
- temp = simple_strtoul(buf, NULL, 10);
+ retval = kstrtoint(buf, 10, &temp);
+ if (retval)
+ return retval;
if (temp > 255)
temp = 255;
else if (temp < 0)
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index d045d8458f81..ae70b9bfd797 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -185,8 +185,8 @@ static void mtu3_intr_enable(struct mtu3 *mtu)
if (mtu->is_u3_ip) {
/* Enable U3 LTSSM interrupts */
- value = HOT_RST_INTR | WARM_RST_INTR | VBUS_RISE_INTR |
- VBUS_FALL_INTR | ENTER_U3_INTR | EXIT_U3_INTR;
+ value = HOT_RST_INTR | WARM_RST_INTR |
+ ENTER_U3_INTR | EXIT_U3_INTR;
mtu3_writel(mbase, U3D_LTSSM_INTR_ENABLE, value);
}
diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c
index 5c60a8c5a0b5..bbcd3332471d 100644
--- a/drivers/usb/mtu3/mtu3_gadget.c
+++ b/drivers/usb/mtu3/mtu3_gadget.c
@@ -585,6 +585,17 @@ static const struct usb_gadget_ops mtu3_gadget_ops = {
.udc_stop = mtu3_gadget_stop,
};
+static void mtu3_state_reset(struct mtu3 *mtu)
+{
+ mtu->address = 0;
+ mtu->ep0_state = MU3D_EP0_STATE_SETUP;
+ mtu->may_wakeup = 0;
+ mtu->u1_enable = 0;
+ mtu->u2_enable = 0;
+ mtu->delayed_status = false;
+ mtu->test_mode = false;
+}
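+
+/*
+ * Shared by mtu3_gadget_disconnect() and mtu3_gadget_reset(): both
+ * paths must return the controller's software state to its default,
+ * not-attached values.
+ */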
+
static void init_hw_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
u32 epnum, u32 is_in)
{
@@ -702,6 +713,7 @@ void mtu3_gadget_disconnect(struct mtu3 *mtu)
spin_lock(&mtu->lock);
}
+ mtu3_state_reset(mtu);
usb_gadget_set_state(&mtu->g, USB_STATE_NOTATTACHED);
}
@@ -712,12 +724,6 @@ void mtu3_gadget_reset(struct mtu3 *mtu)
/* report disconnect, if we didn't flush EP state */
if (mtu->g.speed != USB_SPEED_UNKNOWN)
mtu3_gadget_disconnect(mtu);
-
- mtu->address = 0;
- mtu->ep0_state = MU3D_EP0_STATE_SETUP;
- mtu->may_wakeup = 0;
- mtu->u1_enable = 0;
- mtu->u2_enable = 0;
- mtu->delayed_status = false;
- mtu->test_mode = false;
+ else
+ mtu3_state_reset(mtu);
}
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 66143ab8c043..aaf363f19714 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -505,15 +505,19 @@ static int abx500_usb_link_status_update(struct ab8500_usb *ab)
if (is_ab8500(ab->ab8500)) {
enum ab8500_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8500_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x0F;
ret = ab8500_usb_link_status_update(ab, lsts);
} else if (is_ab8505(ab->ab8500)) {
enum ab8505_usb_link_status lsts;
- abx500_get_register_interruptible(ab->dev,
+ ret = abx500_get_register_interruptible(ab->dev,
AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg);
+ if (ret < 0)
+ return ret;
lsts = (reg >> 3) & 0x1F;
ret = ab8505_usb_link_status_update(ab, lsts);
}
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index e5aa24c1e4fd..1b1bb0ad40c3 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -563,7 +563,7 @@ static enum usb_charger_type mxs_charger_primary_detection(struct mxs_phy *x)
regmap_read(regmap, ANADIG_USB1_CHRG_DET_STAT, &val);
if (!(val & ANADIG_USB1_CHRG_DET_STAT_CHRG_DETECTED)) {
chgr_type = SDP_TYPE;
- dev_dbg(x->phy.dev, "It is a stardard downstream port\n");
+ dev_dbg(x->phy.dev, "It is a standard downstream port\n");
}
/* Disable charger detector */
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4310df46639d..a3e1290d682d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -5,6 +5,7 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
*/
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
@@ -12,6 +13,7 @@
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "common.h"
@@ -290,6 +292,79 @@ static void usbhsc_set_buswait(struct usbhs_priv *priv)
usbhs_bset(priv, BUSWAIT, 0x000F, wait);
}
+static bool usbhsc_is_multi_clks(struct usbhs_priv *priv)
+{
+ if (priv->dparam.type == USBHS_TYPE_RCAR_GEN3 ||
+ priv->dparam.type == USBHS_TYPE_RCAR_GEN3_WITH_PLL)
+ return true;
+
+ return false;
+}
+
+static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
+{
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ /* The first clock should exist */
+ priv->clks[0] = of_clk_get(dev->of_node, 0);
+ if (IS_ERR(priv->clks[0]))
+ return PTR_ERR(priv->clks[0]);
+
+ /*
+ * For backward compatibility with old DTs, this driver checks
+ * whether the return value is -ENOENT.
+ */
+ priv->clks[1] = of_clk_get(dev->of_node, 1);
+ if (PTR_ERR(priv->clks[1]) == -ENOENT)
+ priv->clks[1] = NULL;
+ else if (IS_ERR(priv->clks[1]))
+ return PTR_ERR(priv->clks[1]);
+
+ return 0;
+}
+
+static void usbhsc_clk_put(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_put(priv->clks[i]);
+}
+
+static int usbhsc_clk_prepare_enable(struct usbhs_priv *priv)
+{
+ int i, ret;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++) {
+ ret = clk_prepare_enable(priv->clks[i]);
+ if (ret) {
+ while (--i >= 0)
+ clk_disable_unprepare(priv->clks[i]);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void usbhsc_clk_disable_unprepare(struct usbhs_priv *priv)
+{
+ int i;
+
+ if (!usbhsc_is_multi_clks(priv))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(priv->clks); i++)
+ clk_disable_unprepare(priv->clks[i]);
+}
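+
+/*
+ * Note: clk_prepare_enable() and clk_disable_unprepare() accept a NULL
+ * clk, so the optional second clock (clks[1] == NULL on old DTs) is
+ * handled transparently by the helpers above.
+ */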
+
/*
* platform default param
*/
@@ -340,6 +415,10 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable clks */
+ if (usbhsc_clk_prepare_enable(priv))
+ return;
+
/* enable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
@@ -352,6 +431,9 @@ static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
/* disable platform power */
usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+ /* disable clks */
+ usbhsc_clk_disable_unprepare(priv);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -478,6 +560,10 @@ static const struct of_device_id usbhs_of_match[] = {
.data = (void *)USBHS_TYPE_RCAR_GEN3,
},
{
+ .compatible = "renesas,usbhs-r8a77990",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,usbhs-r8a77995",
.data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
},
@@ -574,6 +660,10 @@ static int usbhs_probe(struct platform_device *pdev)
return PTR_ERR(priv->edev);
}
+ priv->rsts = devm_reset_control_array_get_optional_shared(&pdev->dev);
+ if (IS_ERR(priv->rsts))
+ return PTR_ERR(priv->rsts);
+
/*
* care platform info
*/
@@ -591,15 +681,6 @@ static int usbhs_probe(struct platform_device *pdev)
break;
case USBHS_TYPE_RCAR_GEN3_WITH_PLL:
priv->pfunc = usbhs_rcar3_with_pll_ops;
- if (!IS_ERR_OR_NULL(priv->edev)) {
- priv->nb.notifier_call = priv->pfunc.notifier;
- ret = devm_extcon_register_notifier(&pdev->dev,
- priv->edev,
- EXTCON_USB_HOST,
- &priv->nb);
- if (ret < 0)
- dev_err(&pdev->dev, "no notifier registered\n");
- }
break;
case USBHS_TYPE_RZA1:
priv->pfunc = usbhs_rza1_ops;
@@ -658,6 +739,14 @@ static int usbhs_probe(struct platform_device *pdev)
/* dev_set_drvdata should be called after usbhs_mod_init */
platform_set_drvdata(pdev, priv);
+ ret = reset_control_deassert(priv->rsts);
+ if (ret)
+ goto probe_fail_rst;
+
+ ret = usbhsc_clk_get(&pdev->dev, priv);
+ if (ret)
+ goto probe_fail_clks;
+
/*
 * device reset here because
* USB device might be used in boot loader.
@@ -711,6 +800,10 @@ static int usbhs_probe(struct platform_device *pdev)
return ret;
probe_end_mod_exit:
+ usbhsc_clk_put(priv);
+probe_fail_clks:
+ reset_control_assert(priv->rsts);
+probe_fail_rst:
usbhs_mod_remove(priv);
probe_end_fifo_exit:
usbhs_fifo_remove(priv);
@@ -739,6 +832,8 @@ static int usbhs_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
usbhs_platform_call(priv, hardware_exit, pdev);
+ usbhsc_clk_put(priv);
+ reset_control_assert(priv->rsts);
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 6137f7942c05..3777af848a35 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -8,8 +8,10 @@
#ifndef RENESAS_USB_DRIVER_H
#define RENESAS_USB_DRIVER_H
+#include <linux/clk.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/usb/renesas_usbhs.h>
struct usbhs_priv;
@@ -255,7 +257,6 @@ struct usbhs_priv {
struct platform_device *pdev;
struct extcon_dev *edev;
- struct notifier_block nb;
spinlock_t lock;
@@ -277,6 +278,8 @@ struct usbhs_priv {
struct usbhs_fifo_info fifo_info;
struct phy *phy;
+ struct reset_control *rsts;
+ struct clk *clks[2];
};
/*
diff --git a/drivers/usb/renesas_usbhs/rcar3.c b/drivers/usb/renesas_usbhs/rcar3.c
index d0ea4ff89622..aa3820448286 100644
--- a/drivers/usb/renesas_usbhs/rcar3.c
+++ b/drivers/usb/renesas_usbhs/rcar3.c
@@ -27,7 +27,6 @@
* Remarks: bit[31:11] and bit[9:6] should be 0
*/
#define UGCTRL2_RESERVED_3 0x00000001 /* bit[3:0] should be B'0001 */
-#define UGCTRL2_USB0SEL_EHCI 0x00000010
#define UGCTRL2_USB0SEL_HSUSB 0x00000020
#define UGCTRL2_USB0SEL_OTG 0x00000030
#define UGCTRL2_VBUSSEL 0x00000400
@@ -50,14 +49,6 @@ static void usbhs_rcar3_set_ugctrl2(struct usbhs_priv *priv, u32 val)
usbhs_write32(priv, UGCTRL2, val | UGCTRL2_RESERVED_3);
}
-static void usbhs_rcar3_set_usbsel(struct usbhs_priv *priv, bool ehci)
-{
- if (ehci)
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_EHCI);
- else
- usbhs_rcar3_set_ugctrl2(priv, UGCTRL2_USB0SEL_HSUSB);
-}
-
static int usbhs_rcar3_power_ctrl(struct platform_device *pdev,
void __iomem *base, int enable)
{
@@ -83,14 +74,11 @@ static int usbhs_rcar3_power_and_pll_ctrl(struct platform_device *pdev,
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
u32 val;
int timeout = 1000;
- bool is_host = false;
if (enable) {
usbhs_write32(priv, UGCTRL, 0); /* release PLLRESET */
- if (priv->edev)
- is_host = extcon_get_state(priv->edev, EXTCON_USB_HOST);
-
- usbhs_rcar3_set_usbsel(priv, is_host);
+ usbhs_rcar3_set_ugctrl2(priv,
+ UGCTRL2_USB0SEL_OTG | UGCTRL2_VBUSSEL);
usbhs_bset(priv, LPSTS, LPSTS_SUSPM, LPSTS_SUSPM);
do {
@@ -112,16 +100,6 @@ static int usbhs_rcar3_get_id(struct platform_device *pdev)
return USBHS_GADGET;
}
-static int usbhs_rcar3_notifier(struct notifier_block *nb, unsigned long event,
- void *data)
-{
- struct usbhs_priv *priv = container_of(nb, struct usbhs_priv, nb);
-
- usbhs_rcar3_set_usbsel(priv, !!event);
-
- return NOTIFY_DONE;
-}
-
const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
.power_ctrl = usbhs_rcar3_power_ctrl,
.get_id = usbhs_rcar3_get_id,
@@ -130,5 +108,4 @@ const struct renesas_usbhs_platform_callback usbhs_rcar3_ops = {
const struct renesas_usbhs_platform_callback usbhs_rcar3_with_pll_ops = {
.power_ctrl = usbhs_rcar3_power_and_pll_ctrl,
.get_id = usbhs_rcar3_get_id,
- .notifier = usbhs_rcar3_notifier,
};
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e0035c023120..ed51bc48eea6 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -378,7 +378,7 @@ static int cypress_serial_control(struct tty_struct *tty,
retval = -ENOTTY;
goto out;
}
- dev_dbg(dev, "%s - retreiving serial line settings\n", __func__);
+ dev_dbg(dev, "%s - retrieving serial line settings\n", __func__);
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
@@ -769,7 +769,7 @@ send:
usb_fill_int_urb(port->interrupt_out_urb, port->serial->dev,
usb_sndintpipe(port->serial->dev, port->interrupt_out_endpointAddress),
- port->interrupt_out_buffer, port->interrupt_out_size,
+ port->interrupt_out_buffer, actual_size,
cypress_write_int_callback, port, priv->write_urb_interval);
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (result) {
@@ -863,7 +863,7 @@ static void cypress_set_termios(struct tty_struct *tty,
struct cypress_private *priv = usb_get_serial_port_data(port);
struct device *dev = &port->dev;
int data_bits, stop_bits, parity_type, parity_enable;
- unsigned cflag, iflag;
+ unsigned int cflag;
unsigned long flags;
__u8 oldlines;
int linechange = 0;
@@ -899,7 +899,6 @@ static void cypress_set_termios(struct tty_struct *tty,
tty->termios.c_cflag &= ~(CMSPAR|CRTSCTS);
cflag = tty->termios.c_cflag;
- iflag = tty->termios.c_iflag;
/* check if there are new settings */
if (old_termios) {
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 758ba789e997..609198d9594c 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -39,6 +39,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
+#include <linux/gpio/driver.h>
#include <linux/usb/serial.h>
#include "ftdi_sio.h"
#include "ftdi_sio_ids.h"
@@ -72,6 +73,15 @@ struct ftdi_private {
unsigned int latency; /* latency setting in use */
unsigned short max_packet_size;
struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gc;
+ struct mutex gpio_lock; /* protects GPIO state */
+ bool gpio_registered; /* is the gpiochip in kernel registered */
+ bool gpio_used; /* true if the user requested a gpio */
+ u8 gpio_altfunc; /* which pins are in gpio mode */
+ u8 gpio_output; /* pin directions cache */
+ u8 gpio_value; /* pin value for outputs */
+#endif
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -1764,6 +1774,375 @@ static void remove_sysfs_attrs(struct usb_serial_port *port)
}
+#ifdef CONFIG_GPIOLIB
+
+static int ftdi_set_bitmode(struct usb_serial_port *port, u8 mode)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+ u16 val;
+
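+ /*
+ * wValue layout for SET_BITMODE: the bitmode goes in the high byte;
+ * in CBUS bit-bang mode the low byte carries the pin directions in
+ * the high nibble and the output values in the low nibble.
+ */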
+ val = (mode << 8) | (priv->gpio_output << 4) | priv->gpio_value;
+ result = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ FTDI_SIO_SET_BITMODE_REQUEST,
+ FTDI_SIO_SET_BITMODE_REQUEST_TYPE, val,
+ priv->interface, NULL, 0, WDR_TIMEOUT);
+ if (result < 0) {
+ dev_err(&serial->interface->dev,
+ "bitmode request failed for value 0x%04x: %d\n",
+ val, result);
+ }
+
+ return result;
+}
+
+static int ftdi_set_cbus_pins(struct usb_serial_port *port)
+{
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_CBUS);
+}
+
+static int ftdi_exit_cbus_mode(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ priv->gpio_output = 0;
+ priv->gpio_value = 0;
+ return ftdi_set_bitmode(port, FTDI_SIO_BITMODE_RESET);
+}
+
+static int ftdi_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ if (priv->gpio_altfunc & BIT(offset))
+ return -ENODEV;
+
+ mutex_lock(&priv->gpio_lock);
+ if (!priv->gpio_used) {
+ /* Set default pin states, as we cannot get them from device */
+ priv->gpio_output = 0x00;
+ priv->gpio_value = 0x00;
+ result = ftdi_set_cbus_pins(port);
+ if (result) {
+ mutex_unlock(&priv->gpio_lock);
+ return result;
+ }
+
+ priv->gpio_used = true;
+ }
+ mutex_unlock(&priv->gpio_lock);
+
+ return 0;
+}
+
+static int ftdi_read_cbus_pins(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ unsigned char *buf;
+ int result;
+
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ result = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_PINS_REQUEST,
+ FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
+ priv->interface, buf, 1, WDR_TIMEOUT);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ } else {
+ result = buf[0];
+ }
+
+ kfree(buf);
+
+ return result;
+}
+
+static int ftdi_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ return !!(result & BIT(gpio));
+}
+
+static void ftdi_gpio_set(struct gpio_chip *gc, unsigned int gpio, int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_get_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ int result;
+
+ result = ftdi_read_cbus_pins(port);
+ if (result < 0)
+ return result;
+
+ *bits = result & *mask;
+
+ return 0;
+}
+
+static void ftdi_gpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_value &= ~(*mask);
+ priv->gpio_value |= *bits & *mask;
+ ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+}
+
+static int ftdi_gpio_direction_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ return !(priv->gpio_output & BIT(gpio));
+}
+
+static int ftdi_gpio_direction_input(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output &= ~BIT(gpio);
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_gpio_direction_output(struct gpio_chip *gc, unsigned int gpio,
+ int value)
+{
+ struct usb_serial_port *port = gpiochip_get_data(gc);
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ int result;
+
+ mutex_lock(&priv->gpio_lock);
+
+ priv->gpio_output |= BIT(gpio);
+ if (value)
+ priv->gpio_value |= BIT(gpio);
+ else
+ priv->gpio_value &= ~BIT(gpio);
+
+ result = ftdi_set_cbus_pins(port);
+
+ mutex_unlock(&priv->gpio_lock);
+
+ return result;
+}
+
+static int ftdi_read_eeprom(struct usb_serial *serial, void *dst, u16 addr,
+ u16 nbytes)
+{
+ int read = 0;
+
+ if (addr % 2 != 0)
+ return -EINVAL;
+ if (nbytes % 2 != 0)
+ return -EINVAL;
+
+ /* Read EEPROM two bytes at a time */
+ while (read < nbytes) {
+ int rv;
+
+ rv = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0),
+ FTDI_SIO_READ_EEPROM_REQUEST,
+ FTDI_SIO_READ_EEPROM_REQUEST_TYPE,
+ 0, (addr + read) / 2, dst + read, 2,
+ WDR_TIMEOUT);
+ if (rv < 2) {
+ if (rv >= 0)
+ return -EIO;
+ else
+ return rv;
+ }
+
+ read += rv;
+ }
+
+ return 0;
+}
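+
+/*
+ * The EEPROM is word-addressed: wIndex in the control request above is
+ * (addr + read) / 2, which is why both addr and nbytes must be even.
+ */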
+
+static int ftdi_gpio_init_ft232r(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ u16 cbus_config;
+ u8 *buf;
+ int ret;
+ int i;
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = ftdi_read_eeprom(port->serial, buf, 0x14, 2);
+ if (ret < 0)
+ goto out_free;
+
+ cbus_config = le16_to_cpup((__le16 *)buf);
+ dev_dbg(&port->dev, "cbus_config = 0x%04x\n", cbus_config);
+
+ priv->gc.ngpio = 4;
+
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if ((cbus_config & 0xf) == FTDI_FT232R_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ cbus_config >>= 4;
+ }
+out_free:
+ kfree(buf);
+
+ return ret;
+}
+
+static int ftdi_gpio_init_ftx(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ const u16 cbus_cfg_addr = 0x1a;
+ const u16 cbus_cfg_size = 4;
+ u8 *cbus_cfg_buf;
+ int result;
+ u8 i;
+
+ cbus_cfg_buf = kmalloc(cbus_cfg_size, GFP_KERNEL);
+ if (!cbus_cfg_buf)
+ return -ENOMEM;
+
+ result = ftdi_read_eeprom(serial, cbus_cfg_buf,
+ cbus_cfg_addr, cbus_cfg_size);
+ if (result < 0)
+ goto out_free;
+
+ /* FIXME: the FT234XD alone has 1 GPIO, but how do we recognize this IC? */
+ priv->gc.ngpio = 4;
+
+ /* Determine which pins are configured for CBUS bitbanging */
+ priv->gpio_altfunc = 0xff;
+ for (i = 0; i < priv->gc.ngpio; ++i) {
+ if (cbus_cfg_buf[i] == FTDI_FTX_CBUS_MUX_GPIO)
+ priv->gpio_altfunc &= ~BIT(i);
+ }
+
+out_free:
+ kfree(cbus_cfg_buf);
+
+ return result;
+}
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+ struct usb_serial *serial = port->serial;
+ int result;
+
+ switch (priv->chip_type) {
+ case FT232RL:
+ result = ftdi_gpio_init_ft232r(port);
+ break;
+ case FTX:
+ result = ftdi_gpio_init_ftx(port);
+ break;
+ default:
+ return 0;
+ }
+
+ if (result < 0)
+ return result;
+
+ mutex_init(&priv->gpio_lock);
+
+ priv->gc.label = "ftdi-cbus";
+ priv->gc.request = ftdi_gpio_request;
+ priv->gc.get_direction = ftdi_gpio_direction_get;
+ priv->gc.direction_input = ftdi_gpio_direction_input;
+ priv->gc.direction_output = ftdi_gpio_direction_output;
+ priv->gc.get = ftdi_gpio_get;
+ priv->gc.set = ftdi_gpio_set;
+ priv->gc.get_multiple = ftdi_gpio_get_multiple;
+ priv->gc.set_multiple = ftdi_gpio_set_multiple;
+ priv->gc.owner = THIS_MODULE;
+ priv->gc.parent = &serial->interface->dev;
+ priv->gc.base = -1;
+ priv->gc.can_sleep = true;
+
+ result = gpiochip_add_data(&priv->gc, port);
+ if (!result)
+ priv->gpio_registered = true;
+
+ return result;
+}
+
+static void ftdi_gpio_remove(struct usb_serial_port *port)
+{
+ struct ftdi_private *priv = usb_get_serial_port_data(port);
+
+ if (priv->gpio_registered) {
+ gpiochip_remove(&priv->gc);
+ priv->gpio_registered = false;
+ }
+
+ if (priv->gpio_used) {
+ /* Exiting CBUS-mode does not reset pin states. */
+ ftdi_exit_cbus_mode(port);
+ priv->gpio_used = false;
+ }
+}
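+
+/*
+ * Consumer view (illustrative, not part of the driver): once registered,
+ * the CBUS pins appear as a regular gpiochip labelled "ftdi-cbus", so
+ * they can be located with gpiodetect and driven with gpioset/gpioget
+ * from libgpiod like any other GPIO controller.
+ */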
+
+#else
+
+static int ftdi_gpio_init(struct usb_serial_port *port)
+{
+ return 0;
+}
+
+static void ftdi_gpio_remove(struct usb_serial_port *port) { }
+
+#endif /* CONFIG_GPIOLIB */
+
/*
* ***************************************************************************
* FTDI driver specific functions
@@ -1792,7 +2171,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
{
struct ftdi_private *priv;
const struct ftdi_sio_quirk *quirk = usb_get_serial_data(port->serial);
-
+ int result;
priv = kzalloc(sizeof(struct ftdi_private), GFP_KERNEL);
if (!priv)
@@ -1811,6 +2190,14 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
priv->latency = 16;
write_latency_timer(port);
create_sysfs_attrs(port);
+
+ result = ftdi_gpio_init(port);
+ if (result < 0) {
+ dev_err(&port->serial->interface->dev,
+ "GPIO initialisation failed: %d\n",
+ result);
+ }
+
return 0;
}
@@ -1928,6 +2315,8 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
+ ftdi_gpio_remove(port);
+
remove_sysfs_attrs(port);
kfree(priv);
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index dcd0b6e05baf..a79a1325b4d9 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -35,7 +35,10 @@
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
-#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
+#define FTDI_SIO_GET_LATENCY_TIMER 0x0a /* Get the latency timer */
+#define FTDI_SIO_SET_BITMODE 0x0b /* Set bitbang mode */
+#define FTDI_SIO_READ_PINS 0x0c /* Read immediate value of pins */
+#define FTDI_SIO_READ_EEPROM 0x90 /* Read EEPROM */
/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
@@ -433,6 +436,29 @@ enum ftdi_sio_baudrate {
* 1 = active
*/
+/* FTDI_SIO_SET_BITMODE */
+#define FTDI_SIO_SET_BITMODE_REQUEST_TYPE 0x40
+#define FTDI_SIO_SET_BITMODE_REQUEST FTDI_SIO_SET_BITMODE
+
+/* Possible bitmodes for FTDI_SIO_SET_BITMODE_REQUEST */
+#define FTDI_SIO_BITMODE_RESET 0x00
+#define FTDI_SIO_BITMODE_CBUS 0x20
+
+/* FTDI_SIO_READ_PINS */
+#define FTDI_SIO_READ_PINS_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_PINS_REQUEST FTDI_SIO_READ_PINS
+
+/*
+ * FTDI_SIO_READ_EEPROM
+ *
+ * EEPROM format found in FTDI AN_201, "FT-X MTP memory Configuration",
+ * http://www.ftdichip.com/Support/Documents/AppNotes/AN_201_FT-X%20MTP%20Memory%20Configuration.pdf
+ */
+#define FTDI_SIO_READ_EEPROM_REQUEST_TYPE 0xc0
+#define FTDI_SIO_READ_EEPROM_REQUEST FTDI_SIO_READ_EEPROM
+
+#define FTDI_FTX_CBUS_MUX_GPIO 0x8
+#define FTDI_FT232R_CBUS_MUX_GPIO 0xa
/* Descriptors returned by the device
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index ec84758f0e23..6fd427284b12 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -23,16 +23,16 @@ config USB_STORAGE
To compile this driver as a module, choose M here: the
module will be called usb-storage.
+if USB_STORAGE
+
config USB_STORAGE_DEBUG
bool "USB Mass Storage verbose debug"
- depends on USB_STORAGE
help
Say Y here in order to have the USB Mass Storage code generate
verbose debugging messages.
config USB_STORAGE_REALTEK
tristate "Realtek Card Reader support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the power-saving function
for Realtek RTS51xx USB card readers.
@@ -46,7 +46,6 @@ config REALTEK_AUTOPM
config USB_STORAGE_DATAFAB
tristate "Datafab Compact Flash Reader support"
- depends on USB_STORAGE
help
Support for certain Datafab CompactFlash readers.
Datafab has a web page at <http://www.datafab.com/>.
@@ -55,7 +54,6 @@ config USB_STORAGE_DATAFAB
config USB_STORAGE_FREECOM
tristate "Freecom USB/ATAPI Bridge support"
- depends on USB_STORAGE
help
Support for the Freecom USB to IDE/ATAPI adaptor.
Freecom has a web page at <http://www.freecom.de/>.
@@ -64,7 +62,6 @@ config USB_STORAGE_FREECOM
config USB_STORAGE_ISD200
tristate "ISD-200 USB/ATA Bridge support"
- depends on USB_STORAGE
---help---
Say Y here if you want to use USB Mass Store devices based
on the In-Systems Design ISD-200 USB/ATA bridge.
@@ -82,7 +79,6 @@ config USB_STORAGE_ISD200
config USB_STORAGE_USBAT
tristate "USBAT/USBAT02-based storage support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support storage devices
based on the SCM/Shuttle USBAT/USBAT02 processors.
@@ -105,7 +101,6 @@ config USB_STORAGE_USBAT
config USB_STORAGE_SDDR09
tristate "SanDisk SDDR-09 (and other SmartMedia, including DPCM) support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-09
SmartMedia reader in the USB Mass Storage driver.
@@ -115,7 +110,6 @@ config USB_STORAGE_SDDR09
config USB_STORAGE_SDDR55
tristate "SanDisk SDDR-55 SmartMedia support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Sandisk SDDR-55
SmartMedia reader in the USB Mass Storage driver.
@@ -124,7 +118,6 @@ config USB_STORAGE_SDDR55
config USB_STORAGE_JUMPSHOT
tristate "Lexar Jumpshot Compact Flash Reader"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Lexar Jumpshot
USB CompactFlash reader.
@@ -133,7 +126,6 @@ config USB_STORAGE_JUMPSHOT
config USB_STORAGE_ALAUDA
tristate "Olympus MAUSB-10/Fuji DPC-R1 support"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Olympus MAUSB-10
and Fujifilm DPC-R1 USB Card reader/writer devices.
@@ -145,7 +137,6 @@ config USB_STORAGE_ALAUDA
config USB_STORAGE_ONETOUCH
tristate "Support OneTouch Button on Maxtor Hard Drives"
- depends on USB_STORAGE
depends on INPUT=y || INPUT=USB_STORAGE
help
Say Y here to include additional code to support the Maxtor OneTouch
@@ -160,7 +151,6 @@ config USB_STORAGE_ONETOUCH
config USB_STORAGE_KARMA
tristate "Support for Rio Karma music player"
- depends on USB_STORAGE
help
Say Y here to include additional code to support the Rio Karma
USB interface.
@@ -174,7 +164,6 @@ config USB_STORAGE_KARMA
config USB_STORAGE_CYPRESS_ATACB
tristate "SAT emulation on Cypress USB/ATA Bridge with ATACB"
- depends on USB_STORAGE
---help---
Say Y here if you want to use SAT (ata pass through) on devices based
on the Cypress USB/ATA bridge supporting ATACB. This will allow you
@@ -187,19 +176,15 @@ config USB_STORAGE_CYPRESS_ATACB
config USB_STORAGE_ENE_UB6250
tristate "USB ENE card reader support"
- depends on SCSI
- depends on USB_STORAGE
---help---
Say Y here if you wish to control a ENE SD/MS Card reader.
Note that this driver does not support SM cards.
- This option depends on 'SCSI' support being enabled, but you
- probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
-
To compile this driver as a module, choose M here: the
module will be called ums-eneub6250.
+endif # USB_STORAGE
+
config USB_UAS
tristate "USB Attached SCSI"
depends on SCSI && USB_STORAGE
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index f5e4500d9970..2b474d60b4db 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1153,7 +1153,7 @@ static int isd200_get_inquiry_data( struct us_data *us )
/* Fill in vendor identification fields */
src = (__be16 *)&id[ATA_ID_PROD];
dest = (__u16*)info->InquiryData.VendorId;
- for (i=0;i<4;i++)
+ for (i = 0; i < 4; i++)
dest[i] = be16_to_cpu(src[i]);
src = (__be16 *)&id[ATA_ID_PROD + 8/2];
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index 00878c386dd0..30a847c2089d 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -45,50 +45,7 @@ menuconfig TYPEC
if TYPEC
-config TYPEC_TCPM
- tristate "USB Type-C Port Controller Manager"
- depends on USB
- select USB_ROLE_SWITCH
- select POWER_SUPPLY
- help
- The Type-C Port Controller Manager provides a USB PD and USB Type-C
- state machine for use with Type-C Port Controllers.
-
-if TYPEC_TCPM
-
-config TYPEC_TCPCI
- tristate "Type-C Port Controller Interface driver"
- depends on I2C
- select REGMAP_I2C
- help
- Type-C Port Controller driver for TCPCI-compliant controller.
-
-config TYPEC_RT1711H
- tristate "Richtek RT1711H Type-C chip driver"
- depends on I2C
- select TYPEC_TCPCI
- help
- Richtek RT1711H Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
-
-source "drivers/usb/typec/fusb302/Kconfig"
-
-config TYPEC_WCOVE
- tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
- depends on ACPI
- depends on INTEL_SOC_PMIC
- depends on INTEL_PMC_IPC
- depends on BXT_WC_PMIC_OPREGION
- help
- This driver adds support for USB Type-C detection on Intel Broxton
- platforms that have Intel Whiskey Cove PMIC. The driver can detect the
- role and cable orientation.
-
- To compile this driver as module, choose M here: the module will be
- called typec_wcove
-
-endif # TYPEC_TCPM
+source "drivers/usb/typec/tcpm/Kconfig"
source "drivers/usb/typec/ucsi/Kconfig"
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index 45b0aef428a8..6696b7263d61 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -2,11 +2,7 @@
obj-$(CONFIG_TYPEC) += typec.o
typec-y := class.o mux.o bus.o
obj-$(CONFIG_TYPEC) += altmodes/
-obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
-obj-y += fusb302/
-obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o
obj-$(CONFIG_TYPEC) += mux/
-obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
-obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index e61dffb27a0c..5db0593ca0bd 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -1322,7 +1322,7 @@ void typec_set_pwr_role(struct typec_port *port, enum typec_role role)
EXPORT_SYMBOL_GPL(typec_set_pwr_role);
/**
- * typec_set_pwr_role - Report VCONN source change
+ * typec_set_vconn_role - Report VCONN source change
* @port: The USB Type-C Port which VCONN role changed
* @role: Source when @port is sourcing VCONN, or Sink when it's not
*
@@ -1500,7 +1500,7 @@ typec_port_register_altmode(struct typec_port *port,
sprintf(id, "id%04xm%02x", desc->svid, desc->mode);
- mux = typec_mux_get(port->dev.parent, id);
+ mux = typec_mux_get(&port->dev, id);
if (IS_ERR(mux))
return ERR_CAST(mux);
@@ -1540,18 +1540,6 @@ struct typec_port *typec_register_port(struct device *parent,
return ERR_PTR(id);
}
- port->sw = typec_switch_get(cap->fwnode ? &port->dev : parent);
- if (IS_ERR(port->sw)) {
- ret = PTR_ERR(port->sw);
- goto err_switch;
- }
-
- port->mux = typec_mux_get(parent, "typec-mux");
- if (IS_ERR(port->mux)) {
- ret = PTR_ERR(port->mux);
- goto err_mux;
- }
-
switch (cap->type) {
case TYPEC_PORT_SRC:
port->pwr_role = TYPEC_SOURCE;
@@ -1592,13 +1580,26 @@ struct typec_port *typec_register_port(struct device *parent,
port->port_type = cap->type;
port->prefer_role = cap->prefer_role;
+ device_initialize(&port->dev);
port->dev.class = typec_class;
port->dev.parent = parent;
port->dev.fwnode = cap->fwnode;
port->dev.type = &typec_port_dev_type;
dev_set_name(&port->dev, "port%d", id);
- ret = device_register(&port->dev);
+ port->sw = typec_switch_get(&port->dev);
+ if (IS_ERR(port->sw)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->sw);
+ }
+
+ port->mux = typec_mux_get(&port->dev, "typec-mux");
+ if (IS_ERR(port->mux)) {
+ put_device(&port->dev);
+ return ERR_CAST(port->mux);
+ }
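+
+ /*
+ * With device_initialize() done first, the switch and mux lookups
+ * above can use the port device itself, and failures unwind with a
+ * plain put_device() instead of the old err_switch/err_mux labels.
+ */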
+
+ ret = device_add(&port->dev);
if (ret) {
dev_err(parent, "failed to register port (%d)\n", ret);
put_device(&port->dev);
@@ -1606,15 +1607,6 @@ struct typec_port *typec_register_port(struct device *parent,
}
return port;
-
-err_mux:
- typec_switch_put(port->sw);
-
-err_switch:
- ida_simple_remove(&typec_index_ida, port->id);
- kfree(port);
-
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(typec_register_port);
diff --git a/drivers/usb/typec/fusb302/Kconfig b/drivers/usb/typec/fusb302/Kconfig
deleted file mode 100644
index fce099ff39fe..000000000000
--- a/drivers/usb/typec/fusb302/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-config TYPEC_FUSB302
- tristate "Fairchild FUSB302 Type-C chip driver"
- depends on I2C
- help
- The Fairchild FUSB302 Type-C chip driver that works with
- Type-C Port Controller Manager to provide USB PD and USB
- Type-C functionalities.
diff --git a/drivers/usb/typec/fusb302/Makefile b/drivers/usb/typec/fusb302/Makefile
deleted file mode 100644
index 3b51b33631a0..000000000000
--- a/drivers/usb/typec/fusb302/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig
new file mode 100644
index 000000000000..f03ea8a61768
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Kconfig
@@ -0,0 +1,52 @@
+config TYPEC_TCPM
+ tristate "USB Type-C Port Controller Manager"
+ depends on USB
+ select USB_ROLE_SWITCH
+ select POWER_SUPPLY
+ help
+ The Type-C Port Controller Manager provides a USB PD and USB Type-C
+ state machine for use with Type-C Port Controllers.
+
+if TYPEC_TCPM
+
+config TYPEC_TCPCI
+ tristate "Type-C Port Controller Interface driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Type-C Port Controller driver for TCPCI-compliant controller.
+
+if TYPEC_TCPCI
+
+config TYPEC_RT1711H
+ tristate "Richtek RT1711H Type-C chip driver"
+ help
+ Richtek RT1711H Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+endif # TYPEC_TCPCI
+
+config TYPEC_FUSB302
+ tristate "Fairchild FUSB302 Type-C chip driver"
+ depends on I2C
+ help
+ The Fairchild FUSB302 Type-C chip driver that works with
+ Type-C Port Controller Manager to provide USB PD and USB
+ Type-C functionalities.
+
+config TYPEC_WCOVE
+ tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver"
+ depends on ACPI
+ depends on INTEL_SOC_PMIC
+ depends on INTEL_PMC_IPC
+ depends on BXT_WC_PMIC_OPREGION
+ help
+ This driver adds support for USB Type-C on Intel Broxton platforms
+ that have Intel Whiskey Cove PMIC. The driver works with USB Type-C
+ Port Controller Manager to provide USB PD and Type-C functionalities.
+
+ To compile this driver as module, choose M here: the module will be
+ called typec_wcove.ko
+
+endif # TYPEC_TCPM
diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile
new file mode 100644
index 000000000000..a5ff6c8eb892
--- /dev/null
+++ b/drivers/usb/typec/tcpm/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_TYPEC_TCPM) += tcpm.o
+obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o
+obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o
+typec_wcove-y := wcove.o
+obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o
+obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 82bed9810be6..43b64d9309d0 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -42,19 +42,12 @@
#define T_BC_LVL_DEBOUNCE_DELAY_MS 30
enum toggling_mode {
- TOGGLINE_MODE_OFF,
+ TOGGLING_MODE_OFF,
TOGGLING_MODE_DRP,
TOGGLING_MODE_SNK,
TOGGLING_MODE_SRC,
};
-static const char * const toggling_mode_name[] = {
- [TOGGLINE_MODE_OFF] = "toggling_OFF",
- [TOGGLING_MODE_DRP] = "toggling_DRP",
- [TOGGLING_MODE_SNK] = "toggling_SNK",
- [TOGGLING_MODE_SRC] = "toggling_SRC",
-};
-
enum src_current_status {
SRC_CURRENT_DEFAULT,
SRC_CURRENT_MEDIUM,
@@ -601,7 +594,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
chip->intr_comp_chng = false;
/* configure toggling mode: none/snk/src/drp */
switch (mode) {
- case TOGGLINE_MODE_OFF:
+ case TOGGLING_MODE_OFF:
ret = fusb302_i2c_mask_write(chip, FUSB_REG_CONTROL2,
FUSB_REG_CONTROL2_MODE_MASK,
FUSB_REG_CONTROL2_MODE_NONE);
@@ -633,7 +626,7 @@ static int fusb302_set_toggling(struct fusb302_chip *chip,
break;
}
- if (mode == TOGGLINE_MODE_OFF) {
+ if (mode == TOGGLING_MODE_OFF) {
/* mask TOGDONE interrupt */
ret = fusb302_i2c_set_bits(chip, FUSB_REG_MASKA,
FUSB_REG_MASKA_TOGDONE);
@@ -686,6 +679,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
int ret = 0;
bool pull_up, pull_down;
u8 rd_mda;
+ enum toggling_mode mode;
mutex_lock(&chip->lock);
switch (cc) {
@@ -709,7 +703,7 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
ret = -EINVAL;
goto done;
}
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip, "cannot stop toggling, ret=%d", ret);
goto done;
@@ -771,6 +765,29 @@ static int tcpm_set_cc(struct tcpc_dev *dev, enum typec_cc_status cc)
chip->intr_comp_chng = false;
}
fusb302_log(chip, "cc := %s", typec_cc_status_name[cc]);
+
+ /* Enable detection for fixed SNK-only or SRC-only roles */
+ switch (cc) {
+ case TYPEC_CC_RD:
+ mode = TOGGLING_MODE_SNK;
+ break;
+ case TYPEC_CC_RP_DEF:
+ case TYPEC_CC_RP_1_5:
+ case TYPEC_CC_RP_3_0:
+ mode = TOGGLING_MODE_SRC;
+ break;
+ default:
+ mode = TOGGLING_MODE_OFF;
+ break;
+ }
+
+ if (mode != TOGGLING_MODE_OFF) {
+ ret = fusb302_set_toggling(chip, mode);
+ if (ret < 0)
+ fusb302_log(chip,
+ "cannot set fixed role toggling mode, ret=%d",
+ ret);
+ }
done:
mutex_unlock(&chip->lock);
@@ -1178,10 +1195,6 @@ static const u32 src_pdo[] = {
PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
};
-static const u32 snk_pdo[] = {
- PDO_FIXED(5000, 400, PDO_FIXED_FLAGS),
-};
-
static const struct tcpc_config fusb302_tcpc_config = {
.src_pdo = src_pdo,
.nr_src_pdo = ARRAY_SIZE(src_pdo),
@@ -1303,7 +1316,7 @@ static int fusb302_handle_togdone_snk(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1399,7 +1412,7 @@ static int fusb302_handle_togdone_src(struct fusb302_chip *chip,
tcpm_cc_change(chip->tcpm_port);
}
/* turn off toggling */
- ret = fusb302_set_toggling(chip, TOGGLINE_MODE_OFF);
+ ret = fusb302_set_toggling(chip, TOGGLING_MODE_OFF);
if (ret < 0) {
fusb302_log(chip,
"cannot set toggling mode off, ret=%d", ret);
@@ -1730,12 +1743,14 @@ static int fusb302_probe(struct i2c_client *client,
return -ENOMEM;
chip->i2c_client = client;
- i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
chip->tcpc_config = fusb302_tcpc_config;
chip->tcpc_dev.config = &chip->tcpc_config;
mutex_init(&chip->lock);
+ chip->tcpc_dev.fwnode =
+ device_get_named_child_node(dev, "connector");
+
if (!device_property_read_u32(dev, "fcs,operating-sink-microwatt", &v))
chip->tcpc_config.operating_snk_mw = v / 1000;
@@ -1756,22 +1771,17 @@ static int fusb302_probe(struct i2c_client *client,
return -EPROBE_DEFER;
}
- fusb302_debugfs_init(chip);
+ chip->vbus = devm_regulator_get(chip->dev, "vbus");
+ if (IS_ERR(chip->vbus))
+ return PTR_ERR(chip->vbus);
chip->wq = create_singlethread_workqueue(dev_name(chip->dev));
- if (!chip->wq) {
- ret = -ENOMEM;
- goto clear_client_data;
- }
+ if (!chip->wq)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&chip->bc_lvl_handler, fusb302_bc_lvl_handler_work);
init_tcpc_dev(&chip->tcpc_dev);
- chip->vbus = devm_regulator_get(chip->dev, "vbus");
- if (IS_ERR(chip->vbus)) {
- ret = PTR_ERR(chip->vbus);
- goto destroy_workqueue;
- }
-
if (client->irq) {
chip->gpio_int_n_irq = client->irq;
} else {
@@ -1797,15 +1807,15 @@ static int fusb302_probe(struct i2c_client *client,
goto tcpm_unregister_port;
}
enable_irq_wake(chip->gpio_int_n_irq);
+ fusb302_debugfs_init(chip);
+ i2c_set_clientdata(client, chip);
+
return ret;
tcpm_unregister_port:
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue:
destroy_workqueue(chip->wq);
-clear_client_data:
- i2c_set_clientdata(client, NULL);
- fusb302_debugfs_exit(chip);
return ret;
}
@@ -1816,7 +1826,6 @@ static int fusb302_remove(struct i2c_client *client)
tcpm_unregister_port(chip->tcpm_port);
destroy_workqueue(chip->wq);
- i2c_set_clientdata(client, NULL);
fusb302_debugfs_exit(chip);
return 0;
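The probe() reshuffle above follows a common kernel idiom: calls whose effects need no unwinding on failure (i2c_set_clientdata(), debugfs setup) are deferred until after the last point that can fail, so the early error paths can drop their cleanup labels. A minimal sketch of the pattern; the example_* names are illustrative, not from the driver:

#include <linux/i2c.h>
#include <linux/slab.h>

/* Hypothetical driver state and IRQ helper, for illustration only. */
struct example_chip { int irq; };
static int example_request_irq(struct example_chip *chip) { return 0; }

static int example_probe(struct i2c_client *client)
{
	struct example_chip *chip;
	int ret;

	chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	ret = example_request_irq(chip);	/* may fail; nothing to undo */
	if (ret)
		return ret;

	/* Publish state only once probe can no longer fail. */
	i2c_set_clientdata(client, chip);
	return 0;
}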
diff --git a/drivers/usb/typec/fusb302/fusb302_reg.h b/drivers/usb/typec/tcpm/fusb302_reg.h
index 00b39d365478..00b39d365478 100644
--- a/drivers/usb/typec/fusb302/fusb302_reg.h
+++ b/drivers/usb/typec/tcpm/fusb302_reg.h
diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index ac6b418b15f1..ac6b418b15f1 100644
--- a/drivers/usb/typec/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
diff --git a/drivers/usb/typec/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 303ebde26546..303ebde26546 100644
--- a/drivers/usb/typec/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
diff --git a/drivers/usb/typec/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 017389021b96..017389021b96 100644
--- a/drivers/usb/typec/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 4f1f4215f3d6..dbbd71f754d0 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1430,8 +1430,8 @@ static enum pdo_err tcpm_caps_err(struct tcpm_port *port, const u32 *pdo,
if (pdo_apdo_type(pdo[i]) != APDO_TYPE_PPS)
break;
- if (pdo_pps_apdo_max_current(pdo[i]) <
- pdo_pps_apdo_max_current(pdo[i - 1]))
+ if (pdo_pps_apdo_max_voltage(pdo[i]) <
+ pdo_pps_apdo_max_voltage(pdo[i - 1]))
return PDO_ERR_PPS_APDO_NOT_SORTED;
else if (pdo_pps_apdo_min_voltage(pdo[i]) ==
pdo_pps_apdo_min_voltage(pdo[i - 1]) &&
@@ -2209,7 +2209,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
{
unsigned int i, j, max_mw = 0, max_mv = 0;
unsigned int min_src_mv, max_src_mv, src_ma, src_mw;
- unsigned int min_snk_mv, max_snk_mv, snk_ma;
+ unsigned int min_snk_mv, max_snk_mv;
u32 pdo;
unsigned int src_pdo = 0, snk_pdo = 0;
@@ -2253,8 +2253,6 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
pdo_pps_apdo_min_voltage(pdo);
max_snk_mv =
pdo_pps_apdo_max_voltage(pdo);
- snk_ma =
- pdo_pps_apdo_max_current(pdo);
break;
default:
tcpm_log(port,
@@ -2402,7 +2400,7 @@ static int tcpm_pd_send_request(struct tcpm_port *port)
static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
{
- unsigned int out_mv, op_ma, op_mw, min_mv, max_mv, max_ma, flags;
+ unsigned int out_mv, op_ma, op_mw, max_mv, max_ma, flags;
enum pd_pdo_type type;
unsigned int src_pdo_index;
u32 pdo;
@@ -2420,7 +2418,6 @@ static int tcpm_pd_build_pps_request(struct tcpm_port *port, u32 *rdo)
tcpm_log(port, "Invalid APDO selected!");
return -EINVAL;
}
- min_mv = port->pps_data.min_volt;
max_mv = port->pps_data.max_volt;
max_ma = port->pps_data.max_curr;
out_mv = port->pps_data.out_volt;
@@ -4116,6 +4113,9 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
goto port_unlock;
}
+ /* Round down operating current to align with PPS valid steps */
+ op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.op_curr = op_curr;
port->pps_status = 0;
@@ -4169,6 +4169,9 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
goto port_unlock;
}
+ /* Round down output voltage to align with PPS valid steps */
+ out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
+
reinit_completion(&port->pps_complete);
port->pps_data.out_volt = out_volt;
port->pps_status = 0;
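The two rounding hunks above clamp userspace requests onto the PPS granularity defined by the USB PD specification: 20 mV voltage steps and 50 mA current steps, which is what RDO_PROG_VOLT_MV_STEP and RDO_PROG_CURR_MA_STEP encode. A standalone worked example of the round-down:

#include <stdio.h>

#define RDO_PROG_VOLT_MV_STEP 20	/* PPS voltage granularity, mV */
#define RDO_PROG_CURR_MA_STEP 50	/* PPS current granularity, mA */

int main(void)
{
	unsigned int out_volt = 5013;	/* requested output voltage, mV */
	unsigned int op_curr = 2730;	/* requested operating current, mA */

	out_volt -= out_volt % RDO_PROG_VOLT_MV_STEP;	/* 5013 -> 5000 */
	op_curr -= op_curr % RDO_PROG_CURR_MA_STEP;	/* 2730 -> 2700 */

	printf("%u mV, %u mA\n", out_volt, op_curr);
	return 0;
}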
diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..423208e19383 100644
--- a/drivers/usb/typec/typec_wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
diff --git a/drivers/usb/usbip/vudc_main.c b/drivers/usb/usbip/vudc_main.c
index 3fc22037a82f..390733e6937e 100644
--- a/drivers/usb/usbip/vudc_main.c
+++ b/drivers/usb/usbip/vudc_main.c
@@ -73,6 +73,10 @@ static int __init init(void)
cleanup:
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
+ /*
+ * Only call platform_device_del() here; put_vudc_device()
+ * performs the matching platform_device_put().
+ */
platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
@@ -89,7 +93,11 @@ static void __exit cleanup(void)
list_for_each_entry_safe(udc_dev, udc_dev2, &vudc_devices, dev_entry) {
list_del(&udc_dev->dev_entry);
- platform_device_unregister(udc_dev->pdev);
+ /*
+ * Only call platform_device_del() here; put_vudc_device()
+ * performs the matching platform_device_put().
+ */
+ platform_device_del(udc_dev->pdev);
put_vudc_device(udc_dev);
}
platform_driver_unregister(&vudc_driver);
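For context on the two hunks above: platform_device_unregister() is platform_device_del() followed by platform_device_put(). Because put_vudc_device() already performs the final put, keeping the unregister call would drop the reference twice. The equivalence, sketched:

#include <linux/platform_device.h>

/* What platform_device_unregister(pdev) amounts to: */
static void example_unregister(struct platform_device *pdev)
{
	platform_device_del(pdev);	/* detach from the device hierarchy */
	platform_device_put(pdev);	/* drop the final reference */
}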
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index aff50eb09ca9..68ddee86a886 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -189,7 +189,7 @@ struct wusb_mac_scratch {
* NOTE: blen is not aligned to a block size, we'll pad zeros, that's
* what sg[4] is for. Maybe there is a smarter way to do this.
*/
-static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_sync_skcipher *tfm_cbc,
struct crypto_cipher *tfm_aes,
struct wusb_mac_scratch *scratch,
void *mic,
@@ -198,7 +198,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
size_t blen)
{
int result = 0;
- SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
@@ -224,7 +224,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
if (!dst_buf)
goto error_dst_buf;
- iv = kzalloc(crypto_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
+ iv = kzalloc(crypto_sync_skcipher_ivsize(tfm_cbc), GFP_KERNEL);
if (!iv)
goto error_iv;
@@ -251,7 +251,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
- skcipher_request_set_tfm(req, tfm_cbc);
+ skcipher_request_set_sync_tfm(req, tfm_cbc);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
result = crypto_skcipher_encrypt(req);
@@ -298,19 +298,19 @@ ssize_t wusb_prf(void *out, size_t out_size,
{
ssize_t result, bytes = 0, bitr;
struct aes_ccm_nonce n = *_n;
- struct crypto_skcipher *tfm_cbc;
+ struct crypto_sync_skcipher *tfm_cbc;
struct crypto_cipher *tfm_aes;
struct wusb_mac_scratch *scratch;
u64 sfn = 0;
__le64 sfn_le;
- tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm_cbc = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
if (IS_ERR(tfm_cbc)) {
result = PTR_ERR(tfm_cbc);
printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
goto error_alloc_cbc;
}
- result = crypto_skcipher_setkey(tfm_cbc, key, 16);
+ result = crypto_sync_skcipher_setkey(tfm_cbc, key, 16);
if (result < 0) {
printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
goto error_setkey_cbc;
@@ -351,7 +351,7 @@ error_setkey_aes:
crypto_free_cipher(tfm_aes);
error_alloc_aes:
error_setkey_cbc:
- crypto_free_skcipher(tfm_cbc);
+ crypto_free_sync_skcipher(tfm_cbc);
error_alloc_cbc:
return result;
}
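The crypto.c conversion above is the standard migration to the sync skcipher API, which guarantees a synchronous implementation and therefore permits an on-stack request. A hedged sketch of the resulting usage pattern, using the calls that appear in the hunk plus skcipher_request_zero() to scrub the stack request:

#include <crypto/skcipher.h>

static int example_cbc_encrypt(struct scatterlist *src,
			       struct scatterlist *dst,
			       unsigned int len, u8 *iv,
			       const u8 *key, unsigned int keylen)
{
	struct crypto_sync_skcipher *tfm;
	int ret;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (!ret) {
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe key material */
	}
	crypto_free_sync_skcipher(tfm);
	return ret;
}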
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 38884aac862b..a5734cbcd5ad 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -470,9 +470,7 @@ error:
int wa_rpipes_create(struct wahc *wa)
{
wa->rpipes = le16_to_cpu(wa->wa_descr->wNumRPipes);
- wa->rpipe_bm = kcalloc(BITS_TO_LONGS(wa->rpipes),
- sizeof(unsigned long),
- GFP_KERNEL);
+ wa->rpipe_bm = bitmap_zalloc(wa->rpipes, GFP_KERNEL);
if (wa->rpipe_bm == NULL)
return -ENOMEM;
return 0;
@@ -487,7 +485,7 @@ void wa_rpipes_destroy(struct wahc *wa)
dev_err(dev, "BUG: pipes not released on exit: %*pb\n",
wa->rpipes, wa->rpipe_bm);
}
- kfree(wa->rpipe_bm);
+ bitmap_free(wa->rpipe_bm);
}
/*
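The wa-rpipe.c hunks swap an open-coded kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long)) / kfree() pair for the dedicated bitmap helpers; the allocation semantics are unchanged and ordinary bitops still apply. A minimal usage sketch:

#include <linux/bitmap.h>
#include <linux/bitops.h>

static int example_use_bitmap(unsigned int nbits)
{
	unsigned long *bm = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!bm)
		return -ENOMEM;

	set_bit(0, bm);		/* normal bitops work as before */
	bitmap_free(bm);	/* pairs with bitmap_zalloc() */
	return 0;
}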
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 96721b154454..b30926e11d87 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -444,7 +444,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
struct mm_iommu_table_group_mem_t *mem = NULL;
int ret;
unsigned long hpa = 0;
- __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
if (!pua)
return;
@@ -467,8 +467,27 @@ static int tce_iommu_clear(struct tce_container *container,
unsigned long oldhpa;
long ret;
enum dma_data_direction direction;
+ unsigned long lastentry = entry + pages;
+
+ for ( ; entry < lastentry; ++entry) {
+ if (tbl->it_indirect_levels && tbl->it_userspace) {
+ /*
+ * For multilevel tables, we can take a shortcut here
+ * and skip some TCEs as we know that the userspace
+ * address cache is a mirror of the real TCE table:
+ * if it is missing some indirect levels, then the
+ * hardware table does not have them allocated either
+ * and therefore does not require updating.
+ */
+ __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl,
+ entry);
+ if (!pua) {
+ /* align to level_size, which is a power of two */
+ entry |= tbl->it_level_size - 1;
+ continue;
+ }
+ }
- for ( ; pages; --pages, ++entry) {
cond_resched();
direction = DMA_NONE;
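The "entry |= tbl->it_level_size - 1" line relies on it_level_size being a power of two: OR-ing with level_size - 1 sets all of the low bits, jumping entry to the last index of its block, after which the loop's ++entry lands on the first index of the next block. A worked example, assuming a 512-entry level:

static unsigned long example_skip_block(unsigned long entry,
					unsigned long level_size)
{
	entry |= level_size - 1;	/* e.g. 1000 | 511 == 1023 */
	return entry + 1;		/* 1024, first entry of next block */
}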
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 2919e2334052..71ee978c848f 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -111,22 +111,6 @@ config LCD_HP700
If you have an HP Jornada 700 series handheld (710/720/728)
say Y to enable LCD control driver.
-config LCD_S6E63M0
- tristate "S6E63M0 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an S6E63M0 LCD Panel, say Y to enable its
- LCD control driver.
-
-config LCD_LD9040
- tristate "LD9040 AMOLED LCD Driver"
- depends on SPI && BACKLIGHT_CLASS_DEVICE
- default n
- help
- If you have an LD9040 Panel, say Y to enable its
- control driver.
-
config LCD_AMS369FG06
tristate "AMS369FG06 AMOLED LCD Driver"
depends on SPI && BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 0dcc2c745c03..63c507c07437 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -9,13 +9,11 @@ obj-$(CONFIG_LCD_HX8357) += hx8357.o
obj-$(CONFIG_LCD_ILI922X) += ili922x.o
obj-$(CONFIG_LCD_ILI9320) += ili9320.o
obj-$(CONFIG_LCD_L4F00242T03) += l4f00242t03.o
-obj-$(CONFIG_LCD_LD9040) += ld9040.o
obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
obj-$(CONFIG_LCD_LMS501KF03) += lms501kf03.o
obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
obj-$(CONFIG_LCD_OTM3225A) += otm3225a.o
obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
-obj-$(CONFIG_LCD_S6E63M0) += s6e63m0.o
obj-$(CONFIG_LCD_TDO24M) += tdo24m.o
obj-$(CONFIG_LCD_TOSA) += tosa_lcd.o
obj-$(CONFIG_LCD_VGG2432A4) += vgg2432a4.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 35373e2065b2..5e38353b4423 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -391,7 +391,7 @@ static struct platform_driver adp5520_bl_driver = {
module_platform_driver(adp5520_bl_driver);
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index f1dc41cf19e3..85318236da2f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -822,5 +822,5 @@ static struct i2c_driver adp8860_driver = {
module_i2c_driver(adp8860_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8860 Backlight driver");
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 4fec9aa92d9b..8d50e0299578 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -992,5 +992,5 @@ static struct i2c_driver adp8870_driver = {
module_i2c_driver(adp8870_driver);
MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
+MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("ADP8870 Backlight driver");
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
deleted file mode 100644
index 677f8abba27c..000000000000
--- a/drivers/video/backlight/ld9040.c
+++ /dev/null
@@ -1,811 +0,0 @@
-/*
- * ld9040 AMOLED LCD panel driver.
- *
- * Copyright (c) 2011 Samsung Electronics
- * Author: Donghwa Lee <dh09.lee@samsung.com>
- * Derived from drivers/video/backlight/s6e63m0.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/regulator/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "ld9040_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 24
-
-struct ld9040 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
-
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-
- struct mutex lock;
- bool enabled;
-};
-
-static struct regulator_bulk_data supplies[] = {
- { .supply = "vdd3", },
- { .supply = "vci", },
-};
-
-static void ld9040_regulator_enable(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd = NULL;
-
- pd = lcd->lcd_pd;
- mutex_lock(&lcd->lock);
- if (!lcd->enabled) {
- ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = true;
- }
- msleep(pd->power_on_delay);
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static void ld9040_regulator_disable(struct ld9040 *lcd)
-{
- int ret = 0;
-
- mutex_lock(&lcd->lock);
- if (lcd->enabled) {
- ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
- if (ret)
- goto out;
-
- lcd->enabled = false;
- }
-out:
- mutex_unlock(&lcd->lock);
-}
-
-static const unsigned short seq_swreset[] = {
- 0x01, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_user_setting[] = {
- 0xF0, 0x5A,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_elvss_on[] = {
- 0xB1, 0x0D,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x16,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gtcon[] = {
- 0xF7, 0x09,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_panel_condition[] = {
- 0xF8, 0x05,
-
- DATA_ONLY, 0x65,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x71,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x3B,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0x19,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x0D,
- DATA_ONLY, 0xE2,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x7E,
- DATA_ONLY, 0x7D,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_set1[] = {
- 0xF9, 0x00,
-
- DATA_ONLY, 0xA7,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAE,
- DATA_ONLY, 0xBF,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x91,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB2,
- DATA_ONLY, 0xB4,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBB,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xAC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0xAA,
- DATA_ONLY, 0xBC,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xB3,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_ctrl[] = {
- 0xFB, 0x02,
-
- DATA_ONLY, 0x5A,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gamma_start[] = {
- 0xF9, COMMAND_ONLY,
-
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_apon[] = {
- 0xF3, 0x00,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x0A,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_ctrl[] = {
- 0xF2, 0x02,
-
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_manual_pwr[] = {
- 0xB0, 0x04,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_pwr_ctrl[] = {
- 0xF4, 0x0A,
-
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x6A,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x02,
- DATA_ONLY, 0x88,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_out[] = {
- 0x11, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sleep_in[] = {
- 0x10, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_display_off[] = {
- 0x28, COMMAND_ONLY,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_1st_en[] = {
- 0xF3, 0x10,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl1_en[] = {
- 0xF3, 0x11,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl2_en[] = {
- 0xF3, 0x13,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vci1_2nd_en[] = {
- 0xF3, 0x33,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vl3_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vreg1_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x01,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vgl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0x31,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x02,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vmos_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xB1,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vint_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF1,
- /* DATA_ONLY, 0x71, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbh_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xF9,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_vbl_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFD,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gam_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_sd_amp_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x80,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_gls_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x81,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_els_en[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x83,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static const unsigned short seq_el_on[] = {
- 0xF3, 0x37,
-
- DATA_ONLY, 0xFF,
- /* DATA_ONLY, 0x73, VMOS/VBL/VBH not used */
- DATA_ONLY, 0x87,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- /* DATA_ONLY, 0x02, VMOS/VBL/VBH not used */
- ENDDEF, 0x00
-};
-
-static int ld9040_spi_write_byte(struct ld9040 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int ld9040_spi_write(struct ld9040 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = ld9040_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int ld9040_panel_send_sequence(struct ld9040 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = ld9040_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _ld9040_gamma_ctl(struct ld9040 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* start gamma table updating. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_start);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = ld9040_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = ld9040_panel_send_sequence(lcd, seq_gamma_ctrl);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int ld9040_gamma_ctl(struct ld9040 *lcd, int gamma)
-{
- return _ld9040_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-}
-
-static int ld9040_ldi_init(struct ld9040 *lcd)
-{
- int ret, i;
- static const unsigned short *init_seq[] = {
- seq_user_setting,
- seq_panel_condition,
- seq_display_ctrl,
- seq_manual_pwr,
- seq_elvss_on,
- seq_gtcon,
- seq_gamma_set1,
- seq_gamma_ctrl,
- seq_sleep_out,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = ld9040_panel_send_sequence(lcd, init_seq[i]);
- /* workaround: minimum delay time for transferring CMD */
- usleep_range(300, 310);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int ld9040_ldi_enable(struct ld9040 *lcd)
-{
- return ld9040_panel_send_sequence(lcd, seq_display_on);
-}
-
-static int ld9040_ldi_disable(struct ld9040 *lcd)
-{
- int ret;
-
- ret = ld9040_panel_send_sequence(lcd, seq_display_off);
- ret = ld9040_panel_send_sequence(lcd, seq_sleep_in);
-
- return ret;
-}
-
-static int ld9040_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int ld9040_power_on(struct ld9040 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- /* lcd power on */
- ld9040_regulator_enable(lcd);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = ld9040_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = ld9040_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int ld9040_power_off(struct ld9040 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = ld9040_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- /* lcd power off */
- ld9040_regulator_disable(lcd);
-
- return 0;
-}
-
-static int ld9040_power(struct ld9040 *lcd, int power)
-{
- int ret = 0;
-
- if (ld9040_power_is_on(power) && !ld9040_power_is_on(lcd->power))
- ret = ld9040_power_on(lcd);
- else if (!ld9040_power_is_on(power) && ld9040_power_is_on(lcd->power))
- ret = ld9040_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int ld9040_set_power(struct lcd_device *ld, int power)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return ld9040_power(lcd, power);
-}
-
-static int ld9040_get_power(struct lcd_device *ld)
-{
- struct ld9040 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int ld9040_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct ld9040 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = ld9040_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops ld9040_lcd_ops = {
- .set_power = ld9040_set_power,
- .get_power = ld9040_get_power,
-};
-
-static const struct backlight_ops ld9040_backlight_ops = {
- .update_status = ld9040_set_brightness,
-};
-
-static int ld9040_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct ld9040 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* ld9040 lcd panel uses 3-wire 9bits SPI Mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- mutex_init(&lcd->lock);
-
- ret = devm_regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
- if (ret) {
- dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
- return ret;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "ld9040", &spi->dev, lcd,
- &ld9040_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "ld9040-bl", &spi->dev,
- lcd, &ld9040_backlight_ops, &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
- /*
- * if lcd panel was on from bootloader like u-boot then
- * do not lcd on.
- */
- if (!lcd->lcd_pd->lcd_enabled) {
- /*
- * if lcd panel was off from bootloader then
- * current lcd status is powerdown and then
- * it enables lcd panel.
- */
- lcd->power = FB_BLANK_POWERDOWN;
-
- ld9040_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "ld9040 panel driver has been probed.\n");
- return 0;
-}
-
-static int ld9040_remove(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int ld9040_suspend(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
- /*
- * when lcd panel is suspend, lcd panel becomes off
- * regardless of status.
- */
- return ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int ld9040_resume(struct device *dev)
-{
- struct ld9040 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return ld9040_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ld9040_pm_ops, ld9040_suspend, ld9040_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void ld9040_shutdown(struct spi_device *spi)
-{
- struct ld9040 *lcd = spi_get_drvdata(spi);
-
- ld9040_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver ld9040_driver = {
- .driver = {
- .name = "ld9040",
- .pm = &ld9040_pm_ops,
- },
- .probe = ld9040_probe,
- .remove = ld9040_remove,
- .shutdown = ld9040_shutdown,
-};
-
-module_spi_driver(ld9040_driver);
-
-MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
-MODULE_DESCRIPTION("ld9040 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/backlight/ld9040_gamma.h b/drivers/video/backlight/ld9040_gamma.h
deleted file mode 100644
index c5e586d97385..000000000000
--- a/drivers/video/backlight/ld9040_gamma.h
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- * Gamma level definitions.
- *
- * Copyright (c) 2011 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- * Donghwa Lee <dh09.lee@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _LD9040_BRIGHTNESS_H
-#define _LD9040_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 25
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int ld9040_22_300[] = {
- 0x00, 0xa7, 0xb4, 0xae, 0xbf, 0x00, 0x91,
- 0x00, 0xb2, 0xb4, 0xaa, 0xbb, 0x00, 0xac,
- 0x00, 0xb3, 0xb1, 0xaa, 0xbc, 0x00, 0xb3
-};
-
-static const unsigned int ld9040_22_290[] = {
- 0x00, 0xa9, 0xb7, 0xae, 0xbd, 0x00, 0x89,
- 0x00, 0xb7, 0xb6, 0xa8, 0xba, 0x00, 0xa4,
- 0x00, 0xb1, 0xb4, 0xaa, 0xbb, 0x00, 0xaa
-};
-
-static const unsigned int ld9040_22_280[] = {
- 0x00, 0xa9, 0xb6, 0xad, 0xbf, 0x00, 0x86,
- 0x00, 0xb8, 0xb5, 0xa8, 0xbc, 0x00, 0xa0,
- 0x00, 0xb3, 0xb3, 0xa9, 0xbc, 0x00, 0xa7
-};
-
-static const unsigned int ld9040_22_270[] = {
- 0x00, 0xa8, 0xb8, 0xae, 0xbe, 0x00, 0x84,
- 0x00, 0xb9, 0xb7, 0xa8, 0xbc, 0x00, 0x9d,
- 0x00, 0xb2, 0xb5, 0xaa, 0xbc, 0x00, 0xa4
-
-};
-static const unsigned int ld9040_22_260[] = {
- 0x00, 0xa4, 0xb8, 0xb0, 0xbf, 0x00, 0x80,
- 0x00, 0xb8, 0xb6, 0xaa, 0xbc, 0x00, 0x9a,
- 0x00, 0xb0, 0xb5, 0xab, 0xbd, 0x00, 0xa0
-};
-
-static const unsigned int ld9040_22_250[] = {
- 0x00, 0xa4, 0xb9, 0xaf, 0xc1, 0x00, 0x7d,
- 0x00, 0xb9, 0xb6, 0xaa, 0xbb, 0x00, 0x97,
- 0x00, 0xb1, 0xb5, 0xaa, 0xbf, 0x00, 0x9d
-};
-
-static const unsigned int ld9040_22_240[] = {
- 0x00, 0xa2, 0xb9, 0xaf, 0xc2, 0x00, 0x7a,
- 0x00, 0xb9, 0xb7, 0xaa, 0xbd, 0x00, 0x94,
- 0x00, 0xb0, 0xb5, 0xab, 0xbf, 0x00, 0x9a
-};
-
-static const unsigned int ld9040_22_230[] = {
- 0x00, 0xa0, 0xb9, 0xaf, 0xc3, 0x00, 0x77,
- 0x00, 0xb9, 0xb7, 0xab, 0xbe, 0x00, 0x90,
- 0x00, 0xb0, 0xb6, 0xab, 0xbf, 0x00, 0x97
-};
-
-static const unsigned int ld9040_22_220[] = {
- 0x00, 0x9e, 0xba, 0xb0, 0xc2, 0x00, 0x75,
- 0x00, 0xb9, 0xb8, 0xab, 0xbe, 0x00, 0x8e,
- 0x00, 0xb0, 0xb6, 0xac, 0xbf, 0x00, 0x94
-};
-
-static const unsigned int ld9040_22_210[] = {
- 0x00, 0x9c, 0xb9, 0xb0, 0xc4, 0x00, 0x72,
- 0x00, 0xb8, 0xb8, 0xac, 0xbf, 0x00, 0x8a,
- 0x00, 0xb0, 0xb6, 0xac, 0xc0, 0x00, 0x91
-};
-
-static const unsigned int ld9040_22_200[] = {
- 0x00, 0x9a, 0xba, 0xb1, 0xc4, 0x00, 0x6f,
- 0x00, 0xb8, 0xb8, 0xad, 0xc0, 0x00, 0x86,
- 0x00, 0xb0, 0xb7, 0xad, 0xc0, 0x00, 0x8d
-};
-
-static const unsigned int ld9040_22_190[] = {
- 0x00, 0x97, 0xba, 0xb2, 0xc5, 0x00, 0x6c,
- 0x00, 0xb8, 0xb8, 0xae, 0xc1, 0x00, 0x82,
- 0x00, 0xb0, 0xb6, 0xae, 0xc2, 0x00, 0x89
-};
-
-static const unsigned int ld9040_22_180[] = {
- 0x00, 0x93, 0xba, 0xb3, 0xc5, 0x00, 0x69,
- 0x00, 0xb8, 0xb9, 0xae, 0xc1, 0x00, 0x7f,
- 0x00, 0xb0, 0xb6, 0xae, 0xc3, 0x00, 0x85
-};
-
-static const unsigned int ld9040_22_170[] = {
- 0x00, 0x8b, 0xb9, 0xb3, 0xc7, 0x00, 0x65,
- 0x00, 0xb7, 0xb8, 0xaf, 0xc3, 0x00, 0x7a,
- 0x00, 0x80, 0xb6, 0xae, 0xc4, 0x00, 0x81
-};
-
-static const unsigned int ld9040_22_160[] = {
- 0x00, 0x89, 0xba, 0xb3, 0xc8, 0x00, 0x62,
- 0x00, 0xb6, 0xba, 0xaf, 0xc3, 0x00, 0x76,
- 0x00, 0xaf, 0xb7, 0xae, 0xc4, 0x00, 0x7e
-};
-
-static const unsigned int ld9040_22_150[] = {
- 0x00, 0x82, 0xba, 0xb4, 0xc7, 0x00, 0x5f,
- 0x00, 0xb5, 0xba, 0xb0, 0xc3, 0x00, 0x72,
- 0x00, 0xae, 0xb8, 0xb0, 0xc3, 0x00, 0x7a
-};
-
-static const unsigned int ld9040_22_140[] = {
- 0x00, 0x7b, 0xbb, 0xb4, 0xc8, 0x00, 0x5b,
- 0x00, 0xb5, 0xba, 0xb1, 0xc4, 0x00, 0x6e,
- 0x00, 0xae, 0xb9, 0xb0, 0xc5, 0x00, 0x75
-};
-
-static const unsigned int ld9040_22_130[] = {
- 0x00, 0x71, 0xbb, 0xb5, 0xc8, 0x00, 0x57,
- 0x00, 0xb5, 0xbb, 0xb0, 0xc5, 0x00, 0x6a,
- 0x00, 0xae, 0xb9, 0xb1, 0xc6, 0x00, 0x70
-};
-
-static const unsigned int ld9040_22_120[] = {
- 0x00, 0x47, 0xba, 0xb6, 0xca, 0x00, 0x53,
- 0x00, 0xb5, 0xbb, 0xb3, 0xc6, 0x00, 0x65,
- 0x00, 0xae, 0xb8, 0xb3, 0xc7, 0x00, 0x6c
-};
-
-static const unsigned int ld9040_22_110[] = {
- 0x00, 0x13, 0xbb, 0xb7, 0xca, 0x00, 0x4f,
- 0x00, 0xb4, 0xbb, 0xb3, 0xc7, 0x00, 0x60,
- 0x00, 0xad, 0xb8, 0xb4, 0xc7, 0x00, 0x67
-};
-
-static const unsigned int ld9040_22_100[] = {
- 0x00, 0x13, 0xba, 0xb8, 0xcb, 0x00, 0x4b,
- 0x00, 0xb3, 0xbc, 0xb4, 0xc7, 0x00, 0x5c,
- 0x00, 0xac, 0xb8, 0xb4, 0xc8, 0x00, 0x62
-};
-
-static const unsigned int ld9040_22_90[] = {
- 0x00, 0x13, 0xb9, 0xb8, 0xcd, 0x00, 0x46,
- 0x00, 0xb1, 0xbc, 0xb5, 0xc8, 0x00, 0x56,
- 0x00, 0xaa, 0xb8, 0xb4, 0xc9, 0x00, 0x5d
-};
-
-static const unsigned int ld9040_22_80[] = {
- 0x00, 0x13, 0xba, 0xb9, 0xcd, 0x00, 0x41,
- 0x00, 0xb0, 0xbe, 0xb5, 0xc9, 0x00, 0x51,
- 0x00, 0xa9, 0xb9, 0xb5, 0xca, 0x00, 0x57
-};
-
-static const unsigned int ld9040_22_70[] = {
- 0x00, 0x13, 0xb9, 0xb9, 0xd0, 0x00, 0x3c,
- 0x00, 0xaf, 0xbf, 0xb6, 0xcb, 0x00, 0x4b,
- 0x00, 0xa8, 0xb9, 0xb5, 0xcc, 0x00, 0x52
-};
-
-static const unsigned int ld9040_22_50[] = {
- 0x00, 0x13, 0xb2, 0xba, 0xd2, 0x00, 0x30,
- 0x00, 0xaf, 0xc0, 0xb8, 0xcd, 0x00, 0x3d,
- 0x00, 0xa8, 0xb8, 0xb7, 0xcd, 0x00, 0x44
-};
-
-struct ld9040_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
-};
-
-static struct ld9040_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&ld9040_22_50,
- .gamma_22_table[1] = (unsigned int *)&ld9040_22_70,
- .gamma_22_table[2] = (unsigned int *)&ld9040_22_80,
- .gamma_22_table[3] = (unsigned int *)&ld9040_22_90,
- .gamma_22_table[4] = (unsigned int *)&ld9040_22_100,
- .gamma_22_table[5] = (unsigned int *)&ld9040_22_110,
- .gamma_22_table[6] = (unsigned int *)&ld9040_22_120,
- .gamma_22_table[7] = (unsigned int *)&ld9040_22_130,
- .gamma_22_table[8] = (unsigned int *)&ld9040_22_140,
- .gamma_22_table[9] = (unsigned int *)&ld9040_22_150,
- .gamma_22_table[10] = (unsigned int *)&ld9040_22_160,
- .gamma_22_table[11] = (unsigned int *)&ld9040_22_170,
- .gamma_22_table[12] = (unsigned int *)&ld9040_22_180,
- .gamma_22_table[13] = (unsigned int *)&ld9040_22_190,
- .gamma_22_table[14] = (unsigned int *)&ld9040_22_200,
- .gamma_22_table[15] = (unsigned int *)&ld9040_22_210,
- .gamma_22_table[16] = (unsigned int *)&ld9040_22_220,
- .gamma_22_table[17] = (unsigned int *)&ld9040_22_230,
- .gamma_22_table[18] = (unsigned int *)&ld9040_22_240,
- .gamma_22_table[19] = (unsigned int *)&ld9040_22_250,
- .gamma_22_table[20] = (unsigned int *)&ld9040_22_260,
- .gamma_22_table[21] = (unsigned int *)&ld9040_22_270,
- .gamma_22_table[22] = (unsigned int *)&ld9040_22_280,
- .gamma_22_table[23] = (unsigned int *)&ld9040_22_290,
- .gamma_22_table[24] = (unsigned int *)&ld9040_22_300,
-};
-
-#endif
diff --git a/drivers/video/backlight/lm3639_bl.c b/drivers/video/backlight/lm3639_bl.c
index cd50df5807ea..086611c7bc03 100644
--- a/drivers/video/backlight/lm3639_bl.c
+++ b/drivers/video/backlight/lm3639_bl.c
@@ -400,10 +400,8 @@ static int lm3639_remove(struct i2c_client *client)
regmap_write(pchip->regmap, REG_ENABLE, 0x00);
- if (&pchip->cdev_torch)
- led_classdev_unregister(&pchip->cdev_torch);
- if (&pchip->cdev_flash)
- led_classdev_unregister(&pchip->cdev_flash);
+ led_classdev_unregister(&pchip->cdev_torch);
+ led_classdev_unregister(&pchip->cdev_flash);
if (pchip->bled)
device_remove_file(&(pchip->bled->dev), &dev_attr_bled_mode);
return 0;
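The lm3639 hunk above removes checks that could never fail: &pchip->cdev_torch is the address of a member embedded in the structure, so it is non-NULL whenever pchip is valid. An illustrative sketch (types simplified) of why the condition was a tautology:

#include <linux/leds.h>

struct example_chip {
	struct led_classdev cdev_torch;	/* embedded, not a pointer */
};

static void example_remove(struct example_chip *pchip)
{
	/* "if (&pchip->cdev_torch)" is always true; call directly. */
	led_classdev_unregister(&pchip->cdev_torch);
}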
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index bdfcc0a71db1..678b27063198 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -28,10 +28,8 @@
struct pwm_bl_data {
struct pwm_device *pwm;
struct device *dev;
- unsigned int period;
unsigned int lth_brightness;
unsigned int *levels;
- bool enabled;
struct regulator *power_supply;
struct gpio_desc *enable_gpio;
unsigned int scale;
@@ -46,31 +44,35 @@ struct pwm_bl_data {
void (*exit)(struct device *);
};
-static void pwm_backlight_power_on(struct pwm_bl_data *pb, int brightness)
+static void pwm_backlight_power_on(struct pwm_bl_data *pb)
{
+ struct pwm_state state;
int err;
- if (pb->enabled)
+ pwm_get_state(pb->pwm, &state);
+ if (state.enabled)
return;
err = regulator_enable(pb->power_supply);
if (err < 0)
dev_err(pb->dev, "failed to enable power supply\n");
- pwm_enable(pb->pwm);
+ state.enabled = true;
+ pwm_apply_state(pb->pwm, &state);
if (pb->post_pwm_on_delay)
msleep(pb->post_pwm_on_delay);
if (pb->enable_gpio)
gpiod_set_value_cansleep(pb->enable_gpio, 1);
-
- pb->enabled = true;
}
static void pwm_backlight_power_off(struct pwm_bl_data *pb)
{
- if (!pb->enabled)
+ struct pwm_state state;
+
+ pwm_get_state(pb->pwm, &state);
+ if (!state.enabled)
return;
if (pb->enable_gpio)
@@ -79,24 +81,27 @@ static void pwm_backlight_power_off(struct pwm_bl_data *pb)
if (pb->pwm_off_delay)
msleep(pb->pwm_off_delay);
- pwm_config(pb->pwm, 0, pb->period);
- pwm_disable(pb->pwm);
+ state.enabled = false;
+ state.duty_cycle = 0;
+ pwm_apply_state(pb->pwm, &state);
regulator_disable(pb->power_supply);
- pb->enabled = false;
}
static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness)
{
unsigned int lth = pb->lth_brightness;
+ struct pwm_state state;
u64 duty_cycle;
+ pwm_get_state(pb->pwm, &state);
+
if (pb->levels)
duty_cycle = pb->levels[brightness];
else
duty_cycle = brightness;
- duty_cycle *= pb->period - lth;
+ duty_cycle *= state.period - lth;
do_div(duty_cycle, pb->scale);
return duty_cycle + lth;
@@ -106,7 +111,7 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
{
struct pwm_bl_data *pb = bl_get_data(bl);
int brightness = bl->props.brightness;
- int duty_cycle;
+ struct pwm_state state;
if (bl->props.power != FB_BLANK_UNBLANK ||
bl->props.fb_blank != FB_BLANK_UNBLANK ||
@@ -117,9 +122,10 @@ static int pwm_backlight_update_status(struct backlight_device *bl)
brightness = pb->notify(pb->dev, brightness);
if (brightness > 0) {
- duty_cycle = compute_duty_cycle(pb, brightness);
- pwm_config(pb->pwm, duty_cycle, pb->period);
- pwm_backlight_power_on(pb, brightness);
+ pwm_get_state(pb->pwm, &state);
+ state.duty_cycle = compute_duty_cycle(pb, brightness);
+ pwm_apply_state(pb->pwm, &state);
+ pwm_backlight_power_on(pb);
} else
pwm_backlight_power_off(pb);
@@ -447,7 +453,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct pwm_bl_data *pb;
struct pwm_state state;
- struct pwm_args pargs;
unsigned int i;
int ret;
@@ -478,7 +483,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->check_fb = data->check_fb;
pb->exit = data->exit;
pb->dev = &pdev->dev;
- pb->enabled = false;
pb->post_pwm_on_delay = data->post_pwm_on_delay;
pb->pwm_off_delay = data->pwm_off_delay;
@@ -539,10 +543,26 @@ static int pwm_backlight_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "got pwm for backlight\n");
- if (!data->levels) {
- /* Get the PWM period (in nanoseconds) */
- pwm_get_state(pb->pwm, &state);
+ /* Sync up PWM state. */
+ pwm_init_state(pb->pwm, &state);
+ /*
+ * The DT case will set the pwm_period_ns field to 0 and store the
+ * period, parsed from the DT, in the PWM device. For the non-DT case,
+ * set the period from platform data if it has not already been set
+ * via the PWM lookup table.
+ */
+ if (!state.period && (data->pwm_period_ns > 0))
+ state.period = data->pwm_period_ns;
+
+ ret = pwm_apply_state(pb->pwm, &state);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to apply initial PWM state: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (!data->levels) {
ret = pwm_backlight_brightness_default(&pdev->dev, data,
state.period);
if (ret < 0) {
@@ -559,24 +579,7 @@ static int pwm_backlight_probe(struct platform_device *pdev)
pb->levels = data->levels;
}
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pb->pwm);
-
- /*
- * The DT case will set the pwm_period_ns field to 0 and store the
- * period, parsed from the DT, in the PWM device. For the non-DT case,
- * set the period from platform data if it has not already been set
- * via the PWM lookup table.
- */
- pwm_get_args(pb->pwm, &pargs);
- pb->period = pargs.period;
- if (!pb->period && (data->pwm_period_ns > 0))
- pb->period = data->pwm_period_ns;
-
- pb->lth_brightness = data->lth_brightness * (pb->period / pb->scale);
+ pb->lth_brightness = data->lth_brightness * (state.period / pb->scale);
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_RAW;
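The pwm_bl.c changes convert the driver to the atomic PWM API: instead of caching period and enabled in pwm_bl_data and calling pwm_config()/pwm_enable()/pwm_disable(), the current hardware state is read back with pwm_get_state() and committed in a single pwm_apply_state() call. A hedged sketch of the pattern, with field types as in this kernel era:

#include <linux/pwm.h>

static int example_set_duty(struct pwm_device *pwm, unsigned int duty_ns)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* snapshot current settings */
	state.duty_cycle = duty_ns;	/* modify only what changes */
	state.enabled = true;
	return pwm_apply_state(pwm, &state);	/* commit atomically */
}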
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
deleted file mode 100644
index 3c4a22a3063a..000000000000
--- a/drivers/video/backlight/s6e63m0.c
+++ /dev/null
@@ -1,857 +0,0 @@
-/*
- * S6E63M0 AMOLED LCD panel driver.
- *
- * Author: InKi Dae <inki.dae@samsung.com>
- *
- * Derived from drivers/video/omap/lcd-apollon.c
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/backlight.h>
-#include <linux/delay.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/lcd.h>
-#include <linux/module.h>
-#include <linux/spi/spi.h>
-#include <linux/wait.h>
-
-#include "s6e63m0_gamma.h"
-
-#define SLEEPMSEC 0x1000
-#define ENDDEF 0x2000
-#define DEFMASK 0xFF00
-#define COMMAND_ONLY 0xFE
-#define DATA_ONLY 0xFF
-
-#define MIN_BRIGHTNESS 0
-#define MAX_BRIGHTNESS 10
-
-struct s6e63m0 {
- struct device *dev;
- struct spi_device *spi;
- unsigned int power;
- unsigned int current_brightness;
- unsigned int gamma_mode;
- unsigned int gamma_table_count;
- struct lcd_device *ld;
- struct backlight_device *bd;
- struct lcd_platform_data *lcd_pd;
-};
-
-static const unsigned short seq_panel_condition_set[] = {
- 0xF8, 0x01,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x27,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x07,
- DATA_ONLY, 0x54,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x63,
- DATA_ONLY, 0x86,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_condition_set[] = {
- 0xf2, 0x02,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1c,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x10,
-
- 0xf7, 0x03,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_gamma_setting[] = {
- 0xfa, 0x00,
- DATA_ONLY, 0x18,
- DATA_ONLY, 0x08,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x64,
- DATA_ONLY, 0x56,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0xb6,
- DATA_ONLY, 0xba,
- DATA_ONLY, 0xa8,
- DATA_ONLY, 0xac,
- DATA_ONLY, 0xb1,
- DATA_ONLY, 0x9d,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xc1,
- DATA_ONLY, 0xb7,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9c,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x9f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0xd6,
-
- 0xfa, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_etc_condition_set[] = {
- 0xf6, 0x00,
- DATA_ONLY, 0x8c,
- DATA_ONLY, 0x07,
-
- 0xb3, 0xc,
-
- 0xb5, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb6, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb7, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xb8, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xb9, 0x2c,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x0c,
- DATA_ONLY, 0x0a,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0e,
- DATA_ONLY, 0x17,
- DATA_ONLY, 0x13,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x2a,
- DATA_ONLY, 0x24,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x1b,
- DATA_ONLY, 0x1a,
- DATA_ONLY, 0x17,
-
- DATA_ONLY, 0x2b,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x3a,
- DATA_ONLY, 0x34,
- DATA_ONLY, 0x30,
- DATA_ONLY, 0x2c,
- DATA_ONLY, 0x29,
- DATA_ONLY, 0x26,
- DATA_ONLY, 0x25,
- DATA_ONLY, 0x23,
- DATA_ONLY, 0x21,
- DATA_ONLY, 0x20,
- DATA_ONLY, 0x1e,
- DATA_ONLY, 0x1e,
-
- 0xba, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x11,
- DATA_ONLY, 0x22,
- DATA_ONLY, 0x33,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
- DATA_ONLY, 0x44,
-
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x55,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
- DATA_ONLY, 0x66,
-
- 0xc1, 0x4d,
- DATA_ONLY, 0x96,
- DATA_ONLY, 0x1d,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x01,
- DATA_ONLY, 0xdf,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x1f,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x00,
- DATA_ONLY, 0x03,
- DATA_ONLY, 0x06,
- DATA_ONLY, 0x09,
- DATA_ONLY, 0x0d,
- DATA_ONLY, 0x0f,
- DATA_ONLY, 0x12,
- DATA_ONLY, 0x15,
- DATA_ONLY, 0x18,
-
- 0xb2, 0x10,
- DATA_ONLY, 0x10,
- DATA_ONLY, 0x0b,
- DATA_ONLY, 0x05,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_on[] = {
- /* ACL on */
- 0xc0, 0x01,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_acl_off[] = {
- /* ACL off */
- 0xc0, 0x00,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_on[] = {
- /* ELVSS on */
- 0xb1, 0x0b,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_elvss_off[] = {
- /* ELVSS off */
- 0xb1, 0x0a,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_off[] = {
- 0x11, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_stand_by_on[] = {
- 0x10, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-static const unsigned short seq_display_on[] = {
- 0x29, COMMAND_ONLY,
-
- ENDDEF, 0x0000
-};
-
-
-static int s6e63m0_spi_write_byte(struct s6e63m0 *lcd, int addr, int data)
-{
- u16 buf[1];
- struct spi_message msg;
-
- struct spi_transfer xfer = {
- .len = 2,
- .tx_buf = buf,
- };
-
- buf[0] = (addr << 8) | data;
-
- spi_message_init(&msg);
- spi_message_add_tail(&xfer, &msg);
-
- return spi_sync(lcd->spi, &msg);
-}
-
-static int s6e63m0_spi_write(struct s6e63m0 *lcd, unsigned char address,
- unsigned char command)
-{
- int ret = 0;
-
- if (address != DATA_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x0, address);
- if (command != COMMAND_ONLY)
- ret = s6e63m0_spi_write_byte(lcd, 0x1, command);
-
- return ret;
-}
-
-static int s6e63m0_panel_send_sequence(struct s6e63m0 *lcd,
- const unsigned short *wbuf)
-{
- int ret = 0, i = 0;
-
- while ((wbuf[i] & DEFMASK) != ENDDEF) {
- if ((wbuf[i] & DEFMASK) != SLEEPMSEC) {
- ret = s6e63m0_spi_write(lcd, wbuf[i], wbuf[i+1]);
- if (ret)
- break;
- } else {
- msleep(wbuf[i+1]);
- }
- i += 2;
- }
-
- return ret;
-}
-
-static int _s6e63m0_gamma_ctl(struct s6e63m0 *lcd, const unsigned int *gamma)
-{
- unsigned int i = 0;
- int ret = 0;
-
- /* disable gamma table updating. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x00);
- if (ret) {
- dev_err(lcd->dev, "failed to disable gamma table updating.\n");
- goto gamma_err;
- }
-
- for (i = 0 ; i < GAMMA_TABLE_COUNT; i++) {
- ret = s6e63m0_spi_write(lcd, DATA_ONLY, gamma[i]);
- if (ret) {
- dev_err(lcd->dev, "failed to set gamma table.\n");
- goto gamma_err;
- }
- }
-
- /* update gamma table. */
- ret = s6e63m0_spi_write(lcd, 0xfa, 0x01);
- if (ret)
- dev_err(lcd->dev, "failed to update gamma table.\n");
-
-gamma_err:
- return ret;
-}
-
-static int s6e63m0_gamma_ctl(struct s6e63m0 *lcd, int gamma)
-{
- int ret = 0;
-
- ret = _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[gamma]);
-
- return ret;
-}
-
-
-static int s6e63m0_ldi_init(struct s6e63m0 *lcd)
-{
- int ret, i;
- const unsigned short *init_seq[] = {
- seq_panel_condition_set,
- seq_display_condition_set,
- seq_gamma_setting,
- seq_etc_condition_set,
- seq_acl_on,
- seq_elvss_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(init_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, init_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_enable(struct s6e63m0 *lcd)
-{
- int ret = 0, i;
- const unsigned short *enable_seq[] = {
- seq_stand_by_off,
- seq_display_on,
- };
-
- for (i = 0; i < ARRAY_SIZE(enable_seq); i++) {
- ret = s6e63m0_panel_send_sequence(lcd, enable_seq[i]);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-static int s6e63m0_ldi_disable(struct s6e63m0 *lcd)
-{
- int ret;
-
- ret = s6e63m0_panel_send_sequence(lcd, seq_stand_by_on);
-
- return ret;
-}
-
-static int s6e63m0_power_is_on(int power)
-{
- return power <= FB_BLANK_NORMAL;
-}
-
-static int s6e63m0_power_on(struct s6e63m0 *lcd)
-{
- int ret = 0;
- struct lcd_platform_data *pd;
- struct backlight_device *bd;
-
- pd = lcd->lcd_pd;
- bd = lcd->bd;
-
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EINVAL;
- }
-
- pd->power_on(lcd->ld, 1);
- msleep(pd->power_on_delay);
-
- if (!pd->reset) {
- dev_err(lcd->dev, "reset is NULL.\n");
- return -EINVAL;
- }
-
- pd->reset(lcd->ld);
- msleep(pd->reset_delay);
-
- ret = s6e63m0_ldi_init(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to initialize ldi.\n");
- return ret;
- }
-
- ret = s6e63m0_ldi_enable(lcd);
- if (ret) {
- dev_err(lcd->dev, "failed to enable ldi.\n");
- return ret;
- }
-
- /* set brightness to current value after power on or resume. */
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(lcd->dev, "lcd gamma setting failed.\n");
- return ret;
- }
-
- return 0;
-}
-
-static int s6e63m0_power_off(struct s6e63m0 *lcd)
-{
- int ret;
- struct lcd_platform_data *pd;
-
- pd = lcd->lcd_pd;
-
- ret = s6e63m0_ldi_disable(lcd);
- if (ret) {
- dev_err(lcd->dev, "lcd setting failed.\n");
- return -EIO;
- }
-
- msleep(pd->power_off_delay);
-
- pd->power_on(lcd->ld, 0);
-
- return 0;
-}
-
-static int s6e63m0_power(struct s6e63m0 *lcd, int power)
-{
- int ret = 0;
-
- if (s6e63m0_power_is_on(power) && !s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_on(lcd);
- else if (!s6e63m0_power_is_on(power) && s6e63m0_power_is_on(lcd->power))
- ret = s6e63m0_power_off(lcd);
-
- if (!ret)
- lcd->power = power;
-
- return ret;
-}
-
-static int s6e63m0_set_power(struct lcd_device *ld, int power)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- if (power != FB_BLANK_UNBLANK && power != FB_BLANK_POWERDOWN &&
- power != FB_BLANK_NORMAL) {
- dev_err(lcd->dev, "power value should be 0, 1 or 4.\n");
- return -EINVAL;
- }
-
- return s6e63m0_power(lcd, power);
-}
-
-static int s6e63m0_get_power(struct lcd_device *ld)
-{
- struct s6e63m0 *lcd = lcd_get_data(ld);
-
- return lcd->power;
-}
-
-static int s6e63m0_set_brightness(struct backlight_device *bd)
-{
- int ret = 0, brightness = bd->props.brightness;
- struct s6e63m0 *lcd = bl_get_data(bd);
-
- if (brightness < MIN_BRIGHTNESS ||
- brightness > bd->props.max_brightness) {
- dev_err(&bd->dev, "lcd brightness should be %d to %d.\n",
- MIN_BRIGHTNESS, MAX_BRIGHTNESS);
- return -EINVAL;
- }
-
- ret = s6e63m0_gamma_ctl(lcd, bd->props.brightness);
- if (ret) {
- dev_err(&bd->dev, "lcd brightness setting failed.\n");
- return -EIO;
- }
-
- return ret;
-}
-
-static struct lcd_ops s6e63m0_lcd_ops = {
- .set_power = s6e63m0_set_power,
- .get_power = s6e63m0_get_power,
-};
-
-static const struct backlight_ops s6e63m0_backlight_ops = {
- .update_status = s6e63m0_set_brightness,
-};
-
-static ssize_t s6e63m0_sysfs_show_gamma_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[10];
-
- switch (lcd->gamma_mode) {
- case 0:
- sprintf(temp, "2.2 mode\n");
- strcat(buf, temp);
- break;
- case 1:
- sprintf(temp, "1.9 mode\n");
- strcat(buf, temp);
- break;
- case 2:
- sprintf(temp, "1.7 mode\n");
- strcat(buf, temp);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
- break;
- }
-
- return strlen(buf);
-}
-
-static ssize_t s6e63m0_sysfs_store_gamma_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- struct backlight_device *bd = NULL;
- int brightness, rc;
-
- rc = kstrtouint(buf, 0, &lcd->gamma_mode);
- if (rc < 0)
- return rc;
-
- bd = lcd->bd;
-
- brightness = bd->props.brightness;
-
- switch (lcd->gamma_mode) {
- case 0:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- case 1:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_19_table[brightness]);
- break;
- case 2:
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_17_table[brightness]);
- break;
- default:
- dev_info(dev, "gamma mode could be 0:2.2, 1:1.9 or 2:1.7\n");
- _s6e63m0_gamma_ctl(lcd, gamma_table.gamma_22_table[brightness]);
- break;
- }
- return len;
-}
-
-static DEVICE_ATTR(gamma_mode, 0644,
- s6e63m0_sysfs_show_gamma_mode, s6e63m0_sysfs_store_gamma_mode);
-
-static ssize_t s6e63m0_sysfs_show_gamma_table(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
- char temp[3];
-
- sprintf(temp, "%u\n", lcd->gamma_table_count);
- strcpy(buf, temp);
-
- return strlen(buf);
-}
-static DEVICE_ATTR(gamma_table, 0444,
- s6e63m0_sysfs_show_gamma_table, NULL);
-
-static int s6e63m0_probe(struct spi_device *spi)
-{
- int ret = 0;
- struct s6e63m0 *lcd = NULL;
- struct lcd_device *ld = NULL;
- struct backlight_device *bd = NULL;
- struct backlight_properties props;
-
- lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
-
- /* The s6e63m0 LCD panel uses 3-wire, 9-bit SPI mode. */
- spi->bits_per_word = 9;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi setup failed.\n");
- return ret;
- }
-
- lcd->spi = spi;
- lcd->dev = &spi->dev;
-
- lcd->lcd_pd = dev_get_platdata(&spi->dev);
- if (!lcd->lcd_pd) {
- dev_err(&spi->dev, "platform data is NULL.\n");
- return -EINVAL;
- }
-
- ld = devm_lcd_device_register(&spi->dev, "s6e63m0", &spi->dev, lcd,
- &s6e63m0_lcd_ops);
- if (IS_ERR(ld))
- return PTR_ERR(ld);
-
- lcd->ld = ld;
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = MAX_BRIGHTNESS;
-
- bd = devm_backlight_device_register(&spi->dev, "s6e63m0bl-bl",
- &spi->dev, lcd, &s6e63m0_backlight_ops,
- &props);
- if (IS_ERR(bd))
- return PTR_ERR(bd);
-
- bd->props.brightness = MAX_BRIGHTNESS;
- lcd->bd = bd;
-
- /*
- * Export the number of available gamma tables so that
- * userspace can query it.
- */
- lcd->gamma_table_count =
- sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *));
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_mode);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
- ret = device_create_file(&(spi->dev), &dev_attr_gamma_table);
- if (ret < 0)
- dev_err(&(spi->dev), "failed to add sysfs entries\n");
-
- /*
- * If the bootloader (e.g. u-boot) already powered the LCD
- * panel on, do not power it on again.
- */
- if (!lcd->lcd_pd->lcd_enabled) {
- /*
- * The bootloader left the LCD panel off, so mark the
- * current state as powerdown and then enable the panel.
- */
- lcd->power = FB_BLANK_POWERDOWN;
-
- s6e63m0_power(lcd, FB_BLANK_UNBLANK);
- } else {
- lcd->power = FB_BLANK_UNBLANK;
- }
-
- spi_set_drvdata(spi, lcd);
-
- dev_info(&spi->dev, "s6e63m0 panel driver has been probed.\n");
-
- return 0;
-}
-
-static int s6e63m0_remove(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
- device_remove_file(&spi->dev, &dev_attr_gamma_table);
- device_remove_file(&spi->dev, &dev_attr_gamma_mode);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int s6e63m0_suspend(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- dev_dbg(dev, "lcd->power = %d\n", lcd->power);
-
- /*
- * When suspending, the LCD panel is powered off
- * regardless of its current status.
- */
- return s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static int s6e63m0_resume(struct device *dev)
-{
- struct s6e63m0 *lcd = dev_get_drvdata(dev);
-
- lcd->power = FB_BLANK_POWERDOWN;
-
- return s6e63m0_power(lcd, FB_BLANK_UNBLANK);
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(s6e63m0_pm_ops, s6e63m0_suspend, s6e63m0_resume);
-
-/* Power down all displays on reboot, poweroff or halt. */
-static void s6e63m0_shutdown(struct spi_device *spi)
-{
- struct s6e63m0 *lcd = spi_get_drvdata(spi);
-
- s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
-}
-
-static struct spi_driver s6e63m0_driver = {
- .driver = {
- .name = "s6e63m0",
- .pm = &s6e63m0_pm_ops,
- },
- .probe = s6e63m0_probe,
- .remove = s6e63m0_remove,
- .shutdown = s6e63m0_shutdown,
-};
-
-module_spi_driver(s6e63m0_driver);
-
-MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
-MODULE_DESCRIPTION("S6E63M0 LCD Driver");
-MODULE_LICENSE("GPL");
-
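
A note on the blanking convention the deleted driver keyed on: the lcd class hands it fbdev blanking levels, where FB_BLANK_UNBLANK (0) and FB_BLANK_NORMAL (1) count as "panel on" and FB_BLANK_POWERDOWN (4) as "off"; hence the power <= FB_BLANK_NORMAL test and the "0, 1 or 4" error message above. A standalone sketch of that mapping (values mirror include/linux/fb.h):

    #include <stdio.h>

    /* Values mirror include/linux/fb.h. */
    #define FB_BLANK_UNBLANK   0
    #define FB_BLANK_NORMAL    1
    #define FB_BLANK_POWERDOWN 4

    /* Same test as the deleted s6e63m0_power_is_on(). */
    static int power_is_on(int power)
    {
            return power <= FB_BLANK_NORMAL;
    }

    int main(void)
    {
            printf("unblank=%d normal=%d powerdown=%d\n",
                   power_is_on(FB_BLANK_UNBLANK),
                   power_is_on(FB_BLANK_NORMAL),
                   power_is_on(FB_BLANK_POWERDOWN)); /* prints 1 1 0 */
            return 0;
    }
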
diff --git a/drivers/video/backlight/s6e63m0_gamma.h b/drivers/video/backlight/s6e63m0_gamma.h
deleted file mode 100644
index 2c44bdb0696b..000000000000
--- a/drivers/video/backlight/s6e63m0_gamma.h
+++ /dev/null
@@ -1,266 +0,0 @@
-/* linux/drivers/video/samsung/s6e63m0_brightness.h
- *
- * Gamma level definitions.
- *
- * Copyright (c) 2009 Samsung Electronics
- * InKi Dae <inki.dae@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef _S6E63M0_BRIGHTNESS_H
-#define _S6E63M0_BRIGHTNESS_H
-
-#define MAX_GAMMA_LEVEL 11
-#define GAMMA_TABLE_COUNT 21
-
-/* gamma value: 2.2 */
-static const unsigned int s6e63m0_22_300[] = {
- 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
- 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
- 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb
-};
-
-static const unsigned int s6e63m0_22_280[] = {
- 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
- 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
- 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_22_260[] = {
- 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
- 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
- 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-
-};
-
-static const unsigned int s6e63m0_22_240[] = {
- 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
- 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
- 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA
-
-};
-static const unsigned int s6e63m0_22_220[] = {
- 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
- 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
- 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_22_200[] = {
- 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
- 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
- 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_22_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
- 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
- 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_22_140[] = {
- 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
- 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
- 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_22_110[] = {
- 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
- 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
- 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_22_90[] = {
- 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
- 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
- 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_22_30[] = {
- 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
- 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
- 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-/* gamma value: 1.9 */
-static const unsigned int s6e63m0_19_300[] = {
- 0x18, 0x08, 0x24, 0x61, 0x5F, 0x39, 0xBA,
- 0xBD, 0xAD, 0xB1, 0xB6, 0xA5, 0xC4, 0xC5,
- 0xBC, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_19_280[] = {
- 0x18, 0x08, 0x24, 0x61, 0x60, 0x39, 0xBB,
- 0xBE, 0xAD, 0xB2, 0xB6, 0xA6, 0xC5, 0xC7,
- 0xBD, 0x00, 0x9B, 0x00, 0x9E, 0x00, 0xD5
-};
-
-static const unsigned int s6e63m0_19_260[] = {
- 0x18, 0x08, 0x24, 0x63, 0x61, 0x3B, 0xBA,
- 0xBE, 0xAC, 0xB3, 0xB8, 0xA7, 0xC6, 0xC8,
- 0xBD, 0x00, 0x96, 0x00, 0x98, 0x00, 0xCF
-};
-
-static const unsigned int s6e63m0_19_240[] = {
- 0x18, 0x08, 0x24, 0x67, 0x64, 0x3F, 0xBB,
- 0xBE, 0xAD, 0xB3, 0xB9, 0xA7, 0xC8, 0xC9,
- 0xBE, 0x00, 0x90, 0x00, 0x92, 0x00, 0xC8
-};
-
-static const unsigned int s6e63m0_19_220[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x40, 0xBC,
- 0xBF, 0xAF, 0xB4, 0xBA, 0xA9, 0xC8, 0xCA,
- 0xBE, 0x00, 0x8B, 0x00, 0x8C, 0x00, 0xC0
-};
-
-static const unsigned int s6e63m0_19_200[] = {
- 0x18, 0x08, 0x24, 0x68, 0x64, 0x3F, 0xBE,
- 0xC0, 0xB0, 0xB6, 0xBB, 0xAB, 0xC8, 0xCB,
- 0xBF, 0x00, 0x85, 0x00, 0x86, 0x00, 0xB8
-};
-
-static const unsigned int s6e63m0_19_170[] = {
- 0x18, 0x08, 0x24, 0x69, 0x64, 0x40, 0xBF,
- 0xC1, 0xB0, 0xB9, 0xBE, 0xAD, 0xCB, 0xCD,
- 0xC2, 0x00, 0x7A, 0x00, 0x7B, 0x00, 0xAA
-};
-
-static const unsigned int s6e63m0_19_140[] = {
- 0x18, 0x08, 0x24, 0x6E, 0x65, 0x45, 0xC0,
- 0xC3, 0xB2, 0xBA, 0xBE, 0xAE, 0xCD, 0xD0,
- 0xC4, 0x00, 0x70, 0x00, 0x70, 0x00, 0x9C
-};
-
-static const unsigned int s6e63m0_19_110[] = {
- 0x18, 0x08, 0x24, 0x6F, 0x65, 0x46, 0xC2,
- 0xC4, 0xB3, 0xBF, 0xC2, 0xB2, 0xCF, 0xD1,
- 0xC6, 0x00, 0x64, 0x00, 0x64, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_19_90[] = {
- 0x18, 0x08, 0x24, 0x74, 0x60, 0x4A, 0xC3,
- 0xC6, 0xB5, 0xBF, 0xC3, 0xB2, 0xD2, 0xD3,
- 0xC8, 0x00, 0x5B, 0x00, 0x5B, 0x00, 0x81
-};
-
-static const unsigned int s6e63m0_19_30[] = {
- 0x18, 0x08, 0x24, 0x84, 0x45, 0x4F, 0xCA,
- 0xCB, 0xBC, 0xC9, 0xCB, 0xBC, 0xDA, 0xDA,
- 0xD0, 0x00, 0x35, 0x00, 0x34, 0x00, 0x4E
-};
-
-/* gamma value: 1.7 */
-static const unsigned int s6e63m0_17_300[] = {
- 0x18, 0x08, 0x24, 0x70, 0x70, 0x4F, 0xBF,
- 0xC2, 0xB2, 0xB8, 0xBC, 0xAC, 0xCB, 0xCD,
- 0xC3, 0x00, 0xA0, 0x00, 0xA4, 0x00, 0xDB
-};
-
-static const unsigned int s6e63m0_17_280[] = {
- 0x18, 0x08, 0x24, 0x71, 0x71, 0x50, 0xBF,
- 0xC2, 0xB2, 0xBA, 0xBE, 0xAE, 0xCB, 0xCD,
- 0xC3, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6
-};
-
-static const unsigned int s6e63m0_17_260[] = {
- 0x18, 0x08, 0x24, 0x72, 0x72, 0x50, 0xC0,
- 0xC3, 0xB4, 0xB9, 0xBE, 0xAE, 0xCC, 0xCF,
- 0xC4, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1
-};
-
-static const unsigned int s6e63m0_17_240[] = {
- 0x18, 0x08, 0x24, 0x71, 0x72, 0x4F, 0xC2,
- 0xC4, 0xB5, 0xBB, 0xBF, 0xB0, 0xCC, 0xCF,
- 0xC3, 0x00, 0x91, 0x00, 0x95, 0x00, 0xCA
-};
-
-static const unsigned int s6e63m0_17_220[] = {
- 0x18, 0x08, 0x24, 0x71, 0x73, 0x4F, 0xC2,
- 0xC5, 0xB5, 0xBD, 0xC0, 0xB2, 0xCD, 0xD1,
- 0xC5, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2
-};
-
-static const unsigned int s6e63m0_17_200[] = {
- 0x18, 0x08, 0x24, 0x72, 0x75, 0x51, 0xC2,
- 0xC6, 0xB5, 0xBF, 0xC1, 0xB3, 0xCE, 0xD1,
- 0xC6, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA
-};
-
-static const unsigned int s6e63m0_17_170[] = {
- 0x18, 0x08, 0x24, 0x75, 0x77, 0x54, 0xC3,
- 0xC7, 0xB7, 0xC0, 0xC3, 0xB4, 0xD1, 0xD3,
- 0xC9, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB
-};
-
-static const unsigned int s6e63m0_17_140[] = {
- 0x18, 0x08, 0x24, 0x7B, 0x77, 0x58, 0xC3,
- 0xC8, 0xB8, 0xC2, 0xC6, 0xB6, 0xD3, 0xD4,
- 0xCA, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E
-};
-
-static const unsigned int s6e63m0_17_110[] = {
- 0x18, 0x08, 0x24, 0x81, 0x7B, 0x5D, 0xC6,
- 0xCA, 0xBB, 0xC3, 0xC7, 0xB8, 0xD6, 0xD8,
- 0xCD, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D
-};
-
-static const unsigned int s6e63m0_17_90[] = {
- 0x18, 0x08, 0x24, 0x82, 0x7A, 0x5B, 0xC8,
- 0xCB, 0xBD, 0xC5, 0xCA, 0xBA, 0xD6, 0xD8,
- 0xCE, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82
-};
-
-static const unsigned int s6e63m0_17_30[] = {
- 0x18, 0x08, 0x24, 0x8F, 0x73, 0x63, 0xD1,
- 0xD0, 0xC5, 0xCC, 0xD1, 0xC2, 0xDE, 0xE0,
- 0xD6, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51
-};
-
-struct s6e63m0_gamma {
- unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
- unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
-};
-
-static struct s6e63m0_gamma gamma_table = {
- .gamma_22_table[0] = (unsigned int *)&s6e63m0_22_30,
- .gamma_22_table[1] = (unsigned int *)&s6e63m0_22_90,
- .gamma_22_table[2] = (unsigned int *)&s6e63m0_22_110,
- .gamma_22_table[3] = (unsigned int *)&s6e63m0_22_140,
- .gamma_22_table[4] = (unsigned int *)&s6e63m0_22_170,
- .gamma_22_table[5] = (unsigned int *)&s6e63m0_22_200,
- .gamma_22_table[6] = (unsigned int *)&s6e63m0_22_220,
- .gamma_22_table[7] = (unsigned int *)&s6e63m0_22_240,
- .gamma_22_table[8] = (unsigned int *)&s6e63m0_22_260,
- .gamma_22_table[9] = (unsigned int *)&s6e63m0_22_280,
- .gamma_22_table[10] = (unsigned int *)&s6e63m0_22_300,
-
- .gamma_19_table[0] = (unsigned int *)&s6e63m0_19_30,
- .gamma_19_table[1] = (unsigned int *)&s6e63m0_19_90,
- .gamma_19_table[2] = (unsigned int *)&s6e63m0_19_110,
- .gamma_19_table[3] = (unsigned int *)&s6e63m0_19_140,
- .gamma_19_table[4] = (unsigned int *)&s6e63m0_19_170,
- .gamma_19_table[5] = (unsigned int *)&s6e63m0_19_200,
- .gamma_19_table[6] = (unsigned int *)&s6e63m0_19_220,
- .gamma_19_table[7] = (unsigned int *)&s6e63m0_19_240,
- .gamma_19_table[8] = (unsigned int *)&s6e63m0_19_260,
- .gamma_19_table[9] = (unsigned int *)&s6e63m0_19_280,
- .gamma_19_table[10] = (unsigned int *)&s6e63m0_19_300,
-
- .gamma_17_table[0] = (unsigned int *)&s6e63m0_17_30,
- .gamma_17_table[1] = (unsigned int *)&s6e63m0_17_90,
- .gamma_17_table[2] = (unsigned int *)&s6e63m0_17_110,
- .gamma_17_table[3] = (unsigned int *)&s6e63m0_17_140,
- .gamma_17_table[4] = (unsigned int *)&s6e63m0_17_170,
- .gamma_17_table[5] = (unsigned int *)&s6e63m0_17_200,
- .gamma_17_table[6] = (unsigned int *)&s6e63m0_17_220,
- .gamma_17_table[7] = (unsigned int *)&s6e63m0_17_240,
- .gamma_17_table[8] = (unsigned int *)&s6e63m0_17_260,
- .gamma_17_table[9] = (unsigned int *)&s6e63m0_17_280,
- .gamma_17_table[10] = (unsigned int *)&s6e63m0_17_300,
-};
-
-#endif
-
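
For reference, the gamma_table_count computed in the deleted probe path is plain size arithmetic over this header: struct s6e63m0_gamma packs three arrays of MAX_GAMMA_LEVEL (11) pointers, so sizeof(gamma_table) / (MAX_GAMMA_LEVEL * sizeof(int *)) yields 3, one per curve (2.2, 1.9, 1.7). A standalone check of that arithmetic:

    #include <stdio.h>

    #define MAX_GAMMA_LEVEL 11

    /* Mirror of struct s6e63m0_gamma from the deleted header. */
    struct s6e63m0_gamma {
            unsigned int *gamma_22_table[MAX_GAMMA_LEVEL];
            unsigned int *gamma_19_table[MAX_GAMMA_LEVEL];
            unsigned int *gamma_17_table[MAX_GAMMA_LEVEL];
    };

    int main(void)
    {
            /* Same expression the driver used for gamma_table_count. */
            size_t count = sizeof(struct s6e63m0_gamma) /
                           (MAX_GAMMA_LEVEL * sizeof(int *));

            printf("gamma_table_count = %zu\n", count); /* prints 3 */
            return 0;
    }
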
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index f103665cad43..40182ed85648 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -27,7 +27,6 @@
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/console.h>
-#include <asm/io.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
@@ -401,7 +400,7 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
#endif /* CONFIG_PMAC_BACKLIGHT */
#ifdef CONFIG_PPC
- p->screen_base = __ioremap(addr, 0x200000, _PAGE_NO_CACHE);
+ p->screen_base = ioremap_wc(addr, 0x200000);
#else
p->screen_base = ioremap(addr, 0x200000);
#endif
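
The fbdev hunks in this series (chipsfb, controlfb, platinumfb, valkyriefb) move off the powerpc-private __ioremap(addr, size, flags) onto the generic caching-attribute helpers: _PAGE_WRITETHRU callers become ioremap_wt(), while chipsfb's _PAGE_NO_CACHE mapping is relaxed to write-combining via ioremap_wc(), the usual choice for a framebuffer aperture. A minimal before/after sketch (kernel context; the names are placeholders):

    /* Before (powerpc-only):
     *     base = __ioremap(addr, size, _PAGE_NO_CACHE);
     *     fb   = __ioremap(phys, size, _PAGE_WRITETHRU);
     * After (generic helpers):
     */
    void __iomem *base = ioremap_wc(addr, size); /* write-combined */
    void __iomem *fb   = ioremap_wt(phys, size); /* write-through  */
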
diff --git a/drivers/video/fbdev/controlfb.c b/drivers/video/fbdev/controlfb.c
index 8d14b29aafea..9cb0ef7ac29e 100644
--- a/drivers/video/fbdev/controlfb.c
+++ b/drivers/video/fbdev/controlfb.c
@@ -48,9 +48,7 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include <asm/btext.h>
#include "macmodes.h"
@@ -715,8 +713,7 @@ static int __init control_of_init(struct device_node *dp)
goto error_out;
}
/* map at most 8MB for the frame buffer */
- p->frame_buffer = __ioremap(p->frame_buffer_phys, 0x800000,
- _PAGE_WRITETHRU);
+ p->frame_buffer = ioremap_wt(p->frame_buffer_phys, 0x800000);
if (!p->control_regs_phys ||
!request_mem_region(p->control_regs_phys, p->control_regs_size,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index bc9eb8afc313..332a56b6811f 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1925,7 +1925,7 @@ static int __init fsl_diu_init(void)
pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
- np = of_find_node_by_type(NULL, "cpu");
+ np = of_get_cpu_node(0, NULL);
if (!np) {
pr_err("fsl-diu-fb: can't find 'cpu' device node\n");
return -ENODEV;
diff --git a/drivers/video/fbdev/platinumfb.c b/drivers/video/fbdev/platinumfb.c
index 377d3399a3ad..bf6b7fb83cf4 100644
--- a/drivers/video/fbdev/platinumfb.c
+++ b/drivers/video/fbdev/platinumfb.c
@@ -32,9 +32,7 @@
#include <linux/nvram.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <asm/io.h>
#include <asm/prom.h>
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "platinumfb.h"
@@ -577,8 +575,7 @@ static int platinumfb_probe(struct platform_device* odev)
/* frame buffer - map only 4MB */
pinfo->frame_buffer_phys = pinfo->rsrc_fb.start;
- pinfo->frame_buffer = __ioremap(pinfo->rsrc_fb.start, 0x400000,
- _PAGE_WRITETHRU);
+ pinfo->frame_buffer = ioremap_wt(pinfo->rsrc_fb.start, 0x400000);
pinfo->base_frame_buffer = pinfo->frame_buffer;
/* registers */
diff --git a/drivers/video/fbdev/valkyriefb.c b/drivers/video/fbdev/valkyriefb.c
index 275fb98236d3..d51c3a8009cb 100644
--- a/drivers/video/fbdev/valkyriefb.c
+++ b/drivers/video/fbdev/valkyriefb.c
@@ -54,13 +54,11 @@
#include <linux/nvram.h>
#include <linux/adb.h>
#include <linux/cuda.h>
-#include <asm/io.h>
#ifdef CONFIG_MAC
#include <asm/macintosh.h>
#else
#include <asm/prom.h>
#endif
-#include <asm/pgtable.h>
#include "macmodes.h"
#include "valkyriefb.h"
@@ -318,7 +316,7 @@ static void __init valkyrie_choose_mode(struct fb_info_valkyrie *p)
int __init valkyriefb_init(void)
{
struct fb_info_valkyrie *p;
- unsigned long frame_buffer_phys, cmap_regs_phys, flags;
+ unsigned long frame_buffer_phys, cmap_regs_phys;
int err;
char *option = NULL;
@@ -337,7 +335,6 @@ int __init valkyriefb_init(void)
/* Hardcoded addresses... welcome to 68k Macintosh country :-) */
frame_buffer_phys = 0xf9000000;
cmap_regs_phys = 0x50f24000;
- flags = IOMAP_NOCACHE_SER; /* IOMAP_WRITETHROUGH?? */
#else /* ppc (!CONFIG_MAC) */
{
struct device_node *dp;
@@ -354,7 +351,6 @@ int __init valkyriefb_init(void)
frame_buffer_phys = r.start;
cmap_regs_phys = r.start + 0x304000;
- flags = _PAGE_WRITETHRU;
}
#endif /* ppc (!CONFIG_MAC) */
@@ -369,7 +365,11 @@ int __init valkyriefb_init(void)
}
p->total_vram = 0x100000;
p->frame_buffer_phys = frame_buffer_phys;
- p->frame_buffer = __ioremap(frame_buffer_phys, p->total_vram, flags);
+#ifdef CONFIG_MAC
+ p->frame_buffer = ioremap_nocache(frame_buffer_phys, p->total_vram);
+#else
+ p->frame_buffer = ioremap_wt(frame_buffer_phys, p->total_vram);
+#endif
p->cmap_regs_phys = cmap_regs_phys;
p->cmap_regs = ioremap(p->cmap_regs_phys, 0x1000);
p->valkyrie_regs_phys = cmap_regs_phys+0x6000;
diff --git a/drivers/vme/vme.c b/drivers/vme/vme.c
index 92500f6bdad1..520a5f9c27de 100644
--- a/drivers/vme/vme.c
+++ b/drivers/vme/vme.c
@@ -1890,7 +1890,6 @@ static int __vme_register_driver_bus(struct vme_driver *drv,
err_reg:
put_device(&vdev->dev);
- kfree(vdev);
err_devalloc:
list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
list_del(&vdev->drv_list);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 83fc9aab34e8..3099052e1243 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -763,6 +763,8 @@ static int omap_hdq_remove(struct platform_device *pdev)
/* remove module dependency */
pm_runtime_disable(&pdev->dev);
+ w1_remove_master_device(&omap_w1_master);
+
return 0;
}
diff --git a/drivers/w1/slaves/w1_ds2438.c b/drivers/w1/slaves/w1_ds2438.c
index bf641a191d07..7c4e33dbee4d 100644
--- a/drivers/w1/slaves/w1_ds2438.c
+++ b/drivers/w1/slaves/w1_ds2438.c
@@ -186,8 +186,8 @@ static int w1_ds2438_change_config_bit(struct w1_slave *sl, u8 mask, u8 value)
return -1;
}
-static uint16_t w1_ds2438_get_voltage(struct w1_slave *sl,
- int adc_input, uint16_t *voltage)
+static int w1_ds2438_get_voltage(struct w1_slave *sl,
+ int adc_input, uint16_t *voltage)
{
unsigned int retries = W1_DS2438_RETRIES;
u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
@@ -235,6 +235,25 @@ post_unlock:
return ret;
}
+static int w1_ds2438_get_current(struct w1_slave *sl, int16_t *voltage)
+{
+ u8 w1_buf[DS2438_PAGE_SIZE + 1 /*for CRC*/];
+ int ret;
+
+ mutex_lock(&sl->master->bus_mutex);
+
+ if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
+ /* The voltage measured across current sense resistor RSENS. */
+ *voltage = (((int16_t) w1_buf[DS2438_CURRENT_MSB]) << 8) | ((int16_t) w1_buf[DS2438_CURRENT_LSB]);
+ ret = 0;
+ } else
+ ret = -1;
+
+ mutex_unlock(&sl->master->bus_mutex);
+
+ return ret;
+}
+
static ssize_t iad_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -257,6 +276,27 @@ static ssize_t iad_write(struct file *filp, struct kobject *kobj,
return ret;
}
+static ssize_t iad_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct w1_slave *sl = kobj_to_w1_slave(kobj);
+ int ret;
+ int16_t voltage;
+
+ if (off != 0)
+ return 0;
+ if (!buf)
+ return -EINVAL;
+
+ if (w1_ds2438_get_current(sl, &voltage) == 0) {
+ ret = snprintf(buf, count, "%i\n", voltage);
+ } else
+ ret = -EIO;
+
+ return ret;
+}
+
static ssize_t page0_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
@@ -272,9 +312,13 @@ static ssize_t page0_read(struct file *filp, struct kobject *kobj,
mutex_lock(&sl->master->bus_mutex);
+ /* Read no more than page0 size */
+ if (count > DS2438_PAGE_SIZE)
+ count = DS2438_PAGE_SIZE;
+
if (w1_ds2438_get_page(sl, 0, w1_buf) == 0) {
- memcpy(buf, &w1_buf, DS2438_PAGE_SIZE);
- ret = DS2438_PAGE_SIZE;
+ memcpy(buf, &w1_buf, count);
+ ret = count;
} else
ret = -EIO;
@@ -289,7 +333,6 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
int16_t temp;
if (off != 0)
@@ -298,8 +341,7 @@ static ssize_t temperature_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_temperature(sl, &temp) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", temp);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%i\n", temp);
} else
ret = -EIO;
@@ -312,7 +354,6 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
uint16_t voltage;
if (off != 0)
@@ -321,8 +362,7 @@ static ssize_t vad_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VAD, &voltage) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%u\n", voltage);
} else
ret = -EIO;
@@ -335,7 +375,6 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int ret;
- ssize_t c = PAGE_SIZE;
uint16_t voltage;
if (off != 0)
@@ -344,15 +383,14 @@ static ssize_t vdd_read(struct file *filp, struct kobject *kobj,
return -EINVAL;
if (w1_ds2438_get_voltage(sl, DS2438_ADC_INPUT_VDD, &voltage) == 0) {
- c -= snprintf(buf + PAGE_SIZE - c, c, "%d\n", voltage);
- ret = PAGE_SIZE - c;
+ ret = snprintf(buf, count, "%u\n", voltage);
} else
ret = -EIO;
return ret;
}
-static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, NULL, iad_write, 1);
+static BIN_ATTR(iad, S_IRUGO | S_IWUSR | S_IWGRP, iad_read, iad_write, 0);
static BIN_ATTR_RO(page0, DS2438_PAGE_SIZE);
static BIN_ATTR_RO(temperature, 0/* real length varies */);
static BIN_ATTR_RO(vad, 0/* real length varies */);
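
The new iad_read() handler exports the raw DS2438 current register (the voltage sensed across RSENS) as a signed decimal string. A hypothetical userspace reader; the slave id in the path and the I = reg / (4096 * RSENS) conversion from the DS2438 datasheet are assumptions, not something this hunk encodes:

    #include <stdio.h>

    int main(void)
    {
            /* Slave id below is a placeholder. */
            FILE *f = fopen("/sys/bus/w1/devices/26-0000012345678/iad", "r");
            int reg;

            if (!f)
                    return 1;
            if (fscanf(f, "%d", &reg) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            /* Current in amps would be reg / (4096.0 * rsens_ohms). */
            printf("current register: %d\n", reg);
            return 0;
    }
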
diff --git a/fs/aio.c b/fs/aio.c
index b9350f3360c6..301e6314183b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -2135,12 +2135,12 @@ COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout)
+ struct old_timespec32 __user *, timeout)
{
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
@@ -2160,7 +2160,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
compat_long_t, min_nr,
compat_long_t, nr,
struct io_event __user *, events,
- struct compat_timespec __user *, timeout,
+ struct old_timespec32 __user *, timeout,
const struct __compat_aio_sigset __user *, usig)
{
struct __compat_aio_sigset ksig = { NULL, };
@@ -2168,7 +2168,7 @@ COMPAT_SYSCALL_DEFINE6(io_pgetevents,
struct timespec64 t;
int ret;
- if (timeout && compat_get_timespec64(&t, timeout))
+ if (timeout && get_old_timespec32(&t, timeout))
return -EFAULT;
if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f22f77172c5f..181c58b23110 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5764,16 +5764,10 @@ static int btrfs_dentry_delete(const struct dentry *dentry)
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
- struct inode *inode;
-
- inode = btrfs_lookup_dentry(dir, dentry);
- if (IS_ERR(inode)) {
- if (PTR_ERR(inode) == -ENOENT)
- inode = NULL;
- else
- return ERR_CAST(inode);
- }
+ struct inode *inode = btrfs_lookup_dentry(dir, dentry);
+ if (inode == ERR_PTR(-ENOENT))
+ inode = NULL;
return d_splice_alias(inode, dentry);
}
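
The simplified btrfs_lookup() leans on two properties: ERR_PTR-encoded errors can be compared directly against a specific errno, and d_splice_alias() accepts NULL and IS_ERR() inodes alike. A standalone mock of the pointer encoding (userspace approximation of include/linux/err.h):

    #include <errno.h>
    #include <stdio.h>

    /* Userspace approximation: small negative errnos live in the
     * pointer value itself.
     */
    static inline void *ERR_PTR(long error) { return (void *)error; }

    int main(void)
    {
            void *inode = ERR_PTR(-ENOENT);    /* "not found" */

            if (inode == ERR_PTR(-ENOENT))     /* direct comparison */
                    inode = NULL;              /* becomes a negative dentry */
            printf("inode = %p\n", inode);
            return 0;
    }
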
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f1fbea947fef..3e812428ac8d 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -132,7 +132,7 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
- seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+ seq_printf(m, "\tSpeed: %zu bps\n", iface->speed);
seq_puts(m, "\t\tCapabilities: ");
if (iface->rdma_capable)
seq_puts(m, "rdma ");
@@ -285,7 +285,7 @@ skip_rdma:
if ((ses->serverDomain == NULL) ||
(ses->serverOS == NULL) ||
(ses->serverNOS == NULL)) {
- seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d\t",
+ seq_printf(m, "\n%d) Name: %s Uses: %d Capability: 0x%x\tSession Status: %d ",
i, ses->serverName, ses->ses_count,
ses->capabilities, ses->status);
if (ses->session_flags & SMB2_SESSION_FLAG_IS_GUEST)
@@ -296,16 +296,18 @@ skip_rdma:
seq_printf(m,
"\n%d) Name: %s Domain: %s Uses: %d OS:"
" %s\n\tNOS: %s\tCapability: 0x%x\n\tSMB"
- " session status: %d\t",
+ " session status: %d ",
i, ses->serverName, ses->serverDomain,
ses->ses_count, ses->serverOS, ses->serverNOS,
ses->capabilities, ses->status);
}
if (server->rdma)
seq_printf(m, "RDMA\n\t");
- seq_printf(m, "TCP status: %d\n\tLocal Users To "
+ seq_printf(m, "TCP status: %d Instance: %d\n\tLocal Users To "
"Server: %d SecMode: 0x%x Req On Wire: %d",
- server->tcpStatus, server->srv_count,
+ server->tcpStatus,
+ server->reconnect_instance,
+ server->srv_count,
server->sec_mode, in_flight(server));
#ifdef CONFIG_CIFS_STATS2
@@ -352,7 +354,7 @@ skip_rdma:
seq_printf(m, "\n\tServer interfaces: %zu\n",
ses->iface_count);
for (j = 0; j < ses->iface_count; j++) {
- seq_printf(m, "\t%d)\n", j);
+ seq_printf(m, "\t%d)", j);
cifs_dump_iface(m, &ses->iface_list[j]);
}
spin_unlock(&ses->iface_lock);
@@ -383,6 +385,9 @@ static ssize_t cifs_stats_proc_write(struct file *file,
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */
+ atomic_set(&tcpSesReconnectCount, 0);
+ atomic_set(&tconInfoReconnectCount, 0);
+
spin_lock(&GlobalMid_Lock);
GlobalMaxActiveXid = 0;
GlobalCurrentXid = 0;
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index f4f3f0853c6e..631dc1bb21c1 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -47,6 +47,29 @@ extern int cifsFYI;
*/
#ifdef CONFIG_CIFS_DEBUG
+
+/*
+ * When adding tracepoints and debug messages we have various choices.
+ * Some considerations:
+ *
+ * cifs_dbg(VFS, ...) for things we always want logged and the user to see
+ * cifs_info(...) for slightly less important messages; admins can
+ * filter these via loglevel > 6
+ * cifs_dbg(FYI, ...) for minor debugging messages, off by default
+ * trace_smb3_* ftrace functions, preferred for complex debug messages
+ * intended for developers or experienced admins, off by default
+ */
+
+/* Information level messages, minor events */
+#define cifs_info_func(ratefunc, fmt, ...) \
+do { \
+ pr_info_ ## ratefunc("CIFS: " fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define cifs_info(fmt, ...) \
+do { \
+ cifs_info_func(ratelimited, fmt, ##__VA_ARGS__); \
+} while (0)
+
/* information message: e.g., configuration, major event */
#define cifs_dbg_func(ratefunc, type, fmt, ...) \
do { \
@@ -81,6 +104,11 @@ do { \
if (0) \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+
+#define cifs_info(fmt, ...) \
+do { \
+ pr_info("CIFS: "fmt, ##__VA_ARGS__); \
+} while (0)
#endif
#endif /* _H_CIFS_DEBUG */
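
The cifs_info() addition rides on token pasting: cifs_info_func() glues pr_info_ onto its first argument, so passing ratelimited selects pr_info_ratelimited() at preprocessing time (with a plain pr_info() fallback when CONFIG_CIFS_DEBUG is off). A standalone mock of the trick, with printf standing in for the pr_* helpers:

    #include <stdio.h>

    /* printf stands in for the kernel's rate-limited printk variant. */
    #define pr_info_ratelimited(fmt, ...) \
            printf("[ratelimited] " fmt, ##__VA_ARGS__)

    /* Same token-pasting shape as the macro added above. */
    #define cifs_info_func(ratefunc, fmt, ...) \
            pr_info_ ## ratefunc("CIFS: " fmt, ##__VA_ARGS__)
    #define cifs_info(fmt, ...) cifs_info_func(ratelimited, fmt, ##__VA_ARGS__)

    int main(void)
    {
            cifs_info("mounting %s\n", "//server/share");
            return 0;
    }
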
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 6b61df117fd4..b97c74efd04a 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -304,12 +304,17 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
*/
mnt = ERR_PTR(-ENOMEM);
+ cifs_sb = CIFS_SB(mntpt->d_sb);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
+ mnt = ERR_PTR(-EREMOTE);
+ goto cdda_exit;
+ }
+
/* always use tree name prefix */
full_path = build_path_from_dentry_optional_prefix(mntpt, true);
if (full_path == NULL)
goto cdda_exit;
- cifs_sb = CIFS_SB(mntpt->d_sb);
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink)) {
mnt = ERR_CAST(tlink);
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 9731d0d891e7..63d7530f2e1d 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -51,6 +51,7 @@
*/
#define CIFS_MOUNT_UID_FROM_ACL 0x2000000 /* try to get UID via special SID */
#define CIFS_MOUNT_NO_HANDLE_CACHE 0x4000000 /* disable caching dir handles */
+#define CIFS_MOUNT_NO_DFS 0x8000000 /* disable DFS resolving */
struct cifs_sb_info {
struct rb_root tlink_tree;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
index 57ff0756e30c..d8bce2f862de 100644
--- a/fs/cifs/cifs_ioctl.h
+++ b/fs/cifs/cifs_ioctl.h
@@ -43,8 +43,19 @@ struct smb_snapshot_array {
/* snapshots[]; */
} __packed;
+struct smb_query_info {
+ __u32 info_type;
+ __u32 file_info_class;
+ __u32 additional_information;
+ __u32 flags;
+ __u32 input_buffer_length;
+ __u32 output_buffer_length;
+ /* char buffer[]; */
+} __packed;
+
#define CIFS_IOCTL_MAGIC 0xCF
#define CIFS_IOC_COPYCHUNK_FILE _IOW(CIFS_IOCTL_MAGIC, 3, int)
#define CIFS_IOC_SET_INTEGRITY _IO(CIFS_IOCTL_MAGIC, 4)
#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
#define CIFS_ENUMERATE_SNAPSHOTS _IOR(CIFS_IOCTL_MAGIC, 6, struct smb_snapshot_array)
+#define CIFS_QUERY_INFO _IOWR(CIFS_IOCTL_MAGIC, 7, struct smb_query_info)
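
A hypothetical userspace sketch of driving the new passthrough ioctl. The struct mirrors the header above; the trailing buffer layout and the info_type/file_info_class values (SMB2_O_INFO_FILE = 1, FileAllInformation = 18 per MS-SMB2) are assumptions about how the smb2 handler consumes them:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    struct smb_query_info {            /* mirrors fs/cifs/cifs_ioctl.h */
            unsigned int info_type;
            unsigned int file_info_class;
            unsigned int additional_information;
            unsigned int flags;
            unsigned int input_buffer_length;
            unsigned int output_buffer_length;
            /* variable-length buffer follows */
    };
    #define CIFS_QUERY_INFO _IOWR(0xCF, 7, struct smb_query_info)

    int main(int argc, char **argv)
    {
            struct smb_query_info *qi;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDONLY);
            if (fd < 0)
                    return 1;
            qi = calloc(1, sizeof(*qi) + 1024);
            if (!qi)
                    return 1;
            qi->info_type = 1;              /* assumed: SMB2_O_INFO_FILE */
            qi->file_info_class = 18;       /* assumed: FileAllInformation */
            qi->output_buffer_length = 1024;
            if (ioctl(fd, CIFS_QUERY_INFO, qi) < 0)
                    perror("CIFS_QUERY_INFO");
            free(qi);
            return 0;
    }
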
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 7065426b3280..7de9603c54f1 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -81,6 +81,14 @@ module_param(cifs_max_pending, uint, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
"CIFS/SMB1 dialect (N/A for SMB3) "
"Default: 32767 Range: 2 to 32767.");
+#ifdef CONFIG_CIFS_STATS2
+unsigned int slow_rsp_threshold = 1;
+module_param(slow_rsp_threshold, uint, 0644);
+MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
+ "before logging that a response is delayed. "
+ "Default: 1 (if set to 0 disables msg).");
+#endif /* STATS2 */
+
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
@@ -492,6 +500,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
seq_puts(s, ",unix");
else
seq_puts(s, ",nounix");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ seq_puts(s, ",nodfs");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
@@ -707,7 +717,14 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
struct cifs_mnt_data mnt_data;
struct dentry *root;
- cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
+ /*
+ * Log the attempted mount operation: as an FYI debug message
+ * when cifsFYI is enabled, otherwise as a rate-limited info
+ * message.
+ */
+ if (cifsFYI)
+ cifs_dbg(FYI, "Devname: %s flags: %d\n", dev_name, flags);
+ else
+ cifs_info("Attempting to mount %s\n", dev_name);
volume_info = cifs_get_volume_info((char *)data, dev_name, is_smb3);
if (IS_ERR(volume_info))
@@ -1418,6 +1435,11 @@ init_cifs(void)
#ifdef CONFIG_CIFS_STATS2
atomic_set(&totBufAllocCount, 0);
atomic_set(&totSmBufAllocCount, 0);
+ if (slow_rsp_threshold < 1)
+ cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
+ else if (slow_rsp_threshold > 32767)
+ cifs_dbg(VFS,
+ "slow response threshold set higher than recommended (0 to 32767)\n");
#endif /* CONFIG_CIFS_STATS2 */
atomic_set(&midCount, 0);
@@ -1538,11 +1560,11 @@ exit_cifs(void)
cifs_proc_clean();
}
-MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
+MODULE_AUTHOR("Steve French");
MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
- ("VFS to access servers complying with the SNIA CIFS Specification "
- "e.g. Samba and Windows");
+ ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
+ "also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
MODULE_SOFTDEP("pre: arc4");
MODULE_SOFTDEP("pre: des");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index f047e87871a1..24e265a51874 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -148,5 +148,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.13"
+#define CIFS_VERSION "2.14"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 9dcaed031843..ed1e0fcb69e3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -33,6 +33,7 @@
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define SMB_PATH_MAX 260
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -465,6 +466,11 @@ struct smb_version_operations {
enum securityEnum (*select_sectype)(struct TCP_Server_Info *,
enum securityEnum);
int (*next_header)(char *);
+ /* ioctl passthrough for query_info */
+ int (*ioctl_query_info)(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ __le16 *path, int is_dir,
+ unsigned long p);
};
struct smb_version_values {
@@ -654,6 +660,7 @@ struct TCP_Server_Info {
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* for signing, protected by srv_mutex */
+ __u32 reconnect_instance; /* incremented on each reconnect */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
@@ -798,6 +805,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb)
* a single wsize request with a single call.
*/
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
+#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
/*
* Windows only supports a max of 60kb reads and 65535 byte writes. Default to
@@ -924,6 +932,8 @@ struct cifs_tcon {
struct list_head tcon_list;
int tc_count;
struct list_head rlist; /* reconnect list */
+ atomic_t num_local_opens; /* num of all opens including disconnected */
+ atomic_t num_remote_opens; /* num of all network opens on server */
struct list_head openFileList;
spinlock_t open_file_lock; /* protects list above */
struct cifs_ses *ses; /* pointer to session associated with */
@@ -1072,7 +1082,8 @@ struct cifsLockInfo {
__u64 offset;
__u64 length;
__u32 pid;
- __u32 type;
+ __u16 type;
+ __u16 flags;
};
/*
@@ -1715,6 +1726,7 @@ GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
#ifdef CONFIG_CIFS_STATS2
GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
GLOBAL_EXTERN atomic_t totSmBufAllocCount;
+extern unsigned int slow_rsp_threshold; /* number of secs before logging */
#endif
GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 20adda4de83b..fa361bc00602 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -219,7 +219,7 @@ extern void cifs_mark_open_files_invalid(struct cifs_tcon *tcon);
extern void cifs_reopen_persistent_handles(struct cifs_tcon *tcon);
extern bool cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
- __u64 length, __u8 type,
+ __u64 length, __u8 type, __u16 flags,
struct cifsLockInfo **conf_lock,
int rw_check);
extern void cifs_add_pending_open(struct cifs_fid *fid,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 5657b79dbc99..f82fd342bca5 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1607,6 +1607,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
+ .rq_offset = rdata->page_offset,
.rq_npages = rdata->nr_pages,
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
@@ -2210,6 +2211,7 @@ cifs_async_writev(struct cifs_writedata *wdata,
rqst.rq_iov = iov;
rqst.rq_nvec = 2;
rqst.rq_pages = wdata->pages;
+ rqst.rq_offset = wdata->page_offset;
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
@@ -5027,6 +5029,13 @@ oldQFSInfoRetry:
le16_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
+ /*
+ * much prefer larger but if server doesn't report
+ * a valid size then 4K is a reasonable minimum
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le32_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
@@ -5107,6 +5116,13 @@ QFSInfoRetry:
le32_to_cpu(response_data->BytesPerSector) *
le32_to_cpu(response_data->
SectorsPerAllocationUnit);
+ /*
+ * much prefer larger but if server doesn't report
+ * a valid size then 4K is a reasonable minimum
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le64_to_cpu(response_data->TotalAllocationUnits);
FSData->f_bfree = FSData->f_bavail =
@@ -5470,6 +5486,13 @@ QFSPosixRetry:
data_offset);
FSData->f_bsize =
le32_to_cpu(response_data->BlockSize);
+ /*
+ * much prefer larger but if server doesn't report
+ * a valid size then 4K is a reasonable minimum
+ */
+ if (FSData->f_bsize < 512)
+ FSData->f_bsize = 4096;
+
FSData->f_blocks =
le64_to_cpu(response_data->TotalBlocks);
FSData->f_bfree =
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 52d71b64c0c6..d82f0cc71755 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -250,6 +250,7 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_ignore, "dev" },
{ Opt_ignore, "mand" },
{ Opt_ignore, "nomand" },
+ { Opt_ignore, "relatime" },
{ Opt_ignore, "_netdev" },
{ Opt_err, NULL }
@@ -347,7 +348,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
server->maxBuf = 0;
server->max_read = 0;
- cifs_dbg(FYI, "Reconnecting tcp session\n");
+ cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
trace_smb3_reconnect(server->CurrentMid, server->hostname);
/* before reconnecting the tcp session, mark the smb session (uid)
@@ -2396,6 +2397,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
volume_info->target_rfc1001_name, RFC1001_NAME_LEN_WITH_NULL);
tcp_ses->session_estab = false;
tcp_ses->sequence_number = 0;
+ tcp_ses->reconnect_instance = 0;
tcp_ses->lstrp = jiffies;
spin_lock_init(&tcp_ses->req_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
@@ -3085,10 +3087,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
if (rc)
goto out_fail;
- if (volume_info->nodfs) {
- tcon->Flags &= ~SMB_SHARE_IS_IN_DFS;
- cifs_dbg(FYI, "DFS disabled (%d)\n", tcon->Flags);
- }
tcon->use_persistent = false;
/* check if SMB2 or later, CIFS does not support persistent handles */
if (volume_info->persistent) {
@@ -3663,6 +3661,8 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->actimeo = pvolume_info->actimeo;
cifs_sb->local_nls = pvolume_info->local_nls;
+ if (pvolume_info->nodfs)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_DFS;
if (pvolume_info->noperm)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM;
if (pvolume_info->setuids)
@@ -3819,6 +3819,9 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
struct dfs_info3_param *referrals = NULL;
char *full_path = NULL, *ref_path = NULL, *mdata = NULL;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
+ return -EREMOTE;
+
full_path = build_unc_path_to_root(volume_info, cifs_sb);
if (IS_ERR(full_path))
return PTR_ERR(full_path);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 8d41ca7bfcf1..c620d4b5d5d4 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -334,6 +334,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
server->ops->set_fid(cfile, fid, oplock);
list_add(&cfile->tlist, &tcon->openFileList);
+ atomic_inc(&tcon->num_local_opens);
/* if readable file instance put first in list*/
if (file->f_mode & FMODE_READ)
@@ -395,6 +396,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
/* remove it from the lists */
list_del(&cifs_file->flist);
list_del(&cifs_file->tlist);
+ atomic_dec(&tcon->num_local_opens);
if (list_empty(&cifsi->openFileList)) {
cifs_dbg(FYI, "closing last open instance for inode %p\n",
@@ -864,7 +866,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
}
static struct cifsLockInfo *
-cifs_lock_init(__u64 offset, __u64 length, __u8 type)
+cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 flags)
{
struct cifsLockInfo *lock =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -874,6 +876,7 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type)
lock->length = length;
lock->type = type;
lock->pid = current->tgid;
+ lock->flags = flags;
INIT_LIST_HEAD(&lock->blist);
init_waitqueue_head(&lock->block_q);
return lock;
@@ -896,7 +899,8 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
- __u64 length, __u8 type, struct cifsFileInfo *cfile,
+ __u64 length, __u8 type, __u16 flags,
+ struct cifsFileInfo *cfile,
struct cifsLockInfo **conf_lock, int rw_check)
{
struct cifsLockInfo *li;
@@ -918,6 +922,10 @@ cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
((server->ops->compare_fids(cfile, cur_cfile) &&
current->tgid == li->pid) || type == li->type))
continue;
+ if (rw_check == CIFS_LOCK_OP &&
+ (flags & FL_OFDLCK) && (li->flags & FL_OFDLCK) &&
+ server->ops->compare_fids(cfile, cur_cfile))
+ continue;
if (conf_lock)
*conf_lock = li;
return true;
@@ -927,8 +935,8 @@ cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
- __u8 type, struct cifsLockInfo **conf_lock,
- int rw_check)
+ __u8 type, __u16 flags,
+ struct cifsLockInfo **conf_lock, int rw_check)
{
bool rc = false;
struct cifs_fid_locks *cur;
@@ -936,7 +944,8 @@ cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
list_for_each_entry(cur, &cinode->llist, llist) {
rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
- cfile, conf_lock, rw_check);
+ flags, cfile, conf_lock,
+ rw_check);
if (rc)
break;
}
@@ -964,7 +973,8 @@ cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
down_read(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, offset, length, type,
- &conf_lock, CIFS_LOCK_OP);
+ flock->fl_flags, &conf_lock,
+ CIFS_LOCK_OP);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
@@ -1011,7 +1021,8 @@ try_again:
down_write(&cinode->lock_sem);
exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
- lock->type, &conf_lock, CIFS_LOCK_OP);
+ lock->type, lock->flags, &conf_lock,
+ CIFS_LOCK_OP);
if (!exist && cinode->can_cache_brlcks) {
list_add_tail(&lock->llist, &cfile->llist->locks);
up_write(&cinode->lock_sem);
@@ -1321,7 +1332,7 @@ cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
cifs_dbg(FYI, "Lease on file - not implemented yet\n");
if (flock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP |
- FL_ACCESS | FL_LEASE | FL_CLOSE)))
+ FL_ACCESS | FL_LEASE | FL_CLOSE | FL_OFDLCK)))
cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);
*type = server->vals->large_lock_type;
@@ -1584,7 +1595,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
if (lock) {
struct cifsLockInfo *lock;
- lock = cifs_lock_init(flock->fl_start, length, type);
+ lock = cifs_lock_init(flock->fl_start, length, type,
+ flock->fl_flags);
if (!lock)
return -ENOMEM;
@@ -1653,7 +1665,6 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
tcon->ses->server);
-
cifs_sb = CIFS_FILE_SB(file);
netfid = cfile->fid.netfid;
cinode = CIFS_I(file_inode(file));
@@ -2098,6 +2109,7 @@ static int cifs_writepages(struct address_space *mapping,
pgoff_t end, index;
struct cifs_writedata *wdata;
int rc = 0;
+ unsigned int xid;
/*
* If wsize is smaller than the page cache size, default to writing
@@ -2106,6 +2118,7 @@ static int cifs_writepages(struct address_space *mapping,
if (cifs_sb->wsize < PAGE_SIZE)
return generic_writepages(mapping, wbc);
+ xid = get_xid();
if (wbc->range_cyclic) {
index = mapping->writeback_index; /* Start from prev offset */
end = -1;
@@ -2199,6 +2212,7 @@ retry:
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
mapping->writeback_index = index;
+ free_xid(xid);
return rc;
}
@@ -2817,8 +2831,8 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
goto out;
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
- server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP))
+ server->vals->exclusive_lock_type, 0,
+ NULL, CIFS_WRITE_OP))
rc = __generic_file_write_iter(iocb, from);
else
rc = -EACCES;
@@ -3388,7 +3402,7 @@ cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
down_read(&cinode->lock_sem);
if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
tcon->ses->server->vals->shared_lock_type,
- NULL, CIFS_READ_OP))
+ 0, NULL, CIFS_READ_OP))
rc = generic_file_read_iter(iocb, to);
up_read(&cinode->lock_sem);
return rc;
@@ -3743,7 +3757,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
struct TCP_Server_Info *server;
pid_t pid;
+ unsigned int xid;
+ xid = get_xid();
/*
* Reads as many pages as possible from fscache. Returns -ENOBUFS
* immediately if the cookie is negative
@@ -3753,8 +3769,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
*/
rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
&num_pages);
- if (rc == 0)
+ if (rc == 0) {
+ free_xid(xid);
return rc;
+ }
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -3798,6 +3816,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
*/
if (unlikely(rsize < PAGE_SIZE)) {
add_credits_and_wake_if(server, credits, 0);
+ free_xid(xid);
return 0;
}
@@ -3862,6 +3881,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* allocator.
*/
cifs_fscache_readpages_cancel(mapping->host, page_list);
+ free_xid(xid);
return rc;
}
@@ -3889,8 +3909,12 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
else
cifs_dbg(FYI, "Bytes read %d\n", rc);
- file_inode(file)->i_atime =
- current_time(file_inode(file));
+ /* we do not want atime to be less than mtime; it broke some apps */
+ file_inode(file)->i_atime = current_time(file_inode(file));
+ if (timespec64_compare(&(file_inode(file)->i_atime), &(file_inode(file)->i_mtime)) < 0)
+ file_inode(file)->i_atime = file_inode(file)->i_mtime;
+ else
+ file_inode(file)->i_atime = current_time(file_inode(file));
if (PAGE_SIZE > rc)
memset(read_data + rc, 0, PAGE_SIZE - rc);
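
The FL_OFDLCK plumbing above makes CIFS honour open-file-description lock semantics: an OFD lock is owned by the file description rather than the locking thread's tgid, so locks taken through the same description never conflict with each other. A minimal userspace sketch of taking such a lock (l_pid must be 0 for OFD locks):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>

    int main(int argc, char **argv)
    {
            struct flock fl;
            int fd;

            if (argc < 2)
                    return 1;
            fd = open(argv[1], O_RDWR);
            if (fd < 0)
                    return 1;

            memset(&fl, 0, sizeof(fl));   /* l_pid = 0, as required */
            fl.l_type = F_WRLCK;
            fl.l_whence = SEEK_SET;       /* l_start = l_len = 0: whole file */
            if (fcntl(fd, F_OFD_SETLK, &fl) < 0)
                    perror("F_OFD_SETLK");
            else
                    puts("OFD write lock taken");
            return 0;
    }
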
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 6e8765f44508..1023d78673fb 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -162,7 +162,11 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
cifs_revalidate_cache(inode, fattr);
spin_lock(&inode->i_lock);
- inode->i_atime = fattr->cf_atime;
+ /* we do not want atime to be less than mtime; it broke some apps */
+ if (timespec64_compare(&fattr->cf_atime, &fattr->cf_mtime) < 0)
+ inode->i_atime = fattr->cf_mtime;
+ else
+ inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
inode->i_ctime = fattr->cf_ctime;
inode->i_rdev = fattr->cf_rdev;
@@ -777,38 +781,53 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
} else if (rc == -EREMOTE) {
cifs_create_dfs_fattr(&fattr, sb);
rc = 0;
- } else if (rc == -EACCES && backup_cred(cifs_sb)) {
- srchinf = kzalloc(sizeof(struct cifs_search_info),
- GFP_KERNEL);
- if (srchinf == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
- }
+ } else if ((rc == -EACCES) && backup_cred(cifs_sb) &&
+ (strcmp(server->vals->version_string, SMB1_VERSION_STRING)
+ == 0)) {
+ /*
+ * For SMB2 and later the backup intent flag is already
+ * sent if needed on open and there is no path based
+ * FindFirst operation to use to retry with
+ */
- srchinf->endOfSearch = false;
+ srchinf = kzalloc(sizeof(struct cifs_search_info),
+ GFP_KERNEL);
+ if (srchinf == NULL) {
+ rc = -ENOMEM;
+ goto cgii_exit;
+ }
+
+ srchinf->endOfSearch = false;
+ if (tcon->unix_ext)
+ srchinf->info_level = SMB_FIND_FILE_UNIX;
+ else if ((tcon->ses->capabilities &
+ tcon->ses->server->vals->cap_nt_find) == 0)
+ srchinf->info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
srchinf->info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
+ else /* no srvino; plain level is a useful fallback for some NetApp */
+ srchinf->info_level = SMB_FIND_FILE_DIRECTORY_INFO;
- srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
- CIFS_SEARCH_CLOSE_AT_END |
- CIFS_SEARCH_BACKUP_SEARCH;
+ srchflgs = CIFS_SEARCH_CLOSE_ALWAYS |
+ CIFS_SEARCH_CLOSE_AT_END |
+ CIFS_SEARCH_BACKUP_SEARCH;
- rc = CIFSFindFirst(xid, tcon, full_path,
- cifs_sb, NULL, srchflgs, srchinf, false);
- if (!rc) {
- data =
- (FILE_ALL_INFO *)srchinf->srch_entries_start;
+ rc = CIFSFindFirst(xid, tcon, full_path,
+ cifs_sb, NULL, srchflgs, srchinf, false);
+ if (!rc) {
+ data = (FILE_ALL_INFO *)srchinf->srch_entries_start;
- cifs_dir_info_to_fattr(&fattr,
- (FILE_DIRECTORY_INFO *)data, cifs_sb);
- fattr.cf_uniqueid = le64_to_cpu(
- ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
- validinum = true;
+ cifs_dir_info_to_fattr(&fattr,
+ (FILE_DIRECTORY_INFO *)data, cifs_sb);
+ fattr.cf_uniqueid = le64_to_cpu(
+ ((SEARCH_ID_FULL_DIR_INFO *)data)->UniqueId);
+ validinum = true;
- cifs_buf_release(srchinf->ntwrk_buf_start);
- }
- kfree(srchinf);
- if (rc)
- goto cgii_exit;
+ cifs_buf_release(srchinf->ntwrk_buf_start);
+ }
+ kfree(srchinf);
+ if (rc)
+ goto cgii_exit;
} else
goto cgii_exit;
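
Both atime clamps in this series (cifs_readpage_worker() and cifs_fattr_to_inode()) hinge on timespec64_compare() being a three-way comparison returning negative, zero, or positive; the `< 0` test fires only when atime is genuinely older than mtime, whereas plain truthiness would also fire when atime is already newer. A standalone mock of the clamp:

    #include <stdio.h>
    #include <time.h>

    /* Three-way comparison, like the kernel's timespec64_compare(). */
    static int ts_cmp(const struct timespec *a, const struct timespec *b)
    {
            if (a->tv_sec != b->tv_sec)
                    return a->tv_sec < b->tv_sec ? -1 : 1;
            return (a->tv_nsec < b->tv_nsec) ? -1 :
                   (a->tv_nsec > b->tv_nsec) ? 1 : 0;
    }

    int main(void)
    {
            struct timespec atime = { .tv_sec = 100 };
            struct timespec mtime = { .tv_sec = 200 };

            if (ts_cmp(&atime, &mtime) < 0)  /* atime older than mtime */
                    atime = mtime;           /* clamp so atime >= mtime */
            printf("atime.tv_sec = %ld\n", (long)atime.tv_sec); /* 200 */
            return 0;
    }
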
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 54f32f9143a9..76ddd98b6298 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -32,8 +32,51 @@
#include "cifs_debug.h"
#include "cifsfs.h"
#include "cifs_ioctl.h"
+#include "smb2proto.h"
#include <linux/btrfs.h>
+static long cifs_ioctl_query_info(unsigned int xid, struct file *filep,
+ unsigned long p)
+{
+ struct inode *inode = file_inode(filep);
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ struct dentry *dentry = filep->f_path.dentry;
+ unsigned char *path;
+ __le16 *utf16_path = NULL, root_path;
+ int rc = 0;
+
+ path = build_path_from_dentry(dentry);
+ if (path == NULL)
+ return -ENOMEM;
+
+ cifs_dbg(FYI, "%s %s\n", __func__, path);
+
+ if (!path[0]) {
+ root_path = 0;
+ utf16_path = &root_path;
+ } else {
+ utf16_path = cifs_convert_path_to_utf16(path + 1, cifs_sb);
+ if (!utf16_path) {
+ rc = -ENOMEM;
+ goto ici_exit;
+ }
+ }
+
+ if (tcon->ses->server->ops->ioctl_query_info)
+ rc = tcon->ses->server->ops->ioctl_query_info(
+ xid, tcon, utf16_path,
+ filep->private_data ? 0 : 1, p);
+ else
+ rc = -EOPNOTSUPP;
+
+ ici_exit:
+ if (utf16_path != &root_path)
+ kfree(utf16_path);
+ kfree(path);
+ return rc;
+}
+
static long cifs_ioctl_copychunk(unsigned int xid, struct file *dst_file,
unsigned long srcfd)
{
@@ -123,7 +166,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
struct inode *inode = file_inode(filep);
int rc = -ENOTTY; /* strange error - but the precedent */
unsigned int xid;
- struct cifs_sb_info *cifs_sb;
struct cifsFileInfo *pSMBFile = filep->private_data;
struct cifs_tcon *tcon;
__u64 ExtAttrBits = 0;
@@ -131,7 +173,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
xid = get_xid();
- cifs_sb = CIFS_SB(inode->i_sb);
cifs_dbg(FYI, "cifs ioctl 0x%x\n", command);
switch (command) {
case FS_IOC_GETFLAGS:
@@ -196,6 +237,9 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
case CIFS_IOC_COPYCHUNK_FILE:
rc = cifs_ioctl_copychunk(xid, filep, arg);
break;
+ case CIFS_QUERY_INFO:
+ rc = cifs_ioctl_query_info(xid, filep, arg);
+ break;
case CIFS_IOC_SET_INTEGRITY:
if (pSMBFile == NULL)
break;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 6926685e513c..fc43d5d25d1d 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -123,6 +123,8 @@ tconInfoAlloc(void)
ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
GFP_KERNEL);
spin_lock_init(&ret_buf->stat_lock);
+ atomic_set(&ret_buf->num_local_opens, 0);
+ atomic_set(&ret_buf->num_remote_opens, 0);
}
return ret_buf;
}
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index 0ffa18094335..dd10f0ce4cd5 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -33,7 +33,7 @@
/*
* Identifiers for functions that use the open, operation, close pattern
- * in smb2inode.c:smb2_open_op_close()
+ * in smb2inode.c:smb2_compound_op()
*/
#define SMB2_OP_SET_DELETE 1
#define SMB2_OP_SET_INFO 2
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1eef1791d0c4..9e7ef7ec2d70 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -38,54 +38,83 @@
#include "smb2proto.h"
static int
-smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
- struct cifs_sb_info *cifs_sb, const char *full_path,
- __u32 desired_access, __u32 create_disposition,
- __u32 create_options, void *data, int command)
+smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, const char *full_path,
+ __u32 desired_access, __u32 create_disposition,
+ __u32 create_options, void *ptr, int command)
{
- int rc, tmprc = 0;
+ int rc;
__le16 *utf16_path = NULL;
__u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
struct cifs_open_parms oparms;
struct cifs_fid fid;
- bool use_cached_root_handle = false;
-
- if ((strcmp(full_path, "") == 0) && (create_options == 0) &&
- (desired_access == FILE_READ_ATTRIBUTES) &&
- (create_disposition == FILE_OPEN) &&
- (tcon->nohandlecache == false)) {
- rc = open_shroot(xid, tcon, &fid);
- if (rc == 0)
- use_cached_root_handle = true;
- }
+ struct cifs_ses *ses = tcon->ses;
+ struct TCP_Server_Info *server = ses->server;
+ int num_rqst = 0;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct kvec qi_iov[1];
+ struct kvec si_iov[SMB2_SET_INFO_IOV_SIZE];
+ struct kvec close_iov[1];
+ struct smb2_query_info_rsp *qi_rsp = NULL;
+ int flags = 0;
+ __u8 delete_pending[8] = {1, 0, 0, 0, 0, 0, 0, 0};
+ unsigned int size[2];
+ void *data[2];
+ struct smb2_file_rename_info rename_info;
+ struct smb2_file_link_info link_info;
+ int len;
- if (use_cached_root_handle == false) {
- utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
- if (!utf16_path)
- return -ENOMEM;
-
- oparms.tcon = tcon;
- oparms.desired_access = desired_access;
- oparms.disposition = create_disposition;
- oparms.create_options = create_options;
- oparms.fid = &fid;
- oparms.reconnect = false;
-
- rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
- NULL);
- if (rc) {
- kfree(utf16_path);
- return rc;
- }
- }
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ /* Open */
+ utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+ if (!utf16_path)
+ return -ENOMEM;
+
+ oparms.tcon = tcon;
+ oparms.desired_access = desired_access;
+ oparms.disposition = create_disposition;
+ oparms.create_options = create_options;
+ if (backup_cred(cifs_sb))
+ oparms.create_options |= CREATE_OPEN_BACKUP_INTENT;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[num_rqst].rq_iov = open_iov;
+ rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE;
+ rc = SMB2_open_init(tcon, &rqst[num_rqst], &oplock, &oparms,
+ utf16_path);
+ kfree(utf16_path);
+ if (rc)
+ goto finished;
+
+ smb2_set_next_command(server, &rqst[num_rqst++]);
+
+ /* Operation */
switch (command) {
- case SMB2_OP_DELETE:
- break;
case SMB2_OP_QUERY_INFO:
- tmprc = SMB2_query_info(xid, tcon, fid.persistent_fid,
- fid.volatile_fid,
- (struct smb2_file_all_info *)data);
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[num_rqst].rq_iov = qi_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, FILE_ALL_INFORMATION,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb2_file_all_info) +
+ PATH_MAX * 2, 0, NULL);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_DELETE:
break;
case SMB2_OP_MKDIR:
/*
@@ -94,39 +123,156 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon,
*/
break;
case SMB2_OP_RMDIR:
- tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid,
- fid.volatile_fid);
- break;
- case SMB2_OP_RENAME:
- tmprc = SMB2_rename(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, (__le16 *)data);
- break;
- case SMB2_OP_HARDLINK:
- tmprc = SMB2_set_hardlink(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, (__le16 *)data);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ size[0] = 8;
+ data[0] = &delete_pending[0];
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_DISPOSITION_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_EOF:
- tmprc = SMB2_set_eof(xid, tcon, fid.persistent_fid,
- fid.volatile_fid, current->tgid,
- (__le64 *)data, false);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ size[0] = 8; /* sizeof __le64 */
+ data[0] = ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_END_OF_FILE_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
case SMB2_OP_SET_INFO:
- tmprc = SMB2_set_info(xid, tcon, fid.persistent_fid,
- fid.volatile_fid,
- (FILE_BASIC_INFO *)data);
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 1;
+
+ size[0] = sizeof(FILE_BASIC_INFO);
+ data[0] = ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_BASIC_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_RENAME:
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 2;
+
+ len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+
+ rename_info.ReplaceIfExists = 1;
+ rename_info.RootDirectory = 0;
+ rename_info.FileNameLength = cpu_to_le32(len);
+
+ size[0] = sizeof(struct smb2_file_rename_info);
+ data[0] = &rename_info;
+
+ size[1] = len + 2 /* null */;
+ data[1] = (__le16 *)ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_RENAME_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
+ break;
+ case SMB2_OP_HARDLINK:
+ memset(&si_iov, 0, sizeof(si_iov));
+ rqst[num_rqst].rq_iov = si_iov;
+ rqst[num_rqst].rq_nvec = 2;
+
+ len = (2 * UniStrnlen((wchar_t *)ptr, PATH_MAX));
+
+ link_info.ReplaceIfExists = 0;
+ link_info.RootDirectory = 0;
+ link_info.FileNameLength = cpu_to_le32(len);
+
+ size[0] = sizeof(struct smb2_file_link_info);
+ data[0] = &link_info;
+
+ size[1] = len + 2 /* null */;
+ data[1] = (__le16 *)ptr;
+
+ rc = SMB2_set_info_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID, current->tgid,
+ FILE_LINK_INFORMATION,
+ SMB2_O_INFO_FILE, 0, data, size);
+ smb2_set_next_command(server, &rqst[num_rqst]);
+ smb2_set_related(&rqst[num_rqst++]);
break;
default:
cifs_dbg(VFS, "Invalid command\n");
- break;
+ rc = -EINVAL;
}
+ if (rc)
+ goto finished;
- if (use_cached_root_handle)
- close_shroot(&tcon->crfid);
- else
- rc = SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
- if (tmprc)
- rc = tmprc;
- kfree(utf16_path);
+ /* Close */
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[num_rqst].rq_iov = close_iov;
+ rqst[num_rqst].rq_nvec = 1;
+ rc = SMB2_close_init(tcon, &rqst[num_rqst], COMPOUND_FID,
+ COMPOUND_FID);
+ smb2_set_related(&rqst[num_rqst++]);
+ if (rc)
+ goto finished;
+
+ rc = compound_send_recv(xid, ses, flags, num_rqst, rqst,
+ resp_buftype, rsp_iov);
+
+ finished:
+ SMB2_open_free(&rqst[0]);
+ switch (command) {
+ case SMB2_OP_QUERY_INFO:
+ if (rc == 0) {
+ qi_rsp = (struct smb2_query_info_rsp *)
+ rsp_iov[1].iov_base;
+ rc = smb2_validate_and_copy_iov(
+ le16_to_cpu(qi_rsp->OutputBufferOffset),
+ le32_to_cpu(qi_rsp->OutputBufferLength),
+ &rsp_iov[1], sizeof(struct smb2_file_all_info),
+ ptr);
+ }
+ if (rqst[1].rq_iov)
+ SMB2_query_info_free(&rqst[1]);
+ if (rqst[2].rq_iov)
+ SMB2_close_free(&rqst[2]);
+ break;
+ case SMB2_OP_DELETE:
+ case SMB2_OP_MKDIR:
+ if (rqst[1].rq_iov)
+ SMB2_close_free(&rqst[1]);
+ break;
+ case SMB2_OP_HARDLINK:
+ case SMB2_OP_RENAME:
+ case SMB2_OP_RMDIR:
+ case SMB2_OP_SET_EOF:
+ case SMB2_OP_SET_INFO:
+ if (rqst[1].rq_iov)
+ SMB2_set_info_free(&rqst[1]);
+ if (rqst[2].rq_iov)
+ SMB2_close_free(&rqst[2]);
+ break;
+ }
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
return rc;
}
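
The rewritten helper is the heart of this series: instead of one network round trip per PDU (open, then the operation, then close), the three requests are staged into rqst[] and sent as a single compounded chain. smb2_set_next_command() stitches each PDU to the following one, and smb2_set_related() marks the later PDUs to reuse the file handle produced by the create in the same chain (the COMPOUND_FID placeholders). A toy model of that staging pattern, using stub types rather than the kernel API:

	#include <stdio.h>

	/* stub stand-ins for the kernel's smb_rqst and chaining helpers */
	struct toy_rqst {
		const char *pdu;
		int chained;   /* smb2_set_next_command(): NextCommand offset set */
		int related;   /* smb2_set_related(): reuse handle from prior create */
	};

	int main(void)
	{
		struct toy_rqst rqst[3] = {0};
		int num_rqst = 0, i;

		rqst[num_rqst].pdu = "SMB2_CREATE";
		rqst[num_rqst++].chained = 1;

		rqst[num_rqst].pdu = "SMB2_SET_INFO";   /* the middle operation */
		rqst[num_rqst].chained = 1;
		rqst[num_rqst++].related = 1;           /* uses COMPOUND_FID */

		rqst[num_rqst].pdu = "SMB2_CLOSE";
		rqst[num_rqst++].related = 1;

		for (i = 0; i < num_rqst; i++)
			printf("%d: %-13s chained=%d related=%d\n",
			       i, rqst[i].pdu, rqst[i].chained, rqst[i].related);
		return 0;
	}

compound_send_recv() then pushes the whole array in one round trip, and the finished: label unwinds each slot according to which operation was staged.
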
@@ -147,6 +293,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
{
int rc;
struct smb2_file_all_info *smb2_data;
+ __u32 create_options = 0;
*adjust_tz = false;
*symlink = false;
@@ -155,17 +302,21 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
GFP_KERNEL);
if (smb2_data == NULL)
return -ENOMEM;
+ if (backup_cred(cifs_sb))
+ create_options |= CREATE_OPEN_BACKUP_INTENT;
- rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN, 0,
- smb2_data, SMB2_OP_QUERY_INFO);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_READ_ATTRIBUTES, FILE_OPEN, create_options,
+ smb2_data, SMB2_OP_QUERY_INFO);
if (rc == -EOPNOTSUPP) {
*symlink = true;
+ create_options |= OPEN_REPARSE_POINT;
+
/* Failed on a symbolic link - query a reparse point info */
- rc = smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_READ_ATTRIBUTES, FILE_OPEN,
- OPEN_REPARSE_POINT, smb2_data,
- SMB2_OP_QUERY_INFO);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_READ_ATTRIBUTES, FILE_OPEN,
+ create_options, smb2_data,
+ SMB2_OP_QUERY_INFO);
}
if (rc)
goto out;
@@ -180,9 +331,9 @@ int
smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
+ return smb2_compound_op(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+ CREATE_NOT_FILE, NULL, SMB2_OP_MKDIR);
}
void
@@ -199,9 +350,9 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- tmprc = smb2_open_op_close(xid, tcon, cifs_sb, name,
- FILE_WRITE_ATTRIBUTES, FILE_CREATE,
- CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
+ tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
+ FILE_WRITE_ATTRIBUTES, FILE_CREATE,
+ CREATE_NOT_FILE, &data, SMB2_OP_SET_INFO);
if (tmprc == 0)
cifs_i->cifsAttrs = dosattrs;
}
@@ -210,18 +361,18 @@ int
smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_NOT_FILE,
- NULL, SMB2_OP_RMDIR);
+ return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+ CREATE_NOT_FILE,
+ NULL, SMB2_OP_RMDIR);
}
int
smb2_unlink(const unsigned int xid, struct cifs_tcon *tcon, const char *name,
struct cifs_sb_info *cifs_sb)
{
- return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
- CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
- NULL, SMB2_OP_DELETE);
+ return smb2_compound_op(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN,
+ CREATE_DELETE_ON_CLOSE | OPEN_REPARSE_POINT,
+ NULL, SMB2_OP_DELETE);
}
static int
@@ -238,8 +389,8 @@ smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
goto smb2_rename_path;
}
- rc = smb2_open_op_close(xid, tcon, cifs_sb, from_name, access,
- FILE_OPEN, 0, smb2_to_name, command);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, from_name, access,
+ FILE_OPEN, 0, smb2_to_name, command);
smb2_rename_path:
kfree(smb2_to_name);
return rc;
@@ -269,9 +420,10 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc)
{
__le64 eof = cpu_to_le64(size);
- return smb2_open_op_close(xid, tcon, cifs_sb, full_path,
- FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
- SMB2_OP_SET_EOF);
+
+ return smb2_compound_op(xid, tcon, cifs_sb, full_path,
+ FILE_WRITE_DATA, FILE_OPEN, 0, &eof,
+ SMB2_OP_SET_EOF);
}
int
@@ -291,9 +443,9 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- rc = smb2_open_op_close(xid, tlink_tcon(tlink), cifs_sb, full_path,
- FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
- SMB2_OP_SET_INFO);
+ rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ FILE_WRITE_ATTRIBUTES, FILE_OPEN, 0, buf,
+ SMB2_OP_SET_INFO);
cifs_put_tlink(tlink);
return rc;
}
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index 20a2d304c603..d47b7f5dfa6c 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -288,7 +288,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_FLT_BUFFER_TOO_SMALL, -ENOBUFS, "STATUS_FLT_BUFFER_TOO_SMALL"},
{STATUS_FVE_PARTIAL_METADATA, -EIO, "STATUS_FVE_PARTIAL_METADATA"},
{STATUS_UNSUCCESSFUL, -EIO, "STATUS_UNSUCCESSFUL"},
- {STATUS_NOT_IMPLEMENTED, -ENOSYS, "STATUS_NOT_IMPLEMENTED"},
+ {STATUS_NOT_IMPLEMENTED, -EOPNOTSUPP, "STATUS_NOT_IMPLEMENTED"},
{STATUS_INVALID_INFO_CLASS, -EIO, "STATUS_INVALID_INFO_CLASS"},
{STATUS_INFO_LENGTH_MISMATCH, -EIO, "STATUS_INFO_LENGTH_MISMATCH"},
{STATUS_ACCESS_VIOLATION, -EACCES, "STATUS_ACCESS_VIOLATION"},
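
The errno remapping is userspace-visible: -ENOSYS conventionally means "this system call does not exist", while -EOPNOTSUPP accurately describes a server rejecting an individual operation, and it is also the value smb2_query_path_info() above keys its reparse-point retry on. A trivial, self-contained demo of the difference a caller sees:

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* what a CIFS caller now sees when the server rejects an op */
		printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));
		/* what it used to see, which wrongly implies a missing syscall */
		printf("ENOSYS:     %s\n", strerror(ENOSYS));
		return 0;
	}
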
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 89985a0a6819..f85fc5aa2710 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -74,6 +74,12 @@ smb2_add_credits(struct TCP_Server_Info *server, const unsigned int add,
int *val, rc = 0;
spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
+
+ /* e.g. we found a case where a write overlapping a reconnect messed up credits */
+ if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
+ trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+ server->hostname, *val);
+
*val += add;
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
@@ -104,7 +110,12 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
{
spin_lock(&server->req_lock);
server->credits = val;
+ if (val == 1)
+ server->reconnect_instance++;
spin_unlock(&server->req_lock);
+ /* don't log while holding the lock */
+ if (val == 1)
+ cifs_dbg(FYI, "set credits to 1 due to smb2 reconnect\n");
}
static int *
@@ -270,6 +281,31 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
}
static unsigned int
+smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int wsize;
+
+ /* start with specified wsize, or default */
+ wsize = volume_info->wsize ? volume_info->wsize : SMB3_DEFAULT_IOSIZE;
+ wsize = min_t(unsigned int, wsize, server->max_write);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma) {
+ if (server->sign)
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_fragmented_send_size);
+ else
+ wsize = min_t(unsigned int,
+ wsize, server->smbd_conn->max_readwrite_size);
+ }
+#endif
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
+
+ return wsize;
+}
+
+static unsigned int
smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
{
struct TCP_Server_Info *server = tcon->ses->server;
@@ -295,6 +331,31 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return rsize;
}
+static unsigned int
+smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+ unsigned int rsize;
+
+ /* start with specified rsize, or default */
+ rsize = volume_info->rsize ? volume_info->rsize : SMB3_DEFAULT_IOSIZE;
+ rsize = min_t(unsigned int, rsize, server->max_read);
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ if (server->rdma) {
+ if (server->sign)
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_fragmented_recv_size);
+ else
+ rsize = min_t(unsigned int,
+ rsize, server->smbd_conn->max_readwrite_size);
+ }
+#endif
+
+ if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
+
+ return rsize;
+}
static int
parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
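
The SMB3-specific size negotiation lets SMB3 mounts start from the larger SMB3 default I/O size rather than the SMB2 default before clamping down to what the server advertises. A self-contained sketch of the clamp order for the write size (constants mirrored from the kernel headers as assumptions; the SMB Direct clamps are omitted):

	#include <stdio.h>

	#define SMB3_DEFAULT_IOSIZE  (4 * 1024 * 1024)  /* assumed: 4MB */
	#define SMB2_MAX_BUFFER_SIZE (64 * 1024)        /* assumed: 64KB */

	static unsigned int min_u(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	static unsigned int negotiate_wsize(unsigned int requested,
					    unsigned int server_max_write,
					    int large_mtu)
	{
		/* start with the mount option if given, else the SMB3 default */
		unsigned int wsize = requested ? requested : SMB3_DEFAULT_IOSIZE;

		wsize = min_u(wsize, server_max_write);
		if (!large_mtu)
			wsize = min_u(wsize, SMB2_MAX_BUFFER_SIZE);
		return wsize;
	}

	int main(void)
	{
		/* server advertises 1MB writes but no large-MTU capability */
		printf("wsize = %u\n", negotiate_wsize(0, 1024 * 1024, 0));
		return 0;
	}
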
@@ -962,6 +1023,9 @@ smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
seq_printf(m, "\nBytes read: %llu Bytes written: %llu",
(long long)(tcon->bytes_read),
(long long)(tcon->bytes_written));
+ seq_printf(m, "\nOpen files: %d total (local), %d open on server",
+ atomic_read(&tcon->num_local_opens),
+ atomic_read(&tcon->num_remote_opens));
seq_printf(m, "\nTreeConnects: %d total %d failed",
atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
@@ -1057,6 +1121,131 @@ req_res_key_exit:
return rc;
}
+static int
+smb2_ioctl_query_info(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ __le16 *path, int is_dir,
+ unsigned long p)
+{
+ struct cifs_ses *ses = tcon->ses;
+ char __user *arg = (char __user *)p;
+ struct smb_query_info qi;
+ struct smb_query_info __user *pqi;
+ int rc = 0;
+ int flags = 0;
+ struct smb2_query_info_rsp *rsp = NULL;
+ void *buffer = NULL;
+ struct smb_rqst rqst[3];
+ int resp_buftype[3];
+ struct kvec rsp_iov[3];
+ struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
+ struct cifs_open_parms oparms;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ struct cifs_fid fid;
+ struct kvec qi_iov[1];
+ struct kvec close_iov[1];
+
+ memset(rqst, 0, sizeof(rqst));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ memset(rsp_iov, 0, sizeof(rsp_iov));
+
+ if (copy_from_user(&qi, arg, sizeof(struct smb_query_info)))
+ return -EFAULT;
+
+ if (qi.output_buffer_length > 1024)
+ return -EINVAL;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ buffer = kmalloc(qi.output_buffer_length, GFP_KERNEL);
+ if (buffer == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(buffer, arg + sizeof(struct smb_query_info),
+ qi.output_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+
+ /* Open */
+ memset(&open_iov, 0, sizeof(open_iov));
+ rqst[0].rq_iov = open_iov;
+ rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
+
+ memset(&oparms, 0, sizeof(oparms));
+ oparms.tcon = tcon;
+ oparms.desired_access = FILE_READ_ATTRIBUTES | READ_CONTROL;
+ oparms.disposition = FILE_OPEN;
+ if (is_dir)
+ oparms.create_options = CREATE_NOT_FILE;
+ else
+ oparms.create_options = CREATE_NOT_DIR;
+ oparms.fid = &fid;
+ oparms.reconnect = false;
+
+ rc = SMB2_open_init(tcon, &rqst[0], &oplock, &oparms, path);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_next_command(ses->server, &rqst[0]);
+
+ /* Query */
+ memset(&qi_iov, 0, sizeof(qi_iov));
+ rqst[1].rq_iov = qi_iov;
+ rqst[1].rq_nvec = 1;
+
+ rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
+ qi.file_info_class, qi.info_type,
+ qi.additional_information,
+ qi.input_buffer_length,
+ qi.output_buffer_length, buffer);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_next_command(ses->server, &rqst[1]);
+ smb2_set_related(&rqst[1]);
+
+ /* Close */
+ memset(&close_iov, 0, sizeof(close_iov));
+ rqst[2].rq_iov = close_iov;
+ rqst[2].rq_nvec = 1;
+
+ rc = SMB2_close_init(tcon, &rqst[2], COMPOUND_FID, COMPOUND_FID);
+ if (rc)
+ goto iqinf_exit;
+ smb2_set_related(&rqst[2]);
+
+ rc = compound_send_recv(xid, ses, flags, 3, rqst,
+ resp_buftype, rsp_iov);
+ if (rc)
+ goto iqinf_exit;
+ pqi = (struct smb_query_info __user *)arg;
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ if (le32_to_cpu(rsp->OutputBufferLength) < qi.input_buffer_length)
+ qi.input_buffer_length = le32_to_cpu(rsp->OutputBufferLength);
+ if (copy_to_user(&pqi->input_buffer_length, &qi.input_buffer_length,
+ sizeof(qi.input_buffer_length))) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+ if (copy_to_user(pqi + 1, rsp->Buffer, qi.input_buffer_length)) {
+ rc = -EFAULT;
+ goto iqinf_exit;
+ }
+
+ iqinf_exit:
+ kfree(buffer);
+ SMB2_open_free(&rqst[0]);
+ SMB2_query_info_free(&rqst[1]);
+ SMB2_close_free(&rqst[2]);
+ free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ free_rsp_buf(resp_buftype[2], rsp_iov[2].iov_base);
+ return rc;
+}
+
static ssize_t
smb2_copychunk_range(const unsigned int xid,
struct cifsFileInfo *srcfile,
@@ -1301,7 +1490,7 @@ smb2_set_file_size(const unsigned int xid, struct cifs_tcon *tcon,
}
return SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
- cfile->fid.volatile_fid, cfile->pid, &eof, false);
+ cfile->fid.volatile_fid, cfile->pid, &eof);
}
static int
@@ -1556,7 +1745,7 @@ smb2_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid,
CIFS_CACHE_READ(cinode) ? 1 : 0);
}
-static void
+void
smb2_set_related(struct smb_rqst *rqst)
{
struct smb2_sync_hdr *shdr;
@@ -1567,7 +1756,7 @@ smb2_set_related(struct smb_rqst *rqst)
char smb2_padding[7] = {0, 0, 0, 0, 0, 0, 0};
-static void
+void
smb2_set_next_command(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
struct smb2_sync_hdr *shdr;
@@ -1610,7 +1799,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
flags |= CIFS_TRANSFORM_REQ;
memset(rqst, 0, sizeof(rqst));
- memset(resp_buftype, 0, sizeof(resp_buftype));
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
memset(rsp_iov, 0, sizeof(rsp_iov));
memset(&open_iov, 0, sizeof(open_iov));
@@ -1636,7 +1825,8 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_info_init(tcon, &rqst[1], COMPOUND_FID, COMPOUND_FID,
FS_FULL_SIZE_INFORMATION,
SMB2_O_INFO_FILESYSTEM, 0,
- sizeof(struct smb2_fs_full_size_info));
+ sizeof(struct smb2_fs_full_size_info), 0,
+ NULL);
if (rc)
goto qfs_exit;
smb2_set_next_command(server, &rqst[1]);
@@ -3303,6 +3493,7 @@ struct smb_version_operations smb20_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb21_operations = {
@@ -3398,6 +3589,7 @@ struct smb_version_operations smb21_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb30_operations = {
@@ -3425,8 +3617,8 @@ struct smb_version_operations smb30_operations = {
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
- .negotiate_wsize = smb2_negotiate_wsize,
- .negotiate_rsize = smb2_negotiate_rsize,
+ .negotiate_wsize = smb3_negotiate_wsize,
+ .negotiate_rsize = smb3_negotiate_rsize,
.sess_setup = SMB2_sess_setup,
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
@@ -3502,6 +3694,7 @@ struct smb_version_operations smb30_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_operations smb311_operations = {
@@ -3529,8 +3722,8 @@ struct smb_version_operations smb311_operations = {
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
- .negotiate_wsize = smb2_negotiate_wsize,
- .negotiate_rsize = smb2_negotiate_rsize,
+ .negotiate_wsize = smb3_negotiate_wsize,
+ .negotiate_rsize = smb3_negotiate_rsize,
.sess_setup = SMB2_sess_setup,
.logoff = SMB2_logoff,
.tree_connect = SMB2_tcon,
@@ -3607,6 +3800,7 @@ struct smb_version_operations smb311_operations = {
.set_acl = set_smb2_acl,
#endif /* CIFS_ACL */
.next_header = smb2_next_header,
+ .ioctl_query_info = smb2_ioctl_query_info,
};
struct smb_version_values smb20_values = {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index f54d07bda067..7d7b016fe8bb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1478,7 +1478,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
/* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
tcon->tid = 0;
-
+ atomic_set(&tcon->num_remote_opens, 0);
rc = smb2_plain_req_init(SMB2_TREE_CONNECT, tcon, (void **) &req,
&total_len);
if (rc) {
@@ -2243,10 +2243,12 @@ SMB2_open_free(struct smb_rqst *rqst)
{
int i;
- cifs_small_buf_release(rqst->rq_iov[0].iov_base);
- for (i = 1; i < rqst->rq_nvec; i++)
- if (rqst->rq_iov[i].iov_base != smb2_padding)
- kfree(rqst->rq_iov[i].iov_base);
+ if (rqst && rqst->rq_iov) {
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
+ }
}
int
@@ -2261,7 +2263,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
struct cifs_ses *ses = tcon->ses;
struct kvec iov[SMB2_CREATE_IOV_SIZE];
struct kvec rsp_iov = {NULL, 0};
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
int flags = 0;
@@ -2303,6 +2305,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
ses->Suid, oparms->create_options,
oparms->desired_access);
+ atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
@@ -2474,13 +2477,13 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
goto ioctl_exit;
}
- *out_data = kmalloc(*plen, GFP_KERNEL);
+ *out_data = kmemdup((char *)rsp + le32_to_cpu(rsp->OutputOffset),
+ *plen, GFP_KERNEL);
if (*out_data == NULL) {
rc = -ENOMEM;
goto ioctl_exit;
}
- memcpy(*out_data, (char *)rsp + le32_to_cpu(rsp->OutputOffset), *plen);
ioctl_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
@@ -2535,7 +2538,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
void
SMB2_close_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
int
@@ -2547,7 +2551,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
struct kvec rsp_iov;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
int rc = 0;
cifs_dbg(FYI, "Close\n");
@@ -2577,6 +2581,8 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
goto close_exit;
}
+ atomic_dec(&tcon->num_remote_opens);
+
/* BB FIXME - decode close response, update inode for caching */
close_exit:
@@ -2627,10 +2633,10 @@ smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
* If SMB buffer fields are valid, copy into temporary buffer to hold result.
* Caller must free buffer.
*/
-static int
-validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
- struct kvec *iov, unsigned int minbufsize,
- char *data)
+int
+smb2_validate_and_copy_iov(unsigned int offset, unsigned int buffer_length,
+ struct kvec *iov, unsigned int minbufsize,
+ char *data)
{
char *begin_of_buf = offset + (char *)iov->iov_base;
int rc;
@@ -2651,7 +2657,7 @@ int
SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type, u32 additional_info,
- size_t output_len)
+ size_t output_len, size_t input_len, void *input)
{
struct smb2_query_info_req *req;
struct kvec *iov = rqst->rq_iov;
@@ -2669,23 +2675,25 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
req->VolatileFileId = volatile_fid;
req->AdditionalInformation = cpu_to_le32(additional_info);
- /*
- * We do not use the input buffer (do not send extra byte)
- */
- req->InputBufferOffset = 0;
-
req->OutputBufferLength = cpu_to_le32(output_len);
+ if (input_len) {
+ req->InputBufferLength = cpu_to_le32(input_len);
+ /* total_len for smb query request never comes close to le16 max */
+ req->InputBufferOffset = cpu_to_le16(total_len - 1);
+ memcpy(req->Buffer, input, input_len);
+ }
iov[0].iov_base = (char *)req;
/* 1 for Buffer */
- iov[0].iov_len = total_len - 1;
+ iov[0].iov_len = total_len - 1 + input_len;
return 0;
}
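
The new input-buffer path in SMB2_query_info_init() relies on total_len from smb2_plain_req_init() already counting the 1-byte Buffer[] placeholder at the tail of the fixed request: the inline payload overwrites that placeholder, so its wire offset is total_len - 1 and the single iov grows to total_len - 1 + input_len bytes. The cpu_to_le16() cast is safe because, as the comment above notes, a query request's header never approaches the 16-bit offset limit.
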
void
SMB2_query_info_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
static int
@@ -2699,7 +2707,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[1];
struct kvec rsp_iov;
int rc = 0;
- int resp_buftype;
+ int resp_buftype = CIFS_NO_BUFFER;
struct cifs_ses *ses = tcon->ses;
int flags = 0;
@@ -2718,7 +2726,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_query_info_init(tcon, &rqst, persistent_fid, volatile_fid,
info_class, info_type, additional_info,
- output_len);
+ output_len, 0, NULL);
if (rc)
goto qinf_exit;
@@ -2746,9 +2754,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
}
}
- rc = validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength),
- &rsp_iov, min_len, *data);
+ rc = smb2_validate_and_copy_iov(le16_to_cpu(rsp->OutputBufferOffset),
+ le32_to_cpu(rsp->OutputBufferLength),
+ &rsp_iov, min_len, *data);
qinf_exit:
SMB2_query_info_free(&rqst);
@@ -3754,45 +3762,22 @@ qdir_exit:
return rc;
}
-static int
-send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+int
+SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
- u8 info_type, u32 additional_info, unsigned int num,
+ u8 info_type, u32 additional_info,
void **data, unsigned int *size)
{
- struct smb_rqst rqst;
struct smb2_set_info_req *req;
- struct smb2_set_info_rsp *rsp = NULL;
- struct kvec *iov;
- struct kvec rsp_iov;
- int rc = 0;
- int resp_buftype;
- unsigned int i;
- struct cifs_ses *ses = tcon->ses;
- int flags = 0;
- unsigned int total_len;
-
- if (!ses || !(ses->server))
- return -EIO;
-
- if (!num)
- return -EINVAL;
-
- iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
- if (!iov)
- return -ENOMEM;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned int i, total_len;
+ int rc;
rc = smb2_plain_req_init(SMB2_SET_INFO, tcon, (void **) &req, &total_len);
- if (rc) {
- kfree(iov);
+ if (rc)
return rc;
- }
-
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
req->sync_hdr.ProcessId = cpu_to_le32(pid);
-
req->InfoType = info_type;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
@@ -3810,19 +3795,66 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- for (i = 1; i < num; i++) {
+ for (i = 1; i < rqst->rq_nvec; i++) {
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
+ return 0;
+}
+
+void
+SMB2_set_info_free(struct smb_rqst *rqst)
+{
+ if (rqst && rqst->rq_iov)
+ cifs_buf_release(rqst->rq_iov[0].iov_base); /* request */
+}
+
+static int
+send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, u32 pid, u8 info_class,
+ u8 info_type, u32 additional_info, unsigned int num,
+ void **data, unsigned int *size)
+{
+ struct smb_rqst rqst;
+ struct smb2_set_info_rsp *rsp = NULL;
+ struct kvec *iov;
+ struct kvec rsp_iov;
+ int rc = 0;
+ int resp_buftype;
+ struct cifs_ses *ses = tcon->ses;
+ int flags = 0;
+
+ if (!ses || !(ses->server))
+ return -EIO;
+
+ if (!num)
+ return -EINVAL;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+ iov = kmalloc_array(num, sizeof(struct kvec), GFP_KERNEL);
+ if (!iov)
+ return -ENOMEM;
+
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
rqst.rq_nvec = num;
+ rc = SMB2_set_info_init(tcon, &rqst, persistent_fid, volatile_fid, pid,
+ info_class, info_type, additional_info,
+ data, size);
+ if (rc) {
+ kfree(iov);
+ return rc;
+ }
+
rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_buf_release(req);
+ SMB2_set_info_free(&rqst);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -3837,88 +3869,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
}
int
-SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
-{
- struct smb2_file_rename_info info;
- void **data;
- unsigned int size[2];
- int rc;
- int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
-
- data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
- /* 0 = fail if target already exists */
- info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
- info.FileNameLength = cpu_to_le32(len);
-
- data[0] = &info;
- size[0] = sizeof(struct smb2_file_rename_info);
-
- data[1] = target_file;
- size[1] = len + 2 /* null */;
-
- rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_RENAME_INFORMATION, SMB2_O_INFO_FILE,
- 0, 2, data, size);
- kfree(data);
- return rc;
-}
-
-int
-SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid)
-{
- __u8 delete_pending = 1;
- void *data;
- unsigned int size;
-
- data = &delete_pending;
- size = 1; /* sizeof __u8 */
-
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_DISPOSITION_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, &data, &size);
-}
-
-int
-SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
-{
- struct smb2_file_link_info info;
- void **data;
- unsigned int size[2];
- int rc;
- int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
-
- data = kmalloc_array(2, sizeof(void *), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
- /* 0 = fail if link already exists */
- info.RootDirectory = 0; /* MBZ for network ops (why does spec say?) */
- info.FileNameLength = cpu_to_le32(len);
-
- data[0] = &info;
- size[0] = sizeof(struct smb2_file_link_info);
-
- data[1] = target_file;
- size[1] = len + 2 /* null */;
-
- rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_LINK_INFORMATION, SMB2_O_INFO_FILE,
- 0, 2, data, size);
- kfree(data);
- return rc;
-}
-
-int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
- u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
+ u64 volatile_fid, u32 pid, __le64 *eof)
{
struct smb2_file_eof_info info;
void *data;
@@ -3929,28 +3881,12 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
data = &info;
size = sizeof(struct smb2_file_eof_info);
- if (is_falloc)
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- pid, FILE_ALLOCATION_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, &data, &size);
- else
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
pid, FILE_END_OF_FILE_INFORMATION, SMB2_O_INFO_FILE,
0, 1, &data, &size);
}
int
-SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
-{
- unsigned int size;
- size = sizeof(FILE_BASIC_INFO);
- return send_set_info(xid, tcon, persistent_fid, volatile_fid,
- current->tgid, FILE_BASIC_INFORMATION, SMB2_O_INFO_FILE,
- 0, 1, (void **)&buf, &size);
-}
-
-int
SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
@@ -4350,6 +4286,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
struct kvec iov[1];
struct kvec rsp_iov;
int resp_buf_type;
+ __u64 lease_key_high;
+ __u64 lease_key_low;
cifs_dbg(FYI, "SMB2_lease_break\n");
rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req,
@@ -4379,10 +4317,16 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
+ /* copy the lease key for tracing before the request buffer is freed */
+ lease_key_low = *(__u64 *)req->LeaseKey;
+ lease_key_high = *(__u64 *)(req->LeaseKey + 8);
cifs_small_buf_release(req);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
+ trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
+ ses->Suid, lease_key_low, lease_key_high, rc);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
- }
+ } else
+ trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
+ ses->Suid, lease_key_low, lease_key_high);
return rc;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 8fb7887f2b3d..f753f424d7f1 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -613,6 +613,8 @@ struct smb2_tree_disconnect_rsp {
#define SVHDX_OPEN_DEVICE_CONTEX 0x9CCBCF9E04C1E643980E158DA1F6EC83
#define SMB2_CREATE_TAG_POSIX 0x93AD25509CB411E7B42383DE968BCD7C
+/* Flag (SMB3 open response) values */
+#define SMB2_CREATE_FLAG_REPARSEPOINT 0x01
/*
* Maximum number of iovs we need for an open/create request.
@@ -650,7 +652,7 @@ struct smb2_create_rsp {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 89 */
__u8 OplockLevel;
- __u8 Reserved;
+ __u8 Flag; /* 0x01 if reparse point */
__le32 CreateAction;
__le64 CreationTime;
__le64 LastAccessTime;
@@ -1174,6 +1176,15 @@ struct smb2_query_info_rsp {
__u8 Buffer[1];
} __packed;
+/*
+ * Maximum number of iovs we need for a set-info request.
+ * The largest one is rename/hardlink:
+ * [0] : struct smb2_set_info_req + smb2_file_[rename|link]_info
+ * [1] : path
+ * [2] : compound padding
+ */
+#define SMB2_SET_INFO_IOV_SIZE 3
+
struct smb2_set_info_req {
struct smb2_sync_hdr sync_hdr;
__le16 StructureSize; /* Must be 33 */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index b4076577eeb7..9f4e9ed9ce53 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -116,6 +116,9 @@ extern void smb2_reconnect_server(struct work_struct *work);
extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
struct smb_rqst *rqst);
+extern void smb2_set_next_command(struct TCP_Server_Info *server,
+ struct smb_rqst *rqst);
+extern void smb2_set_related(struct smb_rqst *rqst);
/*
* SMB2 Worker functions - most of protocol specific implementation details
@@ -160,7 +163,8 @@ extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
extern int SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
u64 persistent_fid, u64 volatile_fid,
u8 info_class, u8 info_type,
- u32 additional_info, size_t output_len);
+ u32 additional_info, size_t output_len,
+ size_t input_len, void *input);
extern void SMB2_query_info_free(struct smb_rqst *rqst);
extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
@@ -179,20 +183,14 @@ extern int SMB2_echo(struct TCP_Server_Info *server);
extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf);
-extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- __le16 *target_file);
-extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid);
-extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- __le16 *target_file);
extern int SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid,
- __le64 *eof, bool is_fallocate);
-extern int SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid,
- FILE_BASIC_INFO *buf);
+ __le64 *eof);
+extern int SMB2_set_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
+ u64 persistent_fid, u64 volatile_fid, u32 pid,
+ u8 info_class, u8 info_type, u32 additional_info,
+ void **data, unsigned int *size);
+extern void SMB2_set_info_free(struct smb_rqst *rqst);
extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
@@ -232,6 +230,10 @@ extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
extern int smb3_encryption_required(const struct cifs_tcon *tcon);
extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length,
struct kvec *iov, unsigned int min_buf_size);
+extern int smb2_validate_and_copy_iov(unsigned int offset,
+ unsigned int buffer_length,
+ struct kvec *iov,
+ unsigned int minbufsize, char *data);
extern void smb2_copy_fs_info_to_kstatfs(
struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 5fdb9a509a97..5e282368cc4a 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2295,8 +2295,12 @@ static void smbd_mr_recovery_work(struct work_struct *work)
int rc;
list_for_each_entry(smbdirect_mr, &info->mr_list, list) {
- if (smbdirect_mr->state == MR_INVALIDATED ||
- smbdirect_mr->state == MR_ERROR) {
+ if (smbdirect_mr->state == MR_INVALIDATED)
+ ib_dma_unmap_sg(
+ info->id->device, smbdirect_mr->sgl,
+ smbdirect_mr->sgl_count,
+ smbdirect_mr->dir);
+ else if (smbdirect_mr->state == MR_ERROR) {
/* recover this MR entry */
rc = ib_dereg_mr(smbdirect_mr->mr);
@@ -2320,25 +2324,21 @@ static void smbd_mr_recovery_work(struct work_struct *work)
smbd_disconnect_rdma_connection(info);
continue;
}
+ } else
+ /* This MR is being used, don't recover it */
+ continue;
- if (smbdirect_mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgl,
- smbdirect_mr->sgl_count,
- smbdirect_mr->dir);
-
- smbdirect_mr->state = MR_READY;
+ smbdirect_mr->state = MR_READY;
- /* smbdirect_mr->state is updated by this function
- * and is read and updated by I/O issuing CPUs trying
- * to get a MR, the call to atomic_inc_return
- * implicates a memory barrier and guarantees this
- * value is updated before waking up any calls to
- * get_mr() from the I/O issuing CPUs
- */
- if (atomic_inc_return(&info->mr_ready_count) == 1)
- wake_up_interruptible(&info->wait_mr);
- }
+ /* smbdirect_mr->state is updated by this function
+ * and is read and updated by I/O issuing CPUs trying
+ * to get a MR, the call to atomic_inc_return
+ * implicates a memory barrier and guarantees this
+ * value is updated before waking up any calls to
+ * get_mr() from the I/O issuing CPUs
+ */
+ if (atomic_inc_return(&info->mr_ready_count) == 1)
+ wake_up_interruptible(&info->wait_mr);
}
}
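
The smbd_mr_recovery_work() rework stops funneling every MR through the same re-registration path: an MR in MR_INVALIDATED state only needs its scatterlist DMA-unmapped, MR_ERROR needs the full dereg/re-register cycle, and any other state means the MR is in use and must be skipped before the common "mark ready and wake waiters" tail runs. A toy decision table of that control flow, with stub states rather than the kernel's types:

	#include <stdio.h>

	enum mr_state { MR_READY, MR_REGISTERED, MR_INVALIDATED, MR_ERROR };

	static const char *recovery_action(enum mr_state state)
	{
		switch (state) {
		case MR_INVALIDATED:
			return "dma-unmap sgl, then mark MR_READY";
		case MR_ERROR:
			return "ib_dereg_mr + ib_alloc_mr, then mark MR_READY";
		default:
			return "in use - skip";
		}
	}

	int main(void)
	{
		enum mr_state states[] = { MR_INVALIDATED, MR_ERROR, MR_REGISTERED };
		unsigned int i;

		for (i = 0; i < sizeof(states) / sizeof(states[0]); i++)
			printf("state %d: %s\n", states[i],
			       recovery_action(states[i]));
		return 0;
	}
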
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index d4aed5217a56..cce8414fe7ec 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -460,6 +460,85 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
+
+DECLARE_EVENT_CLASS(smb3_lease_done_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high),
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state)
+)
+
+#define DEFINE_SMB3_LEASE_DONE_EVENT(name) \
+DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high), \
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
+
+DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
+
+DECLARE_EVENT_CLASS(smb3_lease_err_class,
+ TP_PROTO(__u32 lease_state,
+ __u32 tid,
+ __u64 sesid,
+ __u64 lease_key_low,
+ __u64 lease_key_high,
+ int rc),
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc),
+ TP_STRUCT__entry(
+ __field(__u32, lease_state)
+ __field(__u32, tid)
+ __field(__u64, sesid)
+ __field(__u64, lease_key_low)
+ __field(__u64, lease_key_high)
+ __field(int, rc)
+ ),
+ TP_fast_assign(
+ __entry->lease_state = lease_state;
+ __entry->tid = tid;
+ __entry->sesid = sesid;
+ __entry->lease_key_low = lease_key_low;
+ __entry->lease_key_high = lease_key_high;
+ __entry->rc = rc;
+ ),
+ TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x rc=%d",
+ __entry->sesid, __entry->tid, __entry->lease_key_high,
+ __entry->lease_key_low, __entry->lease_state, __entry->rc)
+)
+
+#define DEFINE_SMB3_LEASE_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
+ TP_PROTO(__u32 lease_state, \
+ __u32 tid, \
+ __u64 sesid, \
+ __u64 lease_key_low, \
+ __u64 lease_key_high, \
+ int rc), \
+ TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
+
+DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
+
DECLARE_EVENT_CLASS(smb3_reconnect_class,
TP_PROTO(__u64 currmid,
char *hostname),
@@ -486,6 +565,36 @@ DEFINE_EVENT(smb3_reconnect_class, smb3_##name, \
DEFINE_SMB3_RECONNECT_EVENT(reconnect);
DEFINE_SMB3_RECONNECT_EVENT(partial_send_reconnect);
+DECLARE_EVENT_CLASS(smb3_credit_class,
+ TP_PROTO(__u64 currmid,
+ char *hostname,
+ int credits),
+ TP_ARGS(currmid, hostname, credits),
+ TP_STRUCT__entry(
+ __field(__u64, currmid)
+ __field(char *, hostname)
+ __field(int, credits)
+ ),
+ TP_fast_assign(
+ __entry->currmid = currmid;
+ __entry->hostname = hostname;
+ __entry->credits = credits;
+ ),
+ TP_printk("server=%s current_mid=0x%llx credits=%d",
+ __entry->hostname,
+ __entry->currmid,
+ __entry->credits)
+)
+
+#define DEFINE_SMB3_CREDIT_EVENT(name) \
+DEFINE_EVENT(smb3_credit_class, smb3_##name, \
+ TP_PROTO(__u64 currmid, \
+ char *hostname, \
+ int credits), \
+ TP_ARGS(currmid, hostname, credits))
+
+DEFINE_SMB3_CREDIT_EVENT(reconnect_with_invalid_credits);
+
#endif /* _CIFS_TRACE_H */
#undef TRACE_INCLUDE_PATH
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b48f43963da6..f8112433f0c8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -113,9 +113,18 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
cifs_small_buf_release(midEntry->resp_buf);
#ifdef CONFIG_CIFS_STATS2
now = jiffies;
- /* commands taking longer than one second are indications that
- something is wrong, unless it is quite a slow link or server */
- if (time_after(now, midEntry->when_alloc + HZ) &&
+ /*
+ * Commands taking longer than one second (the default) can be a sign
+ * that something is wrong, unless it is quite a slow link or a very
+ * busy server. Note that this calculation is unlikely or impossible to
+ * wrap as long as slow_rsp_threshold is not set far above the
+ * recommended maximum (32767, i.e. 9 hours), and it is generally
+ * harmless even if wrong since it only affects debug counters - so
+ * leave the calculation as a simple comparison rather than doing
+ * multiple conversions and overflow checks.
+ */
+ if ((slow_rsp_threshold != 0) &&
+ time_after(now, midEntry->when_alloc + (slow_rsp_threshold * HZ)) &&
(midEntry->command != command)) {
/* smb2slowcmd[NUMBER_OF_SMB2_COMMANDS] counts by command */
if ((le16_to_cpu(midEntry->command) < NUMBER_OF_SMB2_COMMANDS) &&
@@ -128,7 +137,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
if (cifsFYI & CIFS_TIMER) {
pr_debug(" CIFS slow rsp: cmd %d mid %llu",
midEntry->command, midEntry->mid);
- pr_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
+ cifs_info(" A: 0x%lx S: 0x%lx R: 0x%lx\n",
now - midEntry->when_alloc,
now - midEntry->when_sent,
now - midEntry->when_received);
@@ -786,7 +795,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
int i, j, rc = 0;
int timeout, optype;
struct mid_q_entry *midQ[MAX_COMPOUND];
- unsigned int credits = 1;
+ unsigned int credits = 0;
char *buf;
timeout = flags & CIFS_TIMEOUT_MASK;
@@ -851,21 +860,24 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
mutex_unlock(&ses->server->srv_mutex);
- for (i = 0; i < num_rqst; i++) {
- if (rc < 0)
- goto out;
+ if (rc < 0)
+ goto out;
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst[i].rq_iov,
- rqst[i].rq_nvec);
+ /*
+ * Compounding is never used during session establish.
+ */
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
+ smb311_update_preauth_hash(ses, rqst[0].rq_iov,
+ rqst[0].rq_nvec);
- if (timeout == CIFS_ASYNC_OP)
- goto out;
+ if (timeout == CIFS_ASYNC_OP)
+ goto out;
+ for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(ses->server, midQ[i]);
if (rc != 0) {
- cifs_dbg(FYI, "Cancelling wait for mid %llu\n",
- midQ[i]->mid);
+ cifs_dbg(VFS, "Cancelling wait for mid %llu cmd: %d\n",
+ midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(ses->server, &rqst[i], midQ[i]);
spin_lock(&GlobalMid_Lock);
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) {
@@ -877,10 +889,21 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
}
spin_unlock(&GlobalMid_Lock);
}
+ }
+
+ for (i = 0; i < num_rqst; i++)
+ if (midQ[i]->resp_buf)
+ credits += ses->server->ops->get_credits(midQ[i]);
+ if (!credits)
+ credits = 1;
+
+ for (i = 0; i < num_rqst; i++) {
+ if (rc < 0)
+ goto out;
rc = cifs_sync_mid_result(midQ[i], ses->server);
if (rc != 0) {
- add_credits(ses->server, 1, optype);
+ add_credits(ses->server, credits, optype);
return rc;
}
@@ -901,23 +924,26 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
else
resp_buf_type[i] = CIFS_SMALL_BUFFER;
- if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
- struct kvec iov = {
- .iov_base = resp_iov[i].iov_base,
- .iov_len = resp_iov[i].iov_len
- };
- smb311_update_preauth_hash(ses, &iov, 1);
- }
-
- credits = ses->server->ops->get_credits(midQ[i]);
-
rc = ses->server->ops->check_receive(midQ[i], ses->server,
flags & CIFS_LOG_ERROR);
/* mark it so buf will not be freed by cifs_delete_mid */
if ((flags & CIFS_NO_RESP) == 0)
midQ[i]->resp_buf = NULL;
+
+ }
+
+ /*
+ * Compounding is never used during session establish.
+ */
+ if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
+ struct kvec iov = {
+ .iov_base = resp_iov[0].iov_base,
+ .iov_len = resp_iov[0].iov_len
+ };
+ smb311_update_preauth_hash(ses, &iov, 1);
}
+
out:
/*
* This will dequeue all mids. After this it is important that the
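
In compound_send_recv(), the post-send path is now phased: wait for every mid first, then sum the credits granted across all received responses before any per-mid error handling. An early failure thus hands back the full credit count via add_credits() instead of a hard-coded single credit, and the floor of one credit keeps a reconnect (where some responses may never arrive) from stranding the counter at zero. The preauth-hash updates likewise collapse to the first request and response only, since compounding is never used while a session is being established.
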
diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
index 504b3c3539dc..15f6e96b3bd9 100644
--- a/fs/compat_binfmt_elf.c
+++ b/fs/compat_binfmt_elf.c
@@ -52,7 +52,7 @@
#define elf_prpsinfo compat_elf_prpsinfo
#undef ns_to_timeval
-#define ns_to_timeval ns_to_compat_timeval
+#define ns_to_timeval ns_to_old_timeval32
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 0c445a03e682..ce2cc2169040 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -22,37 +22,21 @@
#include <linux/smp.h>
#include <linux/ioctl.h>
#include <linux/if.h>
-#include <linux/if_bridge.h>
#include <linux/raid/md_u.h>
-#include <linux/kd.h>
-#include <linux/route.h>
-#include <linux/in6.h>
-#include <linux/ipv6_route.h>
-#include <linux/skbuff.h>
-#include <linux/netlink.h>
-#include <linux/vt.h>
#include <linux/falloc.h>
-#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/ppp_defs.h>
#include <linux/ppp-ioctl.h>
#include <linux/if_pppox.h>
#include <linux/mtio.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>
-#include <linux/fb.h>
-#include <linux/videodev2.h>
-#include <linux/netdevice.h>
#include <linux/raw.h>
#include <linux/blkdev.h>
-#include <linux/elevator.h>
#include <linux/rtc.h>
#include <linux/pci.h>
#include <linux/serial.h>
-#include <linux/if_tun.h>
#include <linux/ctype.h>
#include <linux/syscalls.h>
-#include <linux/atalk.h>
#include <linux/gfp.h>
#include <linux/cec.h>
@@ -74,32 +58,9 @@
#endif
#include <linux/uaccess.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_bonding.h>
#include <linux/watchdog.h>
#include <linux/soundcard.h>
-#include <linux/lp.h>
-#include <linux/ppdev.h>
-
-#include <linux/atm.h>
-#include <linux/atmarp.h>
-#include <linux/atmclip.h>
-#include <linux/atmdev.h>
-#include <linux/atmioc.h>
-#include <linux/atmlec.h>
-#include <linux/atmmpc.h>
-#include <linux/atmsvc.h>
-#include <linux/atm_tcp.h>
-#include <linux/sonet.h>
-#include <linux/atm_suni.h>
-
-#include <linux/usb.h>
-#include <linux/usbdevice_fs.h>
-#include <linux/nbd.h>
-#include <linux/random.h>
-#include <linux/filter.h>
#include <linux/hiddev.h>
@@ -112,6 +73,7 @@
#include <linux/sort.h>
#ifdef CONFIG_SPARC
+#include <linux/fb.h>
#include <asm/fbio.h>
#endif
@@ -544,22 +506,6 @@ static int mt_ioctl_trans(struct file *file,
#define HCIUARTSETFLAGS _IOW('U', 203, int)
#define HCIUARTGETFLAGS _IOR('U', 204, int)
-#define BNEPCONNADD _IOW('B', 200, int)
-#define BNEPCONNDEL _IOW('B', 201, int)
-#define BNEPGETCONNLIST _IOR('B', 210, int)
-#define BNEPGETCONNINFO _IOR('B', 211, int)
-#define BNEPGETSUPPFEAT _IOR('B', 212, int)
-
-#define CMTPCONNADD _IOW('C', 200, int)
-#define CMTPCONNDEL _IOW('C', 201, int)
-#define CMTPGETCONNLIST _IOR('C', 210, int)
-#define CMTPGETCONNINFO _IOR('C', 211, int)
-
-#define HIDPCONNADD _IOW('H', 200, int)
-#define HIDPCONNDEL _IOW('H', 201, int)
-#define HIDPGETCONNLIST _IOR('H', 210, int)
-#define HIDPGETCONNINFO _IOR('H', 211, int)
-
#define RTC_IRQP_READ32 _IOR('p', 0x0b, compat_ulong_t)
#define RTC_IRQP_SET32 _IOW('p', 0x0c, compat_ulong_t)
#define RTC_EPOCH_READ32 _IOR('p', 0x0d, compat_ulong_t)
@@ -974,19 +920,6 @@ COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
COMPATIBLE_IOCTL(RFCOMMGETDEVINFO)
COMPATIBLE_IOCTL(RFCOMMSTEALDLC)
-COMPATIBLE_IOCTL(BNEPCONNADD)
-COMPATIBLE_IOCTL(BNEPCONNDEL)
-COMPATIBLE_IOCTL(BNEPGETCONNLIST)
-COMPATIBLE_IOCTL(BNEPGETCONNINFO)
-COMPATIBLE_IOCTL(BNEPGETSUPPFEAT)
-COMPATIBLE_IOCTL(CMTPCONNADD)
-COMPATIBLE_IOCTL(CMTPCONNDEL)
-COMPATIBLE_IOCTL(CMTPGETCONNLIST)
-COMPATIBLE_IOCTL(CMTPGETCONNINFO)
-COMPATIBLE_IOCTL(HIDPCONNADD)
-COMPATIBLE_IOCTL(HIDPCONNDEL)
-COMPATIBLE_IOCTL(HIDPGETCONNLIST)
-COMPATIBLE_IOCTL(HIDPGETCONNINFO)
/* CAPI */
COMPATIBLE_IOCTL(CAPI_REGISTER)
COMPATIBLE_IOCTL(CAPI_GET_MANUFACTURER)
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 39c20ef26db4..79debfc9cef9 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -83,10 +83,6 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
- return true;
-
return false;
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index e997ca51192f..7874c9bb2fc5 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -174,16 +174,6 @@ static struct fscrypt_mode {
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
},
- [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
- .friendly_name = "Speck128/256-XTS",
- .cipher_str = "xts(speck128)",
- .keysize = 64,
- },
- [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
- .friendly_name = "Speck128/256-CTS-CBC",
- .cipher_str = "cts(cbc(speck128))",
- .keysize = 32,
- },
};
static struct fscrypt_mode *
diff --git a/fs/exec.c b/fs/exec.c
index 1ebf6e5a521d..fc281b738a98 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -908,14 +908,14 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
goto out;
i_size = i_size_read(file_inode(file));
- if (max_size > 0 && i_size > max_size) {
- ret = -EFBIG;
- goto out;
- }
if (i_size <= 0) {
ret = -EINVAL;
goto out;
}
+ if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
+ ret = -EFBIG;
+ goto out;
+ }
if (id != READING_FIRMWARE_PREALLOC_BUFFER)
*buf = vmalloc(i_size);
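
The reordering in kernel_read_file() also adds a SIZE_MAX guard, which matters on 32-bit builds: i_size is a 64-bit loff_t while vmalloc() takes a 32-bit size_t there, so a larger-than-address-space file would previously have had its size silently truncated at the allocation; now it fails cleanly with -EFBIG.
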
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 305b220af45d..162f43b80c84 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -72,6 +72,9 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (base == kn)
break;
+ if ((s - path) + 3 >= PATH_MAX)
+ return -ENAMETOOLONG;
+
strcpy(s, "../");
s += 3;
base = base->parent;
@@ -88,7 +91,7 @@ static int kernfs_get_target_path(struct kernfs_node *parent,
if (len < 2)
return -EINVAL;
len--;
- if ((s - path) + len > PATH_MAX)
+ if ((s - path) + len >= PATH_MAX)
return -ENAMETOOLONG;
/* reverse fillup of target string from target to base */
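
Both kernfs checks enforce the same invariant: with PATH_MAX bytes of storage there is room for at most PATH_MAX - 1 characters plus the terminating NUL, so a prospective length equal to PATH_MAX must already fail, hence ">=" rather than ">", and the new guard applies the same test to each "../" prepended on the walk up. A standalone sketch of the boundary, using userspace types and a made-up helper:

	#include <stdio.h>
	#include <string.h>

	#define BUF_MAX 4096   /* stand-in for PATH_MAX */

	/* returns 0 on success, -1 where the kernel returns -ENAMETOOLONG */
	static int append(char *path, size_t used, const char *seg)
	{
		size_t len = strlen(seg);

		/* used + len must stay <= BUF_MAX - 1 so the NUL still fits */
		if (used + len >= BUF_MAX)
			return -1;
		memcpy(path + used, seg, len + 1);
		return 0;
	}

	int main(void)
	{
		char buf[BUF_MAX] = "";

		if (append(buf, 0, "../") == 0)
			printf("ok: \"%s\"\n", buf);
		return 0;
	}
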
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index f033f3a69a3b..07b839560576 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -93,7 +93,7 @@ int nfs4_check_delegation(struct inode *inode, fmode_t flags)
return nfs4_do_check_delegation(inode, flags, false);
}
-static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
+static int nfs_delegation_claim_locks(struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct inode *inode = state->inode;
struct file_lock *fl;
@@ -108,7 +108,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
spin_lock(&flctx->flc_lock);
restart:
list_for_each_entry(fl, list, fl_list) {
- if (nfs_file_open_context(fl->fl_file) != ctx)
+ if (nfs_file_open_context(fl->fl_file)->state != state)
continue;
spin_unlock(&flctx->flc_lock);
status = nfs4_lock_delegation_recall(fl, state, stateid);
@@ -136,8 +136,8 @@ static int nfs_delegation_claim_opens(struct inode *inode,
int err;
again:
- spin_lock(&inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state;
if (state == NULL)
continue;
@@ -147,15 +147,16 @@ again:
continue;
if (!nfs4_stateid_match(&state->stateid, stateid))
continue;
- get_nfs_open_context(ctx);
- spin_unlock(&inode->i_lock);
+ if (!get_nfs_open_context(ctx))
+ continue;
+ rcu_read_unlock();
sp = state->owner;
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
err = nfs4_open_delegation_recall(ctx, state, stateid, type);
if (!err)
- err = nfs_delegation_claim_locks(ctx, state, stateid);
+ err = nfs_delegation_claim_locks(state, stateid);
if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
err = -EAGAIN;
mutex_unlock(&sp->so_delegreturn_mutex);
@@ -164,7 +165,7 @@ again:
return err;
goto again;
}
- spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return 0;
}
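
On the NFS side, the delegation-recall walk of the open-file list moves from inode->i_lock to RCU. Since the lock no longer pins list membership, get_nfs_open_context() must now be allowed to fail (a context mid-teardown can have a zero refcount), so its result is checked before leaving the read-side section; the lock-claiming helper likewise matches on the nfs4_state rather than on a single open context, since several contexts can share one state and locks held under a sibling context were previously skipped.
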
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8bfaa658b2c1..71b2e390becf 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1072,6 +1072,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
+static int
+nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ struct inode *inode, int error)
+{
+ switch (error) {
+ case 1:
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+ __func__, dentry);
+ return 1;
+ case 0:
+ nfs_mark_for_revalidate(dir);
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+ nfs_zap_caches(inode);
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+ * it from shrink_dcache_for_unmount(), leading to busy
+ * inodes on unmount and further oopses.
+ */
+ if (IS_ROOT(dentry))
+ return 1;
+ }
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+ __func__, dentry);
+ return 0;
+ }
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+ __func__, dentry, error);
+ return error;
+}
+
+static int
+nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ int ret = 1;
+ if (nfs_neg_need_reval(dir, dentry, flags)) {
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ ret = 0;
+ }
+ return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
+}
+
+static int
+nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+}
+
+static int
+nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ struct nfs_fh *fhandle;
+ struct nfs_fattr *fattr;
+ struct nfs4_label *label;
+ int ret;
+
+ ret = -ENOMEM;
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
+ if (fhandle == NULL || fattr == NULL || IS_ERR(label))
+ goto out;
+
+ ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ if (ret < 0) {
+ if (ret == -ESTALE || ret == -ENOENT)
+ ret = 0;
+ goto out;
+ }
+ ret = 0;
+ if (nfs_compare_fh(NFS_FH(inode), fhandle))
+ goto out;
+ if (nfs_refresh_inode(inode, fattr) < 0)
+ goto out;
+
+ nfs_setsecurity(inode, fattr, label);
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+ /* set a readdirplus hint that we had a cache miss */
+ nfs_force_use_readdirplus(dir);
+ ret = 1;
+out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ nfs4_label_free(label);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+}
+
/*
* This is called every time the dcache has a lookup hit,
* and we should check whether we can really trust that
@@ -1083,58 +1177,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
* If the parent directory is seen to have changed, we throw out the
* cached dentry and do a new lookup.
*/
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct inode *dir;
struct inode *inode;
- struct dentry *parent;
- struct nfs_fh *fhandle = NULL;
- struct nfs_fattr *fattr = NULL;
- struct nfs4_label *label = NULL;
int error;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- }
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
inode = d_inode(dentry);
- if (!inode) {
- if (nfs_neg_need_reval(dir, dentry, flags)) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
- goto out_bad;
- }
- goto out_valid;
- }
+ if (!inode)
+ return nfs_lookup_revalidate_negative(dir, dentry, flags);
if (is_bad_inode(inode)) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
__func__, dentry);
goto out_bad;
}
if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
- goto out_set_verifier;
+ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* Force a full look up iff the parent directory has changed */
if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
error = nfs_lookup_verify_inode(inode, flags);
if (error) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
if (error == -ESTALE)
- goto out_zap_parent;
- goto out_error;
+ nfs_zap_caches(dir);
+ goto out_bad;
}
nfs_advise_use_readdirplus(dir);
goto out_valid;
@@ -1146,81 +1218,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (NFS_STALE(inode))
goto out_bad;
- error = -ENOMEM;
- fhandle = nfs_alloc_fhandle();
- fattr = nfs_alloc_fattr();
- if (fhandle == NULL || fattr == NULL)
- goto out_error;
-
- label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
- if (IS_ERR(label))
- goto out_error;
-
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error == -ESTALE || error == -ENOENT)
- goto out_bad;
- if (error)
- goto out_error;
- if (nfs_compare_fh(NFS_FH(inode), fhandle))
- goto out_bad;
- if ((error = nfs_refresh_inode(inode, fattr)) != 0)
- goto out_bad;
-
- nfs_setsecurity(inode, fattr, label);
-
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
+ return error;
+out_valid:
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+out_bad:
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+}
- /* set a readdirplus hint that we had a cache miss */
- nfs_force_use_readdirplus(dir);
+static int
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+ int (*reval)(struct inode *, struct dentry *, unsigned int))
+{
+ struct dentry *parent;
+ struct inode *dir;
+ int ret;
-out_set_verifier:
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- out_valid:
if (flags & LOOKUP_RCU) {
+ parent = READ_ONCE(dentry->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ ret = reval(dir, dentry, flags);
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
- } else
+ } else {
+ parent = dget_parent(dentry);
+ ret = reval(d_inode(parent), dentry, flags);
dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
- __func__, dentry);
- return 1;
-out_zap_parent:
- nfs_zap_caches(dir);
- out_bad:
- WARN_ON(flags & LOOKUP_RCU);
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
- nfs_mark_for_revalidate(dir);
- if (inode && S_ISDIR(inode->i_mode)) {
- /* Purge readdir caches. */
- nfs_zap_caches(inode);
- /*
- * We can't d_drop the root of a disconnected tree:
- * its d_hash is on the s_anon list and d_drop() would hide
- * it from shrink_dcache_for_unmount(), leading to busy
- * inodes on unmount and further oopses.
- */
- if (IS_ROOT(dentry))
- goto out_valid;
}
- dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
- __func__, dentry);
- return 0;
-out_error:
- WARN_ON(flags & LOOKUP_RCU);
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
- dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
- __func__, dentry, error);
- return error;
+ return ret;
+}
+
+static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
/*
@@ -1579,62 +1615,55 @@ no_open:
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
struct inode *inode;
- int ret = 0;
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
- goto no_open;
+ goto full_reval;
if (d_mountpoint(dentry))
- goto no_open;
- if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
- goto no_open;
+ goto full_reval;
inode = d_inode(dentry);
/* We can't create new files in nfs_open_revalidate(), so we
* optimize away revalidation of negative dentries.
*/
- if (inode == NULL) {
- struct dentry *parent;
- struct inode *dir;
-
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- }
- if (!nfs_neg_need_reval(dir, dentry, flags))
- ret = 1;
- else if (flags & LOOKUP_RCU)
- ret = -ECHILD;
- if (!(flags & LOOKUP_RCU))
- dput(parent);
- else if (parent != READ_ONCE(dentry->d_parent))
- return -ECHILD;
- goto out;
- }
+ if (inode == NULL)
+ goto full_reval;
+
+ if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
- goto no_open;
+ goto full_reval;
+
/* We cannot do exclusive creation on a positive dentry */
- if (flags & LOOKUP_EXCL)
- goto no_open;
+ if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
+ goto reval_dentry;
+
+ /* Check if the directory changed */
+ if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
+ goto reval_dentry;
/* Let f_op->open() actually open (and revalidate) the file */
- ret = 1;
+ return 1;
+reval_dentry:
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ return nfs_lookup_revalidate_dentry(dir, dentry, inode);
-out:
- return ret;
+full_reval:
+ return nfs_do_lookup_revalidate(dir, dentry, flags);
+}
-no_open:
- return nfs_lookup_revalidate(dentry, flags);
+static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return __nfs_lookup_revalidate(dentry, flags,
+ nfs4_do_lookup_revalidate);
}
#endif /* CONFIG_NFS_V4 */
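The refactor above funnels both revalidate entry points through a single parent-resolution wrapper. Its contract, reduced to a skeleton: in RCU mode the callback must not sleep, and rereading d_parent afterwards detects a concurrent rename; otherwise the parent is pinned with dget_parent()/dput(). Illustrative sketch using the kernel helpers named in the diff, not a drop-in:

static int with_parent(struct dentry *dentry, unsigned int flags,
                       int (*fn)(struct inode *, struct dentry *, unsigned int))
{
        struct dentry *parent;
        struct inode *dir;
        int ret;

        if (flags & LOOKUP_RCU) {
                parent = READ_ONCE(dentry->d_parent);
                dir = d_inode_rcu(parent);
                if (!dir)
                        return -ECHILD;         /* parent being torn down */
                ret = fn(dir, dentry, flags);
                if (parent != READ_ONCE(dentry->d_parent))
                        return -ECHILD;         /* raced with rename */
        } else {
                parent = dget_parent(dentry);   /* pins the parent */
                ret = fn(d_inode(parent), dentry, flags);
                dput(parent);
        }
        return ret;
}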
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index d175724ff566..61f46facb39c 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -1164,6 +1164,7 @@ static struct pnfs_layoutdriver_type filelayout_type = {
.id = LAYOUT_NFSV4_1_FILES,
.name = "LAYOUT_NFSV4_1_FILES",
.owner = THIS_MODULE,
+ .max_layoutget_response = 4096, /* 1 page or so... */
.alloc_layout_hdr = filelayout_alloc_layout_hdr,
.free_layout_hdr = filelayout_free_layout_hdr,
.alloc_lseg = filelayout_alloc_lseg,
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index cae43333ef16..86bcba40ca61 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -2356,6 +2356,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
.name = "LAYOUT_FLEX_FILES",
.owner = THIS_MODULE,
.flags = PNFS_LAYOUTGET_ON_OPEN,
+ .max_layoutget_response = 4096, /* 1 page or so... */
.set_layoutdriver = ff_layout_set_layoutdriver,
.alloc_layout_hdr = ff_layout_alloc_layout_hdr,
.free_layout_hdr = ff_layout_free_layout_hdr,
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index 59aa04976331..74d8d5352438 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -453,7 +453,7 @@ ff_layout_get_ds_cred(struct pnfs_layout_segment *lseg, u32 ds_idx,
struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, ds_idx);
struct rpc_cred *cred;
- if (mirror) {
+ if (mirror && !mirror->mirror_ds->ds_versions[0].tightly_coupled) {
cred = ff_layout_get_mirror_cred(mirror, lseg->pls_range.iomode);
if (!cred)
cred = get_rpccred(mdscred);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b65aee481d13..5b1eee4952b7 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -857,15 +857,14 @@ static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
{
- struct nfs_lock_context *head = &ctx->lock_context;
- struct nfs_lock_context *pos = head;
+ struct nfs_lock_context *pos;
- do {
+ list_for_each_entry_rcu(pos, &ctx->lock_context.list, list) {
if (pos->lockowner != current->files)
continue;
- refcount_inc(&pos->count);
- return pos;
- } while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
+ if (refcount_inc_not_zero(&pos->count))
+ return pos;
+ }
return NULL;
}
@@ -874,10 +873,10 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
struct nfs_lock_context *res, *new = NULL;
struct inode *inode = d_inode(ctx->dentry);
- spin_lock(&inode->i_lock);
+ rcu_read_lock();
res = __nfs_find_lock_context(ctx);
+ rcu_read_unlock();
if (res == NULL) {
- spin_unlock(&inode->i_lock);
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (new == NULL)
return ERR_PTR(-ENOMEM);
@@ -885,14 +884,14 @@ struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
spin_lock(&inode->i_lock);
res = __nfs_find_lock_context(ctx);
if (res == NULL) {
- list_add_tail(&new->list, &ctx->lock_context.list);
+ list_add_tail_rcu(&new->list, &ctx->lock_context.list);
new->open_context = ctx;
res = new;
new = NULL;
}
+ spin_unlock(&inode->i_lock);
+ kfree(new);
}
- spin_unlock(&inode->i_lock);
- kfree(new);
return res;
}
EXPORT_SYMBOL_GPL(nfs_get_lock_context);
@@ -904,9 +903,9 @@ void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
if (!refcount_dec_and_lock(&l_ctx->count, &inode->i_lock))
return;
- list_del(&l_ctx->list);
+ list_del_rcu(&l_ctx->list);
spin_unlock(&inode->i_lock);
- kfree(l_ctx);
+ kfree_rcu(l_ctx, rcu_head);
}
EXPORT_SYMBOL_GPL(nfs_put_lock_context);
@@ -978,9 +977,9 @@ EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
- if (ctx != NULL)
- refcount_inc(&ctx->lock_context.count);
- return ctx;
+ if (ctx != NULL && refcount_inc_not_zero(&ctx->lock_context.count))
+ return ctx;
+ return NULL;
}
EXPORT_SYMBOL_GPL(get_nfs_open_context);
@@ -989,13 +988,13 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
struct inode *inode = d_inode(ctx->dentry);
struct super_block *sb = ctx->dentry->d_sb;
+ if (!refcount_dec_and_test(&ctx->lock_context.count))
+ return;
if (!list_empty(&ctx->list)) {
- if (!refcount_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
- return;
- list_del(&ctx->list);
+ spin_lock(&inode->i_lock);
+ list_del_rcu(&ctx->list);
spin_unlock(&inode->i_lock);
- } else if (!refcount_dec_and_test(&ctx->lock_context.count))
- return;
+ }
if (inode != NULL)
NFS_PROTO(inode)->close_context(ctx, is_sync);
if (ctx->cred != NULL)
@@ -1003,7 +1002,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
dput(ctx->dentry);
nfs_sb_deactive(sb);
kfree(ctx->mdsthreshold);
- kfree(ctx);
+ kfree_rcu(ctx, rcu_head);
}
void put_nfs_open_context(struct nfs_open_context *ctx)
@@ -1027,10 +1026,7 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
- if (ctx->mode & FMODE_WRITE)
- list_add(&ctx->list, &nfsi->open_files);
- else
- list_add_tail(&ctx->list, &nfsi->open_files);
+ list_add_tail_rcu(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
@@ -1051,16 +1047,17 @@ struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_c
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *pos, *ctx = NULL;
- spin_lock(&inode->i_lock);
- list_for_each_entry(pos, &nfsi->open_files, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &nfsi->open_files, list) {
if (cred != NULL && pos->cred != cred)
continue;
if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
continue;
ctx = get_nfs_open_context(pos);
- break;
+ if (ctx)
+ break;
}
- spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
return ctx;
}
@@ -1078,9 +1075,6 @@ void nfs_file_clear_open_context(struct file *filp)
if (ctx->error < 0)
invalidate_inode_pages2(inode->i_mapping);
filp->private_data = NULL;
- spin_lock(&inode->i_lock);
- list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
- spin_unlock(&inode->i_lock);
put_nfs_open_context_sync(ctx);
}
}
@@ -1329,19 +1323,11 @@ static bool nfs_file_has_writers(struct nfs_inode *nfsi)
{
struct inode *inode = &nfsi->vfs_inode;
- assert_spin_locked(&inode->i_lock);
-
if (!S_ISREG(inode->i_mode))
return false;
if (list_empty(&nfsi->open_files))
return false;
- /* Note: This relies on nfsi->open_files being ordered with writers
- * being placed at the head of the list.
- * See nfs_inode_attach_open_context()
- */
- return (list_first_entry(&nfsi->open_files,
- struct nfs_open_context,
- list)->mode & FMODE_WRITE) == FMODE_WRITE;
+ return inode_is_open_for_write(inode);
}
static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
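The inode.c hunks above convert the open-context and lock-context lists to RCU: lookups walk locklessly and may only take a reference via refcount_inc_not_zero(), since a concurrent put may already have dropped the last count, while removal pairs list_del_rcu() with kfree_rcu() so readers never touch freed memory. A condensed sketch of that pattern with the same kernel primitives (illustrative only):

struct item {
        struct list_head list;
        refcount_t count;
        struct rcu_head rcu_head;
};

static struct item *find_and_get(struct list_head *head)
{
        struct item *pos, *found = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(pos, head, list) {
                if (refcount_inc_not_zero(&pos->count)) {
                        found = pos;    /* ref held, valid after unlock */
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

static void put_item(struct item *it, spinlock_t *lock)
{
        if (!refcount_dec_and_lock(&it->count, lock))
                return;
        list_del_rcu(&it->list);
        spin_unlock(lock);
        kfree_rcu(it, rcu_head);        /* freed after a grace period */
}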
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index ec8a9efa268f..71bc16225b98 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -786,6 +786,7 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
struct inode *inode = hdr->inode;
+ struct nfs_server *server = NFS_SERVER(inode);
if (hdr->pgio_done_cb != NULL)
return hdr->pgio_done_cb(task, hdr);
@@ -793,6 +794,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
if (nfs3_async_handle_jukebox(task, inode))
return -EAGAIN;
+ if (task->tk_status >= 0 && !server->read_hdrsize)
+ cmpxchg(&server->read_hdrsize, 0, hdr->res.replen);
+
nfs_invalidate_atime(inode);
nfs_refresh_inode(inode, &hdr->fattr);
return 0;
@@ -802,6 +806,7 @@ static void nfs3_proc_read_setup(struct nfs_pgio_header *hdr,
struct rpc_message *msg)
{
msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
+ hdr->args.replen = NFS_SERVER(hdr->inode)->read_hdrsize;
}
static int nfs3_proc_pgio_rpc_prepare(struct rpc_task *task,
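The cmpxchg() added above acts as a one-shot, lock-free latch: the first successful READ reply records its header size, concurrent completions race harmlessly because only one 0 -> replen transition can win, and later calls are no-ops. Generic sketch of the idiom (hypothetical helper):

static void latch_once(unsigned int *slot, unsigned int val)
{
        if (val && !READ_ONCE(*slot))
                cmpxchg(slot, 0, val);  /* only one 0 -> val write wins */
}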
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 64e4fa33d89f..78df4eb60f85 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -983,10 +983,11 @@ static void nfs3_xdr_enc_read3args(struct rpc_rqst *req,
const void *data)
{
const struct nfs_pgio_args *args = data;
+ unsigned int replen = args->replen ? args->replen : NFS3_readres_sz;
encode_read3args(xdr, args);
prepare_reply_buffer(req, args->pages, args->pgbase,
- args->count, NFS3_readres_sz);
+ args->count, replen);
req->rq_rcv_buf.flags |= XDRBUF_READ;
}
@@ -1364,10 +1365,12 @@ static void nfs3_xdr_enc_getacl3args(struct rpc_rqst *req,
encode_nfs_fh3(xdr, args->fh);
encode_uint32(xdr, args->mask);
- if (args->mask & (NFS_ACL | NFS_DFACL))
+ if (args->mask & (NFS_ACL | NFS_DFACL)) {
prepare_reply_buffer(req, args->pages, 0,
NFSACL_MAXPAGES << PAGE_SHIFT,
ACL3_getaclres_sz);
+ req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
+ }
}
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req,
@@ -1673,9 +1676,11 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
void *data)
{
struct nfs_pgio_res *result = data;
+ unsigned int pos;
enum nfs_stat status;
int error;
+ pos = xdr_stream_pos(xdr);
error = decode_nfsstat3(xdr, &status);
if (unlikely(error))
goto out;
@@ -1685,6 +1690,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
result->op_status = status;
if (status != NFS3_OK)
goto out_status;
+ result->replen = 3 + ((xdr_stream_pos(xdr) - pos) >> 2);
error = decode_read3resok(xdr, result);
out:
return error;
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 3a6904173214..8d59c9655ec4 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -188,9 +188,10 @@ struct nfs4_state {
unsigned int n_wronly; /* Number of write-only references */
unsigned int n_rdwr; /* Number of read/write references */
fmode_t state; /* State on the server (R,W, or RW) */
- atomic_t count;
+ refcount_t count;
wait_queue_head_t waitq;
+ struct rcu_head rcu_head;
};
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 146e30862234..8f53455c4765 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -950,10 +950,10 @@ EXPORT_SYMBOL_GPL(nfs4_set_ds_client);
/*
* Session has been established, and the client marked ready.
- * Set the mount rsize and wsize with negotiated fore channel
- * attributes which will be bound checked in nfs_server_set_fsinfo.
+ * Limit the mount rsize, wsize and dtsize using negotiated fore
+ * channel attributes.
*/
-static void nfs4_session_set_rwsize(struct nfs_server *server)
+static void nfs4_session_limit_rwsize(struct nfs_server *server)
{
#ifdef CONFIG_NFS_V4_1
struct nfs4_session *sess;
@@ -966,9 +966,11 @@ static void nfs4_session_set_rwsize(struct nfs_server *server)
server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead;
server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead;
- if (!server->rsize || server->rsize > server_resp_sz)
+ if (server->dtsize > server_resp_sz)
+ server->dtsize = server_resp_sz;
+ if (server->rsize > server_resp_sz)
server->rsize = server_resp_sz;
- if (!server->wsize || server->wsize > server_rqst_sz)
+ if (server->wsize > server_rqst_sz)
server->wsize = server_rqst_sz;
#endif /* CONFIG_NFS_V4_1 */
}
@@ -1015,12 +1017,12 @@ static int nfs4_server_common_setup(struct nfs_server *server,
(unsigned long long) server->fsid.minor);
nfs_display_fhandle(mntfh, "Pseudo-fs root FH");
- nfs4_session_set_rwsize(server);
-
error = nfs_probe_fsinfo(server, mntfh, fattr);
if (error < 0)
goto out;
+ nfs4_session_limit_rwsize(server);
+
if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
server->namelen = NFS4_MAXNAMLEN;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8220a168282e..db84b4adbc49 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1349,12 +1349,20 @@ static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
return false;
}
-static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
+static int can_open_cached(struct nfs4_state *state, fmode_t mode,
+ int open_mode, enum open_claim_type4 claim)
{
int ret = 0;
if (open_mode & (O_EXCL|O_TRUNC))
goto out;
+ switch (claim) {
+ case NFS4_OPEN_CLAIM_NULL:
+ case NFS4_OPEN_CLAIM_FH:
+ goto out;
+ default:
+ break;
+ }
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
@@ -1747,7 +1755,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
for (;;) {
spin_lock(&state->owner->so_lock);
- if (can_open_cached(state, fmode, open_mode)) {
+ if (can_open_cached(state, fmode, open_mode, claim)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
goto out_return_state;
@@ -1777,7 +1785,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
out:
return ERR_PTR(ret);
out_return_state:
- atomic_inc(&state->count);
+ refcount_inc(&state->count);
return state;
}
@@ -1849,7 +1857,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
update:
update_open_stateid(state, &data->o_res.stateid, NULL,
data->o_arg.fmode);
- atomic_inc(&state->count);
+ refcount_inc(&state->count);
return state;
}
@@ -1887,7 +1895,7 @@ nfs4_opendata_find_nfs4_state(struct nfs4_opendata *data)
return ERR_CAST(inode);
if (data->state != NULL && data->state->inode == inode) {
state = data->state;
- atomic_inc(&state->count);
+ refcount_inc(&state->count);
} else
state = nfs4_get_open_state(inode, data->owner);
iput(inode);
@@ -1933,23 +1941,41 @@ nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
return ret;
}
-static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
+static struct nfs_open_context *
+nfs4_state_find_open_context_mode(struct nfs4_state *state, fmode_t mode)
{
struct nfs_inode *nfsi = NFS_I(state->inode);
struct nfs_open_context *ctx;
- spin_lock(&state->inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
continue;
- get_nfs_open_context(ctx);
- spin_unlock(&state->inode->i_lock);
+ if ((ctx->mode & mode) != mode)
+ continue;
+ if (!get_nfs_open_context(ctx))
+ continue;
+ rcu_read_unlock();
return ctx;
}
- spin_unlock(&state->inode->i_lock);
+ rcu_read_unlock();
return ERR_PTR(-ENOENT);
}
+static struct nfs_open_context *
+nfs4_state_find_open_context(struct nfs4_state *state)
+{
+ struct nfs_open_context *ctx;
+
+ ctx = nfs4_state_find_open_context_mode(state, FMODE_READ|FMODE_WRITE);
+ if (!IS_ERR(ctx))
+ return ctx;
+ ctx = nfs4_state_find_open_context_mode(state, FMODE_WRITE);
+ if (!IS_ERR(ctx))
+ return ctx;
+ return nfs4_state_find_open_context_mode(state, FMODE_READ);
+}
+
static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
struct nfs4_state *state, enum open_claim_type4 claim)
{
@@ -1960,7 +1986,7 @@ static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context
if (opendata == NULL)
return ERR_PTR(-ENOMEM);
opendata->state = state;
- atomic_inc(&state->count);
+ refcount_inc(&state->count);
return opendata;
}
@@ -2276,7 +2302,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
if (data->state != NULL) {
struct nfs_delegation *delegation;
- if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
+ if (can_open_cached(data->state, data->o_arg.fmode,
+ data->o_arg.open_flags, claim))
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 40a08cd483f0..62ae0fd345ad 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -655,7 +655,7 @@ nfs4_alloc_open_state(void)
state = kzalloc(sizeof(*state), GFP_NOFS);
if (!state)
return NULL;
- atomic_set(&state->count, 1);
+ refcount_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
spin_lock_init(&state->state_lock);
seqlock_init(&state->seqlock);
@@ -684,12 +684,12 @@ __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs4_state *state;
- list_for_each_entry(state, &nfsi->open_states, inode_states) {
+ list_for_each_entry_rcu(state, &nfsi->open_states, inode_states) {
if (state->owner != owner)
continue;
if (!nfs4_valid_open_stateid(state))
continue;
- if (atomic_inc_not_zero(&state->count))
+ if (refcount_inc_not_zero(&state->count))
return state;
}
return NULL;
@@ -698,7 +698,7 @@ __nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
static void
nfs4_free_open_state(struct nfs4_state *state)
{
- kfree(state);
+ kfree_rcu(state, rcu_head);
}
struct nfs4_state *
@@ -707,9 +707,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
struct nfs4_state *state, *new;
struct nfs_inode *nfsi = NFS_I(inode);
- spin_lock(&inode->i_lock);
+ rcu_read_lock();
state = __nfs4_find_state_byowner(inode, owner);
- spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
if (state)
goto out;
new = nfs4_alloc_open_state();
@@ -720,7 +720,7 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
state = new;
state->owner = owner;
atomic_inc(&owner->so_count);
- list_add(&state->inode_states, &nfsi->open_states);
+ list_add_rcu(&state->inode_states, &nfsi->open_states);
ihold(inode);
state->inode = inode;
spin_unlock(&inode->i_lock);
@@ -743,10 +743,10 @@ void nfs4_put_open_state(struct nfs4_state *state)
struct inode *inode = state->inode;
struct nfs4_state_owner *owner = state->owner;
- if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
+ if (!refcount_dec_and_lock(&state->count, &owner->so_lock))
return;
spin_lock(&inode->i_lock);
- list_del(&state->inode_states);
+ list_del_rcu(&state->inode_states);
list_del(&state->open_states);
spin_unlock(&inode->i_lock);
spin_unlock(&owner->so_lock);
@@ -1437,8 +1437,8 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
struct nfs4_state *state;
bool found = false;
- spin_lock(&inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state;
if (state == NULL)
continue;
@@ -1456,7 +1456,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
nfs4_state_mark_reclaim_nograce(clp, state))
found = true;
}
- spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
nfs_inode_find_delegation_state_and_recover(inode, stateid);
if (found)
@@ -1469,13 +1469,13 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *ctx;
- spin_lock(&inode->i_lock);
- list_for_each_entry(ctx, &nfsi->open_files, list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
if (ctx->state != state)
continue;
set_bit(NFS_CONTEXT_BAD, &ctx->flags);
}
- spin_unlock(&inode->i_lock);
+ rcu_read_unlock();
}
static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
@@ -1549,10 +1549,62 @@ out:
return status;
}
+#ifdef CONFIG_NFS_V4_2
+static void nfs42_complete_copies(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+ struct nfs4_copy_state *copy;
+
+ if (!test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags))
+ return;
+
+ spin_lock(&sp->so_server->nfs_client->cl_lock);
+ list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
+ if (!nfs4_stateid_match_other(&state->stateid, &copy->parent_state->stateid))
+ continue;
+ copy->flags = 1;
+ complete(&copy->completion);
+ break;
+ }
+ spin_unlock(&sp->so_server->nfs_client->cl_lock);
+}
+#else /* !CONFIG_NFS_V4_2 */
+static inline void nfs42_complete_copies(struct nfs4_state_owner *sp,
+ struct nfs4_state *state)
+{
+}
+#endif /* CONFIG_NFS_V4_2 */
+
+static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_state *state,
+ const struct nfs4_state_recovery_ops *ops)
+{
+ struct nfs4_lock_state *lock;
+ int status;
+
+ status = ops->recover_open(sp, state);
+ if (status < 0)
+ return status;
+
+ status = nfs4_reclaim_locks(state, ops);
+ if (status < 0)
+ return status;
+
+ if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
+ spin_lock(&state->state_lock);
+ list_for_each_entry(lock, &state->lock_states, ls_locks) {
+ if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
+ pr_warn_ratelimited("NFS: %s: Lock reclaim failed!\n", __func__);
+ }
+ spin_unlock(&state->state_lock);
+ }
+
+ nfs42_complete_copies(sp, state);
+ clear_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
+ return status;
+}
+
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
struct nfs4_state *state;
- struct nfs4_lock_state *lock;
int status = 0;
/* Note: we rely on the sp->so_states list being ordered
@@ -1573,79 +1625,45 @@ restart:
continue;
if (state->state == 0)
continue;
- atomic_inc(&state->count);
+ refcount_inc(&state->count);
spin_unlock(&sp->so_lock);
- status = ops->recover_open(sp, state);
- if (status >= 0) {
- status = nfs4_reclaim_locks(state, ops);
- if (status >= 0) {
- if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) {
- spin_lock(&state->state_lock);
- list_for_each_entry(lock, &state->lock_states, ls_locks) {
- if (!test_bit(NFS_LOCK_INITIALIZED, &lock->ls_flags))
- pr_warn_ratelimited("NFS: "
- "%s: Lock reclaim "
- "failed!\n", __func__);
- }
- spin_unlock(&state->state_lock);
- }
- clear_bit(NFS_STATE_RECLAIM_NOGRACE,
- &state->flags);
-#ifdef CONFIG_NFS_V4_2
- if (test_bit(NFS_CLNT_DST_SSC_COPY_STATE, &state->flags)) {
- struct nfs4_copy_state *copy;
-
- spin_lock(&sp->so_server->nfs_client->cl_lock);
- list_for_each_entry(copy, &sp->so_server->ss_copies, copies) {
- if (memcmp(&state->stateid.other, &copy->parent_state->stateid.other, NFS4_STATEID_SIZE))
- continue;
- copy->flags = 1;
- complete(&copy->completion);
- printk("AGLO: server rebooted waking up the copy\n");
- break;
- }
- spin_unlock(&sp->so_server->nfs_client->cl_lock);
- }
-#endif /* CONFIG_NFS_V4_2 */
- nfs4_put_open_state(state);
- spin_lock(&sp->so_lock);
- goto restart;
- }
- }
+ status = __nfs4_reclaim_open_state(sp, state, ops);
+
switch (status) {
- default:
- printk(KERN_ERR "NFS: %s: unhandled error %d\n",
- __func__, status);
- /* Fall through */
- case -ENOENT:
- case -ENOMEM:
- case -EACCES:
- case -EROFS:
- case -EIO:
- case -ESTALE:
- /* Open state on this file cannot be recovered */
- nfs4_state_mark_recovery_failed(state, status);
- break;
- case -EAGAIN:
- ssleep(1);
- /* Fall through */
- case -NFS4ERR_ADMIN_REVOKED:
- case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_OLD_STATEID:
- case -NFS4ERR_BAD_STATEID:
- case -NFS4ERR_RECLAIM_BAD:
- case -NFS4ERR_RECLAIM_CONFLICT:
- nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ default:
+ if (status >= 0)
break;
- case -NFS4ERR_EXPIRED:
- case -NFS4ERR_NO_GRACE:
- nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
- case -NFS4ERR_STALE_CLIENTID:
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- goto out_err;
+ printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
+ /* Fall through */
+ case -ENOENT:
+ case -ENOMEM:
+ case -EACCES:
+ case -EROFS:
+ case -EIO:
+ case -ESTALE:
+ /* Open state on this file cannot be recovered */
+ nfs4_state_mark_recovery_failed(state, status);
+ break;
+ case -EAGAIN:
+ ssleep(1);
+ /* Fall through */
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_OLD_STATEID:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_RECLAIM_BAD:
+ case -NFS4ERR_RECLAIM_CONFLICT:
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ break;
+ case -NFS4ERR_EXPIRED:
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ goto out_err;
}
nfs4_put_open_state(state);
spin_lock(&sp->so_lock);
@@ -1795,38 +1813,38 @@ static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
switch (error) {
- case 0:
- break;
- case -NFS4ERR_CB_PATH_DOWN:
- nfs40_handle_cb_pathdown(clp);
- break;
- case -NFS4ERR_NO_GRACE:
- nfs4_state_end_reclaim_reboot(clp);
- break;
- case -NFS4ERR_STALE_CLIENTID:
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_start_reclaim_reboot(clp);
- break;
- case -NFS4ERR_EXPIRED:
- set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_start_reclaim_nograce(clp);
- break;
- case -NFS4ERR_BADSESSION:
- case -NFS4ERR_BADSLOT:
- case -NFS4ERR_BAD_HIGH_SLOT:
- case -NFS4ERR_DEADSESSION:
- case -NFS4ERR_SEQ_FALSE_RETRY:
- case -NFS4ERR_SEQ_MISORDERED:
- set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
- /* Zero session reset errors */
- break;
- case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
- set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
- break;
- default:
- dprintk("%s: failed to handle error %d for server %s\n",
- __func__, error, clp->cl_hostname);
- return error;
+ case 0:
+ break;
+ case -NFS4ERR_CB_PATH_DOWN:
+ nfs40_handle_cb_pathdown(clp);
+ break;
+ case -NFS4ERR_NO_GRACE:
+ nfs4_state_end_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_STALE_CLIENTID:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_reboot(clp);
+ break;
+ case -NFS4ERR_EXPIRED:
+ set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
+ nfs4_state_start_reclaim_nograce(clp);
+ break;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_BADSLOT:
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+ set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
+ /* Zero session reset errors */
+ break;
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
+ break;
+ default:
+ dprintk("%s: failed to handle error %d for server %s\n",
+ __func__, error, clp->cl_hostname);
+ return error;
}
dprintk("%s: handled error %d for server %s\n", __func__, error,
clp->cl_hostname);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b7bde12d8cd5..2fc8f6fa25e4 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3516,7 +3516,7 @@ static int decode_attr_exclcreat_supported(struct xdr_stream *xdr,
static int decode_attr_filehandle(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs_fh *fh)
{
__be32 *p;
- int len;
+ u32 len;
if (fh != NULL)
memset(fh, 0, sizeof(*fh));
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index bb5476a6d264..5c4568a0804b 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -63,14 +63,14 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init);
void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
- spin_lock(&hdr->lock);
- if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags)
- || pos < hdr->io_start + hdr->good_bytes) {
+ unsigned int new = pos - hdr->io_start;
+
+ if (hdr->good_bytes > new) {
+ hdr->good_bytes = new;
clear_bit(NFS_IOHDR_EOF, &hdr->flags);
- hdr->good_bytes = pos - hdr->io_start;
- hdr->error = error;
+ if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
+ hdr->error = error;
}
- spin_unlock(&hdr->lock);
}
static inline struct nfs_page *
@@ -494,7 +494,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
if (hdr) {
INIT_LIST_HEAD(&hdr->pages);
- spin_lock_init(&hdr->lock);
hdr->rw_ops = ops;
}
return hdr;
@@ -1111,6 +1110,20 @@ static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
return ret;
}
+static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
+{
+ u32 midx;
+ struct nfs_pgio_mirror *mirror;
+
+ if (!desc->pg_error)
+ return;
+
+ for (midx = 0; midx < desc->pg_mirror_count; midx++) {
+ mirror = &desc->pg_mirrors[midx];
+ desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
+ }
+}
+
int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
struct nfs_page *req)
{
@@ -1161,25 +1174,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
return 1;
out_failed:
- /*
- * We might have failed before sending any reqs over wire.
- * Clean up rest of the reqs in mirror pg_list.
- */
- if (desc->pg_error) {
- struct nfs_pgio_mirror *mirror;
- void (*func)(struct list_head *);
-
- /* remember fatal errors */
- if (nfs_error_is_fatal(desc->pg_error))
- nfs_context_set_write_error(req->wb_context,
- desc->pg_error);
-
- func = desc->pg_completion_ops->error_cleanup;
- for (midx = 0; midx < desc->pg_mirror_count; midx++) {
- mirror = &desc->pg_mirrors[midx];
- func(&mirror->pg_list);
- }
- }
+ nfs_pageio_error_cleanup(desc);
return 0;
}
@@ -1251,6 +1246,8 @@ void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
for (midx = 0; midx < desc->pg_mirror_count; midx++)
nfs_pageio_complete_mirror(desc, midx);
+ if (desc->pg_error < 0)
+ nfs_pageio_error_cleanup(desc);
if (desc->pg_ops->pg_cleanup)
desc->pg_ops->pg_cleanup(desc);
nfs_pageio_cleanup_mirroring(desc);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 7d9a51e6b847..06cb90e9bc6e 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -965,7 +965,7 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
struct page **pages;
int i;
- pages = kcalloc(size, sizeof(struct page *), gfp_flags);
+ pages = kmalloc_array(size, sizeof(struct page *), gfp_flags);
if (!pages) {
dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
return NULL;
@@ -975,7 +975,7 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
pages[i] = alloc_page(gfp_flags);
if (!pages[i]) {
dprintk("%s: failed to allocate page\n", __func__);
- nfs4_free_pages(pages, size);
+ nfs4_free_pages(pages, i);
return NULL;
}
}
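The nfs4_free_pages(pages, i) fix above matters because the allocation switched from kcalloc() to kmalloc_array(): the array is no longer zeroed, so the failure path must release only the i pages actually obtained rather than walking `size` possibly-garbage pointers. Sketch of the rollback shape (illustrative):

static struct page **alloc_page_array(size_t n, gfp_t gfp)
{
        struct page **pages = kmalloc_array(n, sizeof(*pages), gfp);
        size_t i;

        if (!pages)
                return NULL;
        for (i = 0; i < n; i++) {
                pages[i] = alloc_page(gfp);
                if (!pages[i]) {
                        while (i--)
                                __free_page(pages[i]);  /* only what we got */
                        kfree(pages);
                        return NULL;
                }
        }
        return pages;
}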
@@ -991,6 +991,7 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
gfp_t gfp_flags)
{
struct nfs_server *server = pnfs_find_server(ino, ctx);
+ size_t max_reply_sz = server->pnfs_curr_ld->max_layoutget_response;
size_t max_pages = max_response_pages(server);
struct nfs4_layoutget *lgp;
@@ -1000,6 +1001,12 @@ pnfs_alloc_init_layoutget_args(struct inode *ino,
if (lgp == NULL)
return NULL;
+ if (max_reply_sz) {
+ size_t npages = (max_reply_sz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ if (npages < max_pages)
+ max_pages = npages;
+ }
+
lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
if (!lgp->args.layout.pages) {
kfree(lgp);
@@ -1332,6 +1339,7 @@ bool pnfs_roc(struct inode *ino,
if (!nfs_have_layout(ino))
return false;
retry:
+ rcu_read_lock();
spin_lock(&ino->i_lock);
lo = nfsi->layout;
if (!lo || !pnfs_layout_is_valid(lo) ||
@@ -1342,6 +1350,7 @@ retry:
pnfs_get_layout_hdr(lo);
if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
spin_unlock(&ino->i_lock);
+ rcu_read_unlock();
wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
TASK_UNINTERRUPTIBLE);
pnfs_put_layout_hdr(lo);
@@ -1355,7 +1364,7 @@ retry:
skip_read = true;
}
- list_for_each_entry(ctx, &nfsi->open_files, list) {
+ list_for_each_entry_rcu(ctx, &nfsi->open_files, list) {
state = ctx->state;
if (state == NULL)
continue;
@@ -1403,6 +1412,7 @@ retry:
out_noroc:
spin_unlock(&ino->i_lock);
+ rcu_read_unlock();
pnfs_layoutcommit_inode(ino, true);
if (roc) {
struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index ece367ebde69..e2e9fcd5341d 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -125,6 +125,7 @@ struct pnfs_layoutdriver_type {
struct module *owner;
unsigned flags;
unsigned max_deviceinfo_size;
+ unsigned max_layoutget_response;
int (*set_layoutdriver) (struct nfs_server *, const struct nfs_fh *);
int (*clear_layoutdriver) (struct nfs_server *);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 48d7277c60a9..f9f19784db82 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -276,16 +276,14 @@ static void nfs_readpage_result(struct rpc_task *task,
struct nfs_pgio_header *hdr)
{
if (hdr->res.eof) {
- loff_t bound;
+ loff_t pos = hdr->args.offset + hdr->res.count;
+ unsigned int new = pos - hdr->io_start;
- bound = hdr->args.offset + hdr->res.count;
- spin_lock(&hdr->lock);
- if (bound < hdr->io_start + hdr->good_bytes) {
+ if (hdr->good_bytes > new) {
+ hdr->good_bytes = new;
set_bit(NFS_IOHDR_EOF, &hdr->flags);
clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
- hdr->good_bytes = bound - hdr->io_start;
}
- spin_unlock(&hdr->lock);
} else if (hdr->res.count < hdr->args.count)
nfs_readpage_retry(task, hdr);
}
diff --git a/fs/read_write.c b/fs/read_write.c
index 8a2737f0d61d..603794b207eb 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -331,7 +331,7 @@ COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned i
}
#endif
-#ifdef __ARCH_WANT_SYS_LLSEEK
+#if !defined(CONFIG_64BIT) || defined(CONFIG_COMPAT)
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
unsigned int, whence)
diff --git a/fs/select.c b/fs/select.c
index 4a6b6e4b21cb..22b3bf89f051 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -1120,7 +1120,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
ts.tv_sec = ts.tv_nsec = 0;
if (timeval) {
- struct compat_timeval rtv;
+ struct old_timeval32 rtv;
rtv.tv_sec = ts.tv_sec;
rtv.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -1128,7 +1128,7 @@ int compat_poll_select_copy_remaining(struct timespec64 *end_time, void __user *
if (!copy_to_user(p, &rtv, sizeof(rtv)))
return ret;
} else {
- if (!compat_put_timespec64(&ts, p))
+ if (!put_old_timespec32(&ts, p))
return ret;
}
/*
@@ -1257,10 +1257,10 @@ out_nofds:
static int do_compat_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp)
+ struct old_timeval32 __user *tvp)
{
struct timespec64 end_time, *to = NULL;
- struct compat_timeval tv;
+ struct old_timeval32 tv;
int ret;
if (tvp) {
@@ -1282,7 +1282,7 @@ static int do_compat_select(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE5(select, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timeval __user *, tvp)
+ struct old_timeval32 __user *, tvp)
{
return do_compat_select(n, inp, outp, exp, tvp);
}
@@ -1307,7 +1307,7 @@ COMPAT_SYSCALL_DEFINE1(old_select, struct compat_sel_arg_struct __user *, arg)
static long do_compat_pselect(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
+ struct old_timespec32 __user *tsp, compat_sigset_t __user *sigmask,
compat_size_t sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1315,7 +1315,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
@@ -1355,7 +1355,7 @@ static long do_compat_pselect(int n, compat_ulong_t __user *inp,
COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
compat_ulong_t __user *, outp, compat_ulong_t __user *, exp,
- struct compat_timespec __user *, tsp, void __user *, sig)
+ struct old_timespec32 __user *, tsp, void __user *, sig)
{
compat_size_t sigsetsize = 0;
compat_uptr_t up = 0;
@@ -1373,7 +1373,7 @@ COMPAT_SYSCALL_DEFINE6(pselect6, int, n, compat_ulong_t __user *, inp,
}
COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
- unsigned int, nfds, struct compat_timespec __user *, tsp,
+ unsigned int, nfds, struct old_timespec32 __user *, tsp,
const compat_sigset_t __user *, sigmask, compat_size_t, sigsetsize)
{
sigset_t ksigmask, sigsaved;
@@ -1381,7 +1381,7 @@ COMPAT_SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds,
int ret;
if (tsp) {
- if (compat_get_timespec64(&ts, tsp))
+ if (get_old_timespec32(&ts, tsp))
return -EFAULT;
to = &end_time;
diff --git a/fs/stat.c b/fs/stat.c
index f8e6fb2c3657..adbfcd86c81b 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -280,6 +280,8 @@ SYSCALL_DEFINE2(fstat, unsigned int, fd, struct __old_kernel_stat __user *, stat
#endif /* __ARCH_WANT_OLD_STAT */
+#ifdef __ARCH_WANT_NEW_STAT
+
#if BITS_PER_LONG == 32
# define choose_32_64(a,b) a
#else
@@ -378,6 +380,7 @@ SYSCALL_DEFINE2(newfstat, unsigned int, fd, struct stat __user *, statbuf)
return error;
}
+#endif
static int do_readlinkat(int dfd, const char __user *pathname,
char __user *buf, int bufsiz)
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d69ad801eb80..803ca070d42e 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -561,29 +561,29 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct __kernel_itimerspec __user *,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct compat_itimerspec __user *, utmr,
- struct compat_itimerspec __user *, otmr)
+ const struct old_itimerspec32 __user *, utmr,
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 new, old;
int ret;
- if (get_compat_itimerspec64(&new, utmr))
+ if (get_old_itimerspec32(&new, utmr))
return -EFAULT;
ret = do_timerfd_settime(ufd, flags, &new, &old);
if (ret)
return ret;
- if (otmr && put_compat_itimerspec64(&old, otmr))
+ if (otmr && put_old_itimerspec32(&old, otmr))
return -EFAULT;
return ret;
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
- struct compat_itimerspec __user *, otmr)
+ struct old_itimerspec32 __user *, otmr)
{
struct itimerspec64 kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
- return put_compat_itimerspec64(&kotmr, otmr) ? -EFAULT : 0;
+ return put_old_itimerspec32(&kotmr, otmr) ? -EFAULT : 0;
}
#endif
diff --git a/fs/utimes.c b/fs/utimes.c
index 69d4b6ba1bfb..bdcf2daf39c1 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -8,35 +8,6 @@
#include <linux/compat.h>
#include <asm/unistd.h>
-#ifdef __ARCH_WANT_SYS_UTIME
-
-/*
- * sys_utime() can be implemented in user-level using sys_utimes().
- * Is this for backwards compatibility? If so, why not move it
- * into the appropriate arch directory (for those architectures that
- * need it).
- */
-
-/* If times==NULL, set access and modification to current time,
- * must be owner or have write permission.
- * Else, update from *times, must be owner or super user.
- */
-SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
-{
- struct timespec64 tv[2];
-
- if (times) {
- if (get_user(tv[0].tv_sec, &times->actime) ||
- get_user(tv[1].tv_sec, &times->modtime))
- return -EFAULT;
- tv[0].tv_nsec = 0;
- tv[1].tv_nsec = 0;
- }
- return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
-}
-
-#endif
-
static bool nsec_valid(long nsec)
{
if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
@@ -166,7 +137,7 @@ out:
}
SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
- struct timespec __user *, utimes, int, flags)
+ struct __kernel_timespec __user *, utimes, int, flags)
{
struct timespec64 tstimes[2];
@@ -184,6 +155,13 @@ SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename,
return do_utimes(dfd, filename, utimes ? tstimes : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME
+/*
+ * futimesat(), utimes() and utime() are older versions of utimensat()
+ * that are provided for compatibility with traditional C libraries.
+ * On modern architectures, we always use libc wrappers around
+ * utimensat() instead.
+ */
static long do_futimesat(int dfd, const char __user *filename,
struct timeval __user *utimes)
{
@@ -225,13 +203,29 @@ SYSCALL_DEFINE2(utimes, char __user *, filename,
return do_futimesat(AT_FDCWD, filename, utimes);
}
-#ifdef CONFIG_COMPAT
+SYSCALL_DEFINE2(utime, char __user *, filename, struct utimbuf __user *, times)
+{
+ struct timespec64 tv[2];
+
+ if (times) {
+ if (get_user(tv[0].tv_sec, &times->actime) ||
+ get_user(tv[1].tv_sec, &times->modtime))
+ return -EFAULT;
+ tv[0].tv_nsec = 0;
+ tv[1].tv_nsec = 0;
+ }
+ return do_utimes(AT_FDCWD, filename, times ? tv : NULL, 0);
+}
+#endif
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
/*
* Not all architectures have sys_utime, so implement this in terms
* of sys_utimes.
*/
+#ifdef __ARCH_WANT_SYS_UTIME32
COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
- struct compat_utimbuf __user *, t)
+ struct old_utimbuf32 __user *, t)
{
struct timespec64 tv[2];
@@ -244,14 +238,15 @@ COMPAT_SYSCALL_DEFINE2(utime, const char __user *, filename,
}
return do_utimes(AT_FDCWD, filename, t ? tv : NULL, 0);
}
+#endif
-COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct compat_timespec __user *, t, int, flags)
+COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filename, struct old_timespec32 __user *, t, int, flags)
{
struct timespec64 tv[2];
if (t) {
- if (compat_get_timespec64(&tv[0], &t[0]) ||
- compat_get_timespec64(&tv[1], &t[1]))
+ if (get_old_timespec32(&tv[0], &t[0]) ||
+ get_old_timespec32(&tv[1], &t[1]))
return -EFAULT;
if (tv[0].tv_nsec == UTIME_OMIT && tv[1].tv_nsec == UTIME_OMIT)
@@ -260,8 +255,9 @@ COMPAT_SYSCALL_DEFINE4(utimensat, unsigned int, dfd, const char __user *, filena
return do_utimes(dfd, filename, t ? tv : NULL, flags);
}
+#ifdef __ARCH_WANT_SYS_UTIME32
static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
- struct compat_timeval __user *t)
+ struct old_timeval32 __user *t)
{
struct timespec64 tv[2];
@@ -282,13 +278,14 @@ static long do_compat_futimesat(unsigned int dfd, const char __user *filename,
COMPAT_SYSCALL_DEFINE3(futimesat, unsigned int, dfd,
const char __user *, filename,
- struct compat_timeval __user *, t)
+ struct old_timeval32 __user *, t)
{
return do_compat_futimesat(dfd, filename, t);
}
-COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct compat_timeval __user *, t)
+COMPAT_SYSCALL_DEFINE2(utimes, const char __user *, filename, struct old_timeval32 __user *, t)
{
return do_compat_futimesat(AT_FDCWD, filename, t);
}
#endif
+#endif
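The layering the utimes.c comment describes — utime(), utimes() and futimesat() as legacy fronts for utimensat() — can also be shown from the userspace side. A hedged illustration of the equivalence (plain POSIX, not kernel code):

#include <fcntl.h>
#include <stddef.h>
#include <sys/stat.h>
#include <time.h>
#include <utime.h>

static int my_utime(const char *path, const struct utimbuf *t)
{
        struct timespec ts[2];

        if (!t)                         /* NULL means "now" for both */
                return utimensat(AT_FDCWD, path, NULL, 0);
        ts[0].tv_sec = t->actime;  ts[0].tv_nsec = 0;
        ts[1].tv_sec = t->modtime; ts[1].tv_nsec = 0;
        return utimensat(AT_FDCWD, path, ts, 0);
}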
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 53600f527a70..0300374101cd 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -346,10 +346,16 @@ struct acpi_device_physical_node {
bool put_online:1;
};
+struct acpi_device_properties {
+ const guid_t *guid;
+ const union acpi_object *properties;
+ struct list_head list;
+};
+
/* ACPI Device Specific Data (_DSD) */
struct acpi_device_data {
const union acpi_object *pointer;
- const union acpi_object *properties;
+ struct list_head properties;
const union acpi_object *of_compatible;
struct list_head subnodes;
};
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index 28819451b6d1..a86f65bffab8 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -1,3 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
-/* This is an empty stub for 32-bit-only architectures */
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_aio_context_t;
+
+#endif
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 849cd8eb5ca0..d79abca81a52 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -141,4 +141,18 @@ static inline bool init_section_intersects(void *virt, size_t size)
return memory_intersects(__init_begin, __init_end, virt, size);
}
+/**
+ * is_kernel_rodata - checks if the pointer address is located in the
+ * .rodata section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .rodata, false otherwise.
+ */
+static inline bool is_kernel_rodata(unsigned long addr)
+{
+ return addr >= (unsigned long)__start_rodata &&
+ addr < (unsigned long)__end_rodata;
+}
+
#endif /* _ASM_GENERIC_SECTIONS_H_ */
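A typical consumer of the new is_kernel_rodata() helper decides whether a string can be referenced in place or must be duplicated. Hypothetical caller, shown for illustration only:

static const char *stable_name(const char *name)
{
        if (is_kernel_rodata((unsigned long)name))
                return name;            /* lives in .rodata, never freed */
        return kstrdup(name, GFP_KERNEL);
}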
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
deleted file mode 100644
index cdf904265caf..000000000000
--- a/include/asm-generic/unistd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <uapi/asm-generic/unistd.h>
-#include <linux/export.h>
-
-/*
- * These are required system calls, we should
- * invert the logic eventually and let them
- * be selected by default.
- */
-#if __BITS_PER_LONG == 32
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_LLSEEK
-#endif
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index e328b52425a8..22e6f412c595 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
@@ -234,6 +234,34 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}
+static inline void crypto_stat_compress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->compress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->compress_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_decompress(struct acomp_req *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->compress_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decompress_cnt);
+ atomic64_add(req->slen, &tfm->base.__crt_alg->decompress_tlen);
+ }
+#endif
+}
+
/**
* crypto_acomp_compress() -- Invoke asynchronous compress operation
*
@@ -246,8 +274,11 @@ static inline void acomp_request_set_params(struct acomp_req *req,
static inline int crypto_acomp_compress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->compress(req);
+ ret = tfm->compress(req);
+ crypto_stat_compress(req, ret);
+ return ret;
}
/**
@@ -262,8 +293,11 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
static inline int crypto_acomp_decompress(struct acomp_req *req)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ int ret;
- return tfm->decompress(req);
+ ret = tfm->decompress(req);
+ crypto_stat_decompress(req, ret);
+ return ret;
}
#endif
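All of the crypto_stat_*() helpers in this series follow one accounting shape: run the operation, then count it as an error unless it succeeded or was merely queued (-EINPROGRESS/-EBUSY from async backends), in which case bump a success counter and a byte total. Condensed sketch of that shape (hypothetical helper):

static inline int crypto_stat_account(int ret, atomic_t *err_cnt,
                                      atomic_t *ok_cnt, atomic64_t *tlen,
                                      unsigned int len)
{
        if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
                atomic_inc(err_cnt);
        } else {
                atomic_inc(ok_cnt);
                atomic64_add(len, tlen);
        }
        return ret;
}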
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 1e26f790b03f..0d765d7bfb82 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -306,6 +306,34 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
return __crypto_aead_cast(req->base.tfm);
}
+static inline void crypto_stat_aead_encrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_aead_decrypt(struct aead_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->aead_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_aead_encrypt() - encrypt plaintext
* @req: reference to the aead_request handle that holds all information
@@ -328,11 +356,14 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
static inline int crypto_aead_encrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return crypto_aead_alg(aead)->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = crypto_aead_alg(aead)->encrypt(req);
+ crypto_stat_aead_encrypt(req, ret);
+ return ret;
}
/**
@@ -360,14 +391,16 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
static inline int crypto_aead_decrypt(struct aead_request *req)
{
struct crypto_aead *aead = crypto_aead_reqtfm(req);
+ int ret;
if (crypto_aead_get_flags(aead) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- if (req->cryptlen < crypto_aead_authsize(aead))
- return -EINVAL;
-
- return crypto_aead_alg(aead)->decrypt(req);
+ ret = -ENOKEY;
+ else if (req->cryptlen < crypto_aead_authsize(aead))
+ ret = -EINVAL;
+ else
+ ret = crypto_aead_alg(aead)->decrypt(req);
+ crypto_stat_aead_decrypt(req, ret);
+ return ret;
}
/**
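
With this change the -ENOKEY and -EINVAL short-circuits also pass through crypto_stat_aead_encrypt()/crypto_stat_aead_decrypt() before returning, so failed preconditions show up in aead_err_cnt. A minimal sketch of a caller, assuming an already-configured AEAD request:

    #include <crypto/aead.h>

    /* Hedged sketch: -ENOKEY now lands in the error counter too. */
    static int do_seal(struct aead_request *req)
    {
            int ret = crypto_aead_encrypt(req);

            if (ret == -ENOKEY)
                    return ret; /* setkey() was never called on this tfm */
            return (ret == -EINPROGRESS || ret == -EBUSY) ? 0 : ret;
    }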
diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h
index b5e11de4d497..afac71119396 100644
--- a/include/crypto/akcipher.h
+++ b/include/crypto/akcipher.h
@@ -271,6 +271,62 @@ static inline unsigned int crypto_akcipher_maxsize(struct crypto_akcipher *tfm)
return alg->max_size(tfm);
}
+static inline void crypto_stat_akcipher_encrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_decrypt(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->src_len, &tfm->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_akcipher_sign(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->sign_cnt);
+#endif
+}
+
+static inline void crypto_stat_akcipher_verify(struct akcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->akcipher_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->verify_cnt);
+#endif
+}
+
/**
* crypto_akcipher_encrypt() - Invoke public key encrypt operation
*
@@ -285,8 +341,11 @@ static inline int crypto_akcipher_encrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->encrypt(req);
+ ret = alg->encrypt(req);
+ crypto_stat_akcipher_encrypt(req, ret);
+ return ret;
}
/**
@@ -303,8 +362,11 @@ static inline int crypto_akcipher_decrypt(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->decrypt(req);
+ ret = alg->decrypt(req);
+ crypto_stat_akcipher_decrypt(req, ret);
+ return ret;
}
/**
@@ -321,8 +383,11 @@ static inline int crypto_akcipher_sign(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->sign(req);
+ ret = alg->sign(req);
+ crypto_stat_akcipher_sign(req, ret);
+ return ret;
}
/**
@@ -339,8 +404,11 @@ static inline int crypto_akcipher_verify(struct akcipher_request *req)
{
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct akcipher_alg *alg = crypto_akcipher_alg(tfm);
+ int ret;
- return alg->verify(req);
+ ret = alg->verify(req);
+ crypto_stat_akcipher_verify(req, ret);
+ return ret;
}
/**
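
Note the asymmetry the four akcipher helpers introduce: encrypt/decrypt also accumulate req->src_len into the _tlen totals, while sign/verify only bump per-operation counters, and all four share one akcipher_err_cnt. A short sketch, assuming the request was bound to an RSA transform elsewhere:

    #include <crypto/akcipher.h>

    /* Hedged sketch: sign path with the shared error counter in mind. */
    static int do_sign(struct akcipher_request *req)
    {
            int ret = crypto_akcipher_sign(req); /* sign_cnt or akcipher_err_cnt */

            return (ret == -EINPROGRESS || ret == -EBUSY) ? 0 : ret;
    }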
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index bd5e8ccf1687..4a5ad10e75f0 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -20,8 +20,10 @@
/*
* Maximum values for blocksize and alignmask, used to allocate
* static buffers that are big enough for any combination of
- * ciphers and architectures.
+ * algs and architectures. Ciphers have a lower maximum size.
*/
+#define MAX_ALGAPI_BLOCKSIZE 160
+#define MAX_ALGAPI_ALIGNMASK 63
#define MAX_CIPHER_BLOCKSIZE 16
#define MAX_CIPHER_ALIGNMASK 15
@@ -425,4 +427,14 @@ static inline void crypto_yield(u32 flags)
#endif
}
+int crypto_register_notifier(struct notifier_block *nb);
+int crypto_unregister_notifier(struct notifier_block *nb);
+
+/* Crypto notification events. */
+enum {
+ CRYPTO_MSG_ALG_REQUEST,
+ CRYPTO_MSG_ALG_REGISTER,
+ CRYPTO_MSG_ALG_LOADED,
+};
+
#endif /* _CRYPTO_ALGAPI_H */
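
The exported notifier pair gives other kernel code a hook into algorithm lifecycle events. A hedged sketch of a listener (the handler and its message handling are illustrative; only the symbols above come from the patch):

    #include <crypto/algapi.h>
    #include <linux/notifier.h>

    static int my_crypto_event(struct notifier_block *nb,
                               unsigned long msg, void *data)
    {
            if (msg == CRYPTO_MSG_ALG_LOADED)
                    pr_info("an algorithm became usable\n");
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_crypto_event,
    };

    /* pair crypto_register_notifier(&my_nb) with
     * crypto_unregister_notifier(&my_nb) on teardown */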
diff --git a/include/crypto/cbc.h b/include/crypto/cbc.h
index f5b8bfc22e6d..3bf28beefa33 100644
--- a/include/crypto/cbc.h
+++ b/include/crypto/cbc.h
@@ -113,7 +113,7 @@ static inline int crypto_cbc_decrypt_inplace(
unsigned int bsize = crypto_skcipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
u8 *src = walk->src.virt.addr;
- u8 last_iv[bsize];
+ u8 last_iv[MAX_CIPHER_BLOCKSIZE];
/* Start of the last block. */
src += nbytes - (nbytes & (bsize - 1)) - bsize;
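
The fixed-size buffer removes a variable-length array from the stack; it is safe because cipher block sizes are capped at MAX_CIPHER_BLOCKSIZE. A sketch of the invariant a reviewer can check (names are illustrative):

    #include <crypto/algapi.h>
    #include <crypto/skcipher.h>

    /* Hedged sketch: the bound that makes u8 last_iv[MAX_CIPHER_BLOCKSIZE]
     * large enough for any cipher this API can hand us. */
    static bool blocksize_fits(struct crypto_skcipher *tfm)
    {
            return crypto_skcipher_blocksize(tfm) <= MAX_CIPHER_BLOCKSIZE;
    }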
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index b83d66073db0..f76302d99e2b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -13,13 +13,12 @@
#define CHACHA20_IV_SIZE 16
#define CHACHA20_KEY_SIZE 32
#define CHACHA20_BLOCK_SIZE 64
-#define CHACHA20_BLOCK_WORDS (CHACHA20_BLOCK_SIZE / sizeof(u32))
struct chacha20_ctx {
u32 key[8];
};
-void chacha20_block(u32 *state, u32 *stream);
+void chacha20_block(u32 *state, u8 *stream);
void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
unsigned int keysize);
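
chacha20_block() now emits its 64-byte keystream block through a u8 pointer instead of a u32 array, which is why the CHACHA20_BLOCK_WORDS helper goes away. A hedged sketch of generating one block (the state is the usual 16-word ChaCha state, initialised elsewhere):

    #include <crypto/chacha20.h>

    static void one_keystream_block(u32 state[16],
                                    u8 out[CHACHA20_BLOCK_SIZE])
    {
            /* writes 64 little-endian keystream bytes and, in the
             * in-kernel implementation, advances the block counter */
            chacha20_block(state, out);
    }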
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 76e432cab75d..bc7796600338 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -151,9 +151,13 @@ struct shash_desc {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define HASH_MAX_DIGESTSIZE 64
+#define HASH_MAX_DESCSIZE 360
+#define HASH_MAX_STATESIZE 512
+
#define SHASH_DESC_ON_STACK(shash, ctx) \
char __##shash##_desc[sizeof(struct shash_desc) + \
- crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
+ HASH_MAX_DESCSIZE] CRYPTO_MINALIGN_ATTR; \
struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
/**
@@ -408,6 +412,32 @@ static inline void *ahash_request_ctx(struct ahash_request *req)
int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen);
+static inline void crypto_stat_ahash_update(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ else
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+#endif
+}
+
+static inline void crypto_stat_ahash_final(struct ahash_request *req, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->hash_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->hash_cnt);
+ atomic64_add(req->nbytes, &tfm->base.__crt_alg->hash_tlen);
+ }
+#endif
+}
+
/**
* crypto_ahash_finup() - update and finalize message digest
* @req: reference to the ahash_request handle that holds all information
@@ -522,7 +552,11 @@ static inline int crypto_ahash_init(struct ahash_request *req)
*/
static inline int crypto_ahash_update(struct ahash_request *req)
{
- return crypto_ahash_reqtfm(req)->update(req);
+ int ret;
+
+ ret = crypto_ahash_reqtfm(req)->update(req);
+ crypto_stat_ahash_update(req, ret);
+ return ret;
}
/**
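
SHASH_DESC_ON_STACK now reserves the fixed HASH_MAX_DESCSIZE rather than the transform's runtime descsize, trading a little stack for VLA-free code. A minimal sketch of the usual pattern, assuming sha256 is available (on kernels of this vintage the descriptor still carries a flags field):

    #include <crypto/hash.h>

    static int sha256_once(const u8 *data, unsigned int len, u8 *digest)
    {
            struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
            int ret;

            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            {
                    SHASH_DESC_ON_STACK(desc, tfm); /* fixed size, no VLA */

                    desc->tfm = tfm;
                    desc->flags = 0;
                    ret = crypto_shash_digest(desc, data, len, digest);
            }
            crypto_free_shash(tfm);
            return ret;
    }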
diff --git a/include/crypto/internal/cryptouser.h b/include/crypto/internal/cryptouser.h
new file mode 100644
index 000000000000..8db299c25566
--- /dev/null
+++ b/include/crypto/internal/cryptouser.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netlink.h>
+
+struct crypto_alg *crypto_alg_match(struct crypto_user_alg *p, int exact);
+
+int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb);
+int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, struct nlattr **attrs);
+int crypto_dump_reportstat_done(struct netlink_callback *cb);
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 2bcfb931bc5b..71be24cd59bd 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -20,7 +20,7 @@
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
- struct crypto_skcipher *sknull;
+ struct crypto_sync_skcipher *sknull;
u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 1bde0a6514fa..f517ba6d3a27 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -268,6 +268,42 @@ struct kpp_secret {
unsigned short len;
};
+static inline void crypto_stat_kpp_set_secret(struct crypto_kpp *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->setsecret_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_generate_public_key(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->generate_public_key_cnt);
+#endif
+}
+
+static inline void crypto_stat_kpp_compute_shared_secret(struct kpp_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
+
+ if (ret)
+ atomic_inc(&tfm->base.__crt_alg->kpp_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->compute_shared_secret_cnt);
+#endif
+}
+
/**
* crypto_kpp_set_secret() - Invoke kpp operation
*
@@ -287,8 +323,11 @@ static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->set_secret(tfm, buffer, len);
+ ret = alg->set_secret(tfm, buffer, len);
+ crypto_stat_kpp_set_secret(tfm, ret);
+ return ret;
}
/**
@@ -308,8 +347,11 @@ static inline int crypto_kpp_generate_public_key(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->generate_public_key(req);
+ ret = alg->generate_public_key(req);
+ crypto_stat_kpp_generate_public_key(req, ret);
+ return ret;
}
/**
@@ -326,8 +368,11 @@ static inline int crypto_kpp_compute_shared_secret(struct kpp_request *req)
{
struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
struct kpp_alg *alg = crypto_kpp_alg(tfm);
+ int ret;
- return alg->compute_shared_secret(req);
+ ret = alg->compute_shared_secret(req);
+ crypto_stat_kpp_compute_shared_secret(req, ret);
+ return ret;
}
/**
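
The three kpp helpers line up with the natural call order of a key agreement. A compressed sketch of that sequence, assuming tfm and req were allocated and bound elsewhere and that packed_params came from a helper such as crypto_dh_encode_key():

    #include <crypto/kpp.h>

    /* Hedged sketch: the instrumented entry points in call order. */
    static int kpp_roundtrip(struct crypto_kpp *tfm, struct kpp_request *req,
                             const void *packed_params, unsigned int plen)
    {
            int ret = crypto_kpp_set_secret(tfm, packed_params, plen);

            if (ret)
                    return ret;                           /* kpp_err_cnt */
            ret = crypto_kpp_generate_public_key(req);    /* generate_public_key_cnt */
            if (ret)
                    return ret;
            return crypto_kpp_compute_shared_secret(req); /* compute_shared_secret_cnt */
    }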
diff --git a/include/crypto/mcryptd.h b/include/crypto/mcryptd.h
deleted file mode 100644
index b67404fc4b34..000000000000
--- a/include/crypto/mcryptd.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Software async multibuffer crypto daemon headers
- *
- * Author:
- * Tim Chen <tim.c.chen@linux.intel.com>
- *
- * Copyright (c) 2014, Intel Corporation.
- */
-
-#ifndef _CRYPTO_MCRYPT_H
-#define _CRYPTO_MCRYPT_H
-
-#include <linux/crypto.h>
-#include <linux/kernel.h>
-#include <crypto/hash.h>
-
-struct mcryptd_ahash {
- struct crypto_ahash base;
-};
-
-static inline struct mcryptd_ahash *__mcryptd_ahash_cast(
- struct crypto_ahash *tfm)
-{
- return (struct mcryptd_ahash *)tfm;
-}
-
-struct mcryptd_cpu_queue {
- struct crypto_queue queue;
- spinlock_t q_lock;
- struct work_struct work;
-};
-
-struct mcryptd_queue {
- struct mcryptd_cpu_queue __percpu *cpu_queue;
-};
-
-struct mcryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct mcryptd_queue *queue;
-};
-
-struct mcryptd_hash_ctx {
- struct crypto_ahash *child;
- struct mcryptd_alg_state *alg_state;
-};
-
-struct mcryptd_tag {
- /* seq number of request */
- unsigned seq_num;
- /* arrival time of request */
- unsigned long arrival;
- unsigned long expire;
- int cpu;
-};
-
-struct mcryptd_hash_request_ctx {
- struct list_head waiter;
- crypto_completion_t complete;
- struct mcryptd_tag tag;
- struct crypto_hash_walk walk;
- u8 *out;
- int flag;
- struct ahash_request areq;
-};
-
-struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask);
-struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm);
-struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req);
-void mcryptd_free_ahash(struct mcryptd_ahash *tfm);
-void mcryptd_flusher(struct work_struct *work);
-
-enum mcryptd_req_type {
- MCRYPTD_NONE,
- MCRYPTD_UPDATE,
- MCRYPTD_FINUP,
- MCRYPTD_DIGEST,
- MCRYPTD_FINAL
-};
-
-struct mcryptd_alg_cstate {
- unsigned long next_flush;
- unsigned next_seq_num;
- bool flusher_engaged;
- struct delayed_work flush;
- int cpu;
- struct mcryptd_alg_state *alg_state;
- void *mgr;
- spinlock_t work_lock;
- struct list_head work_list;
- struct list_head flush_list;
-};
-
-struct mcryptd_alg_state {
- struct mcryptd_alg_cstate __percpu *alg_cstate;
- unsigned long (*flusher)(struct mcryptd_alg_cstate *cstate);
-};
-
-/* return delay in jiffies from current time */
-static inline unsigned long get_delay(unsigned long t)
-{
- long delay;
-
- delay = (long) t - (long) jiffies;
- if (delay <= 0)
- return 0;
- else
- return (unsigned long) delay;
-}
-
-void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay);
-
-#endif
diff --git a/include/crypto/morus1280_glue.h b/include/crypto/morus1280_glue.h
index b26dd70efd9a..ba782e10065e 100644
--- a/include/crypto/morus1280_glue.h
+++ b/include/crypto/morus1280_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus1280_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus1280_##id##_algs[] = {\
+ static struct aead_alg crypto_morus1280_##id##_algs[] = {\
{ \
.setkey = crypto_morus1280_glue_setkey, \
.setauthsize = crypto_morus1280_glue_setauthsize, \
diff --git a/include/crypto/morus640_glue.h b/include/crypto/morus640_glue.h
index 90c8db07e740..27fa790a2362 100644
--- a/include/crypto/morus640_glue.h
+++ b/include/crypto/morus640_glue.h
@@ -82,7 +82,7 @@ void cryptd_morus640_glue_exit_tfm(struct crypto_aead *aead);
{ \
} \
\
- struct aead_alg crypto_morus640_##id##_algs[] = {\
+ static struct aead_alg crypto_morus640_##id##_algs[] = {\
{ \
.setkey = crypto_morus640_glue_setkey, \
.setauthsize = crypto_morus640_glue_setauthsize, \
diff --git a/include/crypto/null.h b/include/crypto/null.h
index 15aeef6e30ef..0ef577cc00e3 100644
--- a/include/crypto/null.h
+++ b/include/crypto/null.h
@@ -9,7 +9,7 @@
#define NULL_DIGEST_SIZE 0
#define NULL_IV_SIZE 0
-struct crypto_skcipher *crypto_get_default_null_skcipher(void);
+struct crypto_sync_skcipher *crypto_get_default_null_skcipher(void);
void crypto_put_default_null_skcipher(void);
#endif
diff --git a/include/crypto/rng.h b/include/crypto/rng.h
index b95ede354a66..6d258f5b68f1 100644
--- a/include/crypto/rng.h
+++ b/include/crypto/rng.h
@@ -122,6 +122,29 @@ static inline void crypto_free_rng(struct crypto_rng *tfm)
crypto_destroy_tfm(tfm, crypto_rng_tfm(tfm));
}
+static inline void crypto_stat_rng_seed(struct crypto_rng *tfm, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY)
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ else
+ atomic_inc(&tfm->base.__crt_alg->seed_cnt);
+#endif
+}
+
+static inline void crypto_stat_rng_generate(struct crypto_rng *tfm,
+ unsigned int dlen, int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&tfm->base.__crt_alg->rng_err_cnt);
+ } else {
+ atomic_inc(&tfm->base.__crt_alg->generate_cnt);
+ atomic64_add(dlen, &tfm->base.__crt_alg->generate_tlen);
+ }
+#endif
+}
+
/**
* crypto_rng_generate() - get random number
* @tfm: cipher handle
@@ -140,7 +163,11 @@ static inline int crypto_rng_generate(struct crypto_rng *tfm,
const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen)
{
- return crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ int ret;
+
+ ret = crypto_rng_alg(tfm)->generate(tfm, src, slen, dst, dlen);
+ crypto_stat_rng_generate(tfm, dlen, ret);
+ return ret;
}
/**
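
On a successful generate the helper also adds the requested dlen to generate_tlen, so per-RNG throughput becomes visible, not just call counts. A minimal sketch using the signature above (no additional seed material, hence the NULL/0 pair):

    #include <crypto/rng.h>

    static int fill_random(struct crypto_rng *tfm, u8 *buf, unsigned int len)
    {
            return crypto_rng_generate(tfm, NULL, 0, buf, len);
    }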
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 2f327f090c3e..925f547cdcfa 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -65,6 +65,10 @@ struct crypto_skcipher {
struct crypto_tfm base;
};
+struct crypto_sync_skcipher {
+ struct crypto_skcipher base;
+};
+
/**
* struct skcipher_alg - symmetric key cipher definition
* @min_keysize: Minimum key size supported by the transformation. This is the
@@ -139,9 +143,17 @@ struct skcipher_alg {
struct crypto_alg base;
};
-#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+#define MAX_SYNC_SKCIPHER_REQSIZE 384
+/*
+ * This performs a type-check against the "tfm" argument to make sure
+ * all users have the correct skcipher tfm for doing on-stack requests.
+ */
+#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \
char __##name##_desc[sizeof(struct skcipher_request) + \
- crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ MAX_SYNC_SKCIPHER_REQSIZE + \
+ (!(sizeof((struct crypto_sync_skcipher *)1 == \
+ (typeof(tfm))1))) \
+ ] CRYPTO_MINALIGN_ATTR; \
struct skcipher_request *name = (void *)__##name##_desc
/**
@@ -197,6 +209,9 @@ static inline struct crypto_skcipher *__crypto_skcipher_cast(
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
u32 type, u32 mask);
+struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
static inline struct crypto_tfm *crypto_skcipher_tfm(
struct crypto_skcipher *tfm)
{
@@ -212,6 +227,11 @@ static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
}
+static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
+{
+ crypto_free_skcipher(&tfm->base);
+}
+
/**
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -280,6 +300,12 @@ static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
return tfm->ivsize;
}
+static inline unsigned int crypto_sync_skcipher_ivsize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_ivsize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alg_chunksize(
struct skcipher_alg *alg)
{
@@ -356,6 +382,12 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_blocksize(&tfm->base);
+}
+
static inline unsigned int crypto_skcipher_alignmask(
struct crypto_skcipher *tfm)
{
@@ -379,6 +411,24 @@ static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
}
+static inline u32 crypto_sync_skcipher_get_flags(
+ struct crypto_sync_skcipher *tfm)
+{
+ return crypto_skcipher_get_flags(&tfm->base);
+}
+
+static inline void crypto_sync_skcipher_set_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_set_flags(&tfm->base, flags);
+}
+
+static inline void crypto_sync_skcipher_clear_flags(
+ struct crypto_sync_skcipher *tfm, u32 flags)
+{
+ crypto_skcipher_clear_flags(&tfm->base, flags);
+}
+
/**
* crypto_skcipher_setkey() - set key for cipher
* @tfm: cipher handle
@@ -401,6 +451,12 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
return tfm->setkey(tfm, key, keylen);
}
+static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return crypto_skcipher_setkey(&tfm->base, key, keylen);
+}
+
static inline unsigned int crypto_skcipher_default_keysize(
struct crypto_skcipher *tfm)
{
@@ -422,6 +478,40 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
return __crypto_skcipher_cast(req->base.tfm);
}
+static inline struct crypto_sync_skcipher *crypto_sync_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return container_of(tfm, struct crypto_sync_skcipher, base);
+}
+
+static inline void crypto_stat_skcipher_encrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->encrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_skcipher_decrypt(struct skcipher_request *req,
+ int ret, struct crypto_alg *alg)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&alg->decrypt_cnt);
+ atomic64_add(req->cryptlen, &alg->decrypt_tlen);
+ }
+#endif
+}
+
/**
* crypto_skcipher_encrypt() - encrypt plaintext
* @req: reference to the skcipher_request handle that holds all information
@@ -436,11 +526,14 @@ static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->encrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->encrypt(req);
+ crypto_stat_skcipher_encrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -457,11 +550,14 @@ static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ int ret;
if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
- return -ENOKEY;
-
- return tfm->decrypt(req);
+ ret = -ENOKEY;
+ else
+ ret = tfm->decrypt(req);
+ crypto_stat_skcipher_decrypt(req, ret, tfm->base.__crt_alg);
+ return ret;
}
/**
@@ -500,6 +596,12 @@ static inline void skcipher_request_set_tfm(struct skcipher_request *req,
req->base.tfm = crypto_skcipher_tfm(tfm);
}
+static inline void skcipher_request_set_sync_tfm(struct skcipher_request *req,
+ struct crypto_sync_skcipher *tfm)
+{
+ skcipher_request_set_tfm(req, &tfm->base);
+}
+
static inline struct skcipher_request *skcipher_request_cast(
struct crypto_async_request *req)
{
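
The crypto_sync_skcipher wrapper exists so SYNC_SKCIPHER_REQUEST_ON_STACK can use the fixed MAX_SYNC_SKCIPHER_REQSIZE bound while the sizeof expression type-checks that the tfm really is the sync variant. A minimal end-to-end sketch, assuming cbc(aes) and caller-provided scatterlists:

    #include <crypto/skcipher.h>

    static int encrypt_once(struct scatterlist *src, struct scatterlist *dst,
                            unsigned int len, u8 *iv,
                            const u8 *key, unsigned int keylen)
    {
            struct crypto_sync_skcipher *tfm =
                    crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
            int ret;

            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);
            ret = crypto_sync_skcipher_setkey(tfm, key, keylen);
            if (!ret) {
                    SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm); /* no VLA */

                    skcipher_request_set_sync_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, src, dst, len, iv);
                    ret = crypto_skcipher_encrypt(req);
                    skcipher_request_zero(req);
            }
            crypto_free_sync_skcipher(tfm);
            return ret;
    }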
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc952d405..000000000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common values for the Speck algorithm
- */
-
-#ifndef _CRYPTO_SPECK_H
-#define _CRYPTO_SPECK_H
-
-#include <linux/types.h>
-
-/* Speck128 */
-
-#define SPECK128_BLOCK_SIZE 16
-
-#define SPECK128_128_KEY_SIZE 16
-#define SPECK128_128_NROUNDS 32
-
-#define SPECK128_192_KEY_SIZE 24
-#define SPECK128_192_NROUNDS 33
-
-#define SPECK128_256_KEY_SIZE 32
-#define SPECK128_256_NROUNDS 34
-
-struct speck128_tfm_ctx {
- u64 round_keys[SPECK128_256_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-/* Speck64 */
-
-#define SPECK64_BLOCK_SIZE 8
-
-#define SPECK64_96_KEY_SIZE 12
-#define SPECK64_96_NROUNDS 26
-
-#define SPECK64_128_KEY_SIZE 16
-#define SPECK64_128_NROUNDS 27
-
-struct speck64_tfm_ctx {
- u32 round_keys[SPECK64_128_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SPECK_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index c796ff02ceeb..fe8214017b46 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Tomasz Figa <t.figa@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Samsung Exynos3250 clock controllers.
*/
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index e9f9d400c322..5b1d68512360 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos4 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_4_H
#define _DT_BINDINGS_CLOCK_EXYNOS_4_H
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 15508adcdfde..bc8a3c53a54b 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5250 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5250_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5250_H
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index a4bac9a1764f..98a58cbd81b2 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Rahul Sharma <rahul.sharma@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Provides Constants for Exynos5260 clocks.
-*/
+ */
#ifndef _DT_BINDINGS_CLK_EXYNOS5260_H
#define _DT_BINDINGS_CLK_EXYNOS5260_H
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 6cb4e90f81fc..f179eabbcdb7 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Copyright (c) 2016 Krzysztof Kozlowski
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5410 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5410_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5410_H
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 2740ae0424a9..355f469943f1 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -1,13 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd.
* Author: Andrzej Hajda <a.hajda@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for Exynos5420 clock controller.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5420_H
#define _DT_BINDINGS_CLOCK_EXYNOS_5420_H
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index be39d23e6a32..98bd85ce1e45 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Chanwoo Choi <cw00.choi@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _DT_BINDINGS_CLOCK_EXYNOS5433_H
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
index 10c558611085..fce33c7050c8 100644
--- a/include/dt-bindings/clock/exynos7-clk.h
+++ b/include/dt-bindings/clock/exynos7-clk.h
@@ -1,11 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
#define _DT_BINDINGS_CLOCK_EXYNOS7_H
diff --git a/include/dt-bindings/clock/s3c2410.h b/include/dt-bindings/clock/s3c2410.h
index 352a7673fc69..0fb65c3f2f59 100644
--- a/include/dt-bindings/clock/s3c2410.h
+++ b/include/dt-bindings/clock/s3c2410.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for clock controllers of Samsung S3C2410 and later.
*/
diff --git a/include/dt-bindings/clock/s3c2412.h b/include/dt-bindings/clock/s3c2412.h
index aac1dcfda81c..b4656156cc0f 100644
--- a/include/dt-bindings/clock/s3c2412.h
+++ b/include/dt-bindings/clock/s3c2412.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for clock controllers of Samsung S3C2412.
*/
diff --git a/include/dt-bindings/clock/s3c2443.h b/include/dt-bindings/clock/s3c2443.h
index f3ba68a25ecb..a9d2f105d536 100644
--- a/include/dt-bindings/clock/s3c2443.h
+++ b/include/dt-bindings/clock/s3c2443.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2013 Heiko Stuebner <heiko@sntech.de>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for clock controllers of Samsung S3C2443 and later.
*/
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 0c85f65c81c7..35b6f69b7db6 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* This header provides constants for the ARM GIC.
*/
diff --git a/include/dt-bindings/interrupt-controller/irq.h b/include/dt-bindings/interrupt-controller/irq.h
index a8b310555f14..9e3d183e1381 100644
--- a/include/dt-bindings/interrupt-controller/irq.h
+++ b/include/dt-bindings/interrupt-controller/irq.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* This header provides constants for most IRQ bindings.
*
diff --git a/include/dt-bindings/mfd/at91-usart.h b/include/dt-bindings/mfd/at91-usart.h
new file mode 100644
index 000000000000..2de5bc312e1e
--- /dev/null
+++ b/include/dt-bindings/mfd/at91-usart.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides macros for AT91 USART DT bindings.
+ *
+ * Copyright (C) 2018 Microchip Technology
+ *
+ * Author: Radu Pirea <radu.pirea@microchip.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_AT91_USART_H__
+#define __DT_BINDINGS_AT91_USART_H__
+
+#define AT91_USART_MODE_SERIAL 0
+#define AT91_USART_MODE_SPI 1
+
+#endif /* __DT_BINDINGS_AT91_USART_H__ */
diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h
index 63948170c7b2..31b3f87dde9a 100644
--- a/include/dt-bindings/reset/imx7-reset.h
+++ b/include/dt-bindings/reset/imx7-reset.h
@@ -56,7 +56,9 @@
#define IMX7_RESET_DDRC_PRST 23
#define IMX7_RESET_DDRC_CORE_RST 24
-#define IMX7_RESET_NUM 25
+#define IMX7_RESET_PCIE_CTRL_APPS_TURNOFF 25
+
+#define IMX7_RESET_NUM 26
#endif
diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h
index 0646500bca69..642e4e7f4084 100644
--- a/include/dt-bindings/thermal/thermal_exynos.h
+++ b/include/dt-bindings/thermal/thermal_exynos.h
@@ -1,19 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
*
* Copyright (C) 2014 Samsung Electronics
* Lukasz Majewski <l.majewski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef _EXYNOS_THERMAL_TMU_DT_H
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
index 7b7a92fefa0a..985f2bbd4d24 100644
--- a/include/dt-bindings/usb/pd.h
+++ b/include/dt-bindings/usb/pd.h
@@ -59,4 +59,30 @@
(PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+#define APDO_TYPE_PPS 0
+
+#define PDO_APDO_TYPE_SHIFT 28 /* Only valid value currently is 0x0 - PPS */
+#define PDO_APDO_TYPE_MASK 0x3
+
+#define PDO_APDO_TYPE(t) ((t) << PDO_APDO_TYPE_SHIFT)
+
+#define PDO_PPS_APDO_MAX_VOLT_SHIFT 17 /* 100mV units */
+#define PDO_PPS_APDO_MIN_VOLT_SHIFT 8 /* 100mV units */
+#define PDO_PPS_APDO_MAX_CURR_SHIFT 0 /* 50mA units */
+
+#define PDO_PPS_APDO_VOLT_MASK 0xff
+#define PDO_PPS_APDO_CURR_MASK 0x7f
+
+#define PDO_PPS_APDO_MIN_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MIN_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_VOLT(mv) \
+ ((((mv) / 100) & PDO_PPS_APDO_VOLT_MASK) << PDO_PPS_APDO_MAX_VOLT_SHIFT)
+#define PDO_PPS_APDO_MAX_CURR(ma) \
+ ((((ma) / 50) & PDO_PPS_APDO_CURR_MASK) << PDO_PPS_APDO_MAX_CURR_SHIFT)
+
+#define PDO_PPS_APDO(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_APDO) | PDO_APDO_TYPE(APDO_TYPE_PPS) | \
+ PDO_PPS_APDO_MIN_VOLT(min_mv) | PDO_PPS_APDO_MAX_VOLT(max_mv) | \
+ PDO_PPS_APDO_MAX_CURR(max_ma))
+
#endif /* __DT_POWER_DELIVERY_H */
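
PDO_PPS_APDO() packs a programmable power supply object: voltages in 100 mV units at bits 15:8 (min) and 24:17 (max), current in 50 mA units at bits 6:0, plus the APDO type bits on top. A worked example (values illustrative):

    #include <dt-bindings/usb/pd.h>

    /* Hedged sketch: a 3.3 V - 11 V, 3 A PPS source capability.
     *   min volt: 3300 / 100 = 33   -> bits 15:8
     *   max volt: 11000 / 100 = 110 -> bits 24:17
     *   max curr: 3000 / 50  = 60   -> bits 6:0  */
    static const unsigned int pps_apdo = PDO_PPS_APDO(3300, 11000, 3000);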
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index af4628979d13..ed80f147bd50 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -1072,6 +1072,15 @@ static inline int acpi_node_get_property_reference(
NR_FWNODE_REFERENCE_ARGS, args);
}
+static inline bool acpi_dev_has_props(const struct acpi_device *adev)
+{
+ return !list_empty(&adev->data.properties);
+}
+
+struct acpi_device_properties *
+acpi_data_add_props(struct acpi_device_data *data, const guid_t *guid,
+ const union acpi_object *properties);
+
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
void **valptr);
int acpi_dev_prop_read_single(struct acpi_device *adev,
diff --git a/include/linux/adxl.h b/include/linux/adxl.h
new file mode 100644
index 000000000000..2a629acb4c3f
--- /dev/null
+++ b/include/linux/adxl.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Address translation interface via ACPI DSM.
+ * Copyright (C) 2018 Intel Corporation
+ */
+
+#ifndef _LINUX_ADXL_H
+#define _LINUX_ADXL_H
+
+const char * const *adxl_get_component_names(void);
+int adxl_decode(u64 addr, u64 component_values[]);
+
+#endif /* _LINUX_ADXL_H */
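
The adxl interface pairs a list of component names with a decode call that fills one value per name. A hedged consumer sketch (the array bound and NULL termination are assumptions about the backing driver, not guarantees in this header):

    #include <linux/adxl.h>

    static void dump_decode(u64 addr)
    {
            const char * const *names = adxl_get_component_names();
            static u64 values[16]; /* illustrative upper bound */
            int i;

            if (!names || adxl_decode(addr, values))
                    return;
            for (i = 0; i < 16 && names[i]; i++)
                    pr_info("adxl: %s = %llu\n", names[i],
                            (unsigned long long)values[i]);
    }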
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 9578c7ab1eb6..093a818c5b68 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -283,8 +283,6 @@ enum req_opf {
REQ_OP_FLUSH = 2,
/* discard sectors */
REQ_OP_DISCARD = 3,
- /* get zone information */
- REQ_OP_ZONE_REPORT = 4,
/* securely erase sectors */
REQ_OP_SECURE_ERASE = 5,
/* reset a zone write pointer */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 61207560e826..4293dc1cd160 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -396,16 +396,13 @@ struct queue_limits {
#ifdef CONFIG_BLK_DEV_ZONED
-struct blk_zone_report_hdr {
- unsigned int nr_zones;
- u8 padding[60];
-};
-
+extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
sector_t sector, struct blk_zone *zones,
unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
sector_t nr_sectors, gfp_t gfp_mask);
+extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
@@ -414,6 +411,16 @@ extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
+{
+ return 0;
+}
+
+static inline int blk_revalidate_disk_zones(struct gendisk *disk)
+{
+ return 0;
+}
+
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
fmode_t mode, unsigned int cmd,
unsigned long arg)
@@ -704,6 +711,7 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
+#define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -736,6 +744,8 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_pci_p2pdma(q) \
+ test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -803,6 +813,11 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
}
#ifdef CONFIG_BLK_DEV_ZONED
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return blk_queue_is_zoned(q) ? q->nr_zones : 0;
+}
+
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -818,6 +833,11 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
+#else /* CONFIG_BLK_DEV_ZONED */
+static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
@@ -1849,6 +1869,9 @@ struct block_device_operations {
int (*getgeo)(struct block_device *, struct hd_geometry *);
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+ int (*report_zones)(struct gendisk *, sector_t sector,
+ struct blk_zone *zones, unsigned int *nr_zones,
+ gfp_t gfp_mask);
struct module *owner;
const struct pr_ops *pr_ops;
};
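
Zone reporting moves from the REQ_OP_ZONE_REPORT request path to a ->report_zones() method on block_device_operations, with blkdev_nr_zones() and blk_revalidate_disk_zones() built on top. A small caller sketch sizing a query up front (chunked reporting omitted for brevity):

    #include <linux/blkdev.h>

    static int zone_summary(struct block_device *bdev)
    {
            unsigned int nr = blkdev_nr_zones(bdev);

            if (!nr) /* non-zoned device, or the CONFIG_BLK_DEV_ZONED=n stub */
                    return -ENOTTY;
            pr_info("%u zones of %u sectors\n", nr,
                    blk_queue_zone_sectors(bdev_get_queue(bdev)));
            return 0;
    }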
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b8bcbdeb2eac..b622d6608605 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -569,20 +569,11 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
int ancestor_level)
{
- struct cgroup *ptr;
-
if (cgrp->level < ancestor_level)
return NULL;
-
- for (ptr = cgrp;
- ptr && ptr->level > ancestor_level;
- ptr = cgroup_parent(ptr))
- ;
-
- if (ptr && ptr->level == ancestor_level)
- return ptr;
-
- return NULL;
+ while (cgrp && cgrp->level > ancestor_level)
+ cgrp = cgroup_parent(cgrp);
+ return cgrp;
}
/**
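
The rewritten walk relies on two invariants: cgroup_parent() returns NULL above the root, and each parent's level is exactly one less, so the loop can only stop at the requested level or at NULL. A user-space model of the same logic:

    #include <stddef.h>

    struct cg { struct cg *parent; int level; };

    /* Hedged model, not kernel code: with levels root=0, a=1, b=2,
     * ancestor(b, 1) steps b->a and stops since a->level == 1. */
    static struct cg *ancestor(struct cg *cgrp, int ancestor_level)
    {
            if (cgrp->level < ancestor_level)
                    return NULL;
            while (cgrp && cgrp->level > ancestor_level)
                    cgrp = cgrp->parent;
            return cgrp;
    }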
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6e6b86f9046d..b21db536fd52 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -262,9 +262,6 @@ extern int clocksource_i8253_init(void);
#define TIMER_OF_DECLARE(name, compat, fn) \
OF_DECLARE_1_RET(timer, name, compat, fn)
-#define CLOCKSOURCE_OF_DECLARE(name, compat, fn) \
- TIMER_OF_DECLARE(name, compat, fn)
-
#ifdef CONFIG_TIMER_PROBE
extern void timer_probe(void);
#else
diff --git a/include/linux/compat.h b/include/linux/compat.h
index e75b926bc5df..d30e4dbd4be2 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -7,7 +7,7 @@
*/
#include <linux/types.h>
-#include <linux/compat_time.h>
+#include <linux/time.h>
#include <linux/stat.h>
#include <linux/param.h> /* for HZ */
@@ -113,19 +113,12 @@ typedef struct compat_sigaltstack {
typedef __compat_uid32_t compat_uid_t;
typedef __compat_gid32_t compat_gid_t;
-typedef compat_ulong_t compat_aio_context_t;
-
struct compat_sel_arg_struct;
struct rusage;
-struct compat_utimbuf {
- compat_time_t actime;
- compat_time_t modtime;
-};
-
struct compat_itimerval {
- struct compat_timeval it_interval;
- struct compat_timeval it_value;
+ struct old_timeval32 it_interval;
+ struct old_timeval32 it_value;
};
struct itimerval;
@@ -149,7 +142,7 @@ struct compat_timex {
compat_long_t constant;
compat_long_t precision;
compat_long_t tolerance;
- struct compat_timeval time;
+ struct old_timeval32 time;
compat_long_t tick;
compat_long_t ppsfreq;
compat_long_t jitter;
@@ -310,8 +303,8 @@ struct compat_rlimit {
};
struct compat_rusage {
- struct compat_timeval ru_utime;
- struct compat_timeval ru_stime;
+ struct old_timeval32 ru_utime;
+ struct old_timeval32 ru_stime;
compat_long_t ru_maxrss;
compat_long_t ru_ixrss;
compat_long_t ru_idrss;
@@ -460,8 +453,8 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginf
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-static inline int compat_timeval_compare(struct compat_timeval *lhs,
- struct compat_timeval *rhs)
+static inline int old_timeval32_compare(struct old_timeval32 *lhs,
+ struct old_timeval32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -470,8 +463,8 @@ static inline int compat_timeval_compare(struct compat_timeval *lhs,
return lhs->tv_usec - rhs->tv_usec;
}
-static inline int compat_timespec_compare(struct compat_timespec *lhs,
- struct compat_timespec *rhs)
+static inline int old_timespec32_compare(struct old_timespec32 *lhs,
+ struct old_timespec32 *rhs)
{
if (lhs->tv_sec < rhs->tv_sec)
return -1;
@@ -555,12 +548,12 @@ asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id,
compat_long_t min_nr,
compat_long_t nr,
struct io_event __user *events,
- struct compat_timespec __user *timeout,
+ struct old_timespec32 __user *timeout,
const struct __compat_aio_sigset __user *usig);
/* fs/cookies.c */
@@ -645,11 +638,11 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
void __user *sig);
asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
unsigned int nfds,
- struct compat_timespec __user *tsp,
+ struct old_timespec32 __user *tsp,
const compat_sigset_t __user *sigmask,
compat_size_t sigsetsize);
@@ -674,15 +667,15 @@ asmlinkage long compat_sys_newfstat(unsigned int fd,
/* fs/timerfd.c */
asmlinkage long compat_sys_timerfd_gettime(int ufd,
- struct compat_itimerspec __user *otmr);
+ struct old_itimerspec32 __user *otmr);
asmlinkage long compat_sys_timerfd_settime(int ufd, int flags,
- const struct compat_itimerspec __user *utmr,
- struct compat_itimerspec __user *otmr);
+ const struct old_itimerspec32 __user *utmr,
+ struct old_itimerspec32 __user *otmr);
/* fs/utimes.c */
asmlinkage long compat_sys_utimensat(unsigned int dfd,
const char __user *filename,
- struct compat_timespec __user *t,
+ struct old_timespec32 __user *t,
int flags);
/* kernel/exit.c */
@@ -694,7 +687,7 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t,
/* kernel/futex.c */
asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
- struct compat_timespec __user *utime, u32 __user *uaddr2,
+ struct old_timespec32 __user *utime, u32 __user *uaddr2,
u32 val3);
asmlinkage long
compat_sys_set_robust_list(struct compat_robust_list_head __user *head,
@@ -704,8 +697,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
compat_size_t __user *len_ptr);
/* kernel/hrtimer.c */
-asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
@@ -725,19 +718,19 @@ asmlinkage long compat_sys_timer_create(clockid_t which_clock,
struct compat_sigevent __user *timer_event_spec,
timer_t __user *created_timer_id);
asmlinkage long compat_sys_timer_gettime(timer_t timer_id,
- struct compat_itimerspec __user *setting);
+ struct old_itimerspec32 __user *setting);
asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags,
- struct compat_itimerspec __user *new,
- struct compat_itimerspec __user *old);
+ struct old_itimerspec32 __user *new,
+ struct old_itimerspec32 __user *old);
asmlinkage long compat_sys_clock_settime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_gettime(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_getres(clockid_t which_clock,
- struct compat_timespec __user *tp);
+ struct old_timespec32 __user *tp);
asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
- struct compat_timespec __user *rqtp,
- struct compat_timespec __user *rmtp);
+ struct old_timespec32 __user *rqtp,
+ struct old_timespec32 __user *rmtp);
/* kernel/ptrace.c */
asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
@@ -751,7 +744,7 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid,
unsigned int len,
compat_ulong_t __user *user_mask_ptr);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
- struct compat_timespec __user *interval);
+ struct old_timespec32 __user *interval);
/* kernel/signal.c */
asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
@@ -771,7 +764,7 @@ asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset,
compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese,
struct compat_siginfo __user *uinfo,
- struct compat_timespec __user *uts, compat_size_t sigsetsize);
+ struct old_timespec32 __user *uts, compat_size_t sigsetsize);
asmlinkage long compat_sys_rt_sigqueueinfo(compat_pid_t pid, int sig,
struct compat_siginfo __user *uinfo);
/* No generic prototype for rt_sigreturn */
@@ -785,9 +778,9 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru);
/* kernel/time.c */
-asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
-asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv,
struct timezone __user *tz);
asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp);
@@ -801,11 +794,11 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
const char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
char __user *u_msg_ptr,
compat_size_t msg_len, unsigned int __user *u_msg_prio,
- const struct compat_timespec __user *u_abs_timeout);
+ const struct old_timespec32 __user *u_abs_timeout);
asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
const struct compat_sigevent __user *u_notification);
asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
@@ -822,7 +815,7 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
/* ipc/sem.c */
asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
- unsigned nsems, const struct compat_timespec __user *timeout);
+ unsigned nsems, const struct old_timespec32 __user *timeout);
/* ipc/shm.c */
asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr);
@@ -879,7 +872,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid,
struct compat_siginfo __user *uinfo);
asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned vlen, unsigned int flags,
- struct compat_timespec __user *timeout);
+ struct old_timespec32 __user *timeout);
asmlinkage long compat_sys_wait4(compat_pid_t pid,
compat_uint_t __user *stat_addr, int options,
struct compat_rusage __user *ru);
@@ -931,7 +924,7 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd,
asmlinkage long compat_sys_open(const char __user *filename, int flags,
umode_t mode);
asmlinkage long compat_sys_utimes(const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
/* __ARCH_WANT_SYSCALL_NO_FLAGS */
asmlinkage long compat_sys_signalfd(int ufd,
@@ -945,15 +938,15 @@ asmlinkage long compat_sys_newlstat(const char __user *filename,
struct compat_stat __user *statbuf);
/* __ARCH_WANT_SYSCALL_DEPRECATED */
-asmlinkage long compat_sys_time(compat_time_t __user *tloc);
+asmlinkage long compat_sys_time(old_time32_t __user *tloc);
asmlinkage long compat_sys_utime(const char __user *filename,
- struct compat_utimbuf __user *t);
+ struct old_utimbuf32 __user *t);
asmlinkage long compat_sys_futimesat(unsigned int dfd,
const char __user *filename,
- struct compat_timeval __user *t);
+ struct old_timeval32 __user *t);
asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp, compat_ulong_t __user *exp,
- struct compat_timeval __user *tvp);
+ struct old_timeval32 __user *tvp);
asmlinkage long compat_sys_ustat(unsigned dev, struct compat_ustat __user *u32);
asmlinkage long compat_sys_recv(int fd, void __user *buf, compat_size_t len,
unsigned flags);
@@ -986,7 +979,7 @@ asmlinkage long compat_sys_sigaction(int sig,
#endif
/* obsolete: kernel/time/time.c */
-asmlinkage long compat_sys_stime(compat_time_t __user *tptr);
+asmlinkage long compat_sys_stime(old_time32_t __user *tptr);
/* obsolete: net/socket.c */
asmlinkage long compat_sys_socketcall(int call, u32 __user *args);
@@ -1005,15 +998,15 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
#endif
/**
- * ns_to_compat_timeval - Compat version of ns_to_timeval
+ * ns_to_old_timeval32 - Compat version of ns_to_timeval
* @nsec: the nanoseconds value to be converted
*
- * Returns the compat_timeval representation of the nsec parameter.
+ * Returns the old_timeval32 representation of the nsec parameter.
*/
-static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
+static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
struct timeval tv;
- struct compat_timeval ctv;
+ struct old_timeval32 ctv;
tv = ns_to_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
deleted file mode 100644
index e70bfd1d2c3f..000000000000
--- a/include/linux/compat_time.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_COMPAT_TIME_H
-#define _LINUX_COMPAT_TIME_H
-
-#include <linux/types.h>
-#include <linux/time64.h>
-
-typedef s32 compat_time_t;
-
-struct compat_timespec {
- compat_time_t tv_sec;
- s32 tv_nsec;
-};
-
-struct compat_timeval {
- compat_time_t tv_sec;
- s32 tv_usec;
-};
-
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
-extern int compat_get_timespec64(struct timespec64 *, const void __user *);
-extern int compat_put_timespec64(const struct timespec64 *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
-
-#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index db192becfec4..97cfe29b3f0a 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -198,7 +198,6 @@ struct ftrace_likely_data {
*/
#define __pure __attribute__((pure))
#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
#define __printf(a, b) __attribute__((format(printf, a, b)))
#define __scanf(a, b) __attribute__((format(scanf, a, b)))
#define __maybe_unused __attribute__((unused))
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index d828a6efe0b1..46c67a764877 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -94,20 +94,15 @@ union coresight_dev_subtype {
* @cpu: the CPU a source belongs to. Only applicable for ETM/PTMs.
* @name: name of the component as shown under sysfs.
* @nr_inport: number of input ports for this component.
- * @outports: list of remote endpoint port number.
- * @child_names:name of all child components connected to this device.
- * @child_ports:child component port number the current component is
- connected to.
* @nr_outport: number of output ports for this component.
+ * @conns: Array of nr_outport connections from this component
*/
struct coresight_platform_data {
int cpu;
const char *name;
int nr_inport;
- int *outports;
- const char **child_names;
- int *child_ports;
int nr_outport;
+ struct coresight_connection *conns;
};
/**
@@ -190,23 +185,15 @@ struct coresight_device {
* @disable: disables the sink.
* @alloc_buffer: initialises perf's ring buffer for trace collection.
 * @free_buffer: release memory allocated in @alloc_buffer.
- * @set_buffer: initialises buffer mechanic before a trace session.
- * @reset_buffer: finalises buffer mechanic after a trace session.
* @update_buffer: update buffer pointers after a trace session.
*/
struct coresight_ops_sink {
- int (*enable)(struct coresight_device *csdev, u32 mode);
+ int (*enable)(struct coresight_device *csdev, u32 mode, void *data);
void (*disable)(struct coresight_device *csdev);
void *(*alloc_buffer)(struct coresight_device *csdev, int cpu,
void **pages, int nr_pages, bool overwrite);
void (*free_buffer)(void *config);
- int (*set_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- unsigned long (*reset_buffer)(struct coresight_device *csdev,
- struct perf_output_handle *handle,
- void *sink_config);
- void (*update_buffer)(struct coresight_device *csdev,
+ unsigned long (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
};
@@ -270,6 +257,13 @@ extern int coresight_enable(struct coresight_device *csdev);
extern void coresight_disable(struct coresight_device *csdev);
extern int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value);
+
+extern int coresight_claim_device(void __iomem *base);
+extern int coresight_claim_device_unlocked(void __iomem *base);
+
+extern void coresight_disclaim_device(void __iomem *base);
+extern void coresight_disclaim_device_unlocked(void __iomem *base);
+
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -279,6 +273,19 @@ coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
static inline void coresight_disable(struct coresight_device *csdev) {}
static inline int coresight_timeout(void __iomem *addr, u32 offset,
int position, int value) { return 1; }
+static inline int coresight_claim_device_unlocked(void __iomem *base)
+{
+ return -EINVAL;
+}
+
+static inline int coresight_claim_device(void __iomem *base)
+{
+ return -EINVAL;
+}
+
+static inline void coresight_disclaim_device(void __iomem *base) {}
+static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+
#endif
#ifdef CONFIG_OF
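
A minimal sketch of the claim/use/disclaim handshake the new helpers provide; struct example_drvdata and its base field are illustrative assumptions, not part of the patch:

	static int example_enable_hw(struct example_drvdata *drvdata)
	{
		int ret;

		/* Fails if an external agent (e.g. firmware) already owns it */
		ret = coresight_claim_device(drvdata->base);
		if (ret)
			return ret;

		/* ... program and start the component ... */
		return 0;
	}

	static void example_disable_hw(struct example_drvdata *drvdata)
	{
		/* ... stop the component ... */
		coresight_disclaim_device(drvdata->base);
	}
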
diff --git a/include/linux/cpufeature.h b/include/linux/cpufeature.h
index 986c06c88d81..84d3c81b5978 100644
--- a/include/linux/cpufeature.h
+++ b/include/linux/cpufeature.h
@@ -45,7 +45,7 @@
* 'asm/cpufeature.h' of your favorite architecture.
*/
#define module_cpu_feature_match(x, __initfunc) \
-static struct cpu_feature const cpu_feature_match_ ## x[] = \
+static struct cpu_feature const __maybe_unused cpu_feature_match_ ## x[] = \
{ { .feature = cpu_feature(x) }, { } }; \
MODULE_DEVICE_TABLE(cpu, cpu_feature_match_ ## x); \
\
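
For context, a hedged sketch of how module_cpu_feature_match() is typically used; the AES feature and the init function are illustrative:

	#include <linux/module.h>
	#include <linux/cpufeature.h>

	static int __init demo_init(void)
	{
		return 0;	/* register the feature-dependent driver here */
	}
	module_cpu_feature_match(AES, demo_init);
	MODULE_LICENSE("GPL");
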
diff --git a/include/linux/crc-t10dif.h b/include/linux/crc-t10dif.h
index 1fe0cfcdea30..6bb0c0bf357b 100644
--- a/include/linux/crc-t10dif.h
+++ b/include/linux/crc-t10dif.h
@@ -6,6 +6,7 @@
#define CRC_T10DIF_DIGEST_SIZE 2
#define CRC_T10DIF_BLOCK_SIZE 1
+#define CRC_T10DIF_STRING "crct10dif"
extern __u16 crc_t10dif_generic(__u16 crc, const unsigned char *buffer,
size_t len);
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index e8839d3a7559..3634ad6fe202 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -454,6 +454,33 @@ struct compress_alg {
* @cra_refcnt: internally used
* @cra_destroy: internally used
*
+ * All of the following statistics are for this crypto_alg
+ * @encrypt_cnt: number of encrypt requests
+ * @decrypt_cnt: number of decrypt requests
+ * @compress_cnt: number of compress requests
+ * @decompress_cnt: number of decompress requests
+ * @generate_cnt: number of RNG generate requests
+ * @seed_cnt: number of times the rng was seeded
+ * @hash_cnt: number of hash requests
+ * @sign_cnt: number of sign requests
+ * @setsecret_cnt: number of setsecret operations
+ * @generate_public_key_cnt: number of generate_public_key operations
+ * @verify_cnt: number of verify operations
+ * @compute_shared_secret_cnt: number of compute_shared_secret operations
+ * @encrypt_tlen: total data size handled by encrypt requests
+ * @decrypt_tlen: total data size handled by decrypt requests
+ * @compress_tlen: total data size handled by compress requests
+ * @decompress_tlen: total data size handled by decompress requests
+ * @generate_tlen: total data size of generated data by the RNG
+ * @hash_tlen: total data size hashed
+ * @akcipher_err_cnt: number of errors for akcipher requests
+ * @cipher_err_cnt: number of errors for cipher requests
+ * @compress_err_cnt: number of errors for compress requests
+ * @aead_err_cnt: number of errors for aead requests
+ * @hash_err_cnt: number of errors for hash requests
+ * @rng_err_cnt: number of errors for rng requests
+ * @kpp_err_cnt: number of errors for kpp requests
+ *
* The struct crypto_alg describes a generic Crypto API algorithm and is common
* for all of the transformations. Any variable not documented here shall not
* be used by a cipher implementation as it is internal to the Crypto API.
@@ -487,6 +514,45 @@ struct crypto_alg {
void (*cra_destroy)(struct crypto_alg *alg);
struct module *cra_module;
+
+ union {
+ atomic_t encrypt_cnt;
+ atomic_t compress_cnt;
+ atomic_t generate_cnt;
+ atomic_t hash_cnt;
+ atomic_t setsecret_cnt;
+ };
+ union {
+ atomic64_t encrypt_tlen;
+ atomic64_t compress_tlen;
+ atomic64_t generate_tlen;
+ atomic64_t hash_tlen;
+ };
+ union {
+ atomic_t akcipher_err_cnt;
+ atomic_t cipher_err_cnt;
+ atomic_t compress_err_cnt;
+ atomic_t aead_err_cnt;
+ atomic_t hash_err_cnt;
+ atomic_t rng_err_cnt;
+ atomic_t kpp_err_cnt;
+ };
+ union {
+ atomic_t decrypt_cnt;
+ atomic_t decompress_cnt;
+ atomic_t seed_cnt;
+ atomic_t generate_public_key_cnt;
+ };
+ union {
+ atomic64_t decrypt_tlen;
+ atomic64_t decompress_tlen;
+ };
+ union {
+ atomic_t verify_cnt;
+ atomic_t compute_shared_secret_cnt;
+ };
+ atomic_t sign_cnt;
+
} CRYPTO_MINALIGN_ATTR;
/*
@@ -907,6 +973,38 @@ static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
return __crypto_ablkcipher_cast(req->base.tfm);
}
+static inline void crypto_stat_ablkcipher_encrypt(struct ablkcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&crt->base->base.__crt_alg->encrypt_cnt);
+ atomic64_add(req->nbytes, &crt->base->base.__crt_alg->encrypt_tlen);
+ }
+#endif
+}
+
+static inline void crypto_stat_ablkcipher_decrypt(struct ablkcipher_request *req,
+ int ret)
+{
+#ifdef CONFIG_CRYPTO_STATS
+ struct ablkcipher_tfm *crt =
+ crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
+
+ if (ret && ret != -EINPROGRESS && ret != -EBUSY) {
+ atomic_inc(&crt->base->base.__crt_alg->cipher_err_cnt);
+ } else {
+ atomic_inc(&crt->base->base.__crt_alg->decrypt_cnt);
+ atomic64_add(req->nbytes, &crt->base->base.__crt_alg->decrypt_tlen);
+ }
+#endif
+}
+
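
A hypothetical helper mirroring the classification used above: -EINPROGRESS and -EBUSY mean the request was queued for asynchronous completion, so only other non-zero codes count as errors.

	static inline bool demo_crypto_req_failed(int ret)
	{
		/* queued async requests are not failures */
		return ret && ret != -EINPROGRESS && ret != -EBUSY;
	}
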
/**
* crypto_ablkcipher_encrypt() - encrypt plaintext
* @req: reference to the ablkcipher_request handle that holds all information
@@ -922,7 +1020,11 @@ static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->encrypt(req);
+ int ret;
+
+ ret = crt->encrypt(req);
+ crypto_stat_ablkcipher_encrypt(req, ret);
+ return ret;
}
/**
@@ -940,7 +1042,11 @@ static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
struct ablkcipher_tfm *crt =
crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- return crt->decrypt(req);
+ int ret;
+
+ ret = crt->decrypt(req);
+ crypto_stat_ablkcipher_decrypt(req, ret);
+ return ret;
}
/**
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 056867f09a01..45bfe9d61271 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
+#include <linux/rtc.h>
#include <uapi/linux/cuda.h>
@@ -16,4 +17,7 @@ extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
+extern time64_t cuda_get_time(void);
+extern int cuda_set_rtc_time(struct rtc_time *tm);
+
#endif /* _LINUX_CUDA_H */
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 6fb0808e87c8..e528baebad69 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -26,9 +26,8 @@ enum dm_queue_mode {
DM_TYPE_NONE = 0,
DM_TYPE_BIO_BASED = 1,
DM_TYPE_REQUEST_BASED = 2,
- DM_TYPE_MQ_REQUEST_BASED = 3,
- DM_TYPE_DAX_BIO_BASED = 4,
- DM_TYPE_NVME_BIO_BASED = 5,
+ DM_TYPE_DAX_BIO_BASED = 3,
+ DM_TYPE_NVME_BIO_BASED = 4,
};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -92,6 +91,11 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
+ struct blk_zone *zones,
+ unsigned int *nr_zones,
+ gfp_t gfp_mask);
+
/*
* These iteration functions are typically used to check (and combine)
* properties of underlying devices.
@@ -180,6 +184,9 @@ struct target_type {
dm_status_fn status;
dm_message_fn message;
dm_prepare_ioctl_fn prepare_ioctl;
+#ifdef CONFIG_BLK_DEV_ZONED
+ dm_report_zones_fn report_zones;
+#endif
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
@@ -420,8 +427,8 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, struct bio *bio,
- sector_t start);
+void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
struct queue_limits *dm_get_queue_limits(struct mapped_device *md);
@@ -490,6 +497,7 @@ sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
+const char *dm_table_device_name(struct dm_table *t);
/*
* Trigger an event.
diff --git a/include/linux/device.h b/include/linux/device.h
index 983506789402..1b25c7a43f4c 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -55,6 +55,8 @@ struct bus_attribute {
struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
#define BUS_ATTR_RO(_name) \
struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
extern int __must_check bus_create_file(struct bus_type *,
struct bus_attribute *);
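
A brief sketch of the new write-only macro in use; the attribute name and handler body are invented for illustration:

	static ssize_t rescan_store(struct bus_type *bus, const char *buf,
				    size_t count)
	{
		/* parse @buf and trigger the action */
		return count;
	}
	static BUS_ATTR_WO(rescan);
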
@@ -692,8 +694,10 @@ static inline void *devm_kcalloc(struct device *dev,
{
return devm_kmalloc_array(dev, n, size, flags | __GFP_ZERO);
}
-extern void devm_kfree(struct device *dev, void *p);
+extern void devm_kfree(struct device *dev, const void *p);
extern char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) __malloc;
+extern const char *devm_kstrdup_const(struct device *dev,
+ const char *s, gfp_t gfp);
extern void *devm_kmemdup(struct device *dev, const void *src, size_t len,
gfp_t gfp);
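
A minimal sketch of devm_kstrdup_const(); because devm_kfree() now accepts a const pointer, the duplicated (possibly .rodata-backed) string can be released directly. Variable names are illustrative:

	const char *label;

	label = devm_kstrdup_const(dev, template_name, GFP_KERNEL);
	if (!label)
		return -ENOMEM;
	/* ... */
	devm_kfree(dev, label);
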
@@ -774,6 +778,30 @@ void device_connection_add(struct device_connection *con);
void device_connection_remove(struct device_connection *con);
/**
+ * device_connections_add - Add multiple device connections at once
+ * @cons: Zero terminated array of device connection descriptors
+ */
+static inline void device_connections_add(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_add(c);
+}
+
+/**
+ * device_connections_remove - Remove multiple device connections at once
+ * @cons: Zero terminated array of device connection descriptors
+ */
+static inline void device_connections_remove(struct device_connection *cons)
+{
+ struct device_connection *c;
+
+ for (c = cons; c->endpoint[0]; c++)
+ device_connection_remove(c);
+}
+
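
An illustrative zero-terminated array for the two helpers above; the endpoint and id strings are invented. The empty terminator entry is what stops the iteration:

	static struct device_connection demo_connections[] = {
		{
			.endpoint = { "demo-phy", "demo-mux" },
			.id = "typec-switch",
		},
		{ }	/* terminator: endpoint[0] == NULL */
	};

	device_connections_add(demo_connections);
	/* ... on teardown ... */
	device_connections_remove(demo_connections);
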
+/**
* enum device_link_state - Device link states.
* @DL_STATE_NONE: The presence of the drivers is not being tracked.
* @DL_STATE_DORMANT: None of the supplier/consumer drivers is present.
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index fbca184ff5a0..bd73e7a91410 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -5,6 +5,8 @@
#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>
+#define DIRECT_MAPPING_ERROR 0
+
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index b0115e340fbc..b42b80e52cc2 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -58,4 +58,73 @@ enum sprd_dma_int_type {
SPRD_DMA_CFGERR_INT,
};
+/*
+ * struct sprd_dma_linklist - DMA link-list address structure
+ * @virt_addr: link-list virtual address to configure link-list node
+ * @phy_addr: link-list physical address to link DMA transfer
+ *
+ * The Spreadtrum DMA controller supports the link-list mode, which means
+ * slaves can supply several groups of configurations (each configuration
+ * represents one DMA transfer) saved in memory, and the DMA controller will
+ * link these groups of configurations by writing the physical address of
+ * each configuration into the link-list register.
+ *
+ * As shown below, the link-list pointer register points to the physical
+ * address of 'configuration 1', the 'configuration 1' link-list pointer
+ * points to 'configuration 2', and so on.
+ * Once the DMA transfer is triggered, the DMA controller loads
+ * 'configuration 1' into its registers automatically; after the
+ * 'configuration 1' transaction is done, the DMA controller loads
+ * 'configuration 2' automatically, until all DMA transactions are done.
+ *
+ * Note: the last link-list pointer should point back to the physical
+ * address of 'configuration 1', which prevents the DMA controller from
+ * loading an incorrect configuration when the last configuration's
+ * transaction is done.
+ *
+ * DMA controller linklist memory
+ * ====================== -----------------------
+ *| | | configuration 1 |<---
+ *| DMA controller | ------->| | |
+ *| | | | | |
+ *| | | | | |
+ *| | | | | |
+ *| linklist pointer reg |---- ----| linklist pointer | |
+ * ====================== | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 2 | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * ----| linklist pointer | |
+ * | ----------------------- |
+ * | |
+ * | ----------------------- |
+ * | | configuration 3 | |
+ * --->| | |
+ * | | |
+ * | . | |
+ * . |
+ * . |
+ * . |
+ * | . |
+ * | ----------------------- |
+ * | | configuration n | |
+ * --->| | |
+ * | | |
+ * | | |
+ * | | |
+ * | linklist pointer |----
+ * -----------------------
+ *
+ * To support the link-list mode, DMA slaves should allocate one segment of
+ * memory from always-on IRAM or DMA-coherent memory to store these groups of
+ * DMA configurations, and pass the virtual and physical addresses to the DMA
+ * controller.
+ */
+struct sprd_dma_linklist {
+ unsigned long virt_addr;
+ phys_addr_t phy_addr;
+};
+
#endif
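
A minimal sketch of how a slave might fill this structure, assuming the link-list nodes live in DMA-coherent memory; sizes and names are illustrative, and assigning dma_addr_t to phy_addr assumes a 1:1 bus/physical mapping:

	dma_addr_t phy;
	void *virt;
	struct sprd_dma_linklist ll;

	virt = dma_alloc_coherent(dev, node_bytes, &phy, GFP_KERNEL);
	if (!virt)
		return -ENOMEM;

	ll.virt_addr = (unsigned long)virt;
	ll.phy_addr = phy;
	/* hand &ll to the Spreadtrum DMA driver when preparing the transfer */
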
diff --git a/include/linux/edac.h b/include/linux/edac.h
index bffb97828ed6..1d0c9ea8825d 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -17,6 +17,7 @@
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
+#include <linux/numa.h>
#define EDAC_DEVICE_NAME_LEN 31
@@ -451,6 +452,8 @@ struct dimm_info {
u32 nr_pages; /* number of pages on this dimm */
unsigned csrow, cschannel; /* Points to the old API data */
+
+ u16 smbios_handle; /* Handle for SMBIOS type 17 */
};
/**
@@ -670,6 +673,6 @@ struct mem_ctl_info {
/*
* Maximum number of memory controllers in the coherent fabric.
*/
-#define EDAC_MAX_MCS 16
+#define EDAC_MAX_MCS	(2 * MAX_NUMNODES)
#endif
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index b5f2efdd05e0..7a37f4ce9fd2 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -27,10 +27,10 @@ struct compat_elf_prstatus
compat_pid_t pr_ppid;
compat_pid_t pr_pgrp;
compat_pid_t pr_sid;
- struct compat_timeval pr_utime;
- struct compat_timeval pr_stime;
- struct compat_timeval pr_cutime;
- struct compat_timeval pr_cstime;
+ struct old_timeval32 pr_utime;
+ struct old_timeval32 pr_stime;
+ struct old_timeval32 pr_cutime;
+ struct old_timeval32 pr_cstime;
compat_elf_gregset_t pr_reg;
#ifdef CONFIG_BINFMT_ELF_FDPIC
compat_ulong_t pr_exec_fdpic_loadmap;
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index ce550fcf6360..817600a32c93 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -69,4 +69,8 @@ void fpga_bridge_free(struct fpga_bridge *br);
int fpga_bridge_register(struct fpga_bridge *br);
void fpga_bridge_unregister(struct fpga_bridge *br);
+struct fpga_bridge
+*devm_fpga_bridge_create(struct device *dev, const char *name,
+ const struct fpga_bridge_ops *br_ops, void *priv);
+
#endif /* _LINUX_FPGA_BRIDGE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 8ab5df769923..e8ca62b2cb5b 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -198,4 +198,8 @@ void fpga_mgr_free(struct fpga_manager *mgr);
int fpga_mgr_register(struct fpga_manager *mgr);
void fpga_mgr_unregister(struct fpga_manager *mgr);
+struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
+ const struct fpga_manager_ops *mops,
+ void *priv);
+
#endif /*_LINUX_FPGA_MGR_H */
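
A hedged sketch of the new device-managed creation pattern, shown for the manager with made-up ops and name; the same devm_ variant is added for bridges (above) and regions (below). Creation is devres-backed, so only registration needs explicit unwinding:

	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_create(dev, "demo FPGA manager",
				   &demo_mgr_ops, priv);
	if (!mgr)
		return -ENOMEM;

	return fpga_mgr_register(mgr);	/* unregister in .remove() */
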
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 0521b7f577a4..27cb706275db 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -44,4 +44,8 @@ void fpga_region_free(struct fpga_region *region);
int fpga_region_register(struct fpga_region *region);
void fpga_region_unregister(struct fpga_region *region);
+struct fpga_region
+*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+
#endif /* _FPGA_REGION_H */
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index f27cb14088a4..9d3f668df7df 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -351,6 +351,14 @@ int mc_send_command(struct fsl_mc_io *mc_io, struct fsl_mc_command *cmd);
#define dev_is_fsl_mc(_dev) (0)
#endif
+/* Macro to check if a device is a container device */
+#define fsl_mc_is_cont_dev(_dev) (to_fsl_mc_device(_dev)->flags & \
+ FSL_MC_IS_DPRC)
+
+/* Macro to get the container device of a MC device */
+#define fsl_mc_cont_dev(_dev) (fsl_mc_is_cont_dev(_dev) ? \
+ (_dev) : (_dev)->parent)
+
/*
* module_fsl_mc_driver() - Helper macro for drivers that don't do
* anything special in module init/exit. This eliminates a lot of
@@ -405,6 +413,7 @@ extern struct device_type fsl_mc_bus_dpcon_type;
extern struct device_type fsl_mc_bus_dpmcp_type;
extern struct device_type fsl_mc_bus_dpmac_type;
extern struct device_type fsl_mc_bus_dprtc_type;
+extern struct device_type fsl_mc_bus_dpseci_type;
static inline bool is_fsl_mc_bus_dprc(const struct fsl_mc_device *mc_dev)
{
@@ -451,6 +460,11 @@ static inline bool is_fsl_mc_bus_dprtc(const struct fsl_mc_device *mc_dev)
return mc_dev->dev.type == &fsl_mc_bus_dprtc_type;
}
+static inline bool is_fsl_mc_bus_dpseci(const struct fsl_mc_device *mc_dev)
+{
+ return mc_dev->dev.type == &fsl_mc_bus_dpseci_type;
+}
+
/*
* Data Path Buffer Pool (DPBP) API
* Contains initialization APIs and runtime control APIs for DPBP
diff --git a/include/linux/hid.h b/include/linux/hid.h
index d44a78362942..2827b87590d8 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1139,6 +1139,34 @@ static inline u32 hid_report_len(struct hid_report *report)
int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
int interrupt);
+
+/**
+ * struct hid_scroll_counter - Utility class for processing high-resolution
+ * scroll events.
+ * @dev: the input device for which events should be reported.
+ * @microns_per_hi_res_unit: the amount moved by the user's finger for each
+ * high-resolution unit reported by the mouse, in
+ * microns.
+ * @resolution_multiplier: the wheel's resolution in high-resolution mode as a
+ * multiple of its lower resolution. For example, if
+ * moving the wheel by one "notch" would result in a
+ * value of 1 in low-resolution mode but 8 in
+ * high-resolution, the multiplier is 8.
+ * @remainder: counts the number of high-resolution units moved since the last
+ * low-resolution event (REL_WHEEL or REL_HWHEEL) was sent. Should
+ * only be used by class methods.
+ */
+struct hid_scroll_counter {
+ struct input_dev *dev;
+ int microns_per_hi_res_unit;
+ int resolution_multiplier;
+
+ int remainder;
+};
+
+void hid_scroll_counter_handle_scroll(struct hid_scroll_counter *counter,
+ int hi_res_value);
+
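
Illustrative driver-side use of the counter, with invented numbers for a wheel that travels 10 mm per notch and reports 8 hi-res units per notch:

	static struct hid_scroll_counter counter;

	counter.dev = hidinput->input;
	counter.resolution_multiplier = 8;
	counter.microns_per_hi_res_unit = 1250;	/* 10000 um / 8 */

	/* for each high-resolution wheel report: */
	hid_scroll_counter_handle_scroll(&counter, hi_res_value);
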
/* HID quirks API */
unsigned long hid_lookup_quirk(const struct hid_device *hdev);
int hid_quirks_init(char **quirks_param, __u16 bus, int count);
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index bee0827766a3..c0b93e0ff0c0 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -33,7 +33,8 @@
* and max is a multiple of 4 and >= 32 bytes.
* @priv: Private data, for use by the RNG driver.
* @quality: Estimation of true entropy in RNG's bitstream
- * (per mill).
+ * (in bits of entropy per 1024 bits of input;
+ * valid values: 1 to 1024, or 0 for unknown).
*/
struct hwrng {
const char *name;
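
For illustration, a driver claiming roughly 700 bits of entropy per 1024 bits read would set the field like this (all names hypothetical):

	static int demo_rng_read(struct hwrng *rng, void *data, size_t max,
				 bool wait)
	{
		return 0;	/* would fill @data with up to @max bytes */
	}

	static struct hwrng demo_rng = {
		.name	 = "demo",
		.read	 = demo_rng_read,
		.quality = 700,		/* ~700 bits per 1024 bits of input */
	};
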
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index efda23cf32c7..b3e24368930a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -739,8 +739,9 @@ struct vmbus_channel {
u32 ringbuffer_gpadlhandle;
/* Allocated memory for ring buffer */
- void *ringbuffer_pages;
+ struct page *ringbuffer_page;
u32 ringbuffer_pagecount;
+ u32 ringbuffer_send_offset;
struct hv_ring_buffer_info outbound; /* send to parent */
struct hv_ring_buffer_info inbound; /* receive from parent */
@@ -1021,6 +1022,14 @@ struct vmbus_packet_mpb_array {
struct hv_mpb_array range;
} __packed;
+int vmbus_alloc_ring(struct vmbus_channel *channel,
+ u32 send_size, u32 recv_size);
+void vmbus_free_ring(struct vmbus_channel *channel);
+
+int vmbus_connect_ring(struct vmbus_channel *channel,
+ void (*onchannel_callback)(void *context),
+ void *context);
+int vmbus_disconnect_ring(struct vmbus_channel *channel);
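
A minimal sketch of the split allocate/connect sequence these prototypes enable, as an alternative to a single vmbus_open(); the callback and ring sizes are illustrative:

	static void demo_onchannel_callback(void *context)
	{
		/* drain the inbound ring here */
	}

	ret = vmbus_alloc_ring(channel, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
	if (ret)
		return ret;

	ret = vmbus_connect_ring(channel, demo_onchannel_callback, channel);
	if (ret)
		vmbus_free_ring(channel);
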
extern int vmbus_open(struct vmbus_channel *channel,
u32 send_ringbuffersize,
@@ -1125,6 +1134,7 @@ struct hv_device {
u16 device_id;
struct device device;
+ char *driver_override; /* Driver name to force a match */
struct vmbus_channel *channel;
struct kset *channels_kset;
@@ -1442,7 +1452,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf,
const int *srv_version, int srv_vercnt,
int *nego_fw_version, int *nego_srv_version);
-void hv_process_channel_removal(u32 relid);
+void hv_process_channel_removal(struct vmbus_channel *channel);
void vmbus_setevent(struct vmbus_channel *channel);
/*
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 28004d74ae04..b0ae25837361 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -72,6 +72,42 @@
#define DMAR_PEDATA_REG 0xe4 /* Page request event interrupt data register */
#define DMAR_PEADDR_REG 0xe8 /* Page request event interrupt addr register */
#define DMAR_PEUADDR_REG 0xec /* Page request event Upper address register */
+#define DMAR_MTRRCAP_REG 0x100 /* MTRR capability register */
+#define DMAR_MTRRDEF_REG 0x108 /* MTRR default type register */
+#define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
+#define DMAR_MTRR_FIX16K_80000_REG 0x128
+#define DMAR_MTRR_FIX16K_A0000_REG 0x130
+#define DMAR_MTRR_FIX4K_C0000_REG 0x138
+#define DMAR_MTRR_FIX4K_C8000_REG 0x140
+#define DMAR_MTRR_FIX4K_D0000_REG 0x148
+#define DMAR_MTRR_FIX4K_D8000_REG 0x150
+#define DMAR_MTRR_FIX4K_E0000_REG 0x158
+#define DMAR_MTRR_FIX4K_E8000_REG 0x160
+#define DMAR_MTRR_FIX4K_F0000_REG 0x168
+#define DMAR_MTRR_FIX4K_F8000_REG 0x170
+#define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
+#define DMAR_MTRR_PHYSMASK0_REG 0x188
+#define DMAR_MTRR_PHYSBASE1_REG 0x190
+#define DMAR_MTRR_PHYSMASK1_REG 0x198
+#define DMAR_MTRR_PHYSBASE2_REG 0x1a0
+#define DMAR_MTRR_PHYSMASK2_REG 0x1a8
+#define DMAR_MTRR_PHYSBASE3_REG 0x1b0
+#define DMAR_MTRR_PHYSMASK3_REG 0x1b8
+#define DMAR_MTRR_PHYSBASE4_REG 0x1c0
+#define DMAR_MTRR_PHYSMASK4_REG 0x1c8
+#define DMAR_MTRR_PHYSBASE5_REG 0x1d0
+#define DMAR_MTRR_PHYSMASK5_REG 0x1d8
+#define DMAR_MTRR_PHYSBASE6_REG 0x1e0
+#define DMAR_MTRR_PHYSMASK6_REG 0x1e8
+#define DMAR_MTRR_PHYSBASE7_REG 0x1f0
+#define DMAR_MTRR_PHYSMASK7_REG 0x1f8
+#define DMAR_MTRR_PHYSBASE8_REG 0x200
+#define DMAR_MTRR_PHYSMASK8_REG 0x208
+#define DMAR_MTRR_PHYSBASE9_REG 0x210
+#define DMAR_MTRR_PHYSMASK9_REG 0x218
+#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */
+#define DMAR_VCMD_REG 0xe10 /* Virtual command register */
+#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */
#define OFFSET_STRIDE (9)
@@ -389,6 +425,33 @@ struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+ u64 lo;
+ u64 hi;
+};
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+ u64 lo;
+ u64 hi;
+};
+
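
A hedged sketch of a present-bit test over the layout documented above; it mirrors, but is not necessarily identical to, the exported context_present() helper:

	static inline bool demo_context_present(struct context_entry *context)
	{
		return context->lo & 1;	/* bit 0 is the Present bit */
	}
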
struct dmar_domain {
int nid; /* node id */
@@ -558,6 +621,15 @@ extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
#endif
+#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
+void intel_iommu_debugfs_init(void);
+#else
+static inline void intel_iommu_debugfs_init(void) {}
+#endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
+
extern const struct attribute_group *intel_iommu_groups[];
+bool context_present(struct context_entry *context);
+struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
+ u8 devfn, int alloc);
#endif
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index eeceac3376fc..1d6711c28271 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -45,7 +45,7 @@
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
* IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
+ * registered first in a shared interrupt is considered for
* performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 87994c265bf5..a1d28f42cb77 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -124,6 +124,7 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
+ DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
DOMAIN_ATTR_MAX,
};
@@ -181,8 +182,6 @@ struct iommu_resv_region {
* @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
- * @domain_set_windows: Set the number of windows for a domain
- * @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of all possible supported page sizes
*/
@@ -223,10 +222,6 @@ struct iommu_ops {
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
- /* Set the number of windows per domain */
- int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
- /* Get the number of windows per domain */
- u32 (*domain_get_windows)(struct iommu_domain *domain);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
@@ -293,6 +288,7 @@ extern int iommu_attach_device(struct iommu_domain *domain,
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -377,6 +373,8 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/* FSL-MC device grouping function */
+struct iommu_group *fsl_mc_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 928442dda565..0b93bf96693e 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -75,6 +75,7 @@ struct iova_domain {
unsigned long granule; /* pfn granularity for this domain */
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
+ unsigned long max32_alloc_size; /* Size of last failed allocation */
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index 0a83b4379f34..9a1a479a2bf4 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -13,6 +13,12 @@
#include <linux/types.h>
#include <linux/ioport.h>
+#define GICD_INT_DEF_PRI 0xa0
+#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
+ (GICD_INT_DEF_PRI << 16) |\
+ (GICD_INT_DEF_PRI << 8) |\
+ GICD_INT_DEF_PRI)
+
enum gic_type {
GIC_V2,
GIC_V3,
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f29494..071b4cbdf010 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -357,6 +357,8 @@
#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb)
+#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12))
+
#define GITS_BASER_NR_REGS 8
#define GITS_BASER_VALID (1ULL << 63)
@@ -388,6 +390,9 @@
#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
#define GITS_BASER_PHYS_52_to_48(phys) \
(((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12)
+#define GITS_BASER_ADDR_48_to_52(baser) \
+ (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48)
+
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -585,8 +590,10 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
+ bool lpi_enabled;
} __percpu *rdist;
- struct page *prop_page;
+ phys_addr_t prop_table_pa;
+ void *prop_table_va;
u64 flags;
u32 gicd_typer;
bool has_vlpis;
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 6c4aaf04046c..626179077bb0 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -65,11 +65,6 @@
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
-#define GICD_INT_DEF_PRI 0xa0
-#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
- (GICD_INT_DEF_PRI << 16) |\
- (GICD_INT_DEF_PRI << 8) |\
- GICD_INT_DEF_PRI)
#define GICD_IIDR_IMPLEMENTER_SHIFT 0
#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index dccfa65aee96..068aa46f0d55 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -75,6 +75,7 @@ struct irq_fwspec {
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
DOMAIN_BUS_WIRED,
+ DOMAIN_BUS_GENERIC_MSI,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f7ee52..5b36b1287a5a 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -477,10 +477,11 @@ static inline void kernfs_init(void) { }
* @buf: buffer to copy @kn's name into
* @buflen: size of @buf
*
- * Builds and returns the full path of @kn in @buf of @buflen bytes. The
- * path is built from the end of @buf so the returned pointer usually
- * doesn't match @buf. If @buf isn't long enough, @buf is nul terminated
- * and %NULL is returned.
+ * If @kn is NULL, the result will be "(null)".
+ *
+ * Returns the length of the full path. If the full length is equal to or
+ * greater than @buflen, @buf contains the truncated path with the trailing
+ * '\0'. On error, -errno is returned.
*/
static inline int kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
{
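
A sketch of the new return convention, which behaves like snprintf(): the value is the full path length, so a result of @buflen or more signals truncation.

	char buf[PATH_MAX];
	int ret = kernfs_path(kn, buf, sizeof(buf));

	if (ret < 0)
		return ret;			/* -errno */
	if (ret >= (int)sizeof(buf))
		pr_warn("kernfs path truncated: %s\n", buf);
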
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index c6ac1fe7ec68..edb0f0c30904 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,6 +2,7 @@
#ifndef LIBFDT_ENV_H
#define LIBFDT_ENV_H
+#include <linux/kernel.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index f91f9e763557..0ac69ddf5fc4 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -53,11 +53,16 @@ struct vmem_altmap {
* wakeup event whenever a page is unpinned and becomes idle. This
* wakeup is used to coordinate physical address space management (ex:
* fs truncate/hole punch) vs pinned pages (ex: device dma).
+ *
+ * MEMORY_DEVICE_PCI_P2PDMA:
+ * Device memory residing in a PCI BAR intended for use with Peer-to-Peer
+ * transactions.
*/
enum memory_type {
MEMORY_DEVICE_PRIVATE = 1,
MEMORY_DEVICE_PUBLIC,
MEMORY_DEVICE_FS_DAX,
+ MEMORY_DEVICE_PCI_P2PDMA,
};
/*
@@ -120,6 +125,7 @@ struct dev_pagemap {
struct device *dev;
void *data;
enum memory_type type;
+ u64 pci_p2pdma_bus_offset;
};
#ifdef CONFIG_ZONE_DEVICE
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index 6e1ab9bead28..5fd0e429f472 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2132,6 +2132,7 @@ struct ec_response_get_next_event_v1 {
/* Switches */
#define EC_MKBP_LID_OPEN 0
#define EC_MKBP_TABLET_MODE 1
+#define EC_MKBP_BASE_ATTACHED 2
/*****************************************************************************/
/* Temperature sensor commands */
@@ -3102,6 +3103,16 @@ struct ec_params_usb_pd_info_request {
uint8_t port;
} __packed;
+/*
+ * This command returns the number of USB PD charge ports plus the number
+ * of dedicated ports present.
+ * EC_CMD_USB_PD_PORTS does NOT include the dedicated ports
+ */
+#define EC_CMD_CHARGE_PORT_COUNT 0x0105
+struct ec_response_charge_port_count {
+ uint8_t port_count;
+} __packed;
+
/* Read USB-PD Device discovery info */
#define EC_CMD_USB_PD_DISCOVERY 0x0113
struct ec_params_usb_pd_discovery_entry {
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h
new file mode 100644
index 000000000000..ab16ad283def
--- /dev/null
+++ b/include/linux/mfd/ingenic-tcu.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Header file for the Ingenic JZ47xx TCU driver
+ */
+#ifndef __LINUX_MFD_INGENIC_TCU_H_
+#define __LINUX_MFD_INGENIC_TCU_H_
+
+#include <linux/bitops.h>
+
+#define TCU_REG_WDT_TDR 0x00
+#define TCU_REG_WDT_TCER 0x04
+#define TCU_REG_WDT_TCNT 0x08
+#define TCU_REG_WDT_TCSR 0x0c
+#define TCU_REG_TER 0x10
+#define TCU_REG_TESR 0x14
+#define TCU_REG_TECR 0x18
+#define TCU_REG_TSR 0x1c
+#define TCU_REG_TFR 0x20
+#define TCU_REG_TFSR 0x24
+#define TCU_REG_TFCR 0x28
+#define TCU_REG_TSSR 0x2c
+#define TCU_REG_TMR 0x30
+#define TCU_REG_TMSR 0x34
+#define TCU_REG_TMCR 0x38
+#define TCU_REG_TSCR 0x3c
+#define TCU_REG_TDFR0 0x40
+#define TCU_REG_TDHR0 0x44
+#define TCU_REG_TCNT0 0x48
+#define TCU_REG_TCSR0 0x4c
+#define TCU_REG_OST_DR 0xe0
+#define TCU_REG_OST_CNTL 0xe4
+#define TCU_REG_OST_CNTH 0xe8
+#define TCU_REG_OST_TCSR 0xec
+#define TCU_REG_TSTR 0xf0
+#define TCU_REG_TSTSR 0xf4
+#define TCU_REG_TSTCR 0xf8
+#define TCU_REG_OST_CNTHBUF 0xfc
+
+#define TCU_TCSR_RESERVED_BITS 0x3f
+#define TCU_TCSR_PARENT_CLOCK_MASK 0x07
+#define TCU_TCSR_PRESCALE_LSB 3
+#define TCU_TCSR_PRESCALE_MASK 0x38
+
+#define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */
+#define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */
+#define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */
+
+#define TCU_WDT_TCER_TCEN BIT(0) /* Watchdog timer enable */
+
+#define TCU_CHANNEL_STRIDE 0x10
+#define TCU_REG_TDFRc(c) (TCU_REG_TDFR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TDHRc(c) (TCU_REG_TDHR0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCNTc(c) (TCU_REG_TCNT0 + ((c) * TCU_CHANNEL_STRIDE))
+#define TCU_REG_TCSRc(c) (TCU_REG_TCSR0 + ((c) * TCU_CHANNEL_STRIDE))
+
+#endif /* __LINUX_MFD_INGENIC_TCU_H_ */
diff --git a/include/linux/mfd/intel_msic.h b/include/linux/mfd/intel_msic.h
index 439a7a617bc9..317e8608cf41 100644
--- a/include/linux/mfd/intel_msic.h
+++ b/include/linux/mfd/intel_msic.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * include/linux/mfd/intel_msic.h - Core interface for Intel MSIC
+ * Core interface for Intel MSIC
*
* Copyright (C) 2011, Intel Corporation
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_MFD_INTEL_MSIC_H__
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h
index 5aacdb017a9f..ed1dfba5e5f9 100644
--- a/include/linux/mfd/intel_soc_pmic.h
+++ b/include/linux/mfd/intel_soc_pmic.h
@@ -1,17 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * intel_soc_pmic.h - Intel SoC PMIC Driver
+ * Intel SoC PMIC Driver
*
* Copyright (C) 2012-2014 Intel Corporation. All rights reserved.
*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* Author: Yang, Bin <bin.yang@intel.com>
* Author: Zhu, Lejun <lejun.zhu@linux.intel.com>
*/
@@ -25,6 +17,7 @@ struct intel_soc_pmic {
int irq;
struct regmap *regmap;
struct regmap_irq_chip_data *irq_chip_data;
+ struct regmap_irq_chip_data *irq_chip_data_pwrbtn;
struct regmap_irq_chip_data *irq_chip_data_tmu;
struct regmap_irq_chip_data *irq_chip_data_bcu;
struct regmap_irq_chip_data *irq_chip_data_adc;
diff --git a/include/linux/mfd/intel_soc_pmic_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 0c351bc85d2d..9be566cc58c6 100644
--- a/include/linux/mfd/intel_soc_pmic_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
*/
#ifndef __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index c332681848ef..fe69c0f4398f 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -148,6 +148,7 @@ struct snd_soc_dapm_context;
* @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
* @pdata: our pdata
* @irq_dev: the irqchip child driver device
+ * @irq_data: pointer to irqchip data for the child irqchip driver
* @irq: host irq number from SPI or I2C configuration
* @out_clamp: indicates output clamp state for each analogue output
* @out_shorted: indicates short circuit state for each analogue output
@@ -175,6 +176,7 @@ struct madera {
struct madera_pdata pdata;
struct device *irq_dev;
+ struct regmap_irq_chip_data *irq_data;
int irq;
unsigned int num_micbias;
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 0b311f39c8f4..8dc852402dbb 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -24,7 +24,6 @@
struct gpio_desc;
struct pinctrl_map;
-struct madera_irqchip_pdata;
struct madera_codec_pdata;
/**
diff --git a/include/linux/mfd/max14577-private.h b/include/linux/mfd/max14577-private.h
index df75234f979d..a21374f8ad26 100644
--- a/include/linux/mfd/max14577-private.h
+++ b/include/linux/mfd/max14577-private.h
@@ -1,19 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577-private.h - Common API for the Maxim 14577/77836 internal sub chip
*
 * Copyright (C) 2014 Samsung Electronics
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __MAX14577_PRIVATE_H__
diff --git a/include/linux/mfd/max14577.h b/include/linux/mfd/max14577.h
index d81b52bb8bee..8b3ef891ba42 100644
--- a/include/linux/mfd/max14577.h
+++ b/include/linux/mfd/max14577.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max14577.h - Driver for the Maxim 14577/77836
*
@@ -5,16 +6,6 @@
* Chanwoo Choi <cw00.choi@samsung.com>
* Krzysztof Kozlowski <krzk@kernel.org>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
* This driver is based on max8997.h
*
* MAX14577 has MUIC, Charger devices.
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 643dae777b43..833e578e051e 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686-private.h - Voltage regulator driver for the Maxim 77686/802
*
 * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77686_PRIV_H
diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h
index d4b72d519115..d0fb510875e6 100644
--- a/include/linux/mfd/max77686.h
+++ b/include/linux/mfd/max77686.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77686.h - Driver for the Maxim 77686/802
*
 * Copyright (C) 2012 Samsung Electronics
* Chiwoong Byun <woong.byun@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77686 has PMIC, RTC devices.
diff --git a/include/linux/mfd/max77693-common.h b/include/linux/mfd/max77693-common.h
index 095b121aa725..a5bce099f1ed 100644
--- a/include/linux/mfd/max77693-common.h
+++ b/include/linux/mfd/max77693-common.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common data shared between Maxim 77693 and 77843 drivers
*
* Copyright (C) 2015 Samsung Electronics
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __LINUX_MFD_MAX77693_COMMON_H
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index 3c7a63b98ad6..e798c81aec31 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693-private.h - Voltage regulator driver for the Maxim 77693
*
@@ -5,20 +6,6 @@
* SangYoung Son <hello.son@samsung.com>
*
* This program is not provided / owned by Maxim Integrated Products.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX77693_PRIV_H
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
index d450f687301b..c67c16ba8649 100644
--- a/include/linux/mfd/max77693.h
+++ b/include/linux/mfd/max77693.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max77693.h - Driver for the Maxim 77693
*
@@ -6,20 +7,6 @@
*
* This program is not provided / owned by Maxim Integrated Products.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8997.h
*
* MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
diff --git a/include/linux/mfd/max77843-private.h b/include/linux/mfd/max77843-private.h
index b8908bf8d315..0bc7454c4dbe 100644
--- a/include/linux/mfd/max77843-private.h
+++ b/include/linux/mfd/max77843-private.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Common variables for the Maxim MAX77843 driver
*
* Copyright (C) 2015 Samsung Electronics
* Author: Jaewon Kim <jaewon02.kim@samsung.com>
* Author: Beomho Seo <beomho.seo@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
*/
#ifndef __MAX77843_PRIVATE_H_
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index 78c76cd4d37b..a10cd6945232 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -1,22 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997-private.h - Voltage regulator driver for the Maxim 8997
*
 * Copyright (C) 2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8997_PRIV_H
diff --git a/include/linux/mfd/max8997.h b/include/linux/mfd/max8997.h
index cf815577bd68..e955e2f0a2cc 100644
--- a/include/linux/mfd/max8997.h
+++ b/include/linux/mfd/max8997.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8997.h - Driver for the Maxim 8997/8966
*
 * Copyright (C) 2009-2010 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
* This driver is based on max8998.h
*
* MAX8997 has PMIC, MUIC, HAPTIC, RTC, FLASH, and Fuel Gauge devices.
@@ -178,7 +165,6 @@ struct max8997_led_platform_data {
struct max8997_platform_data {
/* IRQ */
int ono;
- int wakeup;
/* ---- PMIC ---- */
struct max8997_regulator_data *regulators;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index d68ada502ff3..6deb5f577602 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998-private.h - Voltage regulator driver for the Maxim 8998
*
 * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_PRIV_H
diff --git a/include/linux/mfd/max8998.h b/include/linux/mfd/max8998.h
index e3956a654cbc..061af220dcd3 100644
--- a/include/linux/mfd/max8998.h
+++ b/include/linux/mfd/max8998.h
@@ -1,23 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* max8998.h - Voltage regulator driver for the Maxim 8998
*
 * Copyright (C) 2009-2010 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
* Marek Szyprowski <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __LINUX_MFD_MAX8998_H
diff --git a/include/linux/mfd/mc13xxx.h b/include/linux/mfd/mc13xxx.h
index 54a3cd808f9e..2ad9bdc0a5ec 100644
--- a/include/linux/mfd/mc13xxx.h
+++ b/include/linux/mfd/mc13xxx.h
@@ -249,6 +249,7 @@ struct mc13xxx_platform_data {
#define MC13XXX_ADC0_TSMOD0 (1 << 12)
#define MC13XXX_ADC0_TSMOD1 (1 << 13)
#define MC13XXX_ADC0_TSMOD2 (1 << 14)
+#define MC13XXX_ADC0_CHRGRAWDIV (1 << 15)
#define MC13XXX_ADC0_ADINC1 (1 << 16)
#define MC13XXX_ADC0_ADINC2 (1 << 17)
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 28f4ae76271d..3ca17eb89aa2 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * core.h
- *
- * copyright (c) 2011 Samsung Electronics Co., Ltd
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_CORE_H
diff --git a/include/linux/mfd/samsung/irq.h b/include/linux/mfd/samsung/irq.h
index 667aa40486dd..6cfe4201a106 100644
--- a/include/linux/mfd/samsung/irq.h
+++ b/include/linux/mfd/samsung/irq.h
@@ -1,13 +1,7 @@
-/* irq.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_SEC_IRQ_H
diff --git a/include/linux/mfd/samsung/rtc.h b/include/linux/mfd/samsung/rtc.h
index 9ed2871ea335..0204decfc9aa 100644
--- a/include/linux/mfd/samsung/rtc.h
+++ b/include/linux/mfd/samsung/rtc.h
@@ -1,18 +1,7 @@
-/* rtc.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011-2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_SEC_RTC_H
diff --git a/include/linux/mfd/samsung/s2mpa01.h b/include/linux/mfd/samsung/s2mpa01.h
index 2766108bca2f..0762e9de6f2f 100644
--- a/include/linux/mfd/samsung/s2mpa01.h
+++ b/include/linux/mfd/samsung/s2mpa01.h
@@ -1,12 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2013 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPA01_H
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 2c14eeca46f0..6e7668a389a1 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -1,14 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps11.h
- *
* Copyright (c) 2012 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S2MPS11_H
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index 239e977ba45d..b96d8a11dcd3 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps13.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS13_H
diff --git a/include/linux/mfd/samsung/s2mps14.h b/include/linux/mfd/samsung/s2mps14.h
index c92f4782afb5..f4afa0cfc24f 100644
--- a/include/linux/mfd/samsung/s2mps14.h
+++ b/include/linux/mfd/samsung/s2mps14.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mps14.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPS14_H
diff --git a/include/linux/mfd/samsung/s2mps15.h b/include/linux/mfd/samsung/s2mps15.h
index 36d35287c3c0..eac6bf74b72e 100644
--- a/include/linux/mfd/samsung/s2mps15.h
+++ b/include/linux/mfd/samsung/s2mps15.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (c) 2015 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef __LINUX_MFD_S2MPS15_H
diff --git a/include/linux/mfd/samsung/s2mpu02.h b/include/linux/mfd/samsung/s2mpu02.h
index 47ae9bc583a7..76cd5380cf0f 100644
--- a/include/linux/mfd/samsung/s2mpu02.h
+++ b/include/linux/mfd/samsung/s2mpu02.h
@@ -1,19 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * s2mpu02.h
- *
* Copyright (c) 2014 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
*/
#ifndef __LINUX_MFD_S2MPU02_H
diff --git a/include/linux/mfd/samsung/s5m8763.h b/include/linux/mfd/samsung/s5m8763.h
index e025418e5589..c534f086ca16 100644
--- a/include/linux/mfd/samsung/s5m8763.h
+++ b/include/linux/mfd/samsung/s5m8763.h
@@ -1,13 +1,7 @@
-/* s5m8763.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8763_H
diff --git a/include/linux/mfd/samsung/s5m8767.h b/include/linux/mfd/samsung/s5m8767.h
index 243b58fec33d..704f8d80e96e 100644
--- a/include/linux/mfd/samsung/s5m8767.h
+++ b/include/linux/mfd/samsung/s5m8767.h
@@ -1,13 +1,7 @@
-/* s5m8767.h
- *
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
*/
#ifndef __LINUX_MFD_S5M8767_H
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
index 09d5f30384e5..1ef51ed36be5 100644
--- a/include/linux/mfd/ti-lmu.h
+++ b/include/linux/mfd/ti-lmu.h
@@ -16,6 +16,7 @@
#include <linux/gpio.h>
#include <linux/notifier.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
/* Notifier event */
#define LMU_EVENT_MONITOR_DONE 0x01
@@ -81,7 +82,7 @@ enum lm363x_regulator_id {
struct ti_lmu {
struct device *dev;
struct regmap *regmap;
- int en_gpio;
+ struct gpio_desc *en_gpio;
struct blocking_notifier_head notifier;
};
#endif
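
With en_gpio now a struct gpio_desc pointer rather than a raw GPIO number, users of ti_lmu move to the gpiod consumer API. A minimal probe-path sketch, assuming an "enable" GPIO in the firmware description (all names other than the field are hypothetical):

	/* Sketch: request a descriptor instead of a legacy GPIO number. */
	lmu->en_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_HIGH);
	if (IS_ERR(lmu->en_gpio))
		return PTR_ERR(lmu->en_gpio);

	/* Drive the line through the descriptor API. */
	gpiod_set_value_cansleep(lmu->en_gpio, 1);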
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 31460eeb6fe0..aa5963b5d38e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -97,14 +97,15 @@ enum {
};
enum {
- MLX5_ATOMIC_MODE_IB_COMP = 1 << 16,
- MLX5_ATOMIC_MODE_CX = 2 << 16,
- MLX5_ATOMIC_MODE_8B = 3 << 16,
- MLX5_ATOMIC_MODE_16B = 4 << 16,
- MLX5_ATOMIC_MODE_32B = 5 << 16,
- MLX5_ATOMIC_MODE_64B = 6 << 16,
- MLX5_ATOMIC_MODE_128B = 7 << 16,
- MLX5_ATOMIC_MODE_256B = 8 << 16,
+ MLX5_ATOMIC_MODE_OFFSET = 16,
+ MLX5_ATOMIC_MODE_IB_COMP = 1,
+ MLX5_ATOMIC_MODE_CX = 2,
+ MLX5_ATOMIC_MODE_8B = 3,
+ MLX5_ATOMIC_MODE_16B = 4,
+ MLX5_ATOMIC_MODE_32B = 5,
+ MLX5_ATOMIC_MODE_64B = 6,
+ MLX5_ATOMIC_MODE_128B = 7,
+ MLX5_ATOMIC_MODE_256B = 8,
};
enum {
@@ -163,13 +164,11 @@ enum mlx5_dcbx_oper_mode {
MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};
-enum mlx5_dct_atomic_mode {
- MLX5_ATOMIC_MODE_DCT_CX = 2,
-};
-
enum {
MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+ MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
+ MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};
enum mlx5_page_fault_resume_flags {
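
With the shift factored out as MLX5_ATOMIC_MODE_OFFSET, the mode enumerators are plain values and callers apply the shift themselves. A one-line sketch of the intended composition (variable name hypothetical):

	u32 mode = MLX5_ATOMIC_MODE_8B << MLX5_ATOMIC_MODE_OFFSET;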
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0416a7204be3..daa2b8f1e9a8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -890,6 +890,19 @@ static inline bool is_device_public_page(const struct page *page)
page->pgmap->type == MEMORY_DEVICE_PUBLIC;
}
+#ifdef CONFIG_PCI_P2PDMA
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return is_zone_device_page(page) &&
+ page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
+}
+#else /* CONFIG_PCI_P2PDMA */
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
#else /* CONFIG_DEV_PAGEMAP_OPS */
static inline void dev_pagemap_get_ops(void)
{
@@ -913,6 +926,11 @@ static inline bool is_device_public_page(const struct page *page)
{
return false;
}
+
+static inline bool is_pci_p2pdma_page(const struct page *page)
+{
+ return false;
+}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline void get_page(struct page *page)
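
Since the !CONFIG_PCI_P2PDMA and !CONFIG_DEV_PAGEMAP_OPS stubs both return false, callers can test pages unconditionally. A hedged sketch of a DMA-mapping caller (dev, sgl, nents and dir are hypothetical parameters):

	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (is_pci_p2pdma_page(sg_page(sg)))
			return pci_p2pdma_map_sg(dev, sgl, nents, dir);
	}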
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 5839d8062dfc..0e9c50052ff3 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -317,11 +317,18 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
-platform_msi_create_device_domain(struct device *dev,
- unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg,
- const struct irq_domain_ops *ops,
- void *host_data);
+__platform_msi_create_device_domain(struct device *dev,
+ unsigned int nvec,
+ bool is_tree,
+ irq_write_msi_msg_t write_msi_msg,
+ const struct irq_domain_ops *ops,
+ void *host_data);
+
+#define platform_msi_create_device_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
+#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
+ __platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
+
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
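
Both wrapper macros preserve the original argument list; only the new is_tree flag differs, so existing callers compile unchanged while tree-based users opt in explicitly. A hedged sketch (everything except the macro name is hypothetical):

	domain = platform_msi_create_device_tree_domain(dev, nvec,
							my_write_msi_msg,
							&my_domain_ops,
							host_data);
	if (!domain)
		return -ENOMEM;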
diff --git a/include/linux/ndctl.h b/include/linux/ndctl.h
new file mode 100644
index 000000000000..cd5a293ce3ae
--- /dev/null
+++ b/include/linux/ndctl.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2014-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU Lesser General Public License,
+ * version 2.1, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT ANY
+ * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
+ * more details.
+ */
+#ifndef _LINUX_NDCTL_H
+#define _LINUX_NDCTL_H
+
+#include <uapi/linux/ndctl.h>
+
+enum {
+ ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
+};
+
+#endif /* _LINUX_NDCTL_H */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a0831e9d19c9..6e0417c02279 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -62,6 +62,7 @@ struct nfs_lock_context {
struct nfs_open_context *open_context;
fl_owner_t lockowner;
atomic_t io_count;
+ struct rcu_head rcu_head;
};
struct nfs4_state;
@@ -82,6 +83,7 @@ struct nfs_open_context {
struct list_head list;
struct nfs4_threshold *mdsthreshold;
+ struct rcu_head rcu_head;
};
struct nfs_open_dir_context {
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index bf39d9c92201..0fc0b9135d46 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -228,6 +228,9 @@ struct nfs_server {
unsigned short mountd_port;
unsigned short mountd_protocol;
struct rpc_wait_queue uoc_rpcwaitq;
+
+ /* XDR related information */
+ unsigned int read_hdrsize;
};
/* Server capabilities */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index bd1c889a9ed9..0e016252cfc6 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -608,8 +608,13 @@ struct nfs_pgio_args {
__u32 count;
unsigned int pgbase;
struct page ** pages;
- const u32 * bitmask; /* used by write */
- enum nfs3_stable_how stable; /* used by write */
+ union {
+ unsigned int replen; /* used by read */
+ struct {
+ const u32 * bitmask; /* used by write */
+ enum nfs3_stable_how stable; /* used by write */
+ };
+ };
};
struct nfs_pgio_res {
@@ -617,10 +622,16 @@ struct nfs_pgio_res {
struct nfs_fattr * fattr;
__u32 count;
__u32 op_status;
- int eof; /* used by read */
- struct nfs_writeverf * verf; /* used by write */
- const struct nfs_server *server; /* used by write */
-
+ union {
+ struct {
+ unsigned int replen; /* used by read */
+ int eof; /* used by read */
+ };
+ struct {
+ struct nfs_writeverf * verf; /* used by write */
+ const struct nfs_server *server; /* used by write */
+ };
+ };
};
/*
@@ -1471,11 +1482,10 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq;
- spinlock_t lock;
- /* fields protected by lock */
+
int pnfs_error;
int error; /* merge with pnfs_error */
- unsigned long good_bytes; /* boundary of good data */
+ unsigned int good_bytes; /* boundary of good data */
unsigned long flags;
/*
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4e85447f7860..312bfa5efd80 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework consumer.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_CONSUMER_H
@@ -14,6 +11,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/notifier.h>
struct device;
struct device_node;
@@ -29,11 +27,36 @@ struct nvmem_cell_info {
unsigned int nbits;
};
+/**
+ * struct nvmem_cell_lookup - cell lookup entry
+ *
+ * @nvmem_name: Name of the provider.
+ * @cell_name: Name of the nvmem cell as defined in the name field of
+ * struct nvmem_cell_info.
+ * @dev_id: Name of the consumer device that will be associated with
+ * this cell.
+ * @con_id: Connector id for this cell lookup.
+ */
+struct nvmem_cell_lookup {
+ const char *nvmem_name;
+ const char *cell_name;
+ const char *dev_id;
+ const char *con_id;
+ struct list_head node;
+};
+
+enum {
+ NVMEM_ADD = 1,
+ NVMEM_REMOVE,
+ NVMEM_CELL_ADD,
+ NVMEM_CELL_REMOVE,
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
/* Cell based interface */
-struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *name);
-struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *name);
+struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id);
+struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id);
void nvmem_cell_put(struct nvmem_cell *cell);
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell);
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len);
@@ -55,18 +78,28 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf);
+const char *nvmem_dev_name(struct nvmem_device *nvmem);
+
+void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
+ size_t nentries);
+
+int nvmem_register_notifier(struct notifier_block *nb);
+int nvmem_unregister_notifier(struct notifier_block *nb);
+
#else
static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_cell *devm_nvmem_cell_get(struct device *dev,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void devm_nvmem_cell_put(struct device *dev,
@@ -80,31 +113,31 @@ static inline void nvmem_cell_put(struct nvmem_cell *cell)
static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
const char *buf, size_t len)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_cell_read_u32(struct device *dev,
const char *cell_id, u32 *val)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline struct nvmem_device *nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *devm_nvmem_device_get(struct device *dev,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void nvmem_device_put(struct nvmem_device *nvmem)
@@ -120,47 +153,68 @@ static inline ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_read(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
static inline int nvmem_device_write(struct nvmem_device *nvmem,
unsigned int offset, size_t bytes,
void *buf)
{
- return -ENOSYS;
+ return -EOPNOTSUPP;
}
+
+static inline const char *nvmem_dev_name(struct nvmem_device *nvmem)
+{
+ return NULL;
+}
+
+static inline void
+nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+static inline void
+nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries) {}
+
+static inline int nvmem_register_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nvmem_unregister_notifier(struct notifier_block *nb)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NVMEM */
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name);
+ const char *id);
struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name);
#else
static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
- const char *name)
+ const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
const char *name)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_NVMEM && CONFIG_OF */
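
The lookup entries follow the gpiod/regulator lookup pattern: machine code binds a provider cell to a consumer device by name, and the consumer then calls nvmem_cell_get() with the con_id. A hedged board-file sketch (provider and consumer names hypothetical):

	static struct nvmem_cell_lookup board_lookup = {
		.nvmem_name = "eeprom0",	/* hypothetical provider */
		.cell_name  = "mac-address",
		.dev_id     = "fec1",		/* hypothetical consumer */
		.con_id     = "mac-address",
	};

	nvmem_add_cell_lookups(&board_lookup, 1);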
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index 24def6ad09bb..1e3283c2af77 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* nvmem framework provider.
*
* Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
* Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#ifndef _LINUX_NVMEM_PROVIDER_H
@@ -67,30 +64,46 @@ struct nvmem_config {
struct device *base_dev;
};
+/**
+ * struct nvmem_cell_table - NVMEM cell definitions for given provider
+ *
+ * @nvmem_name: Provider name.
+ * @cells: Array of cell definitions.
+ * @ncells: Number of cell definitions in the array.
+ * @node: List node.
+ *
+ * This structure together with related helper functions is provided for users
+ * that can't access the nvmem provider structure but wish to register
+ * cell definitions for it, e.g. board files registering an EEPROM device.
+ */
+struct nvmem_cell_table {
+ const char *nvmem_name;
+ const struct nvmem_cell_info *cells;
+ size_t ncells;
+ struct list_head node;
+};
+
#if IS_ENABLED(CONFIG_NVMEM)
struct nvmem_device *nvmem_register(const struct nvmem_config *cfg);
-int nvmem_unregister(struct nvmem_device *nvmem);
+void nvmem_unregister(struct nvmem_device *nvmem);
struct nvmem_device *devm_nvmem_register(struct device *dev,
const struct nvmem_config *cfg);
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem);
-int nvmem_add_cells(struct nvmem_device *nvmem,
- const struct nvmem_cell_info *info,
- int ncells);
+void nvmem_add_cell_table(struct nvmem_cell_table *table);
+void nvmem_del_cell_table(struct nvmem_cell_table *table);
+
#else
static inline struct nvmem_device *nvmem_register(const struct nvmem_config *c)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-EOPNOTSUPP);
}
-static inline int nvmem_unregister(struct nvmem_device *nvmem)
-{
- return -ENOSYS;
-}
+static inline void nvmem_unregister(struct nvmem_device *nvmem) {}
static inline struct nvmem_device *
devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
@@ -101,16 +114,11 @@ devm_nvmem_register(struct device *dev, const struct nvmem_config *c)
static inline int
devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
- return nvmem_unregister(nvmem);
-
+ return -EOPNOTSUPP;
}
-static inline int nvmem_add_cells(struct nvmem_device *nvmem,
- const struct nvmem_cell_info *info,
- int ncells)
-{
- return -ENOSYS;
-}
+static inline void nvmem_add_cell_table(struct nvmem_cell_table *table) {}
+static inline void nvmem_del_cell_table(struct nvmem_cell_table *table) {}
#endif /* CONFIG_NVMEM */
#endif /* ifndef _LINUX_NVMEM_PROVIDER_H */
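
The cell table replaces the removed nvmem_add_cells() for users outside the provider: cells are declared up front and matched against the provider by name when it registers. A hedged sketch (names and offsets hypothetical):

	static const struct nvmem_cell_info board_cells[] = {
		{ .name = "mac-address", .offset = 0xfa, .bytes = 6 },
	};

	static struct nvmem_cell_table board_cell_table = {
		.nvmem_name = "eeprom0",	/* hypothetical provider */
		.cells      = board_cells,
		.ncells     = ARRAY_SIZE(board_cells),
	};

	nvmem_add_cell_table(&board_cell_table);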
diff --git a/include/linux/of.h b/include/linux/of.h
index 99b0ebf49632..ab96025b2382 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -247,12 +247,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
#include <asm/prom.h>
#endif
-/* Default #address and #size cells. Allow arch asm/prom.h to override */
-#if !defined(OF_ROOT_NODE_ADDR_CELLS_DEFAULT)
-#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 1
-#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
-#endif
-
#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -353,6 +347,8 @@ extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
+extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -550,6 +546,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
extern int of_cpu_node_to_id(struct device_node *np);
+int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out);
+
#else /* CONFIG_OF */
static inline void of_core_init(void)
@@ -754,6 +754,11 @@ static inline struct device_node *of_get_cpu_node(int cpu,
return NULL;
}
+static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
+{
+ return NULL;
+}
+
static inline int of_n_addr_cells(struct device_node *np)
{
return 0;
@@ -952,6 +957,13 @@ static inline int of_cpu_node_to_id(struct device_node *np)
return -ENODEV;
}
+static inline int of_map_rid(struct device_node *np, u32 rid,
+ const char *map_name, const char *map_mask_name,
+ struct device_node **target, u32 *id_out)
+{
+ return -EINVAL;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#endif /* CONFIG_OF */
@@ -990,7 +1002,7 @@ static inline struct device_node *of_find_matching_node(
static inline const char *of_node_get_device_type(const struct device_node *np)
{
- return of_get_property(np, "type", NULL);
+ return of_get_property(np, "device_type", NULL);
}
static inline bool of_node_is_type(const struct device_node *np, const char *type)
@@ -1217,6 +1229,10 @@ static inline int of_property_read_s32(const struct device_node *np,
for (child = of_get_next_available_child(parent, NULL); child != NULL; \
child = of_get_next_available_child(parent, child))
+#define for_each_of_cpu_node(cpu) \
+ for (cpu = of_get_next_cpu_node(NULL); cpu != NULL; \
+ cpu = of_get_next_cpu_node(cpu))
+
#define for_each_node_with_property(dn, prop_name) \
for (dn = of_find_node_with_property(NULL, prop_name); dn; \
dn = of_find_node_with_property(dn, prop_name))
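
for_each_of_cpu_node() iterates the /cpus children via of_get_next_cpu_node(), which, like the other OF iterators, is expected to drop the reference on the previous node at each step. A minimal sketch, assuming that convention:

	struct device_node *cpu;

	for_each_of_cpu_node(cpu)
		pr_info("cpu node: %pOF\n", cpu);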
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index e83d87fc5673..21a89c4880fa 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -14,9 +14,6 @@ struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn);
int of_pci_get_devfn(struct device_node *np);
void of_pci_check_probe_only(void);
-int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out);
#else
static inline struct device_node *of_pci_find_child_device(struct device_node *parent,
unsigned int devfn)
@@ -29,13 +26,6 @@ static inline int of_pci_get_devfn(struct device_node *np)
return -EINVAL;
}
-static inline int of_pci_map_rid(struct device_node *np, u32 rid,
- const char *map_name, const char *map_mask_name,
- struct device_node **target, u32 *id_out)
-{
- return -EINVAL;
-}
-
static inline void of_pci_check_probe_only(void) { }
#endif
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index c3f1b44ade29..cb1adf0b78a9 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -119,29 +119,11 @@ static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
return dma_set_coherent_mask(&dev->dev, mask);
}
-
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{
- return dma_set_max_seg_size(&dev->dev, size);
-}
-
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{
- return dma_set_seg_boundary(&dev->dev, mask);
-}
#else
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{ return -EIO; }
-static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
- unsigned int size)
-{ return -EIO; }
-static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
- unsigned long mask)
-{ return -EIO; }
#endif
#endif
diff --git a/include/linux/pci-dma.h b/include/linux/pci-dma.h
deleted file mode 100644
index 0f7aa7353ca3..000000000000
--- a/include/linux/pci-dma.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PCI_DMA_H
-#define _LINUX_PCI_DMA_H
-
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) DEFINE_DMA_UNMAP_ADDR(ADDR_NAME);
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) DEFINE_DMA_UNMAP_LEN(LEN_NAME);
-#define pci_unmap_addr dma_unmap_addr
-#define pci_unmap_addr_set dma_unmap_addr_set
-#define pci_unmap_len dma_unmap_len
-#define pci_unmap_len_set dma_unmap_len_set
-
-#endif
diff --git a/include/linux/pci-p2pdma.h b/include/linux/pci-p2pdma.h
new file mode 100644
index 000000000000..bca9bc3e5be7
--- /dev/null
+++ b/include/linux/pci-p2pdma.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCI Peer 2 Peer DMA support.
+ *
+ * Copyright (c) 2016-2018, Logan Gunthorpe
+ * Copyright (c) 2016-2017, Microsemi Corporation
+ * Copyright (c) 2017, Christoph Hellwig
+ * Copyright (c) 2018, Eideticom Inc.
+ */
+
+#ifndef _LINUX_PCI_P2PDMA_H
+#define _LINUX_PCI_P2PDMA_H
+
+#include <linux/pci.h>
+
+struct block_device;
+struct scatterlist;
+
+#ifdef CONFIG_PCI_P2PDMA
+int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
+ u64 offset);
+int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
+ int num_clients, bool verbose);
+bool pci_has_p2pmem(struct pci_dev *pdev);
+struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
+void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
+void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
+pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
+struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length);
+void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
+void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
+int pci_p2pdma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir);
+int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
+ bool *use_p2pdma);
+ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
+ bool use_p2pdma);
+#else /* CONFIG_PCI_P2PDMA */
+static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
+ size_t size, u64 offset)
+{
+ return -EOPNOTSUPP;
+}
+static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
+ struct device **clients, int num_clients, bool verbose)
+{
+ return -1;
+}
+static inline bool pci_has_p2pmem(struct pci_dev *pdev)
+{
+ return false;
+}
+static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
+ int num_clients)
+{
+ return NULL;
+}
+static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
+{
+ return NULL;
+}
+static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
+ size_t size)
+{
+}
+static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
+ void *addr)
+{
+ return 0;
+}
+static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
+ unsigned int *nents, u32 length)
+{
+ return NULL;
+}
+static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
+ struct scatterlist *sgl)
+{
+}
+static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
+{
+}
+static inline int pci_p2pdma_map_sg(struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir)
+{
+ return 0;
+}
+static inline int pci_p2pdma_enable_store(const char *page,
+ struct pci_dev **p2p_dev, bool *use_p2pdma)
+{
+ *use_p2pdma = false;
+ return 0;
+}
+static inline ssize_t pci_p2pdma_enable_show(char *page,
+ struct pci_dev *p2p_dev, bool use_p2pdma)
+{
+ return sprintf(page, "none\n");
+}
+#endif /* CONFIG_PCI_P2PDMA */
+
+
+static inline int pci_p2pdma_distance(struct pci_dev *provider,
+ struct device *client, bool verbose)
+{
+ return pci_p2pdma_distance_many(provider, &client, 1, verbose);
+}
+
+static inline struct pci_dev *pci_p2pmem_find(struct device *client)
+{
+ return pci_p2pmem_find_many(&client, 1);
+}
+
+#endif /* _LINUX_PCI_P2PDMA_H */
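
Taken together, the interface splits into discovery (pci_p2pdma_distance(), pci_p2pmem_find()), allocation and mapping. A hedged end-to-end sketch for one client device, assuming pci_p2pmem_find() returns a referenced device (error paths abbreviated; client and size hypothetical):

	struct pci_dev *p2p_dev;
	void *buf;

	p2p_dev = pci_p2pmem_find(client);
	if (!p2p_dev)
		return -ENODEV;

	buf = pci_alloc_p2pmem(p2p_dev, size);
	if (!buf) {
		pci_dev_put(p2p_dev);
		return -ENOMEM;
	}

	/* DMA using pci_p2pmem_virt_to_bus(p2p_dev, buf) ... */

	pci_free_p2pmem(p2p_dev, buf, size);
	pci_dev_put(p2p_dev);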
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 2c4755032475..11c71c4ecf75 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -281,6 +281,7 @@ struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
struct pci_ats;
+struct pci_p2pdma;
/* The pci_dev structure describes PCI devices */
struct pci_dev {
@@ -325,6 +326,7 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
@@ -402,6 +404,7 @@ struct pci_dev {
unsigned int has_secondary_link:1;
unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */
unsigned int is_probed:1; /* Device probing in progress */
+ unsigned int link_active_reporting:1;/* Device capable of reporting link active */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -439,6 +442,9 @@ struct pci_dev {
#ifdef CONFIG_PCI_PASID
u16 pasid_features;
#endif
+#ifdef CONFIG_PCI_P2PDMA
+ struct pci_p2pdma *p2pdma;
+#endif
phys_addr_t rom; /* Physical address if not from BAR */
size_t romlen; /* Length if not from BAR */
char *driver_override; /* Driver name to force a match */
@@ -1342,7 +1348,6 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode,
/* kmem_cache style wrapper around pci_alloc_consistent() */
-#include <linux/pci-dma.h>
#include <linux/dmapool.h>
#define pci_pool dma_pool
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index a6d6650a0490..7acc9f91e72b 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -16,8 +16,6 @@
/**
* struct hotplug_slot_ops - the callbacks that the hotplug pci core can use
- * @owner: The module owner of this structure
- * @mod_name: The module name (KBUILD_MODNAME) of this structure
* @enable_slot: Called when the user wants to enable a specific pci slot
* @disable_slot: Called when the user wants to disable a specific pci slot
* @set_attention_status: Called to set the specific slot's attention LED to
@@ -25,17 +23,9 @@
* @hardware_test: Called to run a specified hardware test on the specified
* slot.
* @get_power_status: Called to get the current power status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_attention_status: Called to get the current attention status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_latch_status: Called to get the current latch status of a slot.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @get_adapter_status: Called to see if an adapter is present in the slot or not.
- * If this field is NULL, the value passed in the struct hotplug_slot_info
- * will be used when this value is requested by a user.
* @reset_slot: Optional interface to allow override of a bus reset for the
* slot for cases where a secondary bus reset can result in spurious
* hotplug events or where a slot can be reset independent of the bus.
@@ -46,8 +36,6 @@
* set an LED, enable / disable power, etc.)
*/
struct hotplug_slot_ops {
- struct module *owner;
- const char *mod_name;
int (*enable_slot) (struct hotplug_slot *slot);
int (*disable_slot) (struct hotplug_slot *slot);
int (*set_attention_status) (struct hotplug_slot *slot, u8 value);
@@ -60,37 +48,19 @@ struct hotplug_slot_ops {
};
/**
- * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
- * @power_status: if power is enabled or not (1/0)
- * @attention_status: if the attention light is enabled or not (1/0)
- * @latch_status: if the latch (if any) is open or closed (1/0)
- * @adapter_status: if there is a pci board present in the slot or not (1/0)
- *
- * Used to notify the hotplug pci core of the status of a specific slot.
- */
-struct hotplug_slot_info {
- u8 power_status;
- u8 attention_status;
- u8 latch_status;
- u8 adapter_status;
-};
-
-/**
* struct hotplug_slot - used to register a physical slot with the hotplug pci core
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
- * @info: pointer to the &struct hotplug_slot_info for the initial values for
- * this slot.
- * @private: used by the hotplug pci controller driver to store whatever it
- * needs.
+ * @owner: The module owner of this structure
+ * @mod_name: The module name (KBUILD_MODNAME) of this structure
*/
struct hotplug_slot {
- struct hotplug_slot_ops *ops;
- struct hotplug_slot_info *info;
- void *private;
+ const struct hotplug_slot_ops *ops;
/* Variables below this are for use only by the hotplug pci core. */
struct list_head slot_list;
struct pci_slot *pci_slot;
+ struct module *owner;
+ const char *mod_name;
};
static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
@@ -110,9 +80,6 @@ void pci_hp_del(struct hotplug_slot *slot);
void pci_hp_destroy(struct hotplug_slot *slot);
void pci_hp_deregister(struct hotplug_slot *slot);
-int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
- struct hotplug_slot_info *info);
-
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
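
With owner and mod_name folded into hotplug_slot and the info/private fields gone, drivers can keep their ops const and embed the slot in their own state; the pci_hp_register() macro fills in the module fields. A hedged sketch (callbacks and surrounding code hypothetical):

	static const struct hotplug_slot_ops my_hp_ops = {
		.enable_slot  = my_enable_slot,
		.disable_slot = my_disable_slot,
	};

	struct my_slot {
		struct hotplug_slot hotplug_slot;	/* embedded */
	};

	slot->hotplug_slot.ops = &my_hp_ops;
	ret = pci_hp_register(&slot->hotplug_slot, bus, devnr, name);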
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 8ddc2ffe3895..69f0abe1ba1a 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2543,8 +2543,6 @@
#define PCI_VENDOR_ID_HUAWEI 0x19e5
#define PCI_VENDOR_ID_NETRONOME 0x19ee
-#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
-#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
deleted file mode 100644
index 0a2c18a9771d..000000000000
--- a/include/linux/phy/phy-qcom-ufs.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef PHY_QCOM_UFS_H_
-#define PHY_QCOM_UFS_H_
-
-#include "phy.h"
-
-/**
- * ufs_qcom_phy_enable_dev_ref_clk() - Enable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *phy);
-
-/**
- * ufs_qcom_phy_disable_dev_ref_clk() - Disable the device
- * ref clock.
- * @phy: reference to a generic phy.
- */
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *phy);
-
-int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
-void ufs_qcom_phy_save_controller_version(struct phy *phy,
- u8 major, u16 minor, u16 step);
-
-#endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/platform_data/dma-ep93xx.h b/include/linux/platform_data/dma-ep93xx.h
index f8f1f6b952a6..eb9805bb3fe8 100644
--- a/include/linux/platform_data/dma-ep93xx.h
+++ b/include/linux/platform_data/dma-ep93xx.h
@@ -85,7 +85,7 @@ static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
if (!ep93xx_dma_chan_is_m2p(chan))
- return DMA_NONE;
+ return DMA_TRANS_NONE;
/* even channels are for TX, odd for RX */
return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
diff --git a/include/linux/platform_data/dma-mcf-edma.h b/include/linux/platform_data/dma-mcf-edma.h
new file mode 100644
index 000000000000..d718ccfa3421
--- /dev/null
+++ b/include/linux/platform_data/dma-mcf-edma.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Freescale eDMA platform data, ColdFire SoC family.
+ *
+ * Copyright (c) 2017 Angelo Dureghello <angelo@sysam.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+#define __LINUX_PLATFORM_DATA_MCF_EDMA_H__
+
+struct dma_slave_map;
+
+bool mcf_edma_filter_fn(struct dma_chan *chan, void *param);
+
+#define MCF_EDMA_FILTER_PARAM(ch) ((void *)ch)
+
+/**
+ * struct mcf_edma_platform_data - platform specific data for eDMA engine
+ *
+ * @dma_channels: Number of eDMA channels.
+ * @slave_map: Array of DMA slave map entries for peripheral channels.
+ * @slavecnt: Number of entries in @slave_map.
+ */
+struct mcf_edma_platform_data {
+ int dma_channels;
+ const struct dma_slave_map *slave_map;
+ int slavecnt;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_MCF_EDMA_H__ */
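
A hedged sketch of how a ColdFire board file might populate this (device names and channel numbers hypothetical):

	static const struct dma_slave_map mcf_edma_map[] = {
		{ "mcfuart.0", "rx", MCF_EDMA_FILTER_PARAM(2) },
		{ "mcfuart.0", "tx", MCF_EDMA_FILTER_PARAM(3) },
	};

	static struct mcf_edma_platform_data mcf_edma_data = {
		.dma_channels = 64,
		.slave_map    = mcf_edma_map,
		.slavecnt     = ARRAY_SIZE(mcf_edma_map),
	};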
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
index 5c15a738e116..219bd79dabfc 100644
--- a/include/linux/platform_data/ehci-sh.h
+++ b/include/linux/platform_data/ehci-sh.h
@@ -1,21 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* EHCI SuperH driver platform data
*
* Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
* Copyright (C) 2012 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef __USB_EHCI_SH_H
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 98b7925f1a2d..c0f624aca81c 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -48,6 +48,5 @@ struct mv_usb_platform_data {
int (*phy_init)(void __iomem *regbase);
void (*phy_deinit)(void __iomem *regbase);
int (*set_vbus)(unsigned int vbus);
- int (*private_init)(void __iomem *opregs, void __iomem *phyregs);
};
#endif
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 9ac8fc60ad49..52453a24a24f 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -9,6 +9,7 @@
#ifndef _LINUX_PMU_H
#define _LINUX_PMU_H
+#include <linux/rtc.h>
#include <uapi/linux/pmu.h>
@@ -36,6 +37,9 @@ static inline void pmu_resume(void)
extern void pmu_enable_irled(int on);
+extern time64_t pmu_get_time(void);
+extern int pmu_set_rtc_time(struct rtc_time *tm);
+
extern void pmu_restart(void);
extern void pmu_shutdown(void);
extern void pmu_unlock(void);
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index d6355f49fbae..507c5e214c42 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -24,6 +24,7 @@ enum bq27xxx_chip {
BQ27546,
BQ27742,
BQ27545, /* bq27545 */
+ BQ27411,
BQ27421, /* bq27421, bq27441, bq27621 */
BQ27425,
BQ27426,
diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h
index df4d13f7e191..d15f8e4815e3 100644
--- a/include/linux/qed/qed_rdma_if.h
+++ b/include/linux/qed/qed_rdma_if.h
@@ -39,15 +39,6 @@
#include <linux/qed/qed_ll2_if.h>
#include <linux/qed/rdma_common.h>
-enum qed_roce_ll2_tx_dest {
- /* Light L2 TX Destination to the Network */
- QED_ROCE_LL2_TX_DEST_NW,
-
- /* Light L2 TX Destination to the Loopback */
- QED_ROCE_LL2_TX_DEST_LB,
- QED_ROCE_LL2_TX_DEST_MAX
-};
-
#define QED_RDMA_MAX_CNQ_SIZE (0xFFFF)
/* rdma interface */
@@ -581,7 +572,7 @@ struct qed_roce_ll2_packet {
int n_seg;
struct qed_roce_ll2_buffer payload[RDMA_MAX_SGE_PER_SQ_WQE];
int roce_mode;
- enum qed_roce_ll2_tx_dest tx_dest;
+ enum qed_ll2_tx_dest tx_dest;
};
enum qed_rdma_type {
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
index 5d83d0c1d06c..bba2920e9c05 100644
--- a/include/linux/restart_block.h
+++ b/include/linux/restart_block.h
@@ -10,7 +10,7 @@
#include <linux/time64.h>
struct timespec;
-struct compat_timespec;
+struct old_timespec32;
struct pollfd;
enum timespec_type {
@@ -40,7 +40,7 @@ struct restart_block {
enum timespec_type type;
union {
struct __kernel_timespec __user *rmtp;
- struct compat_timespec __user *compat_rmtp;
+ struct old_timespec32 __user *compat_rmtp;
};
u64 expires;
} nanosleep;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7ed4713d5337..8b571e9b9f76 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -348,7 +348,7 @@ struct ucred {
extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
-struct timespec;
+struct timespec64;
/* The __sys_...msg variants allow MSG_CMSG_COMPAT iff
* forbid_cmsg_compat==false
@@ -358,7 +358,7 @@ extern long __sys_recvmsg(int fd, struct user_msghdr __user *msg,
extern long __sys_sendmsg(int fd, struct user_msghdr __user *msg,
unsigned int flags, bool forbid_cmsg_compat);
extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec *timeout);
+ unsigned int flags, struct timespec64 *timeout);
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 962971e6a9c7..df313913e856 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -678,6 +678,9 @@ struct sdw_master_ops {
* @defer_msg: Defer message
* @clk_stop_timeout: Clock stop timeout computed
* @bank_switch_timeout: Bank switch timeout computed
+ * @multi_link: Bus property indicating whether multiple links are
+ * supported. This flag is populated by drivers after reading the
+ * relevant firmware description (ACPI/DT).
*/
struct sdw_bus {
struct device *dev;
@@ -694,6 +697,7 @@ struct sdw_bus {
struct sdw_defer defer_msg;
unsigned int clk_stop_timeout;
u32 bank_switch_timeout;
+ bool multi_link;
};
int sdw_add_bus_master(struct sdw_bus *bus);
@@ -768,14 +772,18 @@ struct sdw_stream_params {
* @params: Stream parameters
* @state: Current state of the stream
* @type: Stream type PCM or PDM
- * @m_rt: Master runtime
+ * @master_list: List of Master runtime(s) in this stream.
+ * master_list can contain only one m_rt per Master instance
+ * for a stream.
+ * @m_rt_count: Count of Master runtime(s) in this stream
*/
struct sdw_stream_runtime {
char *name;
struct sdw_stream_params params;
enum sdw_stream_state state;
enum sdw_stream_type type;
- struct sdw_master_runtime *m_rt;
+ struct list_head master_list;
+ int m_rt_count;
};
struct sdw_stream_runtime *sdw_alloc_stream(char *stream_name);
diff --git a/include/linux/string.h b/include/linux/string.h
index 4a5a0eb7df51..27d0482e5e05 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -131,6 +131,13 @@ static inline void *memset_p(void **p, void *v, __kernel_size_t n)
return memset64((uint64_t *)p, (uintptr_t)v, n);
}
+extern void **__memcat_p(void **a, void **b);
+#define memcat_p(a, b) ({ \
+ BUILD_BUG_ON_MSG(!__same_type(*(a), *(b)), \
+ "type mismatch in memcat_p()"); \
+ (typeof(*a) *)__memcat_p((void **)(a), (void **)(b)); \
+})
+
#ifndef __HAVE_ARCH_MEMCPY
extern void * memcpy(void *,const void *,__kernel_size_t);
#endif
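
memcat_p() type-checks its arguments at build time before handing the raw pointer arrays to __memcat_p(). A hedged sketch, assuming (as the helper's users suggest) NULL-terminated arrays and a kmalloc()-ed result the caller must free:

	struct device_attribute **all;

	all = memcat_p(base_attrs, extra_attrs);	/* hypothetical arrays */
	if (!all)
		return -ENOMEM;

	/* ... use all ... */
	kfree(all);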
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 58a6765c1c5e..c4db9424b63b 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -67,7 +67,7 @@ struct rpc_cred {
const struct rpc_credops *cr_ops;
unsigned long cr_expire; /* when to gc */
unsigned long cr_flags; /* various flags */
- atomic_t cr_count; /* ref count */
+ refcount_t cr_count; /* ref count */
kuid_t cr_uid;
@@ -100,7 +100,7 @@ struct rpc_auth {
* differ from the flavor in
* au_ops->au_flavor in gss
* case) */
- atomic_t au_count; /* Reference counter */
+ refcount_t au_count; /* Reference counter */
struct rpc_cred_cache * au_credcache;
/* per-flavor data */
@@ -157,6 +157,7 @@ struct rpc_credops {
int (*crkey_timeout)(struct rpc_cred *);
bool (*crkey_to_expire)(struct rpc_cred *);
char * (*crstringify_acceptor)(struct rpc_cred *);
+ bool (*crneed_reencode)(struct rpc_task *);
};
extern const struct rpc_authops authunix_ops;
@@ -192,6 +193,7 @@ __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
__be32 * rpcauth_checkverf(struct rpc_task *, __be32 *);
int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj);
int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj);
+bool rpcauth_xmit_need_reencode(struct rpc_task *task);
int rpcauth_refreshcred(struct rpc_task *);
void rpcauth_invalcred(struct rpc_task *);
int rpcauth_uptodatecred(struct rpc_task *);
@@ -204,11 +206,11 @@ bool rpcauth_cred_key_to_expire(struct rpc_auth *, struct rpc_cred *);
char * rpcauth_stringify_acceptor(struct rpc_cred *);
static inline
-struct rpc_cred * get_rpccred(struct rpc_cred *cred)
+struct rpc_cred *get_rpccred(struct rpc_cred *cred)
{
- if (cred != NULL)
- atomic_inc(&cred->cr_count);
- return cred;
+ if (cred != NULL && refcount_inc_not_zero(&cred->cr_count))
+ return cred;
+ return NULL;
}
/**
@@ -224,9 +226,7 @@ struct rpc_cred * get_rpccred(struct rpc_cred *cred)
static inline struct rpc_cred *
get_rpccred_rcu(struct rpc_cred *cred)
{
- if (atomic_inc_not_zero(&cred->cr_count))
- return cred;
- return NULL;
+ return get_rpccred(cred);
}
#endif /* __KERNEL__ */
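
Note the semantic change: get_rpccred() can now return NULL when the refcount has already hit zero, so callers must check the result instead of assuming the passed-in cred comes back. A minimal sketch:

	new = get_rpccred(cred);
	if (!new)
		return NULL;	/* cred is already being torn down */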
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 0c9eac351aab..30427b729070 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -70,6 +70,7 @@ struct gss_cl_ctx {
refcount_t count;
enum rpc_gss_proc gc_proc;
u32 gc_seq;
+ u32 gc_seq_xmit;
spinlock_t gc_seq_lock;
struct gss_ctx *gc_gss_ctx;
struct xdr_netobj gc_wire_ctx;
diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h
index 4397a4824c81..28721cf73ec3 100644
--- a/include/linux/sunrpc/bc_xprt.h
+++ b/include/linux/sunrpc/bc_xprt.h
@@ -34,6 +34,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifdef CONFIG_SUNRPC_BACKCHANNEL
struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied);
+void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task);
void xprt_free_bc_request(struct rpc_rqst *req);
int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs);
void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs);
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 7df625d41e35..131424cefc6a 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* final key len, in bytes */
- u32 (*encrypt) (struct crypto_skcipher *tfm,
+ u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* encryption function */
- u32 (*decrypt) (struct crypto_skcipher *tfm,
+ u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* decryption function */
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
- struct crypto_skcipher *enc;
- struct crypto_skcipher *seq;
- struct crypto_skcipher *acceptor_enc;
- struct crypto_skcipher *initiator_enc;
- struct crypto_skcipher *acceptor_enc_aux;
- struct crypto_skcipher *initiator_enc_aux;
+ struct crypto_sync_skcipher *enc;
+ struct crypto_sync_skcipher *seq;
+ struct crypto_sync_skcipher *acceptor_enc;
+ struct crypto_sync_skcipher *initiator_enc;
+ struct crypto_sync_skcipher *acceptor_enc_aux;
+ struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
s32 endtime;
@@ -118,7 +118,8 @@ struct krb5_ctx {
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
};
-extern spinlock_t krb5_seq_lock;
+extern u32 gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx);
+extern u64 gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx);
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@@ -262,24 +263,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
u32
-krb5_encrypt(struct crypto_skcipher *key,
+krb5_encrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
u32
-krb5_decrypt(struct crypto_skcipher *key,
+krb5_decrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_skcipher *key,
+ struct crypto_sync_skcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
@@ -320,12 +321,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
int
krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
unsigned char *cksum);
int
krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
- struct crypto_skcipher *cipher,
+ struct crypto_sync_skcipher *cipher,
s32 seqnum);
void
gss_krb5_make_confounder(char *p, u32 conflen);
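
For context, crypto_sync_skcipher pairs with an on-stack request API; a hedged sketch of the calling pattern the converted helpers would use internally (sg lists, iv and len hypothetical):

	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int ret;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg_in, sg_out, len, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);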
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 592653becd91..7b540c066594 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -140,8 +140,9 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
-#define RPC_TASK_MSG_RECV 3
-#define RPC_TASK_MSG_RECV_WAIT 4
+#define RPC_TASK_NEED_XMIT 3
+#define RPC_TASK_NEED_RECV 4
+#define RPC_TASK_MSG_PIN_WAIT 5
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
@@ -188,7 +189,6 @@ struct rpc_timer {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
- pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char nr; /* # tasks remaining for cookie */
@@ -204,7 +204,6 @@ struct rpc_wait_queue {
* from a single cookie. The aim is to improve
* performance of NFS operations such as read/write.
*/
-#define RPC_BATCH_COUNT 16
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
@@ -234,6 +233,9 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_task *task);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
+void rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *,
+ struct rpc_task *,
+ int);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index c3d72066d4b1..6b7a86c4d6e6 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -84,7 +84,6 @@ struct svc_xprt {
struct sockaddr_storage xpt_remote; /* remote peer's address */
size_t xpt_remotelen; /* length of address */
char xpt_remotebuf[INET6_ADDRSTRLEN + 10];
- struct rpc_wait_queue xpt_bc_pending; /* backchannel wait queue */
struct list_head xpt_users; /* callbacks on free */
struct net *xpt_net;
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 2bd68177a442..43106ffa6788 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -18,6 +18,7 @@
#include <asm/unaligned.h>
#include <linux/scatterlist.h>
+struct bio_vec;
struct rpc_rqst;
/*
@@ -52,12 +53,14 @@ struct xdr_buf {
struct kvec head[1], /* RPC header + non-page data */
tail[1]; /* Appended after page data */
+ struct bio_vec *bvec;
struct page ** pages; /* Array of pages */
unsigned int page_base, /* Start of page data */
page_len, /* Length of page data */
flags; /* Flags for data disposition */
#define XDRBUF_READ 0x01 /* target of file read */
#define XDRBUF_WRITE 0x02 /* source of file write */
+#define XDRBUF_SPARSE_PAGES 0x04 /* Page array is sparse */
unsigned int buflen, /* Total length of storage buffer */
len; /* Length of XDR encoded message */
@@ -69,6 +72,8 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
buf->head[0].iov_base = start;
buf->head[0].iov_len = len;
buf->tail[0].iov_len = 0;
+ buf->bvec = NULL;
+ buf->pages = NULL;
buf->page_len = 0;
buf->flags = 0;
buf->len = 0;
@@ -115,6 +120,9 @@ __be32 *xdr_decode_netobj(__be32 *p, struct xdr_netobj *);
void xdr_inline_pages(struct xdr_buf *, unsigned int,
struct page **, unsigned int, unsigned int);
void xdr_terminate_string(struct xdr_buf *, const u32);
+size_t xdr_buf_pagecount(struct xdr_buf *buf);
+int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
+void xdr_free_bvec(struct xdr_buf *buf);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -177,10 +185,7 @@ struct xdr_skb_reader {
typedef size_t (*xdr_skb_read_actor)(struct xdr_skb_reader *desc, void *to, size_t len);
-size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len);
extern int csum_partial_copy_to_xdr(struct xdr_buf *, struct sk_buff *);
-extern ssize_t xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
- struct xdr_skb_reader *, xdr_skb_read_actor);
extern int xdr_encode_word(struct xdr_buf *, unsigned int, u32);
extern int xdr_decode_word(struct xdr_buf *, unsigned int, u32 *);
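The bvec helpers declared above pair a bio_vec array with the page list already attached to an xdr_buf, so transports can transmit page data without copying. A minimal sketch of the intended call pattern, assuming a caller that already owns a populated xdr_buf (example_prepare_bvec is an invented name):

#include <linux/sunrpc/xdr.h>

/* Hypothetical caller: size a bio_vec array to the buffer's pages,
 * hand it to a transport, then release it.
 */
static int example_prepare_bvec(struct xdr_buf *buf)
{
	int err;

	/* Allocates one bio_vec per page covered by page_base + page_len. */
	err = xdr_alloc_bvec(buf, GFP_KERNEL);
	if (err)
		return err;

	/* ... transmit using buf->bvec, xdr_buf_pagecount(buf) entries ... */

	xdr_free_bvec(buf);	/* releases the array allocated above */
	return 0;
}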
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 336fd1a19cca..a4ab4f8d9140 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -82,7 +82,14 @@ struct rpc_rqst {
struct page **rq_enc_pages; /* scratch pages for use by
gss privacy code */
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
- struct list_head rq_list;
+
+ union {
+ struct list_head rq_list; /* Slot allocation list */
+ struct rb_node rq_recv; /* Receive queue */
+ };
+
+ struct list_head rq_xmit; /* Send queue */
+ struct list_head rq_xmit2; /* Send queue */
void *rq_buffer; /* Call XDR encode buffer */
size_t rq_callsize;
@@ -103,6 +110,7 @@ struct rpc_rqst {
/* A cookie used to track the
state of the transport
connection */
+ atomic_t rq_pin;
/*
* Partial send handling
@@ -133,7 +141,8 @@ struct rpc_xprt_ops {
void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
int (*buf_alloc)(struct rpc_task *task);
void (*buf_free)(struct rpc_task *task);
- int (*send_request)(struct rpc_task *task);
+ void (*prepare_request)(struct rpc_rqst *req);
+ int (*send_request)(struct rpc_rqst *req);
void (*set_retrans_timeout)(struct rpc_task *task);
void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*release_request)(struct rpc_task *task);
@@ -234,9 +243,12 @@ struct rpc_xprt {
*/
spinlock_t transport_lock; /* lock transport info */
spinlock_t reserve_lock; /* lock slot table */
- spinlock_t recv_lock; /* lock receive list */
+ spinlock_t queue_lock; /* send/receive queue lock */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
+
+ struct list_head xmit_queue; /* Send queue */
+
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
struct svc_serv *bc_serv; /* The RPC service which will */
@@ -248,7 +260,8 @@ struct rpc_xprt {
struct list_head bc_pa_list; /* List of preallocated
* backchannel rpc_rqst's */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
- struct list_head recv;
+
+ struct rb_root recv_queue; /* Receive queue */
struct {
unsigned long bind_count, /* total number of binds */
@@ -325,15 +338,18 @@ struct xprt_class {
struct rpc_xprt *xprt_create_transport(struct xprt_create *args);
void xprt_connect(struct rpc_task *task);
void xprt_reserve(struct rpc_task *task);
-void xprt_request_init(struct rpc_task *task);
void xprt_retry_reserve(struct rpc_task *task);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
void xprt_free_slot(struct rpc_xprt *xprt,
struct rpc_rqst *req);
-void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void xprt_request_prepare(struct rpc_rqst *req);
bool xprt_prepare_transmit(struct rpc_task *task);
+void xprt_request_enqueue_transmit(struct rpc_task *task);
+void xprt_request_enqueue_receive(struct rpc_task *task);
+void xprt_request_wait_receive(struct rpc_task *task);
+bool xprt_request_need_retransmit(struct rpc_task *task);
void xprt_transmit(struct rpc_task *task);
void xprt_end_transmit(struct rpc_task *task);
int xprt_adjust_timeout(struct rpc_rqst *req);
@@ -373,8 +389,8 @@ int xprt_load_transport(const char *);
void xprt_set_retrans_timeout_def(struct rpc_task *task);
void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
-void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
-void xprt_write_space(struct rpc_xprt *xprt);
+void xprt_wait_for_buffer_space(struct rpc_xprt *xprt);
+bool xprt_write_space(struct rpc_xprt *xprt);
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void xprt_update_rtt(struct rpc_task *task);
@@ -382,6 +398,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_pin_rqst(struct rpc_rqst *req);
void xprt_unpin_rqst(struct rpc_rqst *req);
void xprt_release_rqst_cong(struct rpc_task *task);
+bool xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
@@ -400,6 +417,8 @@ void xprt_unlock_connect(struct rpc_xprt *, void *);
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
#define XPRT_CONGESTED (9)
+#define XPRT_CWND_WAIT (10)
+#define XPRT_WRITE_SPACE (11)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
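The ops change above splits per-request preparation out of transmission and passes the rpc_rqst straight to ->send_request(). A hedged sketch of a transport filling in the new hooks (all example_* names are invented; real transports do considerably more):

#include <linux/sunrpc/xprt.h>

static void example_prepare_request(struct rpc_rqst *req)
{
	/* e.g. build a bio_vec array over the send buffer's pages */
	xdr_alloc_bvec(&req->rq_snd_buf, GFP_NOIO);
}

static int example_send_request(struct rpc_rqst *req)
{
	/* transmit req->rq_snd_buf; return 0 or a negative errno */
	return 0;
}

static const struct rpc_xprt_ops example_ops = {
	.prepare_request	= example_prepare_request,
	.send_request		= example_send_request,
	/* remaining ops omitted from this sketch */
};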
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index ae0f99b9b965..458bfe0137f5 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -30,15 +30,25 @@ struct sock_xprt {
/*
* State of TCP reply receive
*/
- __be32 tcp_fraghdr,
- tcp_xid,
- tcp_calldir;
+ struct {
+ struct {
+ __be32 fraghdr,
+ xid,
+ calldir;
+ } __attribute__((packed));
- u32 tcp_offset,
- tcp_reclen;
+ u32 offset,
+ len;
- unsigned long tcp_copied,
- tcp_flags;
+ unsigned long copied;
+ } recv;
+
+ /*
+ * State of TCP transmit queue
+ */
+ struct {
+ u32 offset;
+ } xmit;
/*
* Connection of transports
@@ -68,20 +78,8 @@ struct sock_xprt {
};
/*
- * TCP receive state flags
- */
-#define TCP_RCV_LAST_FRAG (1UL << 0)
-#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
-#define TCP_RCV_COPY_XID (1UL << 2)
-#define TCP_RCV_COPY_DATA (1UL << 3)
-#define TCP_RCV_READ_CALLDIR (1UL << 4)
-#define TCP_RCV_COPY_CALLDIR (1UL << 5)
-
-/*
* TCP RPC flags
*/
-#define TCP_RPC_REPLY (1UL << 6)
-
#define XPRT_SOCK_CONNECTING 1U
#define XPRT_SOCK_DATA_READY (2)
#define XPRT_SOCK_UPD_TIMEOUT (3)
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 965be92c33b5..a387b59640a4 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -67,11 +67,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
/* Accessory functions. */
-void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, unsigned long attrs);
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs);
-
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -107,9 +102,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir);
extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
-extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);
#ifdef CONFIG_SWIOTLB
@@ -121,7 +113,6 @@ static inline unsigned int swiotlb_max_segment(void) { return 0; }
#endif
extern void swiotlb_print_info(void);
-extern int is_swiotlb_buffer(phys_addr_t paddr);
extern void swiotlb_set_max_segment(unsigned int);
extern const struct dma_map_ops swiotlb_dma_ops;
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 2ff814c92f7f..2ac3d13a915b 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -60,7 +60,7 @@ struct tms;
struct utimbuf;
struct mq_attr;
struct compat_stat;
-struct compat_timeval;
+struct old_timeval32;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
@@ -513,7 +513,8 @@ asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
- struct timespec __user *utimes, int flags);
+ struct __kernel_timespec __user *utimes,
+ int flags);
/* kernel/acct.c */
asmlinkage long sys_acct(const char __user *name);
@@ -613,7 +614,7 @@ asmlinkage long sys_sched_yield(void);
asmlinkage long sys_sched_get_priority_max(int policy);
asmlinkage long sys_sched_get_priority_min(int policy);
asmlinkage long sys_sched_rr_get_interval(pid_t pid,
- struct timespec __user *interval);
+ struct __kernel_timespec __user *interval);
/* kernel/signal.c */
asmlinkage long sys_restart_syscall(void);
@@ -634,7 +635,7 @@ asmlinkage long sys_rt_sigprocmask(int how, sigset_t __user *set,
asmlinkage long sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize);
asmlinkage long sys_rt_sigtimedwait(const sigset_t __user *uthese,
siginfo_t __user *uinfo,
- const struct timespec __user *uts,
+ const struct __kernel_timespec __user *uts,
size_t sigsetsize);
asmlinkage long sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo);
@@ -829,7 +830,7 @@ asmlinkage long sys_perf_event_open(
asmlinkage long sys_accept4(int, struct sockaddr __user *, int __user *, int);
asmlinkage long sys_recvmmsg(int fd, struct mmsghdr __user *msg,
unsigned int vlen, unsigned flags,
- struct timespec __user *timeout);
+ struct __kernel_timespec __user *timeout);
asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
int options, struct rusage __user *ru);
@@ -954,8 +955,6 @@ asmlinkage long sys_access(const char __user *filename, int mode);
asmlinkage long sys_rename(const char __user *oldname,
const char __user *newname);
asmlinkage long sys_symlink(const char __user *old, const char __user *new);
-asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
#if defined(__ARCH_WANT_STAT64) || defined(__ARCH_WANT_COMPAT_STAT64)
asmlinkage long sys_stat64(const char __user *filename,
struct stat64 __user *statbuf);
@@ -985,14 +984,18 @@ asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
asmlinkage long sys_time(time_t __user *tloc);
+#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
+asmlinkage long sys_utimes(char __user *filename,
+ struct timeval __user *utimes);
+asmlinkage long sys_futimesat(int dfd, const char __user *filename,
+ struct timeval __user *utimes);
+#endif
asmlinkage long sys_creat(const char __user *pathname, umode_t mode);
asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
unsigned int count);
-asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timeval __user *tvp);
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
diff --git a/include/linux/tc.h b/include/linux/tc.h
index f92511e57cdb..a60639f37963 100644
--- a/include/linux/tc.h
+++ b/include/linux/tc.h
@@ -84,6 +84,7 @@ struct tc_dev {
device. */
struct device dev; /* Generic device interface. */
struct resource resource; /* Address space of this device. */
+ u64 dma_mask; /* DMA addressable range. */
char vendor[9];
char name[9];
char firmware[9];
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index a3ed26082bc1..bf6ec83e60ee 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Thunderbolt service API
*
@@ -5,10 +6,6 @@
* Copyright (C) 2017, Intel Corporation
* Authors: Michael Jamet <michael.jamet@intel.com>
* Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef THUNDERBOLT_H_
diff --git a/include/linux/time32.h b/include/linux/time32.h
index d1ae43c13e25..61904a6c098f 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -13,6 +13,36 @@
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+struct old_itimerspec32 {
+ struct old_timespec32 it_interval;
+ struct old_timespec32 it_value;
+};
+
+struct old_utimbuf32 {
+ old_time32_t actime;
+ old_time32_t modtime;
+};
+
+extern int get_old_timespec32(struct timespec64 *, const void __user *);
+extern int put_old_timespec32(const struct timespec64 *, void __user *);
+extern int get_old_itimerspec32(struct itimerspec64 *its,
+ const struct old_itimerspec32 __user *uits);
+extern int put_old_itimerspec32(const struct itimerspec64 *its,
+ struct old_itimerspec32 __user *uits);
+
+
#if __BITS_PER_LONG == 64
/* timespec64 is defined as timespec here */
@@ -105,16 +135,6 @@ static inline bool timespec_valid(const struct timespec *ts)
return true;
}
-static inline bool timespec_valid_strict(const struct timespec *ts)
-{
- if (!timespec_valid(ts))
- return false;
- /* Disallow values that could overflow ktime_t */
- if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
- return false;
- return true;
-}
-
/**
* timespec_to_ns - Convert timespec to nanoseconds
* @ts: pointer to the timespec variable to be converted
@@ -149,19 +169,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
a->tv_nsec = ns;
}
-/**
- * time_to_tm - converts the calendar time to local broken-down time
- *
- * @totalsecs the number of seconds elapsed since 00:00:00 on January 1, 1970,
- * Coordinated Universal Time (UTC).
- * @offset offset seconds adding to totalsecs.
- * @result pointer to struct tm variable to receive broken-down time
- */
-static inline void time_to_tm(time_t totalsecs, int offset, struct tm *result)
-{
- time64_to_tm(totalsecs, offset, result);
-}
-
static inline unsigned long mktime(const unsigned int year,
const unsigned int mon, const unsigned int day,
const unsigned int hour, const unsigned int min,
@@ -183,8 +190,6 @@ static inline bool timeval_valid(const struct timeval *tv)
return true;
}
-extern struct timespec timespec_trunc(struct timespec t, unsigned int gran);
-
/**
* timeval_to_ns - Convert timeval to nanoseconds
* @ts: pointer to the timeval variable to be converted
@@ -208,18 +213,17 @@ extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
/*
- * New aliases for compat time functions. These will be used to replace
- * the compat code so it can be shared between 32-bit and 64-bit builds
- * both of which provide compatibility with old 32-bit tasks.
+ * Old names for the 32-bit time_t interfaces; these will be removed
+ * when everything uses the new names.
*/
-#define old_time32_t compat_time_t
-#define old_timeval32 compat_timeval
-#define old_timespec32 compat_timespec
-#define old_itimerspec32 compat_itimerspec
-#define ns_to_old_timeval32 ns_to_compat_timeval
-#define get_old_itimerspec32 get_compat_itimerspec64
-#define put_old_itimerspec32 put_compat_itimerspec64
-#define get_old_timespec32 compat_get_timespec64
-#define put_old_timespec32 compat_put_timespec64
+#define compat_time_t old_time32_t
+#define compat_timeval old_timeval32
+#define compat_timespec old_timespec32
+#define compat_itimerspec old_itimerspec32
+#define ns_to_compat_timeval ns_to_old_timeval32
+#define get_compat_itimerspec64 get_old_itimerspec32
+#define put_compat_itimerspec64 put_old_itimerspec32
+#define compat_get_timespec64 get_old_timespec32
+#define compat_put_timespec64 put_old_timespec32
#endif
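The old_* structures above preserve the 32-bit ABI layouts while the kernel works in timespec64 internally; get_old_timespec32()/put_old_timespec32() do the widening and narrowing at the user boundary. A minimal sketch of a handler using them (example_compat_gettime is hypothetical):

#include <linux/errno.h>
#include <linux/time32.h>
#include <linux/timekeeping.h>

static long example_compat_gettime(struct old_timespec32 __user *uts)
{
	struct timespec64 ts64;

	ktime_get_real_ts64(&ts64);

	/* Narrow back to the legacy 32-bit layout for the caller. */
	if (put_old_timespec32(&ts64, uts))
		return -EFAULT;
	return 0;
}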
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index a5a3cfc3c2fa..29975e93fcb8 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -266,9 +266,6 @@ extern int update_persistent_clock64(struct timespec64 now);
* deprecated aliases, don't use in new code
*/
#define getnstimeofday64(ts) ktime_get_real_ts64(ts)
-#define get_monotonic_boottime64(ts) ktime_get_boottime_ts64(ts)
-#define getrawmonotonic64(ts) ktime_get_raw_ts64(ts)
-#define timekeeping_clocktai64(ts) ktime_get_clocktai_ts64(ts)
static inline struct timespec64 current_kernel_time64(void)
{
@@ -279,13 +276,4 @@ static inline struct timespec64 current_kernel_time64(void)
return ts;
}
-static inline struct timespec64 get_monotonic_coarse64(void)
-{
- struct timespec64 ts;
-
- ktime_get_coarse_ts64(&ts);
-
- return ts;
-}
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index 8762c2f45f8b..a502616f7e1c 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -6,27 +6,18 @@
* over time so we can remove the file here.
*/
-extern void do_gettimeofday(struct timeval *tv);
-unsigned long get_seconds(void);
-
-static inline struct timespec current_kernel_time(void)
+static inline void do_gettimeofday(struct timeval *tv)
{
- struct timespec64 ts64;
+ struct timespec64 now;
- ktime_get_coarse_real_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
+ ktime_get_real_ts64(&now);
+ tv->tv_sec = now.tv_sec;
+ tv->tv_usec = now.tv_nsec/1000;
}
-/**
- * Deprecated. Use do_settimeofday64().
- */
-static inline int do_settimeofday(const struct timespec *ts)
+static inline unsigned long get_seconds(void)
{
- struct timespec64 ts64;
-
- ts64 = timespec_to_timespec64(*ts);
- return do_settimeofday64(&ts64);
+ return ktime_get_real_seconds();
}
static inline void getnstimeofday(struct timespec *ts)
@@ -45,14 +36,6 @@ static inline void ktime_get_ts(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline void ktime_get_real_ts(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
static inline void getrawmonotonic(struct timespec *ts)
{
struct timespec64 ts64;
@@ -61,15 +44,6 @@ static inline void getrawmonotonic(struct timespec *ts)
*ts = timespec64_to_timespec(ts64);
}
-static inline struct timespec get_monotonic_coarse(void)
-{
- struct timespec64 ts64;
-
- ktime_get_coarse_ts64(&ts64);
-
- return timespec64_to_timespec(ts64);
-}
-
static inline void getboottime(struct timespec *ts)
{
struct timespec64 ts64;
@@ -79,19 +53,6 @@ static inline void getboottime(struct timespec *ts)
}
/*
- * Timespec interfaces utilizing the ktime based ones
- */
-static inline void get_monotonic_boottime(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_boottime());
-}
-
-static inline void timekeeping_clocktai(struct timespec *ts)
-{
- *ts = ktime_to_timespec(ktime_get_clocktai());
-}
-
-/*
* Persistent clock related interfaces
*/
extern void read_persistent_clock(struct timespec *ts);
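With do_gettimeofday() reduced to an inline wrapper, callers can convert directly to the 64-bit interface. A sketch of the preferred replacement in driver code, assuming a microsecond timestamp is wanted:

#include <linux/time64.h>
#include <linux/timekeeping.h>

static u64 example_stamp_us(void)
{
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	return now.tv_sec * USEC_PER_SEC + now.tv_nsec / NSEC_PER_USEC;
}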
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6f8b68cd460f..a3cd7cb67a69 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -133,6 +133,7 @@ extern void uio_event_notify(struct uio_info *info);
#define UIO_MEM_PHYS 1
#define UIO_MEM_LOGICAL 2
#define UIO_MEM_VIRTUAL 3
+#define UIO_MEM_IOVA 4
/* defines for uio_port->porttype */
#define UIO_PORT_NONE 0
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 07f99362bc90..63758c399e4e 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -77,6 +77,12 @@ struct ci_hdrc_platform_data {
struct ci_hdrc_cable vbus_extcon;
struct ci_hdrc_cable id_extcon;
u32 phy_clkgate_delay_us;
+
+ /* pins */
+ struct pinctrl *pctl;
+ struct pinctrl_state *pins_default;
+ struct pinctrl_state *pins_host;
+ struct pinctrl_state *pins_device;
};
/* Default offset of capability registers */
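The new pinctrl fields let the chipidea core switch pin states as the controller changes roles. A hedged sketch of the selection these fields enable (example_set_role_pins is invented; real code also handles the default state and error paths):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/usb/chipidea.h>

static void example_set_role_pins(struct ci_hdrc_platform_data *pdata,
				  bool host)
{
	struct pinctrl_state *s = host ? pdata->pins_host
				       : pdata->pins_device;

	if (pdata->pctl && !IS_ERR_OR_NULL(s))
		pinctrl_select_state(pdata->pctl, s);
}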
diff --git a/include/linux/wait.h b/include/linux/wait.h
index d9f131ecf708..ed7c122cb31f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -1052,10 +1052,9 @@ do { \
__ret; \
})
-#define __wait_event_interruptible_lock_irq_timeout(wq_head, condition, \
- lock, timeout) \
+#define __wait_event_lock_irq_timeout(wq_head, condition, lock, timeout, state) \
___wait_event(wq_head, ___wait_cond_timeout(condition), \
- TASK_INTERRUPTIBLE, 0, timeout, \
+ state, 0, timeout, \
spin_unlock_irq(&lock); \
__ret = schedule_timeout(__ret); \
spin_lock_irq(&lock));
@@ -1089,8 +1088,19 @@ do { \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
- __ret = __wait_event_interruptible_lock_irq_timeout( \
- wq_head, condition, lock, timeout); \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_INTERRUPTIBLE); \
+ __ret; \
+})
+
+#define wait_event_lock_irq_timeout(wq_head, condition, lock, timeout) \
+({ \
+ long __ret = timeout; \
+ if (!___wait_cond_timeout(condition)) \
+ __ret = __wait_event_lock_irq_timeout( \
+ wq_head, condition, lock, timeout, \
+ TASK_UNINTERRUPTIBLE); \
__ret; \
})
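The new wait_event_lock_irq_timeout() mirrors the interruptible variant but sleeps in TASK_UNINTERRUPTIBLE; the spinlock is dropped around the sleep and re-taken before the condition is re-checked. A usage sketch with an invented wait queue, lock, and flag:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(example_lock);
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static bool example_done;

static int example_wait(void)
{
	long ret;

	spin_lock_irq(&example_lock);
	/* Returns the remaining timeout (>= 1) if the condition became
	 * true, or 0 if the timeout elapsed with it still false. */
	ret = wait_event_lock_irq_timeout(example_wq, example_done,
					  example_lock, HZ);
	spin_unlock_irq(&example_lock);

	return ret ? 0 : -ETIMEDOUT;
}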
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index 77c7908b7d73..2734c895c1bf 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -46,7 +46,6 @@
#include <net/ip.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
-#include <net/ipv6.h>
#include <net/net_namespace.h>
/**
@@ -95,20 +94,18 @@ int rdma_translate_ip(const struct sockaddr *addr,
* @timeout_ms: Amount of time to wait for the address resolution to complete.
* @callback: Call invoked once address resolution has completed, timed out,
* or been canceled. A status of 0 indicates success.
+ * @resolve_by_gid_attr: Resolve the IP address based on the GID attribute from
+ * rdma_dev_addr.
* @context: User-specified context associated with the call.
*/
int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
- struct rdma_dev_addr *addr, int timeout_ms,
+ struct rdma_dev_addr *addr, unsigned long timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
- void *context);
+ bool resolve_by_gid_attr, void *context);
void rdma_addr_cancel(struct rdma_dev_addr *addr);
-void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
- const struct net_device *dev,
- const unsigned char *dst_dev_addr);
-
int rdma_addr_size(const struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index c10f4b5ea8ab..49f4f75499b3 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -583,7 +583,7 @@ struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
__be64 service_id;
- int timeout_ms;
+ unsigned long timeout_ms;
const void *private_data;
u8 private_data_len;
u8 max_cm_retries;
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index b6ddf2a1b9d8..19520979b84c 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -449,28 +449,23 @@ struct ib_sa_query;
void ib_sa_cancel_query(int id, struct ib_sa_query *query);
-int ib_sa_path_rec_get(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- struct sa_path_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct sa_path_rec *resp,
+int ib_sa_path_rec_get(struct ib_sa_client *client, struct ib_device *device,
+ u8 port_num, struct sa_path_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status, struct sa_path_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **query);
+ void *context, struct ib_sa_query **query);
int ib_sa_service_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- u8 method,
- struct ib_sa_service_rec *rec,
- ib_sa_comp_mask comp_mask,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_sa_service_rec *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ struct ib_device *device, u8 port_num, u8 method,
+ struct ib_sa_service_rec *rec,
+ ib_sa_comp_mask comp_mask, unsigned long timeout_ms,
+ gfp_t gfp_mask,
+ void (*callback)(int status,
+ struct ib_sa_service_rec *resp,
+ void *context),
+ void *context, struct ib_sa_query **sa_query);
struct ib_sa_multicast {
struct ib_sa_mcmember_rec rec;
@@ -573,12 +568,11 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
struct ib_sa_guidinfo_rec *rec,
ib_sa_comp_mask comp_mask, u8 method,
- int timeout_ms, gfp_t gfp_mask,
+ unsigned long timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
struct ib_sa_guidinfo_rec *resp,
void *context),
- void *context,
- struct ib_sa_query **sa_query);
+ void *context, struct ib_sa_query **sa_query);
bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
struct ib_device *device,
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a1fd63871d17..5d3755ec5afa 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -42,15 +42,14 @@ struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
+ struct mm_struct *owning_mm;
size_t length;
unsigned long address;
int page_shift;
- int writable;
- int hugetlb;
+ u32 writable : 1;
+ u32 hugetlb : 1;
+ u32 is_odp : 1;
struct work_struct work;
- struct mm_struct *mm;
- unsigned long diff;
- struct ib_umem_odp *odp_data;
struct sg_table sg_head;
int nmap;
int npages;
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 381cdf5a9bd1..0b1446fe2fab 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -43,6 +43,9 @@ struct umem_odp_node {
};
struct ib_umem_odp {
+ struct ib_umem umem;
+ struct ib_ucontext_per_mm *per_mm;
+
/*
* An array of the pages included in the on-demand paging umem.
* Indices of pages that are currently not mapped into the device will
@@ -64,16 +67,9 @@ struct ib_umem_odp {
struct mutex umem_mutex;
void *private; /* for the HW driver to use. */
- /* When false, use the notifier counter in the ucontext struct. */
- bool mn_counters_active;
int notifiers_seq;
int notifiers_count;
- /* A linked list of umems that don't have private mmu notifier
- * counters yet. */
- struct list_head no_private_counters;
- struct ib_umem *umem;
-
/* Tree tracking */
struct umem_odp_node interval_tree;
@@ -82,15 +78,34 @@ struct ib_umem_odp {
struct work_struct work;
};
+static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
+{
+ return container_of(umem, struct ib_umem_odp, umem);
+}
+
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
- int access);
-struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size);
+struct ib_ucontext_per_mm {
+ struct ib_ucontext *context;
+ struct mm_struct *mm;
+ struct pid *tgid;
+ bool active;
+
+ struct rb_root_cached umem_tree;
+ /* Protects umem_tree */
+ struct rw_semaphore umem_rwsem;
-void ib_umem_odp_release(struct ib_umem *umem);
+ struct mmu_notifier mn;
+ unsigned int odp_mrs_count;
+
+ struct list_head ucontext_list;
+ struct rcu_head rcu;
+};
+
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
+struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
+ unsigned long addr, size_t size);
+void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
/*
* The lower 2 bits of the DMA address signal the R/W permissions for
@@ -105,13 +120,14 @@ void ib_umem_odp_release(struct ib_umem *umem);
#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
-int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
- u64 access_mask, unsigned long current_seq);
+int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
+ u64 bcnt, u64 access_mask,
+ unsigned long current_seq);
-void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
+void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
-typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
+typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
void *cookie);
/*
* Call the callback on each ib_umem in the range. Returns the logical or of
@@ -129,46 +145,37 @@ int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
u64 addr, u64 length);
-static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
+static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
unsigned long mmu_seq)
{
/*
* This code is strongly based on the KVM code from
* mmu_notifier_retry. Should be called with
- * the relevant locks taken (item->odp_data->umem_mutex
+ * the relevant locks taken (umem_odp->umem_mutex
* and the ucontext umem_mutex semaphore locked for read).
*/
- /* Do not allow page faults while the new ib_umem hasn't seen a state
- * with zero notifiers yet, and doesn't have its own valid set of
- * private counters. */
- if (!item->odp_data->mn_counters_active)
- return 1;
-
- if (unlikely(item->odp_data->notifiers_count))
+ if (unlikely(umem_odp->notifiers_count))
return 1;
- if (item->odp_data->notifiers_seq != mmu_seq)
+ if (umem_odp->notifiers_seq != mmu_seq)
return 1;
return 0;
}
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int ib_umem_odp_get(struct ib_ucontext *context,
- struct ib_umem *umem,
- int access)
+static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
return -EINVAL;
}
-static inline struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
- unsigned long addr,
- size_t size)
+static inline struct ib_umem_odp *
+ib_alloc_odp_umem(struct ib_ucontext *context, unsigned long addr, size_t size)
{
return ERR_PTR(-EINVAL);
}
-static inline void ib_umem_odp_release(struct ib_umem *umem) {}
+static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
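With the umem embedded in ib_umem_odp, drivers recover the ODP state with to_ib_umem_odp() instead of chasing the old umem->odp_data pointer. A sketch of the page-fault retry pattern under the new types (locking elided, per the comment in ib_umem_mmu_notifier_retry() above; example_fault is invented):

#include <rdma/ib_umem_odp.h>

static int example_fault(struct ib_umem *umem, u64 start, u64 bcnt,
			 u64 access_mask, unsigned long mmu_seq)
{
	struct ib_umem_odp *odp = to_ib_umem_odp(umem);
	int npages;

	npages = ib_umem_odp_map_dma_pages(odp, start, bcnt,
					   access_mask, mmu_seq);
	if (npages < 0)
		return npages;

	/* Caller must hold umem_mutex here; back off if a notifier raced. */
	if (ib_umem_mmu_notifier_retry(odp, mmu_seq))
		return -EAGAIN;

	return 0;
}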
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 0ed5d913a492..9c0c2132a2d6 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -69,8 +69,11 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
+struct ib_umem_odp;
+
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
+extern struct workqueue_struct *ib_comp_unbound_wq;
union ib_gid {
u8 raw[16];
@@ -1137,7 +1140,9 @@ enum ib_qp_create_flags {
*/
struct ib_qp_init_attr {
+ /* Consumer's event_handler callback must not block */
void (*event_handler)(struct ib_event *, void *);
+
void *qp_context;
struct ib_cq *send_cq;
struct ib_cq *recv_cq;
@@ -1146,7 +1151,7 @@ struct ib_qp_init_attr {
struct ib_qp_cap cap;
enum ib_sig_type sq_sig_type;
enum ib_qp_type qp_type;
- enum ib_qp_create_flags create_flags;
+ u32 create_flags;
/*
* Only needed for special QP types, or when using the RW API.
@@ -1278,21 +1283,27 @@ struct ib_qp_attr {
};
enum ib_wr_opcode {
- IB_WR_RDMA_WRITE,
- IB_WR_RDMA_WRITE_WITH_IMM,
- IB_WR_SEND,
- IB_WR_SEND_WITH_IMM,
- IB_WR_RDMA_READ,
- IB_WR_ATOMIC_CMP_AND_SWP,
- IB_WR_ATOMIC_FETCH_AND_ADD,
- IB_WR_LSO,
- IB_WR_SEND_WITH_INV,
- IB_WR_RDMA_READ_WITH_INV,
- IB_WR_LOCAL_INV,
- IB_WR_REG_MR,
- IB_WR_MASKED_ATOMIC_CMP_AND_SWP,
- IB_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+ /* These are shared with userspace */
+ IB_WR_RDMA_WRITE = IB_UVERBS_WR_RDMA_WRITE,
+ IB_WR_RDMA_WRITE_WITH_IMM = IB_UVERBS_WR_RDMA_WRITE_WITH_IMM,
+ IB_WR_SEND = IB_UVERBS_WR_SEND,
+ IB_WR_SEND_WITH_IMM = IB_UVERBS_WR_SEND_WITH_IMM,
+ IB_WR_RDMA_READ = IB_UVERBS_WR_RDMA_READ,
+ IB_WR_ATOMIC_CMP_AND_SWP = IB_UVERBS_WR_ATOMIC_CMP_AND_SWP,
+ IB_WR_ATOMIC_FETCH_AND_ADD = IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD,
+ IB_WR_LSO = IB_UVERBS_WR_TSO,
+ IB_WR_SEND_WITH_INV = IB_UVERBS_WR_SEND_WITH_INV,
+ IB_WR_RDMA_READ_WITH_INV = IB_UVERBS_WR_RDMA_READ_WITH_INV,
+ IB_WR_LOCAL_INV = IB_UVERBS_WR_LOCAL_INV,
+ IB_WR_MASKED_ATOMIC_CMP_AND_SWP =
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP,
+ IB_WR_MASKED_ATOMIC_FETCH_AND_ADD =
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD,
+
+	/* These are kernel-only and cannot be issued by userspace */
+ IB_WR_REG_MR = 0x20,
IB_WR_REG_SIG_MR,
+
/* reserve values for low level drivers' internal use.
* These values will not be used at all in the ib core layer.
*/
@@ -1485,26 +1496,15 @@ struct ib_ucontext {
* it is set when we are closing the file descriptor and indicates
* that mm_sem may be locked.
*/
- int closing;
+ bool closing;
bool cleanup_retryable;
- struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
- struct rb_root_cached umem_tree;
- /*
- * Protects .umem_rbroot and tree, as well as odp_mrs_count and
- * mmu notifiers registration.
- */
- struct rw_semaphore umem_rwsem;
- void (*invalidate_range)(struct ib_umem *umem,
+ void (*invalidate_range)(struct ib_umem_odp *umem_odp,
unsigned long start, unsigned long end);
-
- struct mmu_notifier mn;
- atomic_t notifier_count;
- /* A list of umems that don't have private mmu notifier counters yet. */
- struct list_head no_private_counters;
- int odp_mrs_count;
+ struct mutex per_mm_list_lock;
+ struct list_head per_mm_list;
#endif
struct ib_rdmacg_object cg_obj;
@@ -1570,9 +1570,10 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
- IB_POLL_SOFTIRQ, /* poll from softirq context */
- IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_DIRECT, /* caller context, no hw completions */
+ IB_POLL_SOFTIRQ, /* poll from softirq context */
+ IB_POLL_WORKQUEUE, /* poll from workqueue */
+ IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
};
struct ib_cq {
@@ -1589,6 +1590,7 @@ struct ib_cq {
struct irq_poll iop;
struct work_struct work;
};
+ struct workqueue_struct *comp_wq;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2263,10 +2265,11 @@ struct ib_device {
struct list_head event_handler_list;
spinlock_t event_handler_lock;
- spinlock_t client_data_lock;
+ rwlock_t client_data_lock;
struct list_head core_list;
/* Access to the client_data_list is protected by the client_data_lock
- * spinlock and the lists_rwsem read-write semaphore */
+ * rwlock and the lists_rwsem read-write semaphore
+ */
struct list_head client_data_list;
struct ib_cache cache;
@@ -2550,7 +2553,13 @@ struct ib_device {
struct module *owner;
struct device dev;
- struct kobject *ports_parent;
+ /* First group for device attributes,
+ * Second group for driver provided attributes (optional).
+	 * It is a NULL-terminated array.
+ */
+ const struct attribute_group *groups[3];
+
+ struct kobject *ports_kobj;
struct list_head port_list;
enum {
@@ -2633,9 +2642,9 @@ void ib_dealloc_device(struct ib_device *device);
void ib_get_device_fw_str(struct ib_device *device, char *str);
-int ib_register_device(struct ib_device *device,
- int (*port_callback)(struct ib_device *,
- u8, struct kobject *));
+int ib_register_device(struct ib_device *device, const char *name,
+ int (*port_callback)(struct ib_device *, u8,
+ struct kobject *));
void ib_unregister_device(struct ib_device *device);
int ib_register_client (struct ib_client *client);
@@ -2645,6 +2654,28 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void *data);
+#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
+int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size, pgprot_t prot);
+int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size);
+#else
+static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size,
+ pgprot_t prot)
+{
+ return -EINVAL;
+}
+static inline int rdma_user_mmap_page(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma, struct page *page,
+ unsigned long size)
+{
+ return -EINVAL;
+}
+#endif
+
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
@@ -2728,7 +2759,6 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* @next_state: Next QP state
* @type: QP type
* @mask: Mask of supplied QP attributes
- * @ll : link layer of port
*
* This function is a helper function that a low-level driver's
* modify_qp method can use to validate the consumer's input. It
@@ -2737,8 +2767,7 @@ static inline int ib_destroy_usecnt(atomic_t *usecnt,
* and that the attribute mask supplied is allowed for the transition.
*/
bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
- enum ib_qp_type type, enum ib_qp_attr_mask mask,
- enum rdma_link_layer ll);
+ enum ib_qp_type type, enum ib_qp_attr_mask mask);
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
@@ -4167,20 +4196,6 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
-static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
- struct ib_qp *qp, struct ib_device *device)
-{
- uobj->object = ibflow;
- ibflow->uobject = uobj;
-
- if (qp) {
- atomic_inc(&qp->usecnt);
- ibflow->qp = qp;
- }
-
- ibflow->device = device;
-}
-
/**
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
@@ -4205,4 +4220,26 @@ int rdma_init_netdev(struct ib_device *device, u8 port_num,
void (*setup)(struct net_device *),
struct net_device *netdev);
+/**
+ * rdma_set_device_sysfs_group - Set the device attributes group to hold
+ * driver-specific sysfs entries for the infiniband class.
+ *
+ * @device: device pointer for which attributes to be created
+ * @group: Pointer to group which should be added when device
+ * is registered with sysfs.
+ * rdma_set_device_sysfs_group() allows existing drivers to expose one
+ * group of sysfs attributes per device.
+ *
+ * NOTE: New drivers should not make use of this API; instead, new device
+ * parameters should be exposed via netlink commands. This API and mechanism
+ * exist only for existing drivers.
+ */
+static inline void
+rdma_set_device_sysfs_group(struct ib_device *dev,
+ const struct attribute_group *group)
+{
+ dev->groups[1] = group;
+}
+
#endif /* IB_VERBS_H */
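A sketch of how an existing driver might route a legacy attribute group through the new hook before registration (the attribute, device name, and example_* functions are invented):

#include <rdma/ib_verbs.h>

static ssize_t example_hw_rev_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 1);
}
static DEVICE_ATTR(hw_rev, 0444, example_hw_rev_show, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_hw_rev.attr,
	NULL,
};

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
};

static int example_register(struct ib_device *ibdev)
{
	/* Must be called before registration so groups[1] is populated. */
	rdma_set_device_sysfs_group(ibdev, &example_attr_group);
	return ib_register_device(ibdev, "example%d", NULL);
}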
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 5d71a7f51a9f..60987a5903b7 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -152,7 +152,11 @@ struct rdma_cm_id *__rdma_create_id(struct net *net,
* @ps: RDMA port space.
* @qp_type: type of queue pair associated with the id.
*
- * The id holds a reference on the network namespace until it is destroyed.
+ * Returns a new rdma_cm_id. The id holds a reference on the network
+ * namespace until it is destroyed.
+ *
+ * The event handler callback serializes on the id's mutex and is
+ * allowed to sleep.
*/
#define rdma_create_id(net, event_handler, context, ps, qp_type) \
__rdma_create_id((net), (event_handler), (context), (ps), (qp_type), \
@@ -192,7 +196,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
* @timeout_ms: Time to wait for resolution to complete.
*/
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- const struct sockaddr *dst_addr, int timeout_ms);
+ const struct sockaddr *dst_addr,
+ unsigned long timeout_ms);
/**
* rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier
@@ -202,7 +207,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
* Users must have first called rdma_resolve_addr to resolve a dst_addr
* into an RDMA address before calling this routine.
*/
-int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms);
+int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms);
/**
* rdma_create_qp - Allocate a QP and associate it with the specified RDMA
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h
index c369703fcd69..70218e6b5187 100644
--- a/include/rdma/rdma_netlink.h
+++ b/include/rdma/rdma_netlink.h
@@ -96,7 +96,7 @@ int rdma_nl_multicast(struct sk_buff *skb, unsigned int group, gfp_t flags);
/**
* Check if there are any listeners to the netlink group
* @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
+ * Returns true if there are listeners to the group, false otherwise.
*/
-int rdma_nl_chk_listeners(unsigned int group);
+bool rdma_nl_chk_listeners(unsigned int group);
#endif /* _RDMA_NETLINK_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e79229a0cf01..3584d0816fcd 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -149,6 +149,10 @@ struct rvt_ibport {
#define RVT_CQN_MAX 16 /* maximum length of cq name */
+#define RVT_SGE_COPY_MEMCPY 0
+#define RVT_SGE_COPY_CACHELESS 1
+#define RVT_SGE_COPY_ADAPTIVE 2
+
/*
* Things that are driver specific, module parameters in hfi1 and qib
*/
@@ -161,6 +165,9 @@ struct rvt_driver_params {
*/
unsigned int lkey_table_size;
unsigned int qp_table_size;
+ unsigned int sge_copy_mode;
+ unsigned int wss_threshold;
+ unsigned int wss_clean_period;
int qpn_start;
int qpn_inc;
int qpn_res_start;
@@ -193,6 +200,19 @@ struct rvt_ah {
u8 log_pmtu;
};
+/* memory working set size */
+struct rvt_wss {
+ unsigned long *entries;
+ atomic_t total_count;
+ atomic_t clean_counter;
+ atomic_t clean_entry;
+
+ int threshold;
+ int num_entries;
+ long pages_mask;
+ unsigned int clean_period;
+};
+
struct rvt_dev_info;
struct rvt_swqe;
struct rvt_driver_provided {
@@ -211,11 +231,18 @@ struct rvt_driver_provided {
* version requires the s_lock not to be held. The other assumes the
* s_lock is held.
*/
- void (*schedule_send)(struct rvt_qp *qp);
- void (*schedule_send_no_lock)(struct rvt_qp *qp);
+ bool (*schedule_send)(struct rvt_qp *qp);
+ bool (*schedule_send_no_lock)(struct rvt_qp *qp);
- /* Driver specific work request checking */
- int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
+ /*
+ * Driver specific work request setup and checking.
+ * This function is allowed to perform any setup, checks, or
+ * adjustments required to the SWQE in order to be usable by
+ * underlying protocols. This includes private data structure
+ * allocations.
+ */
+ int (*setup_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ bool *call_send);
/*
* Sometimes rdmavt needs to kick the driver's send progress. That is
@@ -371,6 +398,9 @@ struct rvt_dev_info {
/* post send table */
const struct rvt_operation_params *post_parms;
+ /* opcode translation table */
+ const enum ib_wc_opcode *wc_opcode;
+
/* Driver specific helper functions */
struct rvt_driver_provided driver_f;
@@ -411,6 +441,8 @@ struct rvt_dev_info {
u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
spinlock_t n_mcast_grps_lock;
+ /* Memory Working Set Size */
+ struct rvt_wss *wss;
};
/**
@@ -423,7 +455,14 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
const char *fmt, const char *name,
const int unit)
{
- snprintf(rdi->ibdev.name, sizeof(rdi->ibdev.name), fmt, name, unit);
+ /*
+ * FIXME: rvt and its users want to touch the ibdev before
+ * registration and have things like the name work. We don't have the
+ * infrastructure in the core to support this directly today, hack it
+ * to work by setting the name manually here.
+ */
+ dev_set_name(&rdi->ibdev.dev, fmt, name, unit);
+ strlcpy(rdi->ibdev.name, dev_name(&rdi->ibdev.dev), IB_DEVICE_NAME_MAX);
}
/**
@@ -434,7 +473,7 @@ static inline void rvt_set_ibdev_name(struct rvt_dev_info *rdi,
*/
static inline const char *rvt_get_ibdev_name(const struct rvt_dev_info *rdi)
{
- return rdi->ibdev.name;
+ return dev_name(&rdi->ibdev.dev);
}
static inline struct rvt_pd *ibpd_to_rvtpd(struct ib_pd *ibpd)
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 927f6d5b6d0f..cbafb1878669 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -678,6 +678,13 @@ void rvt_del_timers_sync(struct rvt_qp *qp);
void rvt_stop_rc_timers(struct rvt_qp *qp);
void rvt_add_retry_timer(struct rvt_qp *qp);
+void rvt_copy_sge(struct rvt_qp *qp, struct rvt_sge_state *ss,
+ void *data, u32 length,
+ bool release, bool copy_last);
+void rvt_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
+ enum ib_wc_status status);
+void rvt_ruc_loopback(struct rvt_qp *qp);
+
/**
* struct rvt_qp_iter - the iterator for QPs
* @qp - the current QP
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 9654d33edd98..2638fa7cd702 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -173,16 +173,10 @@ int rdma_restrack_put(struct rdma_restrack_entry *res);
/**
* rdma_restrack_set_task() - set the task for this resource
* @res: resource entry
- * @task: task struct
+ * @caller: kernel name; the current task will be used if the caller is NULL.
*/
-static inline void rdma_restrack_set_task(struct rdma_restrack_entry *res,
- struct task_struct *task)
-{
- if (res->task)
- put_task_struct(res->task);
- get_task_struct(task);
- res->task = task;
-}
+void rdma_restrack_set_task(struct rdma_restrack_entry *res,
+ const char *caller);
/*
* Helper functions for rdma drivers when filling out
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9e997c3c2f04..84d3d15f1f38 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -52,6 +52,7 @@ enum uverbs_attr_type {
UVERBS_ATTR_TYPE_IDR,
UVERBS_ATTR_TYPE_FD,
UVERBS_ATTR_TYPE_ENUM_IN,
+ UVERBS_ATTR_TYPE_IDRS_ARRAY,
};
enum uverbs_obj_access {
@@ -101,7 +102,7 @@ struct uverbs_attr_spec {
} enum_def;
} u;
- /* This weird split of the enum lets us remove some padding */
+ /* This weird split lets us remove some padding */
union {
struct {
/*
@@ -111,6 +112,17 @@ struct uverbs_attr_spec {
*/
const struct uverbs_attr_spec *ids;
} enum_def;
+
+ struct {
+ /*
+			 * The higher bits hold the namespace and the lower
+			 * bits hold the type id within the namespace.
+ */
+ u16 obj_type;
+ u16 min_len;
+ u16 max_len;
+ u8 access;
+ } objs_arr;
} u2;
};
@@ -251,6 +263,11 @@ static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key)
return attr_key - 1;
}
+static inline __attribute_const__ u32 uapi_bkey_to_key_attr(u32 attr_bkey)
+{
+ return attr_bkey + 1;
+}
+
/*
* =======================================
* Verbs definitions
@@ -323,6 +340,27 @@ struct uverbs_object_tree_def {
#define UA_MANDATORY .mandatory = 1
#define UA_OPTIONAL .mandatory = 0
+/*
+ * _min_len must be greater than 0 and _max_len must be smaller than 4095. Only
+ * READ/WRITE accesses are supported.
+ */
+#define UVERBS_ATTR_IDRS_ARR(_attr_id, _idr_type, _access, _min_len, _max_len, \
+ ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id) + \
+ BUILD_BUG_ON_ZERO((_min_len) == 0 || \
+ (_max_len) > \
+ PAGE_SIZE / sizeof(void *) || \
+ (_min_len) > (_max_len) || \
+ (_access) == UVERBS_ACCESS_NEW || \
+ (_access) == UVERBS_ACCESS_DESTROY), \
+ .attr = { .type = UVERBS_ATTR_TYPE_IDRS_ARRAY, \
+ .u2.objs_arr.obj_type = _idr_type, \
+ .u2.objs_arr.access = _access, \
+ .u2.objs_arr.min_len = _min_len, \
+ .u2.objs_arr.max_len = _max_len, \
+ __VA_ARGS__ } })
+
#define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \
(&(const struct uverbs_attr_def){ \
.id = _attr_id, \
@@ -365,6 +403,15 @@ struct uverbs_object_tree_def {
__VA_ARGS__ }, \
})
+/* An input value that is a member in the enum _enum_type. */
+#define UVERBS_ATTR_CONST_IN(_attr_id, _enum_type, ...) \
+ UVERBS_ATTR_PTR_IN( \
+ _attr_id, \
+ UVERBS_ATTR_SIZE( \
+ sizeof(u64) + BUILD_BUG_ON_ZERO(!sizeof(_enum_type)), \
+ sizeof(u64)), \
+ __VA_ARGS__)
+
/*
* An input value that is a bitwise combination of values of _enum_type.
* This permits the flag value to be passed as either a u32 or u64, it must
@@ -431,10 +478,16 @@ struct uverbs_obj_attr {
const struct uverbs_api_attr *attr_elm;
};
+struct uverbs_objs_arr_attr {
+ struct ib_uobject **uobjects;
+ u16 len;
+};
+
struct uverbs_attr {
union {
struct uverbs_ptr_attr ptr_attr;
struct uverbs_obj_attr obj_attr;
+ struct uverbs_objs_arr_attr objs_arr_attr;
};
};
@@ -507,6 +560,31 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+/**
+ * uverbs_attr_get_uobjs_arr() - Provides the array's properties for a
+ * UVERBS_ATTR_TYPE_IDRS_ARRAY attribute.
+ * @arr: Returned pointer to array of pointers for uobjects or NULL if
+ * the attribute isn't provided.
+ *
+ * Return: The array length or 0 if no attribute was provided.
+ */
+static inline int uverbs_attr_get_uobjs_arr(
+ const struct uverbs_attr_bundle *attrs_bundle, u16 attr_idx,
+ struct ib_uobject ***arr)
+{
+ const struct uverbs_attr *attr =
+ uverbs_attr_get(attrs_bundle, attr_idx);
+
+ if (IS_ERR(attr)) {
+ *arr = NULL;
+ return 0;
+ }
+
+ *arr = attr->objs_arr_attr.uobjects;
+
+ return attr->objs_arr_attr.len;
+}
+
static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
{
return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
@@ -603,6 +681,9 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
}
+int _uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val);
#else
static inline int
uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
@@ -631,6 +712,34 @@ static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
{
return ERR_PTR(-EINVAL);
}
+static inline int
+_uverbs_get_const(s64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, s64 lower_bound, u64 upper_bound,
+ s64 *def_val)
+{
+ return -EINVAL;
+}
#endif
+#define uverbs_get_const(_to, _attrs_bundle, _idx) \
+ ({ \
+ s64 _val; \
+ int _ret = _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), NULL); \
+ (*_to) = _val; \
+ _ret; \
+ })
+
+#define uverbs_get_const_default(_to, _attrs_bundle, _idx, _default) \
+ ({ \
+ s64 _val; \
+ s64 _def_val = _default; \
+ int _ret = \
+ _uverbs_get_const(&_val, _attrs_bundle, _idx, \
+ type_min(typeof(*_to)), \
+ type_max(typeof(*_to)), &_def_val); \
+ (*_to) = _val; \
+ _ret; \
+ })
#endif
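uverbs_get_const() extracts a signed constant from the attribute bundle and rejects values that do not fit the destination's type range. A hedged sketch of use in a function that receives the bundle (the attribute id is hypothetical):

#include <rdma/uverbs_ioctl.h>

#define EXAMPLE_ATTR_MODE 1	/* invented attribute id */

static int example_parse_mode(struct uverbs_attr_bundle *attrs, int *mode)
{
	/* Returns a negative errno for a missing or out-of-range value. */
	return uverbs_get_const(mode, attrs, EXAMPLE_ATTR_MODE);
}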
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 3b00231cc084..3db2802fbc68 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -140,5 +140,56 @@ __uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile,
#define uobj_alloc(_type, _ufile, _ib_dev) \
__uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev)
+static inline void uverbs_flow_action_fill_action(struct ib_flow_action *action,
+ struct ib_uobject *uobj,
+ struct ib_device *ib_dev,
+ enum ib_flow_action_type type)
+{
+ atomic_set(&action->usecnt, 0);
+ action->device = ib_dev;
+ action->type = type;
+ action->uobject = uobj;
+ uobj->object = action;
+}
+
+struct ib_uflow_resources {
+ size_t max;
+ size_t num;
+ size_t collection_num;
+ size_t counters_num;
+ struct ib_counters **counters;
+ struct ib_flow_action **collection;
+};
+
+struct ib_uflow_object {
+ struct ib_uobject uobject;
+ struct ib_uflow_resources *resources;
+};
+
+struct ib_uflow_resources *flow_resources_alloc(size_t num_specs);
+void flow_resources_add(struct ib_uflow_resources *uflow_res,
+ enum ib_flow_spec_type type,
+ void *ibobj);
+void ib_uverbs_flow_resources_free(struct ib_uflow_resources *uflow_res);
+
+static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
+ struct ib_qp *qp, struct ib_device *device,
+ struct ib_uflow_resources *uflow_res)
+{
+ struct ib_uflow_object *uflow;
+
+ uobj->object = ibflow;
+ ibflow->uobject = uobj;
+
+ if (qp) {
+ atomic_inc(&qp->usecnt);
+ ibflow->qp = qp;
+ }
+
+ ibflow->device = device;
+ uflow = container_of(uobj, typeof(*uflow), uobject);
+ uflow->resources = uflow_res;
+}
+
#endif
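ib_set_flow() now also attaches the pre-collected resources so they are released together with the flow uobject. A sketch of the create-flow path under the new signature (error handling trimmed; example_create_flow is invented):

#include <rdma/uverbs_std_types.h>

static int example_create_flow(struct ib_uobject *uobj, struct ib_qp *qp,
			       struct ib_flow *flow, size_t num_specs)
{
	struct ib_uflow_resources *uflow_res;

	uflow_res = flow_resources_alloc(num_specs);
	if (!uflow_res)
		return -ENOMEM;

	/* ... one flow_resources_add() call per action/counter spec ... */

	ib_set_flow(uobj, flow, qp, qp->device, uflow_res);
	return 0;
}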
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
index 2576abaa7779..90ae8d191f1a 100644
--- a/include/soc/fsl/dpaa2-fd.h
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -66,6 +66,15 @@ struct dpaa2_fd {
#define SG_BPID_MASK 0x3FFF
#define SG_FINAL_FLAG_MASK 0x1
#define SG_FINAL_FLAG_SHIFT 15
+#define FL_SHORT_LEN_FLAG_MASK 0x1
+#define FL_SHORT_LEN_FLAG_SHIFT 14
+#define FL_SHORT_LEN_MASK 0x3FFFF
+#define FL_OFFSET_MASK 0x0FFF
+#define FL_FORMAT_MASK 0x3
+#define FL_FORMAT_SHIFT 12
+#define FL_BPID_MASK 0x3FFF
+#define FL_FINAL_FLAG_MASK 0x1
+#define FL_FINAL_FLAG_SHIFT 15
/* Error bits in FD CTRL */
#define FD_CTRL_ERR_MASK 0x000000FF
@@ -435,4 +444,237 @@ static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
}
+/**
+ * struct dpaa2_fl_entry - structure for frame list entry.
+ * @addr: address in the FLE
+ * @len: length in the FLE
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits, including pta, pvt1, pvt2, err, etc.
+ * @flc: flow context address
+ */
+struct dpaa2_fl_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+};
+
+enum dpaa2_fl_format {
+ dpaa2_fl_single = 0,
+ dpaa2_fl_res,
+ dpaa2_fl_sg
+};
+
+/**
+ * dpaa2_fl_get_addr() - Get the addr field of FLE
+ * @fle: the given frame list entry
+ *
+ * Return the address in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->addr);
+}
+
+/**
+ * dpaa2_fl_set_addr() - Set the addr field of FLE
+ * @fle: the given frame list entry
+ * @addr: the address to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle,
+ dma_addr_t addr)
+{
+ fle->addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fl_get_frc() - Get the frame context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the frame context field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_frc(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->frc);
+}
+
+/**
+ * dpaa2_fl_set_frc() - Set the frame context in the FLE
+ * @fle: the given frame list entry
+ * @frc: the frame context to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_frc(struct dpaa2_fl_entry *fle, u32 frc)
+{
+ fle->frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fl_get_ctrl() - Get the control bits in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the control bits field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_ctrl(const struct dpaa2_fl_entry *fle)
+{
+ return le32_to_cpu(fle->ctrl);
+}
+
+/**
+ * dpaa2_fl_set_ctrl() - Set the control bits in the FLE
+ * @fle: the given frame list entry
+ * @ctrl: the control bits to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_ctrl(struct dpaa2_fl_entry *fle, u32 ctrl)
+{
+ fle->ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fl_get_flc() - Get the flow context in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the flow context in the frame list entry.
+ */
+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle)
+{
+ return (dma_addr_t)le64_to_cpu(fle->flc);
+}
+
+/**
+ * dpaa2_fl_set_flc() - Set the flow context field of FLE
+ * @fle: the given frame list entry
+ * @flc_addr: the flow context address to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle,
+ dma_addr_t flc_addr)
+{
+ fle->flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fl_short_len(const struct dpaa2_fl_entry *fle)
+{
+ return !!((le16_to_cpu(fle->format_offset) >>
+ FL_SHORT_LEN_FLAG_SHIFT) & FL_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fl_get_len() - Get the length in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the length field in the frame list entry.
+ */
+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle)
+{
+ if (dpaa2_fl_short_len(fle))
+ return le32_to_cpu(fle->len) & FL_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fle->len);
+}
+
+/**
+ * dpaa2_fl_set_len() - Set the length field of FLE
+ * @fle: the given frame list entry
+ * @len: the length to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len)
+{
+ fle->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fl_get_offset() - Get the offset field in the frame list entry
+ * @fle: the given frame list entry
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->format_offset) & FL_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fl_set_offset() - Set the offset field of FLE
+ * @fle: the given frame list entry
+ * @offset: the offset to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, u16 offset)
+{
+ fle->format_offset &= cpu_to_le16(~FL_OFFSET_MASK);
+ fle->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fl_get_format() - Get the format field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fl_format dpaa2_fl_get_format(const struct dpaa2_fl_entry *fle)
+{
+ return (enum dpaa2_fl_format)((le16_to_cpu(fle->format_offset) >>
+ FL_FORMAT_SHIFT) & FL_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fl_set_format() - Set the format field of FLE
+ * @fle: the given frame list entry
+ * @format: the format to be set in the frame list entry
+ */
+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle,
+ enum dpaa2_fl_format format)
+{
+ fle->format_offset &= cpu_to_le16(~(FL_FORMAT_MASK << FL_FORMAT_SHIFT));
+ fle->format_offset |= cpu_to_le16(format << FL_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fl_get_bpid() - Get the bpid field in the FLE
+ * @fle: the given frame list entry
+ *
+ * Return the buffer pool id.
+ */
+static inline u16 dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle)
+{
+ return le16_to_cpu(fle->bpid) & FL_BPID_MASK;
+}
+
+/**
+ * dpaa2_fl_set_bpid() - Set the bpid field of FLE
+ * @fle: the given frame list entry
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, u16 bpid)
+{
+ fle->bpid &= cpu_to_le16(~(FL_BPID_MASK));
+ fle->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_fl_is_final() - Check final bit in FLE
+ * @fle: the given frame list entry
+ *
+ * Return true if the final bit is set in the frame list entry.
+ */
+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle)
+{
+ return !!(le16_to_cpu(fle->format_offset) >> FL_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_fl_set_final() - Set the final bit in FLE
+ * @fle: the given frame list entry
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final)
+{
+ fle->format_offset &= cpu_to_le16((~(FL_FINAL_FLAG_MASK <<
+ FL_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ fle->format_offset |= cpu_to_le16(final << FL_FINAL_FLAG_SHIFT);
+}
+
#endif /* __FSL_DPAA2_FD_H */
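A minimal usage sketch for the accessors added above; every helper it calls is defined in this hunk, while the function itself, its parameters, and the assumption that the buffer was already mapped with dma_map_single() are illustrative only:

/*
 * Sketch: describe one contiguous buffer with a single FLE, using only
 * the helpers defined above. The dma_addr_t is assumed to come from an
 * earlier dma_map_single() call elsewhere in the driver.
 */
static void example_fill_single_fle(struct dpaa2_fl_entry *fle,
				    dma_addr_t buf, u32 buf_len, u16 bpid)
{
	memset(fle, 0, sizeof(*fle));
	dpaa2_fl_set_addr(fle, buf);
	dpaa2_fl_set_len(fle, buf_len);
	dpaa2_fl_set_bpid(fle, bpid);
	dpaa2_fl_set_offset(fle, 0);
	dpaa2_fl_set_format(fle, dpaa2_fl_single);
	dpaa2_fl_set_final(fle, true);	/* only entry, so also the last */
}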
diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h
index 9bc0713346a8..2bfc379d3dc9 100644
--- a/include/soc/fsl/dpaa2-global.h
+++ b/include/soc/fsl/dpaa2-global.h
@@ -174,4 +174,19 @@ static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
return (const struct dpaa2_fd *)&dq->dq.fd[0];
}
+#define DPAA2_CSCN_SIZE sizeof(struct dpaa2_dq)
+#define DPAA2_CSCN_ALIGN 16
+#define DPAA2_CSCN_STATE_CG BIT(0)
+
+/**
+ * dpaa2_cscn_state_congested() - Check congestion state
+ * @cscn: congestion SCN (delivered to WQ or memory)
+ *
+ * Return true if congested.
+ */
+static inline bool dpaa2_cscn_state_congested(struct dpaa2_dq *cscn)
+{
+ return !!(cscn->scn.state & DPAA2_CSCN_STATE_CG);
+}
+
#endif /* __FSL_DPAA2_GLOBAL_H */
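A minimal sketch of how a Tx path might consult a CSCN written to memory; the netdev_tx_t framing is an assumption, and cscn_mem is assumed to be the DMA-coherent area the congestion group was configured to notify:

static netdev_tx_t example_tx_congestion_check(struct dpaa2_dq *cscn_mem)
{
	/* DPAA2_CSCN_STATE_CG stays set while the congestion group is
	 * above its entrance threshold; back off and let the stack retry. */
	if (dpaa2_cscn_state_congested(cscn_mem))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}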
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
index ab51e40d11db..70997ab2146c 100644
--- a/include/soc/fsl/dpaa2-io.h
+++ b/include/soc/fsl/dpaa2-io.h
@@ -97,9 +97,13 @@ void dpaa2_io_service_deregister(struct dpaa2_io *service,
int dpaa2_io_service_rearm(struct dpaa2_io *service,
struct dpaa2_io_notification_ctx *ctx);
+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, u32 fqid,
+ struct dpaa2_io_store *s);
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
struct dpaa2_io_store *s);
+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
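A minimal sketch of the new FQ-based enqueue declared above; the bounded retry loop and the -EBUSY convention for a full software portal are assumptions modeled on the QD-based variant:

static int example_enqueue_fq(struct dpaa2_io *io, u32 fqid,
			      const struct dpaa2_fd *fd)
{
	int i, err;

	/* Retry a few times if the software portal has no room. */
	for (i = 0; i < 10; i++) {
		err = dpaa2_io_service_enqueue_fq(io, fqid, fd);
		if (err != -EBUSY)
			return err;
	}
	return -EBUSY;
}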
diff --git a/sound/pci/hda/hda_codec.h b/include/sound/hda_codec.h
index 0d98bb9068b1..0d98bb9068b1 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/include/sound/hda_codec.h
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 67561b997915..af3fa577fa06 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -47,10 +47,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_UNKNOWN 0 /* not defined */
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
+#define SNDRV_DMA_TYPE_DEV_UC 5 /* continuous non-cahced */
#ifdef CONFIG_SND_DMA_SGBUF
#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_UC_SG 6 /* SG non-cached */
#else
#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_UC_SG SNDRV_DMA_TYPE_DEV_UC
#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
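A minimal sketch of requesting the new non-cached type through snd_dma_alloc_pages(); the 8 KiB size and the wrapper function are illustrative only:

static int example_alloc_uc_buffer(struct device *dev,
				   struct snd_dma_buffer *dmab)
{
	/* Non-cached continuous pages; on configs without
	 * CONFIG_SND_DMA_SGBUF the _UC_SG type falls back to this. */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_UC, dev, 8192, dmab);
}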
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 6665cb29e1a2..3b5a061132b6 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -171,6 +171,7 @@ int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
unsigned char *buffer, int count);
int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
int count);
+int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream);
/* main midi functions */
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 8bc5e2d8b13c..fb0318f9b10f 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -51,29 +51,35 @@ int asoc_simple_card_parse_card_name(struct snd_soc_card *card,
#define asoc_simple_card_parse_clk_cpu(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->cpu_of_node, simple_dai, \
- dai_link->cpu_dai_name)
+ dai_link->cpu_dai_name, NULL)
#define asoc_simple_card_parse_clk_codec(dev, node, dai_link, simple_dai) \
asoc_simple_card_parse_clk(dev, node, dai_link->codec_of_node, simple_dai,\
- dai_link->codec_dai_name)
+ dai_link->codec_dai_name, dai_link->codecs)
int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
struct asoc_simple_dai *simple_dai,
- const char *name);
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc);
int asoc_simple_card_clk_enable(struct asoc_simple_dai *dai);
void asoc_simple_card_clk_disable(struct asoc_simple_dai *dai);
#define asoc_simple_card_parse_cpu(node, dai_link, \
list_name, cells_name, is_single_link) \
- asoc_simple_card_parse_dai(node, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_dai(node, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name, list_name, cells_name, is_single_link)
#define asoc_simple_card_parse_codec(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->codec_of_node, \
- &dai_link->codec_dai_name, list_name, cells_name, NULL)
+ asoc_simple_card_parse_dai(node, dai_link->codecs, \
+ &dai_link->codec_of_node, \
+ &dai_link->codec_dai_name, \
+ list_name, cells_name, NULL)
#define asoc_simple_card_parse_platform(node, dai_link, list_name, cells_name) \
- asoc_simple_card_parse_dai(node, &dai_link->platform_of_node, \
+ asoc_simple_card_parse_dai(node, dai_link->platform, \
+ &dai_link->platform_of_node, \
NULL, list_name, cells_name, NULL)
int asoc_simple_card_parse_dai(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name,
const char *list_name,
@@ -81,12 +87,15 @@ int asoc_simple_card_parse_dai(struct device_node *node,
int *is_single_links);
#define asoc_simple_card_parse_graph_cpu(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->cpu_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, NULL, \
+ &dai_link->cpu_of_node, \
&dai_link->cpu_dai_name)
#define asoc_simple_card_parse_graph_codec(ep, dai_link) \
- asoc_simple_card_parse_graph_dai(ep, &dai_link->codec_of_node, \
+ asoc_simple_card_parse_graph_dai(ep, dai_link->codecs, \
+ &dai_link->codec_of_node, \
&dai_link->codec_dai_name)
int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **endpoint_np,
const char **dai_name);
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index bb1d24b703fb..f48f59e5b7b0 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -25,4 +25,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+/*
+ * generic table used for HDA codec-based platforms, possibly with
+ * additional ACPI-enumerated codecs
+ */
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[];
+
#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index fdaaafdc7a00..bd8163f151cb 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -406,12 +406,6 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
struct snd_soc_dai *dai);
int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card);
void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card);
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink);
/* dapm path setup */
int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
@@ -590,9 +584,6 @@ struct snd_soc_dapm_widget {
void *priv; /* widget specific data */
struct regulator *regulator; /* attached regulator */
struct pinctrl *pinctrl; /* attached pinctrl */
- const struct snd_soc_pcm_stream *params; /* params for dai links */
- unsigned int num_params; /* number of params for dai links */
- unsigned int params_select; /* currently selected param for dai link */
/* dapm control */
int reg; /* negative reg = no direct dapm */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 9bb92f187af8..4be3a2b7c106 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,6 +103,16 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
+#define for_each_dpcm_fe(be, stream, dpcm) \
+ list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, dpcm) \
+ list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
+ list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
+ list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be, int stream);
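A minimal sketch of the new iterator replacing an open-coded list walk over be_clients, assuming the usual snd_soc_dpcm layout; the dev_dbg() output is illustrative:

static void example_list_backends(struct snd_soc_pcm_runtime *fe)
{
	struct snd_soc_dpcm *dpcm;

	for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm)
		dev_dbg(fe->dev, "FE %s -> BE %s\n",
			fe->dai_link->name, dpcm->be->dai_link->name);
}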
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 41cec42fb456..f1dab1f4b194 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -372,6 +372,11 @@
#define SND_SOC_COMP_ORDER_LATE 1
#define SND_SOC_COMP_ORDER_LAST 2
+#define for_each_comp_order(order) \
+ for (order = SND_SOC_COMP_ORDER_FIRST; \
+ order <= SND_SOC_COMP_ORDER_LAST; \
+ order++)
+
/*
* Bias levels
*
@@ -859,6 +864,11 @@ struct snd_soc_component {
#endif
};
+#define for_each_component_dais(component, dai)\
+ list_for_each_entry(dai, &(component)->dai_list, list)
+#define for_each_component_dais_safe(component, dai, _dai)\
+ list_for_each_entry_safe(dai, _dai, &(component)->dai_list, list)
+
struct snd_soc_rtdcom_list {
struct snd_soc_component *component;
struct list_head list; /* rtd::component_list */
@@ -915,6 +925,8 @@ struct snd_soc_dai_link {
*/
const char *platform_name;
struct device_node *platform_of_node;
+ struct snd_soc_dai_link_component *platform;
+
int id; /* optional ID for machine driver link identification */
const struct snd_soc_pcm_stream *params;
@@ -976,6 +988,10 @@ struct snd_soc_dai_link {
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
+#define for_each_link_codecs(link, i, codec) \
+ for ((i) = 0; \
+ ((i) < link->num_codecs) && ((codec) = &link->codecs[i]); \
+ (i)++)
struct snd_soc_codec_conf {
/*
@@ -1054,7 +1070,6 @@ struct snd_soc_card {
struct snd_soc_dai_link *dai_link; /* predefined links only */
int num_links; /* predefined links only */
struct list_head dai_link_list; /* all links */
- int num_dai_links;
struct list_head rtd_list;
int num_rtd;
@@ -1092,6 +1107,7 @@ struct snd_soc_card {
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
+ struct list_head list;
struct list_head widgets;
struct list_head paths;
@@ -1114,6 +1130,23 @@ struct snd_soc_card {
void *drvdata;
};
+#define for_each_card_prelinks(card, i, link) \
+ for ((i) = 0; \
+ ((i) < (card)->num_links) && ((link) = &(card)->dai_link[i]); \
+ (i)++)
+
+#define for_each_card_links(card, link) \
+ list_for_each_entry(link, &(card)->dai_link_list, list)
+#define for_each_card_links_safe(card, link, _link) \
+ list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list)
+
+#define for_each_card_rtds(card, rtd) \
+ list_for_each_entry(rtd, &(card)->rtd_list, list)
+#define for_each_card_rtds_safe(card, rtd, _rtd) \
+ list_for_each_entry_safe(rtd, _rtd, &(card)->rtd_list, list)
+
+#define for_each_card_components(card, component) \
+ list_for_each_entry(component, &(card)->component_dev_list, card_list)
/* SoC machine DAI configuration, glues a codec and cpu DAI together */
struct snd_soc_pcm_runtime {
@@ -1124,6 +1157,8 @@ struct snd_soc_pcm_runtime {
enum snd_soc_pcm_subclass pcm_subclass;
struct snd_pcm_ops ops;
+ unsigned int params_select; /* currently selected param for dai link */
+
/* Dynamic PCM BE runtime data */
struct snd_soc_dpcm_runtime dpcm[2];
int fe_compr;
@@ -1152,6 +1187,13 @@ struct snd_soc_pcm_runtime {
unsigned int dev_registered:1;
unsigned int pop_wait:1;
};
+#define for_each_rtd_codec_dai(rtd, i, dai)\
+ for ((i) = 0; \
+ ((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
+ (i)++)
+#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
+ for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+
/* mixer control */
struct soc_mixer_control {
@@ -1359,6 +1401,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->dapm_list);
INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
+ INIT_LIST_HEAD(&card->list);
}
static inline bool snd_soc_volsw_is_stereo(struct soc_mixer_control *mc)
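A minimal sketch combining the new card-level iterators from this hunk; the function and its debug output are purely illustrative:

static void example_walk_card(struct snd_soc_card *card)
{
	struct snd_soc_dai_link *link;
	struct snd_soc_pcm_runtime *rtd;
	struct snd_soc_dai *dai;
	int i;

	/* Statically registered links only, by index */
	for_each_card_prelinks(card, i, link)
		dev_dbg(card->dev, "prelink %d: %s\n", i, link->name);

	/* Every instantiated runtime, and each codec DAI on it */
	for_each_card_rtds(card, rtd)
		for_each_rtd_codec_dai(rtd, i, dai)
			dev_dbg(card->dev, "codec dai: %s\n", dai->name);
}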
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index f2e6abea8490..24c398f4a68f 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -25,6 +25,7 @@ struct sock;
#define ISCSIT_TCP_BACKLOG 256
#define ISCSI_RX_THREAD_NAME "iscsi_trx"
#define ISCSI_TX_THREAD_NAME "iscsi_ttx"
+#define ISCSI_IQN_LEN 224
/* struct iscsi_node_attrib sanity values */
#define NA_DATAOUT_TIMEOUT 3
@@ -270,9 +271,9 @@ struct iscsi_conn_ops {
};
struct iscsi_sess_ops {
- char InitiatorName[224];
+ char InitiatorName[ISCSI_IQN_LEN];
char InitiatorAlias[256];
- char TargetName[224];
+ char TargetName[ISCSI_IQN_LEN];
char TargetAlias[256];
char TargetAddress[256];
u16 TargetPortalGroupTag; /* [0..65535] */
@@ -855,7 +856,6 @@ struct iscsi_wwn_stat_grps {
};
struct iscsi_tiqn {
-#define ISCSI_IQN_LEN 224
unsigned char tiqn[ISCSI_IQN_LEN];
enum tiqn_state_table tiqn_state;
int tiqn_access_count;
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
index 4d75a2c426ca..ff6a47209313 100644
--- a/include/target/iscsi/iscsi_target_stat.h
+++ b/include/target/iscsi/iscsi_target_stat.h
@@ -33,7 +33,7 @@ struct iscsi_sess_err_stats {
u32 cxn_timeout_errors;
u32 pdu_format_errors;
u32 last_sess_failure_type;
- char last_sess_fail_rem_name[224];
+ char last_sess_fail_rem_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI login failure types (sub oids) */
@@ -56,7 +56,7 @@ struct iscsi_login_stats {
u32 last_fail_type;
int last_intr_fail_ip_family;
struct sockaddr_storage last_intr_fail_sockaddr;
- char last_intr_fail_name[224];
+ char last_intr_fail_name[ISCSI_IQN_LEN];
} ____cacheline_aligned;
/* iSCSI logout stats */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7a4ee7852ca4..e3bdb0550a59 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -138,7 +138,6 @@ enum se_cmd_flags_table {
SCF_ALUA_NON_OPTIMIZED = 0x00008000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00020000,
SCF_COMPARE_AND_WRITE = 0x00080000,
- SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
SCF_ACK_KREF = 0x00400000,
SCF_USE_CPUID = 0x00800000,
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 795698925d20..3ec73f17ee2a 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -82,7 +82,6 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ REQ_OP_WRITE, "WRITE" }, \
{ REQ_OP_FLUSH, "FLUSH" }, \
{ REQ_OP_DISCARD, "DISCARD" }, \
- { REQ_OP_ZONE_REPORT, "ZONE_REPORT" }, \
{ REQ_OP_SECURE_ERASE, "SECURE_ERASE" }, \
{ REQ_OP_ZONE_RESET, "ZONE_RESET" }, \
{ REQ_OP_WRITE_SAME, "WRITE_SAME" }, \
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 53df203b8057..b093058f78aa 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -263,7 +263,7 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
);
#define DEFINE_MR_EVENT(name) \
- DEFINE_EVENT(xprtrdma_mr, name, \
+ DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
TP_PROTO( \
const struct rpcrdma_mr *mr \
), \
@@ -306,7 +306,7 @@ DECLARE_EVENT_CLASS(xprtrdma_cb_event,
** Connection events
**/
-TRACE_EVENT(xprtrdma_conn_upcall,
+TRACE_EVENT(xprtrdma_cm_event,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
struct rdma_cm_event *event
@@ -377,7 +377,7 @@ DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
-TRACE_EVENT(xprtrdma_qp_error,
+TRACE_EVENT(xprtrdma_qp_event,
TP_PROTO(
const struct rpcrdma_xprt *r_xprt,
const struct ib_event *event
@@ -509,7 +509,7 @@ TRACE_EVENT(xprtrdma_post_send,
TP_STRUCT__entry(
__field(const void *, req)
__field(int, num_sge)
- __field(bool, signaled)
+ __field(int, signaled)
__field(int, status)
),
@@ -651,11 +651,11 @@ DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_MR_EVENT(xprtrdma_localinv);
-DEFINE_MR_EVENT(xprtrdma_dma_map);
-DEFINE_MR_EVENT(xprtrdma_dma_unmap);
-DEFINE_MR_EVENT(xprtrdma_remoteinv);
-DEFINE_MR_EVENT(xprtrdma_recover_mr);
+DEFINE_MR_EVENT(localinv);
+DEFINE_MR_EVENT(map);
+DEFINE_MR_EVENT(unmap);
+DEFINE_MR_EVENT(remoteinv);
+DEFINE_MR_EVENT(recycle);
/**
** Reply events
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index bbb08a3ef5cc..28e384186c35 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -470,14 +470,14 @@ TRACE_EVENT(xprt_ping,
__get_str(addr), __get_str(port), __entry->status)
);
-TRACE_EVENT(xs_tcp_data_ready,
- TP_PROTO(struct rpc_xprt *xprt, int err, unsigned int total),
+TRACE_EVENT(xs_stream_read_data,
+ TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
TP_ARGS(xprt, err, total),
TP_STRUCT__entry(
- __field(int, err)
- __field(unsigned int, total)
+ __field(ssize_t, err)
+ __field(size_t, total)
__string(addr, xprt ? xprt->address_strings[RPC_DISPLAY_ADDR] :
"(null)")
__string(port, xprt ? xprt->address_strings[RPC_DISPLAY_PORT] :
@@ -493,21 +493,11 @@ TRACE_EVENT(xs_tcp_data_ready,
xprt->address_strings[RPC_DISPLAY_PORT] : "(null)");
),
- TP_printk("peer=[%s]:%s err=%d total=%u", __get_str(addr),
+ TP_printk("peer=[%s]:%s err=%zd total=%zu", __get_str(addr),
__get_str(port), __entry->err, __entry->total)
);
-#define rpc_show_sock_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { TCP_RCV_LAST_FRAG, "TCP_RCV_LAST_FRAG" }, \
- { TCP_RCV_COPY_FRAGHDR, "TCP_RCV_COPY_FRAGHDR" }, \
- { TCP_RCV_COPY_XID, "TCP_RCV_COPY_XID" }, \
- { TCP_RCV_COPY_DATA, "TCP_RCV_COPY_DATA" }, \
- { TCP_RCV_READ_CALLDIR, "TCP_RCV_READ_CALLDIR" }, \
- { TCP_RCV_COPY_CALLDIR, "TCP_RCV_COPY_CALLDIR" }, \
- { TCP_RPC_REPLY, "TCP_RPC_REPLY" })
-
-TRACE_EVENT(xs_tcp_data_recv,
+TRACE_EVENT(xs_stream_read_request,
TP_PROTO(struct sock_xprt *xs),
TP_ARGS(xs),
@@ -516,25 +506,22 @@ TRACE_EVENT(xs_tcp_data_recv,
__string(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR])
__string(port, xs->xprt.address_strings[RPC_DISPLAY_PORT])
__field(u32, xid)
- __field(unsigned long, flags)
__field(unsigned long, copied)
__field(unsigned int, reclen)
- __field(unsigned long, offset)
+ __field(unsigned int, offset)
),
TP_fast_assign(
__assign_str(addr, xs->xprt.address_strings[RPC_DISPLAY_ADDR]);
__assign_str(port, xs->xprt.address_strings[RPC_DISPLAY_PORT]);
- __entry->xid = be32_to_cpu(xs->tcp_xid);
- __entry->flags = xs->tcp_flags;
- __entry->copied = xs->tcp_copied;
- __entry->reclen = xs->tcp_reclen;
- __entry->offset = xs->tcp_offset;
+ __entry->xid = be32_to_cpu(xs->recv.xid);
+ __entry->copied = xs->recv.copied;
+ __entry->reclen = xs->recv.len;
+ __entry->offset = xs->recv.offset;
),
- TP_printk("peer=[%s]:%s xid=0x%08x flags=%s copied=%lu reclen=%u offset=%lu",
+ TP_printk("peer=[%s]:%s xid=0x%08x copied=%lu reclen=%u offset=%u",
__get_str(addr), __get_str(port), __entry->xid,
- rpc_show_sock_xprt_flags(__entry->flags),
__entry->copied, __entry->reclen, __entry->offset)
);
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index df4bedb9b01c..538546edbfbd 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -242,10 +242,12 @@ __SYSCALL(__NR_tee, sys_tee)
/* fs/stat.c */
#define __NR_readlinkat 78
__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#if defined(__ARCH_WANT_NEW_STAT) || defined(__ARCH_WANT_STAT64)
#define __NR3264_fstatat 79
__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
#define __NR3264_fstat 80
__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+#endif
/* fs/sync.c */
#define __NR_sync 81
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index bfaec6903b8b..b9ba520f7e4b 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -200,6 +200,15 @@ struct binder_node_debug_info {
__u32 has_weak_ref;
};
+struct binder_node_info_for_ref {
+ __u32 handle;
+ __u32 strong_count;
+ __u32 weak_count;
+ __u32 reserved1;
+ __u32 reserved2;
+ __u32 reserved3;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -208,6 +217,7 @@ struct binder_node_debug_info {
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
+#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
/*
* NOTE: Two special error codes you should check for when calling
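A minimal userspace sketch of the new ioctl; fd is assumed to be an already set-up /dev/binder descriptor:

#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int example_node_info_for_ref(int fd, __u32 handle)
{
	/* Reserved fields must be zero; designated init handles that. */
	struct binder_node_info_for_ref info = { .handle = handle };

	if (ioctl(fd, BINDER_GET_NODE_INFO_FOR_REF, &info) < 0)
		return -1;
	/* info.strong_count and info.weak_count now hold the counts. */
	return 0;
}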
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index ff5a5db8906a..8f08ff9bdea0 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -137,8 +137,11 @@ struct blk_zone_range {
* sector specified in the report request structure.
* @BLKRESETZONE: Reset the write pointer of the zones in the specified
* sector range. The sector range must be zone aligned.
+ * @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
+ * @BLKGETNRZONES: Get the total number of zones of the device.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
+#define BLKGETZONESZ _IOW(0x12, 132, __u32)
+#define BLKGETNRZONES _IOW(0x12, 133, __u32)
#endif /* _UAPI_BLKZONED_H */
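A minimal userspace sketch of the two new queries, with error handling kept to the bare minimum:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(int argc, char **argv)
{
	__u32 zone_sectors = 0, nr_zones = 0;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	if (!ioctl(fd, BLKGETZONESZ, &zone_sectors) &&
	    !ioctl(fd, BLKGETNRZONES, &nr_zones))
		printf("%u zones of %u sectors\n", nr_zones, zone_sectors);
	close(fd);
	return 0;
}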
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 19bf0ca6d635..6dafbc3e4414 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -29,6 +29,7 @@ enum {
CRYPTO_MSG_UPDATEALG,
CRYPTO_MSG_GETALG,
CRYPTO_MSG_DELRNG,
+ CRYPTO_MSG_GETSTAT,
__CRYPTO_MSG_MAX
};
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
@@ -50,6 +51,16 @@ enum crypto_attr_type_t {
CRYPTOCFGA_REPORT_AKCIPHER, /* struct crypto_report_akcipher */
CRYPTOCFGA_REPORT_KPP, /* struct crypto_report_kpp */
CRYPTOCFGA_REPORT_ACOMP, /* struct crypto_report_acomp */
+ CRYPTOCFGA_STAT_LARVAL, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_HASH, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_BLKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AEAD, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_COMPRESS, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_RNG, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_CIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_AKCIPHER, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_KPP, /* struct crypto_stat */
+ CRYPTOCFGA_STAT_ACOMP, /* struct crypto_stat */
__CRYPTOCFGA_MAX
#define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1)
@@ -65,6 +76,47 @@ struct crypto_user_alg {
__u32 cru_flags;
};
+struct crypto_stat {
+ char type[CRYPTO_MAX_NAME];
+ union {
+ __u32 stat_encrypt_cnt;
+ __u32 stat_compress_cnt;
+ __u32 stat_generate_cnt;
+ __u32 stat_hash_cnt;
+ __u32 stat_setsecret_cnt;
+ };
+ union {
+ __u64 stat_encrypt_tlen;
+ __u64 stat_compress_tlen;
+ __u64 stat_generate_tlen;
+ __u64 stat_hash_tlen;
+ };
+ union {
+ __u32 stat_akcipher_err_cnt;
+ __u32 stat_cipher_err_cnt;
+ __u32 stat_compress_err_cnt;
+ __u32 stat_aead_err_cnt;
+ __u32 stat_hash_err_cnt;
+ __u32 stat_rng_err_cnt;
+ __u32 stat_kpp_err_cnt;
+ };
+ union {
+ __u32 stat_decrypt_cnt;
+ __u32 stat_decompress_cnt;
+ __u32 stat_seed_cnt;
+ __u32 stat_generate_public_key_cnt;
+ };
+ union {
+ __u64 stat_decrypt_tlen;
+ __u64 stat_decompress_tlen;
+ };
+ union {
+ __u32 stat_verify_cnt;
+ __u32 stat_compute_shared_secret_cnt;
+ };
+ __u32 stat_sign_cnt;
+};
+
struct crypto_report_larval {
char type[CRYPTO_MAX_NAME];
};
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 73e01918f996..a441ea1bfe6d 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,8 +279,8 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_AES_256_CTS 4
#define FS_ENCRYPTION_MODE_AES_128_CBC 5
#define FS_ENCRYPTION_MODE_AES_128_CTS 6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 53fbae27b280..6d180cc60a5d 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -708,6 +708,15 @@
#define REL_DIAL 0x07
#define REL_WHEEL 0x08
#define REL_MISC 0x09
+/*
+ * 0x0a is reserved and should not be used in input drivers.
+ * It was used by HID as REL_MISC+1 and userspace needs to detect if
+ * the next REL_* event is correct or is just REL_MISC + n.
+ * We define here REL_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define REL_RESERVED 0x0a
+#define REL_WHEEL_HI_RES 0x0b
#define REL_MAX 0x0f
#define REL_CNT (REL_MAX+1)
@@ -744,6 +753,15 @@
#define ABS_MISC 0x28
+/*
+ * 0x2e is reserved and should not be used in input drivers.
+ * It was used by HID as ABS_MISC+6 and userspace needs to detect if
+ * the next ABS_* event is correct or is just ABS_MISC + n.
+ * We define here ABS_RESERVED so userspace can rely on it and detect
+ * the situation described above.
+ */
+#define ABS_RESERVED 0x2e
+
#define ABS_MT_SLOT 0x2f /* MT slot being modified */
#define ABS_MT_TOUCH_MAJOR 0x30 /* Major axis of touching ellipse */
#define ABS_MT_TOUCH_MINOR 0x31 /* Minor axis (omit if circular) */
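A minimal userspace sketch of consuming the new REL codes; the 1/120-notch scaling for REL_WHEEL_HI_RES is an assumption carried over from the high-resolution scroll series, not something this hunk defines:

#include <linux/input.h>

/* Returns 1 and stores a delta in 1/120 notch units, else returns 0. */
static int example_wheel_delta(const struct input_event *ev, int *delta)
{
	if (ev->type != EV_REL || ev->code == REL_RESERVED)
		return 0;		/* reserved code: ignore, per above */
	if (ev->code == REL_WHEEL_HI_RES) {
		*delta = ev->value;		/* assumed 1/120 notch units */
		return 1;
	}
	if (ev->code == REL_WHEEL) {
		*delta = ev->value * 120;	/* whole notches */
		return 1;
	}
	return 0;
}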
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 251be353f950..2b7a652c9fa4 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -420,13 +420,19 @@ struct kvm_run {
struct kvm_coalesced_mmio_zone {
__u64 addr;
__u32 size;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
};
struct kvm_coalesced_mmio {
__u64 phys_addr;
__u32 len;
- __u32 pad;
+ union {
+ __u32 pad;
+ __u32 pio;
+ };
__u8 data[8];
};
@@ -719,6 +725,7 @@ struct kvm_ppc_one_seg_page_size {
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -751,6 +758,15 @@ struct kvm_ppc_resize_hpt {
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
+ * On arm64, machine type can be used to request the physical
+ * address size for the VM. Bits[7-0] are reserved for the guest
+ * PA size shift (i.e., log2(PA_Size)). For backward compatibility,
+ * value 0 implies the default IPA size, 40 bits.
+ */
+#define KVM_VM_TYPE_ARM_IPA_SIZE_MASK 0xffULL
+#define KVM_VM_TYPE_ARM_IPA_SIZE(x) \
+ ((x) & KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+/*
* ioctls for /dev/kvm fds:
*/
#define KVM_GET_API_VERSION _IO(KVMIO, 0x00)
@@ -953,6 +969,12 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
+#define KVM_CAP_EXCEPTION_PAYLOAD 164
+#define KVM_CAP_ARM_VM_IPA_SIZE 165
#ifdef KVM_CAP_IRQ_ROUTING
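A minimal userspace sketch of encoding the IPA size into the machine type; a real client would first probe KVM_CAP_ARM_VM_IPA_SIZE via KVM_CHECK_EXTENSION, which this sketch omits:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_create_vm(int ipa_bits)
{
	int kvm = open("/dev/kvm", O_RDWR);

	if (kvm < 0)
		return -1;
	/* 0 keeps the default 40-bit IPA; otherwise bits[7:0] carry
	 * the requested PA size shift, as documented above. */
	return ioctl(kvm, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(ipa_bits));
}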
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 7e27070b9440..f57c9e434d2d 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -128,37 +128,31 @@ enum {
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_ARS_CAP] = "ars_cap",
- [ND_CMD_ARS_START] = "ars_start",
- [ND_CMD_ARS_STATUS] = "ars_status",
- [ND_CMD_CLEAR_ERROR] = "clear_error",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_ARS_CAP: return "ars_cap";
+ case ND_CMD_ARS_START: return "ars_start";
+ case ND_CMD_ARS_STATUS: return "ars_status";
+ case ND_CMD_CLEAR_ERROR: return "clear_error";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
static inline const char *nvdimm_cmd_name(unsigned cmd)
{
- static const char * const names[] = {
- [ND_CMD_SMART] = "smart",
- [ND_CMD_SMART_THRESHOLD] = "smart_thresh",
- [ND_CMD_DIMM_FLAGS] = "flags",
- [ND_CMD_GET_CONFIG_SIZE] = "get_size",
- [ND_CMD_GET_CONFIG_DATA] = "get_data",
- [ND_CMD_SET_CONFIG_DATA] = "set_data",
- [ND_CMD_VENDOR_EFFECT_LOG_SIZE] = "effect_size",
- [ND_CMD_VENDOR_EFFECT_LOG] = "effect_log",
- [ND_CMD_VENDOR] = "vendor",
- [ND_CMD_CALL] = "cmd_call",
- };
-
- if (cmd < ARRAY_SIZE(names) && names[cmd])
- return names[cmd];
- return "unknown";
+ switch (cmd) {
+ case ND_CMD_SMART: return "smart";
+ case ND_CMD_SMART_THRESHOLD: return "smart_thresh";
+ case ND_CMD_DIMM_FLAGS: return "flags";
+ case ND_CMD_GET_CONFIG_SIZE: return "get_size";
+ case ND_CMD_GET_CONFIG_DATA: return "get_data";
+ case ND_CMD_SET_CONFIG_DATA: return "set_data";
+ case ND_CMD_VENDOR_EFFECT_LOG_SIZE: return "effect_size";
+ case ND_CMD_VENDOR_EFFECT_LOG: return "effect_log";
+ case ND_CMD_VENDOR: return "vendor";
+ case ND_CMD_CALL: return "cmd_call";
+ default: return "unknown";
+ }
}
#define ND_IOCTL 'N'
@@ -208,10 +202,6 @@ enum nd_driver_flags {
ND_DRIVER_DAX_PMEM = 1 << ND_DEVICE_DAX_PMEM,
};
-enum {
- ND_MIN_NAMESPACE_SIZE = PAGE_SIZE,
-};
-
enum ars_masks {
ARS_STATUS_MASK = 0x0000FFFF,
ARS_EXT_STATUS_SHIFT = 16,
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index ee556ccc93f4..e1e9888c85e6 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -52,6 +52,7 @@
#define PCI_COMMAND_INTX_DISABLE 0x400 /* INTx Emulation Disable */
#define PCI_STATUS 0x06 /* 16 bits */
+#define PCI_STATUS_IMM_READY 0x01 /* Immediate Readiness */
#define PCI_STATUS_INTERRUPT 0x08 /* Interrupt status */
#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
#define PCI_STATUS_66MHZ 0x20 /* Support 66 MHz PCI 2.1 bus */
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index 729af2f861a4..fdd4d88a7b95 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -4,6 +4,7 @@
* Copyright (C) 2008 Novell, Inc.
* Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
* Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com>
+ * Copyright (C) 2018 IVI Foundation, Inc.
*
* This file holds USB constants defined by the USB Device Class
* and USB488 Subclass Definitions for Test and Measurement devices
@@ -40,11 +41,38 @@
#define USBTMC488_REQUEST_GOTO_LOCAL 161
#define USBTMC488_REQUEST_LOCAL_LOCKOUT 162
+struct usbtmc_request {
+ __u8 bRequestType;
+ __u8 bRequest;
+ __u16 wValue;
+ __u16 wIndex;
+ __u16 wLength;
+} __attribute__ ((packed));
+
+struct usbtmc_ctrlrequest {
+ struct usbtmc_request req;
+ void __user *data; /* pointer to user space */
+} __attribute__ ((packed));
+
struct usbtmc_termchar {
__u8 term_char;
__u8 term_char_enabled;
} __attribute__ ((packed));
+/*
+ * usbtmc_message->flags:
+ */
+#define USBTMC_FLAG_ASYNC 0x0001
+#define USBTMC_FLAG_APPEND 0x0002
+#define USBTMC_FLAG_IGNORE_TRAILER 0x0004
+
+struct usbtmc_message {
+ __u32 transfer_size; /* size of bytes to transfer */
+ __u32 transferred; /* size of received/written bytes */
+ __u32 flags; /* bit 0: 0 = synchronous; 1 = asynchronous */
+ void __user *message; /* pointer to header and data in user space */
+} __attribute__ ((packed));
+
/* Request values for USBTMC driver's ioctl entry point */
#define USBTMC_IOC_NR 91
#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
@@ -53,10 +81,15 @@ struct usbtmc_termchar {
#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
+#define USBTMC_IOCTL_CTRL_REQUEST _IOWR(USBTMC_IOC_NR, 8, struct usbtmc_ctrlrequest)
#define USBTMC_IOCTL_GET_TIMEOUT _IOR(USBTMC_IOC_NR, 9, __u32)
#define USBTMC_IOCTL_SET_TIMEOUT _IOW(USBTMC_IOC_NR, 10, __u32)
#define USBTMC_IOCTL_EOM_ENABLE _IOW(USBTMC_IOC_NR, 11, __u8)
#define USBTMC_IOCTL_CONFIG_TERMCHAR _IOW(USBTMC_IOC_NR, 12, struct usbtmc_termchar)
+#define USBTMC_IOCTL_WRITE _IOWR(USBTMC_IOC_NR, 13, struct usbtmc_message)
+#define USBTMC_IOCTL_READ _IOWR(USBTMC_IOC_NR, 14, struct usbtmc_message)
+#define USBTMC_IOCTL_WRITE_RESULT _IOWR(USBTMC_IOC_NR, 15, __u32)
+#define USBTMC_IOCTL_API_VERSION _IOR(USBTMC_IOC_NR, 16, __u32)
#define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char)
#define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char)
@@ -64,6 +97,14 @@ struct usbtmc_termchar {
#define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20)
#define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21)
#define USBTMC488_IOCTL_TRIGGER _IO(USBTMC_IOC_NR, 22)
+#define USBTMC488_IOCTL_WAIT_SRQ _IOW(USBTMC_IOC_NR, 23, __u32)
+
+#define USBTMC_IOCTL_MSG_IN_ATTR _IOR(USBTMC_IOC_NR, 24, __u8)
+#define USBTMC_IOCTL_AUTO_ABORT _IOW(USBTMC_IOC_NR, 25, __u8)
+
+/* Cancel and cleanup asynchronous calls */
+#define USBTMC_IOCTL_CANCEL_IO _IO(USBTMC_IOC_NR, 35)
+#define USBTMC_IOCTL_CLEANUP_IO _IO(USBTMC_IOC_NR, 36)
/* Driver encoded usb488 capabilities */
#define USBTMC488_CAPABILITY_TRIGGER 1
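A minimal userspace sketch of the new pass-through control request; the 0xA1 bmRequestType (device-to-host class/interface request) and the wIndex of 0 are assumptions for the bound interface, and USBTMC_REQUEST_GET_CAPABILITIES is defined earlier in this header:

#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

static int example_get_capabilities(int fd, unsigned char caps[24])
{
	struct usbtmc_ctrlrequest ctrl = {
		.req = {
			.bRequestType = 0xA1,	/* IN | class | interface (assumed) */
			.bRequest = USBTMC_REQUEST_GET_CAPABILITIES,
			.wValue = 0,
			.wIndex = 0,		/* assumed: bound interface */
			.wLength = 24,
		},
		.data = caps,
	};

	return ioctl(fd, USBTMC_IOCTL_CTRL_REQUEST, &ctrl);
}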
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index ff6cc6cb4227..d854cb19c42c 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -192,14 +192,14 @@ struct uvc_descriptor_header {
/* 3.7.2. Video Control Interface Header Descriptor */
struct uvc_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 bcdUVC;
- __u16 wTotalLength;
- __u32 dwClockFrequency;
- __u8 bInCollection;
- __u8 baInterfaceNr[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 bcdUVC;
+ __le16 wTotalLength;
+ __le32 dwClockFrequency;
+ __u8 bInCollection;
+ __u8 baInterfaceNr[];
} __attribute__((__packed__));
#define UVC_DT_HEADER_SIZE(n) (12+(n))
@@ -209,57 +209,57 @@ struct uvc_header_descriptor {
#define DECLARE_UVC_HEADER_DESCRIPTOR(n) \
struct UVC_HEADER_DESCRIPTOR(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u16 bcdUVC; \
- __u16 wTotalLength; \
- __u32 dwClockFrequency; \
- __u8 bInCollection; \
- __u8 baInterfaceNr[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __le16 bcdUVC; \
+ __le16 wTotalLength; \
+ __le32 dwClockFrequency; \
+ __u8 bInCollection; \
+ __u8 baInterfaceNr[n]; \
} __attribute__ ((packed))
/* 3.7.2.1. Input Terminal Descriptor */
struct uvc_input_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_INPUT_TERMINAL_SIZE 8
/* 3.7.2.2. Output Terminal Descriptor */
struct uvc_output_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 bSourceID;
- __u8 iTerminal;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 bSourceID;
+ __u8 iTerminal;
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_TERMINAL_SIZE 9
/* 3.7.2.3. Camera Terminal Descriptor */
struct uvc_camera_terminal_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bTerminalID;
- __u16 wTerminalType;
- __u8 bAssocTerminal;
- __u8 iTerminal;
- __u16 wObjectiveFocalLengthMin;
- __u16 wObjectiveFocalLengthMax;
- __u16 wOcularFocalLength;
- __u8 bControlSize;
- __u8 bmControls[3];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bTerminalID;
+ __le16 wTerminalType;
+ __u8 bAssocTerminal;
+ __u8 iTerminal;
+ __le16 wObjectiveFocalLengthMin;
+ __le16 wObjectiveFocalLengthMax;
+ __le16 wOcularFocalLength;
+ __u8 bControlSize;
+ __u8 bmControls[3];
} __attribute__((__packed__));
#define UVC_DT_CAMERA_TERMINAL_SIZE(n) (15+(n))
@@ -293,15 +293,15 @@ struct UVC_SELECTOR_UNIT_DESCRIPTOR(n) { \
/* 3.7.2.5. Processing Unit Descriptor */
struct uvc_processing_unit_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bUnitID;
- __u8 bSourceID;
- __u16 wMaxMultiplier;
- __u8 bControlSize;
- __u8 bmControls[2];
- __u8 iProcessing;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bUnitID;
+ __u8 bSourceID;
+ __le16 wMaxMultiplier;
+ __u8 bControlSize;
+ __u8 bmControls[2];
+ __u8 iProcessing;
} __attribute__((__packed__));
#define UVC_DT_PROCESSING_UNIT_SIZE(n) (9+(n))
@@ -343,29 +343,29 @@ struct UVC_EXTENSION_UNIT_DESCRIPTOR(p, n) { \
/* 3.8.2.2. Video Control Interrupt Endpoint Descriptor */
struct uvc_control_endpoint_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u16 wMaxTransferSize;
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __le16 wMaxTransferSize;
} __attribute__((__packed__));
#define UVC_DT_CONTROL_ENDPOINT_SIZE 5
/* 3.9.2.1. Input Header Descriptor */
struct uvc_input_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bmInfo;
- __u8 bTerminalLink;
- __u8 bStillCaptureMethod;
- __u8 bTriggerSupport;
- __u8 bTriggerUsage;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bmInfo;
+ __u8 bTerminalLink;
+ __u8 bStillCaptureMethod;
+ __u8 bTriggerSupport;
+ __u8 bTriggerUsage;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_INPUT_HEADER_SIZE(n, p) (13+(n*p))
@@ -375,32 +375,32 @@ struct uvc_input_header_descriptor {
#define DECLARE_UVC_INPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_INPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bmInfo; \
- __u8 bTerminalLink; \
- __u8 bStillCaptureMethod; \
- __u8 bTriggerSupport; \
- __u8 bTriggerUsage; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bmInfo; \
+ __u8 bTerminalLink; \
+ __u8 bStillCaptureMethod; \
+ __u8 bTriggerSupport; \
+ __u8 bTriggerUsage; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.2. Output Header Descriptor */
struct uvc_output_header_descriptor {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bNumFormats;
- __u16 wTotalLength;
- __u8 bEndpointAddress;
- __u8 bTerminalLink;
- __u8 bControlSize;
- __u8 bmaControls[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bNumFormats;
+ __le16 wTotalLength;
+ __u8 bEndpointAddress;
+ __u8 bTerminalLink;
+ __u8 bControlSize;
+ __u8 bmaControls[];
} __attribute__((__packed__));
#define UVC_DT_OUTPUT_HEADER_SIZE(n, p) (9+(n*p))
@@ -410,15 +410,15 @@ struct uvc_output_header_descriptor {
#define DECLARE_UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) \
struct UVC_OUTPUT_HEADER_DESCRIPTOR(n, p) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bNumFormats; \
- __u16 wTotalLength; \
- __u8 bEndpointAddress; \
- __u8 bTerminalLink; \
- __u8 bControlSize; \
- __u8 bmaControls[p][n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bNumFormats; \
+ __le16 wTotalLength; \
+ __u8 bEndpointAddress; \
+ __u8 bTerminalLink; \
+ __u8 bControlSize; \
+ __u8 bmaControls[p][n]; \
} __attribute__ ((packed))
/* 3.9.2.6. Color matching descriptor */
@@ -473,19 +473,19 @@ struct uvc_format_uncompressed {
/* Uncompressed Payload - 3.1.2. Uncompressed Video Frame Descriptor */
struct uvc_frame_uncompressed {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_UNCOMPRESSED_SIZE(n) (26+4*(n))
@@ -495,19 +495,19 @@ struct uvc_frame_uncompressed {
#define DECLARE_UVC_FRAME_UNCOMPRESSED(n) \
struct UVC_FRAME_UNCOMPRESSED(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
/* MJPEG Payload - 3.1.1. MJPEG Video Format Descriptor */
@@ -529,19 +529,19 @@ struct uvc_format_mjpeg {
/* MJPEG Payload - 3.1.2. MJPEG Video Frame Descriptor */
struct uvc_frame_mjpeg {
- __u8 bLength;
- __u8 bDescriptorType;
- __u8 bDescriptorSubType;
- __u8 bFrameIndex;
- __u8 bmCapabilities;
- __u16 wWidth;
- __u16 wHeight;
- __u32 dwMinBitRate;
- __u32 dwMaxBitRate;
- __u32 dwMaxVideoFrameBufferSize;
- __u32 dwDefaultFrameInterval;
- __u8 bFrameIntervalType;
- __u32 dwFrameInterval[];
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFrameIndex;
+ __u8 bmCapabilities;
+ __le16 wWidth;
+ __le16 wHeight;
+ __le32 dwMinBitRate;
+ __le32 dwMaxBitRate;
+ __le32 dwMaxVideoFrameBufferSize;
+ __le32 dwDefaultFrameInterval;
+ __u8 bFrameIntervalType;
+ __le32 dwFrameInterval[];
} __attribute__((__packed__));
#define UVC_DT_FRAME_MJPEG_SIZE(n) (26+4*(n))
@@ -551,19 +551,19 @@ struct uvc_frame_mjpeg {
#define DECLARE_UVC_FRAME_MJPEG(n) \
struct UVC_FRAME_MJPEG(n) { \
- __u8 bLength; \
- __u8 bDescriptorType; \
- __u8 bDescriptorSubType; \
- __u8 bFrameIndex; \
- __u8 bmCapabilities; \
- __u16 wWidth; \
- __u16 wHeight; \
- __u32 dwMinBitRate; \
- __u32 dwMaxBitRate; \
- __u32 dwMaxVideoFrameBufferSize; \
- __u32 dwDefaultFrameInterval; \
- __u8 bFrameIntervalType; \
- __u32 dwFrameInterval[n]; \
+ __u8 bLength; \
+ __u8 bDescriptorType; \
+ __u8 bDescriptorSubType; \
+ __u8 bFrameIndex; \
+ __u8 bmCapabilities; \
+ __le16 wWidth; \
+ __le16 wHeight; \
+ __le32 dwMinBitRate; \
+ __le32 dwMaxBitRate; \
+ __le32 dwMaxVideoFrameBufferSize; \
+ __le32 dwDefaultFrameInterval; \
+ __u8 bFrameIntervalType; \
+ __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
#endif /* __LINUX_USB_VIDEO_H */
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 1aa7b82e8169..f378b9802d8b 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -200,6 +200,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
+#define VFIO_DEVICE_FLAGS_AP (1 << 5) /* vfio-ap device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -215,6 +216,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
+#define VFIO_DEVICE_API_AP_STRING "vfio-ap"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25a16760de2a..1254b51a551a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -763,10 +763,28 @@ struct ib_uverbs_sge {
__u32 lkey;
};
+enum ib_uverbs_wr_opcode {
+ IB_UVERBS_WR_RDMA_WRITE = 0,
+ IB_UVERBS_WR_RDMA_WRITE_WITH_IMM = 1,
+ IB_UVERBS_WR_SEND = 2,
+ IB_UVERBS_WR_SEND_WITH_IMM = 3,
+ IB_UVERBS_WR_RDMA_READ = 4,
+ IB_UVERBS_WR_ATOMIC_CMP_AND_SWP = 5,
+ IB_UVERBS_WR_ATOMIC_FETCH_AND_ADD = 6,
+ IB_UVERBS_WR_LOCAL_INV = 7,
+ IB_UVERBS_WR_BIND_MW = 8,
+ IB_UVERBS_WR_SEND_WITH_INV = 9,
+ IB_UVERBS_WR_TSO = 10,
+ IB_UVERBS_WR_RDMA_READ_WITH_INV = 11,
+ IB_UVERBS_WR_MASKED_ATOMIC_CMP_AND_SWP = 12,
+ IB_UVERBS_WR_MASKED_ATOMIC_FETCH_AND_ADD = 13,
+ /* Review enum ib_wr_opcode before modifying this */
+};
+
struct ib_uverbs_send_wr {
__aligned_u64 wr_id;
__u32 num_sge;
- __u32 opcode;
+ __u32 opcode; /* see enum ib_uverbs_wr_opcode */
__u32 send_flags;
union {
__be32 imm_data;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index addbb9c4529e..8fa9f90e2bb1 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -45,6 +45,9 @@ enum {
MLX5_QP_FLAG_BFREG_INDEX = 1 << 3,
MLX5_QP_FLAG_TYPE_DCT = 1 << 4,
MLX5_QP_FLAG_TYPE_DCI = 1 << 5,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
+ MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE = 1 << 8,
};
enum {
@@ -349,9 +352,22 @@ struct mlx5_ib_create_qp_rss {
__u32 flags;
};
+enum mlx5_ib_create_qp_resp_mask {
+ MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
+ MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
+ MLX5_IB_CREATE_QP_RESP_MASK_RQN = 1UL << 2,
+ MLX5_IB_CREATE_QP_RESP_MASK_SQN = 1UL << 3,
+};
+
struct mlx5_ib_create_qp_resp {
__u32 bfreg_index;
__u32 reserved;
+ __u32 comp_mask;
+ __u32 tirn;
+ __u32 tisn;
+ __u32 rqn;
+ __u32 sqn;
+ __u32 reserved1;
};
struct mlx5_ib_alloc_mw {
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 9c51801b9e64..408e220034de 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -125,6 +125,7 @@ enum mlx5_ib_flow_matcher_create_attrs {
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_FLAGS,
};
enum mlx5_ib_flow_matcher_destroy_attrs {
@@ -155,6 +156,8 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
+ MLX5_IB_ATTR_CREATE_FLOW_TAG,
};
enum mlx5_ib_destoy_flow_attrs {
@@ -166,4 +169,22 @@ enum mlx5_ib_flow_methods {
MLX5_IB_METHOD_DESTROY_FLOW,
};
+enum mlx5_ib_flow_action_methods {
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_MODIFY_HEADER = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_FLOW_ACTION_CREATE_PACKET_REFORMAT,
+};
+
+enum mlx5_ib_create_flow_action_create_modify_header_attrs {
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_ACTIONS_PRM,
+ MLX5_IB_ATTR_CREATE_MODIFY_HEADER_FT_TYPE,
+};
+
+enum mlx5_ib_create_flow_action_create_packet_reformat_attrs {
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_FT_TYPE,
+ MLX5_IB_ATTR_CREATE_PACKET_REFORMAT_DATA_BUF,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_verbs.h b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
index 8a2fb33f3ed4..4ef62c0e8452 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_verbs.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_verbs.h
@@ -39,5 +39,17 @@ enum mlx5_ib_uapi_flow_action_flags {
MLX5_IB_UAPI_FLOW_ACTION_FLAGS_REQUIRE_METADATA = 1 << 0,
};
+enum mlx5_ib_uapi_flow_table_type {
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_RX = 0x0,
+ MLX5_IB_UAPI_FLOW_TABLE_TYPE_NIC_TX = 0x1,
+};
+
+enum mlx5_ib_uapi_flow_action_packet_reformat_type {
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2 = 0x0,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x1,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x2,
+ MLX5_IB_UAPI_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x3,
+};
+
#endif
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index edba6351ac13..f9c41bf59efc 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -227,8 +227,9 @@ enum rdma_nldev_command {
RDMA_NLDEV_CMD_UNSPEC,
RDMA_NLDEV_CMD_GET, /* can dump */
+ RDMA_NLDEV_CMD_SET,
- /* 2 - 4 are free to use */
+ /* 3 - 4 are free to use */
RDMA_NLDEV_CMD_PORT_GET = 5, /* can dump */
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 24800c6c1f32..06c34d99be85 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -53,7 +53,7 @@ enum {
struct ib_uverbs_attr {
__u16 attr_id; /* command specific type attribute */
- __u16 len; /* only for pointers */
+ __u16 len; /* only for pointers and IDRs array */
__u16 flags; /* combination of UVERBS_ATTR_F_XXXX */
union {
struct {
@@ -63,7 +63,10 @@ struct ib_uverbs_attr {
__u16 reserved;
} attr_data;
union {
- /* Used by PTR_IN/OUT, ENUM_IN and IDR */
+ /*
+ * ptr to command, inline data, idr/fd or
+ * ptr to __u32 array of IDRs
+ */
__aligned_u64 data;
/* Used by FD_IN and FD_OUT */
__s64 data_s64;
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
new file mode 100644
index 000000000000..17c7abd0803a
--- /dev/null
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * UFS Transport SGIO v4 BSG Message Support
+ *
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (C) 2018 Western Digital Corporation
+ */
+#ifndef SCSI_BSG_UFS_H
+#define SCSI_BSG_UFS_H
+
+#include <linux/types.h>
+/*
+ * This file is intended to be included by both kernel and user space
+ */
+
+#define UFS_CDB_SIZE 16
+#define UPIU_TRANSACTION_UIC_CMD 0x1F
+/* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */
+#define UIC_CMD_SIZE (sizeof(__u32) * 4)
+
+/**
+ * struct utp_upiu_header - UPIU header structure
+ * @dword_0: UPIU header DW-0
+ * @dword_1: UPIU header DW-1
+ * @dword_2: UPIU header DW-2
+ */
+struct utp_upiu_header {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+};
+
+/**
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ __u8 opcode;
+ __u8 idn;
+ __u8 index;
+ __u8 selector;
+ __be16 reserved_osf;
+ __be16 length;
+ __be32 value;
+ __be32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_cmd - Command UPIU structure
+ * @exp_data_transfer_len: Expected Data Transfer Length DW-3
+ * @cdb: Command Descriptor Block CDB DW-4 to DW-7
+ */
+struct utp_upiu_cmd {
+ __be32 exp_data_transfer_len;
+ __u8 cdb[UFS_CDB_SIZE];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header:UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ struct utp_upiu_query tr;
+ /* use utp_upiu_query to host the 4 dwords of uic command */
+ struct utp_upiu_query uc;
+ };
+};
+
+/* request (CDB) structure of the sg_io_v4 */
+struct ufs_bsg_request {
+ __u32 msgcode;
+ struct utp_upiu_req upiu_req;
+};
+
+/* response (request sense data) structure of the sg_io_v4 */
+struct ufs_bsg_reply {
+ /*
+ * The completion result. Result exists in two forms:
+ * if negative, it is an -Exxx system errno value. There will
+ * be no further reply information supplied.
+ * else, it's the 4-byte scsi error result, with driver, host,
+ * msg and status fields. The per-msgcode reply structure
+ * will contain valid data.
+ */
+ __u32 result;
+
+ /* If there was reply_payload, how much was received? */
+ __u32 reply_payload_rcv_len;
+
+ struct utp_upiu_req upiu_rsp;
+};
+#endif /* SCSI_BSG_UFS_H */
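A minimal userspace sketch of filling the request half of an sg_io_v4 for a UFS query UPIU; the 0x16 transaction code, its placement in the top byte of dword_0, and the use of msgcode are assumptions from the UFS spec and the host driver, since neither is defined in this header:

#include <string.h>
#include <endian.h>
#include <scsi/scsi_bsg_ufs.h>

static void example_build_query(struct ufs_bsg_request *req,
				__u8 opcode, __u8 idn, __u8 index)
{
	memset(req, 0, sizeof(*req));
	req->msgcode = 0x16;	/* assumed: query request transaction code */
	/* dword_0 is assumed to carry the transaction type in its top byte */
	req->upiu_req.header.dword_0 = htobe32(0x16 << 24);
	req->upiu_req.qr.opcode = opcode;
	req->upiu_req.qr.idn = idn;
	req->upiu_req.qr.index = index;
}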
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index ed0a120d4f08..404d4b9ffe76 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -752,7 +752,7 @@ struct snd_timer_info {
#define SNDRV_TIMER_PSFLG_EARLY_EVENT (1<<2) /* write early event to the poll queue */
struct snd_timer_params {
- unsigned int flags; /* flags - SNDRV_MIXER_PSFLG_* */
+ unsigned int flags; /* flags - SNDRV_TIMER_PSFLG_* */
unsigned int ticks; /* requested resolution in ticks */
unsigned int queue_size; /* total size of queue (32-1024) */
unsigned int reserved0; /* reserved, was: failure locations */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index cc41de3b8deb..c595bed7bfcb 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1461,10 +1461,10 @@ COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
#endif
#ifdef CONFIG_COMPAT_32BIT_TIME
-static int compat_prepare_timeout(const struct compat_timespec __user *p,
+static int compat_prepare_timeout(const struct old_timespec32 __user *p,
struct timespec64 *ts)
{
- if (compat_get_timespec64(ts, p))
+ if (get_old_timespec32(ts, p))
return -EFAULT;
if (!timespec64_valid(ts))
return -EINVAL;
@@ -1474,7 +1474,7 @@ static int compat_prepare_timeout(const struct compat_timespec __user *p,
COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
const char __user *, u_msg_ptr,
compat_size_t, msg_len, unsigned int, msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
@@ -1489,7 +1489,7 @@ COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
char __user *, u_msg_ptr,
compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
- const struct compat_timespec __user *, u_abs_timeout)
+ const struct old_timespec32 __user *, u_abs_timeout)
{
struct timespec64 ts, *p = NULL;
if (u_abs_timeout) {
diff --git a/ipc/msg.c b/ipc/msg.c
index 883642cf2b27..0833c6405915 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -622,9 +622,9 @@ struct compat_msqid_ds {
struct compat_ipc_perm msg_perm;
compat_uptr_t msg_first;
compat_uptr_t msg_last;
- compat_time_t msg_stime;
- compat_time_t msg_rtime;
- compat_time_t msg_ctime;
+ old_time32_t msg_stime;
+ old_time32_t msg_rtime;
+ old_time32_t msg_ctime;
compat_ulong_t msg_lcbytes;
compat_ulong_t msg_lqbytes;
unsigned short msg_cbytes;
diff --git a/ipc/sem.c b/ipc/sem.c
index 26f8e37fcdcb..745dc6187e84 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1698,8 +1698,8 @@ SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
struct compat_semid_ds {
struct compat_ipc_perm sem_perm;
- compat_time_t sem_otime;
- compat_time_t sem_ctime;
+ old_time32_t sem_otime;
+ old_time32_t sem_ctime;
compat_uptr_t sem_base;
compat_uptr_t sem_pending;
compat_uptr_t sem_pending_last;
@@ -2214,11 +2214,11 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
#ifdef CONFIG_COMPAT_32BIT_TIME
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
- const struct compat_timespec __user *timeout)
+ const struct old_timespec32 __user *timeout)
{
if (timeout) {
struct timespec64 ts;
- if (compat_get_timespec64(&ts, timeout))
+ if (get_old_timespec32(&ts, timeout))
return -EFAULT;
return do_semtimedop(semid, tsems, nsops, &ts);
}
@@ -2227,7 +2227,7 @@ long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
unsigned int, nsops,
- const struct compat_timespec __user *, timeout)
+ const struct old_timespec32 __user *, timeout)
{
return compat_ksys_semtimedop(semid, tsems, nsops, timeout);
}
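
The ipc conversions above and below are pure renames plus helper swaps; a paraphrased sketch of the 32-bit layout and the widening the helpers perform (illustrative example_* names, not quoted from this patch):

/*
 * Illustrative sketch: the layout behind the rename and what the
 * renamed helper does. Definitions are paraphrased, not quoted.
 */
#include <linux/time64.h>
#include <linux/types.h>

typedef s32 example_old_time32_t;	/* stands in for old_time32_t */

struct example_old_timespec32 {		/* stands in for old_timespec32 */
	example_old_time32_t	tv_sec;
	s32			tv_nsec;
};

/* get_old_timespec32() essentially widens this into a timespec64: */
static inline void example_widen(struct timespec64 *ts64,
				 const struct example_old_timespec32 *ts32)
{
	ts64->tv_sec  = ts32->tv_sec;	/* sign-extends the 32-bit seconds */
	ts64->tv_nsec = ts32->tv_nsec;
}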
diff --git a/ipc/shm.c b/ipc/shm.c
index 1c65fb357395..0842411cb0e9 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -1202,9 +1202,9 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
struct compat_shmid_ds {
struct compat_ipc_perm shm_perm;
int shm_segsz;
- compat_time_t shm_atime;
- compat_time_t shm_dtime;
- compat_time_t shm_ctime;
+ old_time32_t shm_atime;
+ old_time32_t shm_dtime;
+ old_time32_t shm_ctime;
compat_ipc_pid_t shm_cpid;
compat_ipc_pid_t shm_lpid;
unsigned short shm_nattch;
diff --git a/ipc/syscall.c b/ipc/syscall.c
index 65d405f1ba0c..1ac06e3983c0 100644
--- a/ipc/syscall.c
+++ b/ipc/syscall.c
@@ -35,7 +35,7 @@ SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
(const struct __kernel_timespec __user *)fifth);
else if (IS_ENABLED(CONFIG_COMPAT_32BIT_TIME))
return compat_ksys_semtimedop(first, ptr, second,
- (const struct compat_timespec __user *)fifth);
+ (const struct old_timespec32 __user *)fifth);
else
return -ENOSYS;
diff --git a/ipc/util.h b/ipc/util.h
index 0a159f69b3bb..1ee81bce25e9 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -266,7 +266,7 @@ long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
- const struct compat_timespec __user *timeout);
+ const struct old_timespec32 __user *timeout);
#ifdef CONFIG_COMPAT
long compat_ksys_semctl(int semid, int semnum, int cmd, int arg);
long compat_ksys_msgctl(int msqid, int cmd, void __user *uptr);
diff --git a/kernel/compat.c b/kernel/compat.c
index 8e40efc2928a..089d00d0da9c 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -93,28 +93,28 @@ int compat_put_timex(struct compat_timex __user *utp, const struct timex *txc)
return 0;
}
-static int __compat_get_timeval(struct timeval *tv, const struct compat_timeval __user *ctv)
+static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv)
{
return (!access_ok(VERIFY_READ, ctv, sizeof(*ctv)) ||
__get_user(tv->tv_sec, &ctv->tv_sec) ||
__get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
-static int __compat_put_timeval(const struct timeval *tv, struct compat_timeval __user *ctv)
+static int __compat_put_timeval(const struct timeval *tv, struct old_timeval32 __user *ctv)
{
return (!access_ok(VERIFY_WRITE, ctv, sizeof(*ctv)) ||
__put_user(tv->tv_sec, &ctv->tv_sec) ||
__put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
}
-static int __compat_get_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
+static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts)
{
return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
__get_user(ts->tv_sec, &cts->tv_sec) ||
__get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
}
-static int __compat_put_timespec(const struct timespec *ts, struct compat_timespec __user *cts)
+static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts)
{
return (!access_ok(VERIFY_WRITE, cts, sizeof(*cts)) ||
__put_user(ts->tv_sec, &cts->tv_sec) ||
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 87a6bc2a96c0..f14c376937e5 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -14,8 +14,6 @@
#include <linux/pfn.h>
#include <linux/set_memory.h>
-#define DIRECT_MAPPING_ERROR 0
-
/*
* Most architectures use ZONE_DMA for the first 16 Megabytes, but
* some use it for entirely different regions:
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 4f8a6dbf0b60..ebecaf255ea2 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -21,6 +21,7 @@
#include <linux/cache.h>
#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -73,13 +74,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
static unsigned long io_tlb_nslabs;
/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
-/*
* This is a free list describing the number of free entries available from
* each index
*/
@@ -126,7 +120,6 @@ setup_io_tlb_npages(char *str)
return 0;
}
early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */
unsigned long swiotlb_nr_tbl(void)
{
@@ -194,16 +187,10 @@ void __init swiotlb_update_mem_attributes(void)
bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
memset(vaddr, 0, bytes);
-
- vaddr = phys_to_virt(io_tlb_overflow_buffer);
- bytes = PAGE_ALIGN(io_tlb_overflow);
- set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
- memset(vaddr, 0, bytes);
}
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{
- void *v_overflow_buffer;
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
@@ -213,17 +200,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
io_tlb_end = io_tlb_start + bytes;
/*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = memblock_virt_alloc_low_nopanic(
- PAGE_ALIGN(io_tlb_overflow),
- PAGE_SIZE);
- if (!v_overflow_buffer)
- return -ENOMEM;
-
- io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
- /*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
@@ -330,7 +306,6 @@ int
swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
{
unsigned long i, bytes;
- unsigned char *v_overflow_buffer;
bytes = nslabs << IO_TLB_SHIFT;
@@ -342,19 +317,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
memset(tlb, 0, bytes);
/*
- * Get the overflow emergency buffer
- */
- v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
- get_order(io_tlb_overflow));
- if (!v_overflow_buffer)
- goto cleanup2;
-
- set_memory_decrypted((unsigned long)v_overflow_buffer,
- io_tlb_overflow >> PAGE_SHIFT);
- memset(v_overflow_buffer, 0, io_tlb_overflow);
- io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
- /*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
@@ -390,10 +352,6 @@ cleanup4:
sizeof(int)));
io_tlb_list = NULL;
cleanup3:
- free_pages((unsigned long)v_overflow_buffer,
- get_order(io_tlb_overflow));
- io_tlb_overflow_buffer = 0;
-cleanup2:
io_tlb_end = 0;
io_tlb_start = 0;
io_tlb_nslabs = 0;
@@ -407,8 +365,6 @@ void __init swiotlb_exit(void)
return;
if (late_alloc) {
- free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
- get_order(io_tlb_overflow));
free_pages((unsigned long)io_tlb_orig_addr,
get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -416,8 +372,6 @@ void __init swiotlb_exit(void)
free_pages((unsigned long)phys_to_virt(io_tlb_start),
get_order(io_tlb_nslabs << IO_TLB_SHIFT));
} else {
- memblock_free_late(io_tlb_overflow_buffer,
- PAGE_ALIGN(io_tlb_overflow));
memblock_free_late(__pa(io_tlb_orig_addr),
PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
memblock_free_late(__pa(io_tlb_list),
@@ -429,7 +383,7 @@ void __init swiotlb_exit(void)
max_segment = 0;
}
-int is_swiotlb_buffer(phys_addr_t paddr)
+static int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
@@ -591,26 +545,6 @@ found:
}
/*
- * Allocates bounce buffer and returns its physical address.
- */
-static phys_addr_t
-map_single(struct device *hwdev, phys_addr_t phys, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- dma_addr_t start_dma_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE) {
- dev_warn_ratelimited(hwdev, "Cannot do DMA to address %pa\n",
- &phys);
- return SWIOTLB_MAP_ERROR;
- }
-
- start_dma_addr = __phys_to_dma(hwdev, io_tlb_start);
- return swiotlb_tbl_map_single(hwdev, start_dma_addr, phys, size,
- dir, attrs);
-}
-
-/*
* tlb_addr is the physical address of the bounce buffer to unmap.
*/
void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
@@ -689,104 +623,32 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
}
}
-static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
- size_t size)
-{
- u64 mask = DMA_BIT_MASK(32);
-
- if (dev && dev->coherent_dma_mask)
- mask = dev->coherent_dma_mask;
- return addr + size - 1 <= mask;
-}
-
-static void *
-swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
- unsigned long attrs)
+static dma_addr_t swiotlb_bounce_page(struct device *dev, phys_addr_t *phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- phys_addr_t phys_addr;
-
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- goto out_warn;
-
- phys_addr = swiotlb_tbl_map_single(dev,
- __phys_to_dma(dev, io_tlb_start),
- 0, size, DMA_FROM_DEVICE, attrs);
- if (phys_addr == SWIOTLB_MAP_ERROR)
- goto out_warn;
-
- *dma_handle = __phys_to_dma(dev, phys_addr);
- if (!dma_coherent_ok(dev, *dma_handle, size))
- goto out_unmap;
-
- memset(phys_to_virt(phys_addr), 0, size);
- return phys_to_virt(phys_addr);
+ dma_addr_t dma_addr;
-out_unmap:
- dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dev->coherent_dma_mask,
- (unsigned long long)*dma_handle);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
-out_warn:
- if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
- dev_warn(dev,
- "swiotlb: coherent allocation failed, size=%zu\n",
- size);
- dump_stack();
+ if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
+ dev_warn_ratelimited(dev,
+ "Cannot do DMA to address %pa\n", phys);
+ return DIRECT_MAPPING_ERROR;
}
- return NULL;
-}
-
-static bool swiotlb_free_buffer(struct device *dev, size_t size,
- dma_addr_t dma_addr)
-{
- phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
- WARN_ON_ONCE(irqs_disabled());
-
- if (!is_swiotlb_buffer(phys_addr))
- return false;
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- return true;
-}
-
-static void
-swiotlb_full(struct device *dev, size_t size, enum dma_data_direction dir,
- int do_panic)
-{
- if (swiotlb_force == SWIOTLB_NO_FORCE)
- return;
-
- /*
- * Ran out of IOMMU space for this operation. This is very bad.
- * Unfortunately the drivers cannot handle this operation properly.
- * unless they check for dma_mapping_error (most don't)
- * When the mapping is small enough return a static buffer to limit
- * the damage, or panic when the transfer is too big.
- */
- dev_err_ratelimited(dev, "DMA: Out of SW-IOMMU space for %zu bytes\n",
- size);
+ /* Oh well, have to allocate and map a bounce buffer. */
+ *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
+ *phys, size, dir, attrs);
+ if (*phys == SWIOTLB_MAP_ERROR)
+ return DIRECT_MAPPING_ERROR;
- if (size <= io_tlb_overflow || !do_panic)
- return;
+ /* Ensure that the address returned is DMA'ble */
+ dma_addr = __phys_to_dma(dev, *phys);
+ if (unlikely(!dma_capable(dev, dma_addr, size))) {
+ swiotlb_tbl_unmap_single(dev, *phys, size, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ return DIRECT_MAPPING_ERROR;
+ }
- if (dir == DMA_BIDIRECTIONAL)
- panic("DMA: Random memory could be DMA accessed\n");
- if (dir == DMA_FROM_DEVICE)
- panic("DMA: Random memory could be DMA written\n");
- if (dir == DMA_TO_DEVICE)
- panic("DMA: Random memory could be DMA read\n");
+ return dma_addr;
}
/*
@@ -801,7 +663,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
enum dma_data_direction dir,
unsigned long attrs)
{
- phys_addr_t map, phys = page_to_phys(page) + offset;
+ phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
BUG_ON(dir == DMA_NONE);
@@ -810,28 +672,17 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (dma_capable(dev, dev_addr, size) && swiotlb_force != SWIOTLB_FORCE)
- return dev_addr;
-
- trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
-
- /* Oh well, have to allocate and map a bounce buffer. */
- map = map_single(dev, phys, size, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- swiotlb_full(dev, size, dir, 1);
- return __phys_to_dma(dev, io_tlb_overflow_buffer);
+ if (!dma_capable(dev, dev_addr, size) ||
+ swiotlb_force == SWIOTLB_FORCE) {
+ trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
+ dev_addr = swiotlb_bounce_page(dev, &phys, size, dir, attrs);
}
- dev_addr = __phys_to_dma(dev, map);
+ if (!dev_is_dma_coherent(dev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ arch_sync_dma_for_device(dev, phys, size, dir);
- /* Ensure that the address returned is DMA'ble */
- if (dma_capable(dev, dev_addr, size))
- return dev_addr;
-
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);
-
- return __phys_to_dma(dev, io_tlb_overflow_buffer);
+ return dev_addr;
}
/*
@@ -842,14 +693,18 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
* After this call, reads by the cpu to the buffer are guaranteed to see
* whatever the device wrote there.
*/
-static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
+void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
{
phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
BUG_ON(dir == DMA_NONE);
+ if (!dev_is_dma_coherent(hwdev) &&
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+ arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
if (is_swiotlb_buffer(paddr)) {
swiotlb_tbl_unmap_single(hwdev, paddr, size, dir, attrs);
return;
@@ -867,13 +722,6 @@ static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
dma_mark_clean(phys_to_virt(paddr), size);
}
-void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- unmap_single(hwdev, dev_addr, size, dir, attrs);
-}
-
/*
* Make physical memory consistent for a single streaming mode DMA translation
* after a transfer.
@@ -893,15 +741,17 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
BUG_ON(dir == DMA_NONE);
- if (is_swiotlb_buffer(paddr)) {
+ if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_CPU)
+ arch_sync_dma_for_cpu(hwdev, paddr, size, dir);
+
+ if (is_swiotlb_buffer(paddr))
swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
- return;
- }
- if (dir != DMA_FROM_DEVICE)
- return;
+ if (!dev_is_dma_coherent(hwdev) && target == SYNC_FOR_DEVICE)
+ arch_sync_dma_for_device(hwdev, paddr, size, dir);
- dma_mark_clean(phys_to_virt(paddr), size);
+ if (!is_swiotlb_buffer(paddr) && dir == DMA_FROM_DEVICE)
+ dma_mark_clean(phys_to_virt(paddr), size);
}
void
@@ -925,48 +775,31 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
* appropriate dma address and length. They are obtained via
* sg_dma_{address,length}(SG).
*
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
* Device ownership issues as mentioned above for swiotlb_map_page are the
* same here.
*/
int
-swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
- BUG_ON(dir == DMA_NONE);
-
for_each_sg(sgl, sg, nelems, i) {
- phys_addr_t paddr = sg_phys(sg);
- dma_addr_t dev_addr = phys_to_dma(hwdev, paddr);
-
- if (swiotlb_force == SWIOTLB_FORCE ||
- !dma_capable(hwdev, dev_addr, sg->length)) {
- phys_addr_t map = map_single(hwdev, sg_phys(sg),
- sg->length, dir, attrs);
- if (map == SWIOTLB_MAP_ERROR) {
- /* Don't panic here, we expect map_sg users
- to do proper error handling. */
- swiotlb_full(hwdev, sg->length, dir, 0);
- attrs |= DMA_ATTR_SKIP_CPU_SYNC;
- swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
- attrs);
- sg_dma_len(sgl) = 0;
- return 0;
- }
- sg->dma_address = __phys_to_dma(hwdev, map);
- } else
- sg->dma_address = dev_addr;
+ sg->dma_address = swiotlb_map_page(dev, sg_page(sg), sg->offset,
+ sg->length, dir, attrs);
+ if (sg->dma_address == DIRECT_MAPPING_ERROR)
+ goto out_error;
sg_dma_len(sg) = sg->length;
}
+
return nelems;
+
+out_error:
+ swiotlb_unmap_sg_attrs(dev, sgl, i, dir,
+ attrs | DMA_ATTR_SKIP_CPU_SYNC);
+ sg_dma_len(sgl) = 0;
+ return 0;
}
/*
@@ -984,7 +817,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
+ swiotlb_unmap_page(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
@@ -1022,12 +855,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
- return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
@@ -1040,39 +867,10 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
return __phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
-void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, unsigned long attrs)
-{
- void *vaddr;
-
- /* temporary workaround: */
- if (gfp & __GFP_NOWARN)
- attrs |= DMA_ATTR_NO_WARN;
-
- /*
- * Don't print a warning when the first allocation attempt fails.
- * swiotlb_alloc_coherent() will print a warning when the DMA memory
- * allocation ultimately failed.
- */
- gfp |= __GFP_NOWARN;
-
- vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
- if (!vaddr)
- vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
- return vaddr;
-}
-
-void swiotlb_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, unsigned long attrs)
-{
- if (!swiotlb_free_buffer(dev, size, dma_addr))
- dma_direct_free(dev, size, vaddr, dma_addr, attrs);
-}
-
const struct dma_map_ops swiotlb_dma_ops = {
- .mapping_error = swiotlb_dma_mapping_error,
- .alloc = swiotlb_alloc,
- .free = swiotlb_free,
+ .mapping_error = dma_direct_mapping_error,
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
.sync_single_for_device = swiotlb_sync_single_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
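
With the overflow buffer gone, a failed swiotlb mapping now surfaces as DIRECT_MAPPING_ERROR through the ordinary mapping-error path instead of a magic fallback address. A hedged driver-side sketch of the handling this relies on (the foo_* names are hypothetical):

/*
 * Hedged sketch: every mapping must be checked via dma_mapping_error()
 * now that there is no emergency fallback buffer.
 */
#include <linux/dma-mapping.h>

static int foo_map_buffer(struct device *dev, struct page *page,
			  size_t len, dma_addr_t *out)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* mapping failed, no fallback anymore */

	*out = addr;
	return 0;
}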
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 83f830acbb5f..410a77a8f6e2 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -173,7 +173,7 @@ err_unlock:
}
COMPAT_SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
- struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+ struct old_timespec32 __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
struct timespec ts;
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 5d9fc01b60a6..3366d11c3e02 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -183,7 +183,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
* unhappy about. Replace them with ':', which does
* the trick and is not as offensive as '\'...
*/
- name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
+ name = kasprintf(GFP_KERNEL, "%pOF", of_node);
if (!name) {
kfree(domain);
return NULL;
@@ -867,7 +867,7 @@ void irq_dispose_mapping(unsigned int virq)
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
- * irq_find_mapping() - Find a linux irq from an hw irq number.
+ * irq_find_mapping() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*/
@@ -1741,6 +1741,7 @@ static void debugfs_add_domain_dir(struct irq_domain *d)
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
debugfs_remove(d->debugfs_file);
+ d->debugfs_file = NULL;
}
void __init irq_domain_debugfs_init(struct dentry *root)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fb86146037a7..9dbdccab3b6a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -927,6 +927,9 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
local_bh_enable();
return ret;
@@ -943,6 +946,9 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
irqreturn_t ret;
ret = action->thread_fn(action->irq, action->dev_id);
+ if (ret == IRQ_HANDLED)
+ atomic_inc(&desc->threads_handled);
+
irq_finalize_oneshot(desc, action);
return ret;
}
@@ -1020,8 +1026,6 @@ static int irq_thread(void *data)
irq_thread_check_affinity(desc, action);
action_ret = handler_fn(desc, action);
- if (action_ret == IRQ_HANDLED)
- atomic_inc(&desc->threads_handled);
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
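
Moving the accounting into the two invocation wrappers credits a handler's IRQ_HANDLED return on the forced-thread path as well; handlers themselves need no change. A minimal sketch (all foo_* names hypothetical):

/*
 * Hedged sketch: a threaded handler whose IRQ_HANDLED return is now
 * counted via desc->threads_handled in the wrappers above.
 */
#include <linux/interrupt.h>

struct foo_device { void __iomem *regs; };		/* hypothetical */

static bool foo_irq_pending(struct foo_device *foo)	/* hypothetical stub */
{
	return true;
}

static void foo_process_events(struct foo_device *foo)	/* hypothetical stub */
{
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_irq_pending(foo))
		return IRQ_NONE;	/* not credited as handled */

	foo_process_events(foo);
	return IRQ_HANDLED;		/* credited in the wrapper */
}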
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 9bf5404397e0..b77150ad1965 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -16,6 +16,8 @@
* 01Mar01 Andrew Morton
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
@@ -192,16 +194,7 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
return 0;
}
-/*
- * Number of registered extended console drivers.
- *
- * If extended consoles are present, in-kernel cont reassembly is disabled
- * and each fragment is stored as a separate log entry with proper
- * continuation flag so that every emitted message has full metadata. This
- * doesn't change the result for regular consoles or /proc/kmsg. For
- * /dev/kmsg, as long as the reader concatenates messages according to
- * consecutive continuation flags, the end result should be the same too.
- */
+/* Number of registered extended console drivers. */
static int nr_ext_console_drivers;
/*
@@ -423,6 +416,7 @@ static u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
+static u64 exclusive_console_stop_seq;
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
@@ -437,6 +431,7 @@ static u32 clear_idx;
/* record buffer */
#define LOG_ALIGN __alignof__(struct printk_log)
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
+#define LOG_BUF_LEN_MAX (u32)(1 << 31)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
@@ -1037,18 +1032,28 @@ void log_buf_vmcoreinfo_setup(void)
static unsigned long __initdata new_log_buf_len;
/* we practice scaling the ring buffer by powers of 2 */
-static void __init log_buf_len_update(unsigned size)
+static void __init log_buf_len_update(u64 size)
{
+ if (size > (u64)LOG_BUF_LEN_MAX) {
+ size = (u64)LOG_BUF_LEN_MAX;
+ pr_err("log_buf over 2G is not supported.\n");
+ }
+
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
- new_log_buf_len = size;
+ new_log_buf_len = (unsigned long)size;
}
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
- unsigned size = memparse(str, &str);
+ u64 size;
+
+ if (!str)
+ return -EINVAL;
+
+ size = memparse(str, &str);
log_buf_len_update(size);
@@ -1093,7 +1098,7 @@ void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
- int free;
+ unsigned int free;
if (log_buf != __log_buf)
return;
@@ -1113,7 +1118,7 @@ void __init setup_log_buf(int early)
}
if (unlikely(!new_log_buf)) {
- pr_err("log_buf_len: %ld bytes not available\n",
+ pr_err("log_buf_len: %lu bytes not available\n",
new_log_buf_len);
return;
}
@@ -1126,8 +1131,8 @@ void __init setup_log_buf(int early)
memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
logbuf_unlock_irqrestore(flags);
- pr_info("log_buf_len: %d bytes\n", log_buf_len);
- pr_info("early log buf free: %d(%d%%)\n",
+ pr_info("log_buf_len: %u bytes\n", log_buf_len);
+ pr_info("early log buf free: %u(%u%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
}
@@ -1767,12 +1772,8 @@ static void cont_flush(void)
static bool cont_add(int facility, int level, enum log_flags flags, const char *text, size_t len)
{
- /*
- * If ext consoles are present, flush and skip in-kernel
- * continuation. See nr_ext_console_drivers definition. Also, if
- * the line gets too long, split it up in separate records.
- */
- if (nr_ext_console_drivers || cont.len + len > sizeof(cont.buf)) {
+ /* If the line gets too long, split it up in separate records. */
+ if (cont.len + len > sizeof(cont.buf)) {
cont_flush();
return false;
}
@@ -1795,9 +1796,6 @@ static bool cont_add(int facility, int level, enum log_flags flags, const char *
cont_flush();
}
- if (cont.len > (sizeof(cont.buf) * 80) / 100)
- cont_flush();
-
return true;
}
@@ -1889,8 +1887,9 @@ asmlinkage int vprintk_emit(int facility, int level,
const char *fmt, va_list args)
{
int printed_len;
- bool in_sched = false;
+ bool in_sched = false, pending_output;
unsigned long flags;
+ u64 curr_log_seq;
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
@@ -1902,11 +1901,13 @@ asmlinkage int vprintk_emit(int facility, int level,
/* This stops the holder of console_sem just where we want him */
logbuf_lock_irqsave(flags);
+ curr_log_seq = log_next_seq;
printed_len = vprintk_store(facility, level, dict, dictlen, fmt, args);
+ pending_output = (curr_log_seq != log_next_seq);
logbuf_unlock_irqrestore(flags);
/* If called from the scheduler, we can not call up(). */
- if (!in_sched) {
+ if (!in_sched && pending_output) {
/*
* Disable preemption to avoid being preempted while holding
* console_sem which would prevent anyone from printing to
@@ -1923,7 +1924,8 @@ asmlinkage int vprintk_emit(int facility, int level,
preempt_enable();
}
- wake_up_klogd();
+ if (pending_output)
+ wake_up_klogd();
return printed_len;
}
EXPORT_SYMBOL(vprintk_emit);
@@ -2009,6 +2011,7 @@ static u64 syslog_seq;
static u32 syslog_idx;
static u64 console_seq;
static u32 console_idx;
+static u64 exclusive_console_stop_seq;
static u64 log_first_seq;
static u32 log_first_idx;
static u64 log_next_seq;
@@ -2351,8 +2354,9 @@ again:
printk_safe_enter_irqsave(flags);
raw_spin_lock(&logbuf_lock);
if (console_seq < log_first_seq) {
- len = sprintf(text, "** %u printk messages dropped **\n",
- (unsigned)(log_first_seq - console_seq));
+ len = sprintf(text,
+ "** %llu printk messages dropped **\n",
+ log_first_seq - console_seq);
/* messages are gone, move to first one */
console_seq = log_first_seq;
@@ -2376,6 +2380,12 @@ skip:
goto skip;
}
+ /* Output to all consoles once old messages have been replayed. */

+ if (unlikely(exclusive_console &&
+ console_seq >= exclusive_console_stop_seq)) {
+ exclusive_console = NULL;
+ }
+
len += msg_print_text(msg,
console_msg_format & MSG_FORMAT_SYSLOG,
text + len,
@@ -2418,10 +2428,6 @@ skip:
console_locked = 0;
- /* Release the exclusive_console once it is used */
- if (unlikely(exclusive_console))
- exclusive_console = NULL;
-
raw_spin_unlock(&logbuf_lock);
up_console_sem();
@@ -2688,8 +2694,7 @@ void register_console(struct console *newcon)
}
if (newcon->flags & CON_EXTENDED)
- if (!nr_ext_console_drivers++)
- pr_info("printk: continuation disabled due to ext consoles, expect more fragments in /dev/kmsg\n");
+ nr_ext_console_drivers++;
if (newcon->flags & CON_PRINTBUFFER) {
/*
@@ -2699,13 +2704,18 @@ void register_console(struct console *newcon)
logbuf_lock_irqsave(flags);
console_seq = syslog_seq;
console_idx = syslog_idx;
- logbuf_unlock_irqrestore(flags);
/*
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
* the already-registered consoles.
+ *
+ * Set exclusive_console with disabled interrupts to reduce
+ * race window with eventual console_flush_on_panic() that
+ * ignores console_lock.
*/
exclusive_console = newcon;
+ exclusive_console_stop_seq = console_seq;
+ logbuf_unlock_irqrestore(flags);
}
console_unlock();
console_sysfs_notify();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fe0223121883..2e696b03e99d 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5244,7 +5244,7 @@ out_unlock:
* an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
- struct timespec __user *, interval)
+ struct __kernel_timespec __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
@@ -5255,16 +5255,16 @@ SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
return retval;
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
compat_pid_t, pid,
- struct compat_timespec __user *, interval)
+ struct old_timespec32 __user *, interval)
{
struct timespec64 t;
int retval = sched_rr_get_interval(pid, &t);
if (retval == 0)
- retval = compat_put_timespec64(&t, interval);
+ retval = put_old_timespec32(&t, interval);
return retval;
}
#endif
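
Userspace is unaffected by the kernel-side type change; through libc the call still takes a plain struct timespec. A small usage sketch:

/* Hedged userspace sketch: query the calling process's RR quantum. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts) == 0)	/* 0 = calling process */
		printf("RR quantum: %lld.%09ld s\n",
		       (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}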
diff --git a/kernel/signal.c b/kernel/signal.c
index dbd2e4db24cf..17565240b1c6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3165,7 +3165,7 @@ int copy_siginfo_from_user32(struct kernel_siginfo *to,
* @ts: upper bound on process time suspension
*/
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
- const struct timespec *ts)
+ const struct timespec64 *ts)
{
ktime_t *to = NULL, timeout = KTIME_MAX;
struct task_struct *tsk = current;
@@ -3173,9 +3173,9 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
int sig, ret = 0;
if (ts) {
- if (!timespec_valid(ts))
+ if (!timespec64_valid(ts))
return -EINVAL;
- timeout = timespec_to_ktime(*ts);
+ timeout = timespec64_to_ktime(*ts);
to = &timeout;
}
@@ -3223,11 +3223,12 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
* @sigsetsize: size of sigset_t type
*/
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
- siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+ siginfo_t __user *, uinfo,
+ const struct __kernel_timespec __user *, uts,
size_t, sigsetsize)
{
sigset_t these;
- struct timespec ts;
+ struct timespec64 ts;
kernel_siginfo_t info;
int ret;
@@ -3239,7 +3240,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
return -EFAULT;
if (uts) {
- if (copy_from_user(&ts, uts, sizeof(ts)))
+ if (get_timespec64(&ts, uts))
return -EFAULT;
}
@@ -3256,10 +3257,10 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
struct compat_siginfo __user *, uinfo,
- struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
+ struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
sigset_t s;
- struct timespec t;
+ struct timespec64 t;
kernel_siginfo_t info;
long ret;
@@ -3270,7 +3271,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
return -EFAULT;
if (uts) {
- if (compat_get_timespec(&t, uts))
+ if (get_old_timespec32(&t, uts))
return -EFAULT;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7a0720a20003..d28813306b2c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -257,9 +257,9 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
int softirq_bit;
/*
- * Mask out PF_MEMALLOC s current task context is borrowed for the
- * softirq. A softirq handled such as network RX might set PF_MEMALLOC
- * again if the socket is related to swap
+ * Mask out PF_MEMALLOC as the current task context is borrowed for the
+ * softirq. A softirq handled, such as network RX, might set PF_MEMALLOC
+ * again if the socket is related to swapping.
*/
current->flags &= ~PF_MEMALLOC;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e1a549c9e399..9cdd74bd2d27 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1660,7 +1660,7 @@ int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
switch(restart->nanosleep.type) {
#ifdef CONFIG_COMPAT_32BIT_TIME
case TT_COMPAT:
- if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
+ if (put_old_timespec32(ts, restart->nanosleep.compat_rmtp))
return -EFAULT;
break;
#endif
@@ -1780,12 +1780,12 @@ SYSCALL_DEFINE2(nanosleep, struct __kernel_timespec __user *, rqtp,
#ifdef CONFIG_COMPAT_32BIT_TIME
-COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+COMPAT_SYSCALL_DEFINE2(nanosleep, struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
struct timespec64 tu;
- if (compat_get_timespec64(&tu, rqtp))
+ if (get_old_timespec32(&tu, rqtp))
return -EFAULT;
if (!timespec64_valid(&tu))
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c
index 2c6847d5d69b..989ccf028bde 100644
--- a/kernel/time/posix-stubs.c
+++ b/kernel/time/posix-stubs.c
@@ -162,20 +162,20 @@ COMPAT_SYS_NI(setitimer);
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
struct timespec64 new_tp;
if (which_clock != CLOCK_REALTIME)
return -EINVAL;
- if (compat_get_timespec64(&new_tp, tp))
+ if (get_old_timespec32(&new_tp, tp))
return -EFAULT;
return do_sys_settimeofday64(&new_tp, NULL);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
int ret;
struct timespec64 kernel_tp;
@@ -184,13 +184,13 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
if (ret)
return ret;
- if (compat_put_timespec64(&kernel_tp, tp))
+ if (put_old_timespec32(&kernel_tp, tp))
return -EFAULT;
return 0;
}
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
struct timespec64 rtn_tp = {
.tv_sec = 0,
@@ -201,7 +201,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
case CLOCK_REALTIME:
case CLOCK_MONOTONIC:
case CLOCK_BOOTTIME:
- if (compat_put_timespec64(&rtn_tp, tp))
+ if (put_old_timespec32(&rtn_tp, tp))
return -EFAULT;
return 0;
default:
@@ -210,8 +210,8 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
}
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
struct timespec64 t;
@@ -224,7 +224,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
return -EINVAL;
}
- if (compat_get_timespec64(&t, rqtp))
+ if (get_old_timespec32(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
return -EINVAL;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index eabb4c22728d..bd62b5eeb5a0 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -755,13 +755,13 @@ SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
- struct compat_itimerspec __user *, setting)
+ struct old_itimerspec32 __user *, setting)
{
struct itimerspec64 cur_setting;
int ret = do_timer_gettime(timer_id, &cur_setting);
if (!ret) {
- if (put_compat_itimerspec64(&cur_setting, setting))
+ if (put_old_itimerspec32(&cur_setting, setting))
ret = -EFAULT;
}
return ret;
@@ -928,8 +928,8 @@ SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
- struct compat_itimerspec __user *, new,
- struct compat_itimerspec __user *, old)
+ struct old_itimerspec32 __user *, new,
+ struct old_itimerspec32 __user *, old)
{
struct itimerspec64 new_spec, old_spec;
struct itimerspec64 *rtn = old ? &old_spec : NULL;
@@ -937,12 +937,12 @@ COMPAT_SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
if (!new)
return -EINVAL;
- if (get_compat_itimerspec64(&new_spec, new))
+ if (get_old_itimerspec32(&new_spec, new))
return -EFAULT;
error = do_timer_settime(timer_id, flags, &new_spec, rtn);
if (!error && old) {
- if (put_compat_itimerspec64(&old_spec, old))
+ if (put_old_itimerspec32(&old_spec, old))
error = -EFAULT;
}
return error;
@@ -1115,7 +1115,7 @@ SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1123,14 +1123,14 @@ COMPAT_SYSCALL_DEFINE2(clock_settime, clockid_t, which_clock,
if (!kc || !kc->clock_set)
return -EINVAL;
- if (compat_get_timespec64(&ts, tp))
+ if (get_old_timespec32(&ts, tp))
return -EFAULT;
return kc->clock_set(which_clock, &ts);
}
COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1141,7 +1141,7 @@ COMPAT_SYSCALL_DEFINE2(clock_gettime, clockid_t, which_clock,
err = kc->clock_get(which_clock, &ts);
- if (!err && compat_put_timespec64(&ts, tp))
+ if (!err && put_old_timespec32(&ts, tp))
err = -EFAULT;
return err;
@@ -1180,7 +1180,7 @@ COMPAT_SYSCALL_DEFINE2(clock_adjtime, clockid_t, which_clock,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
- struct compat_timespec __user *, tp)
+ struct old_timespec32 __user *, tp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 ts;
@@ -1190,7 +1190,7 @@ COMPAT_SYSCALL_DEFINE2(clock_getres, clockid_t, which_clock,
return -EINVAL;
err = kc->clock_getres(which_clock, &ts);
- if (!err && tp && compat_put_timespec64(&ts, tp))
+ if (!err && tp && put_old_timespec32(&ts, tp))
return -EFAULT;
return err;
@@ -1237,8 +1237,8 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
- struct compat_timespec __user *, rqtp,
- struct compat_timespec __user *, rmtp)
+ struct old_timespec32 __user *, rqtp,
+ struct old_timespec32 __user *, rmtp)
{
const struct k_clock *kc = clockid_to_kclock(which_clock);
struct timespec64 t;
@@ -1248,7 +1248,7 @@ COMPAT_SYSCALL_DEFINE4(clock_nanosleep, clockid_t, which_clock, int, flags,
if (!kc->nsleep)
return -EOPNOTSUPP;
- if (compat_get_timespec64(&t, rqtp))
+ if (get_old_timespec32(&t, rqtp))
return -EFAULT;
if (!timespec64_valid(&t))
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index aa2094d5dd27..be0aac2b4300 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -400,8 +400,6 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
if (tick_broadcast_forced)
break;
cpumask_clear_cpu(cpu, tick_broadcast_on);
- if (!tick_device_is_functional(dev))
- break;
if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
if (tick_broadcast_device.mode ==
TICKDEV_MODE_PERIODIC)
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5b33e2f5c0ed..69e673b88474 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -885,7 +885,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
if (need_resched())
return false;
- if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
+ if (unlikely(local_softirq_pending())) {
static int ratelimit;
if (ratelimit < 10 &&
diff --git a/kernel/time/time.c b/kernel/time/time.c
index ccdb351277ee..e3a7f7fd3abc 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -104,12 +104,12 @@ SYSCALL_DEFINE1(stime, time_t __user *, tptr)
#ifdef CONFIG_COMPAT
#ifdef __ARCH_WANT_COMPAT_SYS_TIME
-/* compat_time_t is a 32 bit "long" and needs to get converted. */
-COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
+/* old_time32_t is a 32 bit "long" and needs to get converted. */
+COMPAT_SYSCALL_DEFINE1(time, old_time32_t __user *, tloc)
{
- compat_time_t i;
+ old_time32_t i;
- i = (compat_time_t)ktime_get_real_seconds();
+ i = (old_time32_t)ktime_get_real_seconds();
if (tloc) {
if (put_user(i,tloc))
@@ -119,7 +119,7 @@ COMPAT_SYSCALL_DEFINE1(time, compat_time_t __user *, tloc)
return i;
}
-COMPAT_SYSCALL_DEFINE1(stime, compat_time_t __user *, tptr)
+COMPAT_SYSCALL_DEFINE1(stime, old_time32_t __user *, tptr)
{
struct timespec64 tv;
int err;
@@ -144,9 +144,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
struct timezone __user *, tz)
{
if (likely(tv != NULL)) {
- struct timeval ktv;
- do_gettimeofday(&ktv);
- if (copy_to_user(tv, &ktv, sizeof(ktv)))
+ struct timespec64 ts;
+
+ ktime_get_real_ts64(&ts);
+ if (put_user(ts.tv_sec, &tv->tv_sec) ||
+ put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (unlikely(tz != NULL)) {
@@ -223,14 +225,15 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
}
#ifdef CONFIG_COMPAT
-COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
+COMPAT_SYSCALL_DEFINE2(gettimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
if (tv) {
- struct timeval ktv;
+ struct timespec64 ts;
- do_gettimeofday(&ktv);
- if (compat_put_timeval(&ktv, tv))
+ ktime_get_real_ts64(&ts);
+ if (put_user(ts.tv_sec, &tv->tv_sec) ||
+ put_user(ts.tv_nsec / 1000, &tv->tv_usec))
return -EFAULT;
}
if (tz) {
@@ -241,7 +244,7 @@ COMPAT_SYSCALL_DEFINE2(gettimeofday, struct compat_timeval __user *, tv,
return 0;
}
-COMPAT_SYSCALL_DEFINE2(settimeofday, struct compat_timeval __user *, tv,
+COMPAT_SYSCALL_DEFINE2(settimeofday, struct old_timeval32 __user *, tv,
struct timezone __user *, tz)
{
struct timespec64 new_ts;
@@ -342,30 +345,6 @@ unsigned int jiffies_to_usecs(const unsigned long j)
}
EXPORT_SYMBOL(jiffies_to_usecs);
-/**
- * timespec_trunc - Truncate timespec to a granularity
- * @t: Timespec
- * @gran: Granularity in ns.
- *
- * Truncate a timespec to a granularity. Always rounds down. gran must
- * not be 0 nor greater than a second (NSEC_PER_SEC, or 10^9 ns).
- */
-struct timespec timespec_trunc(struct timespec t, unsigned gran)
-{
- /* Avoid division in the common cases 1 ns and 1 s. */
- if (gran == 1) {
- /* nothing */
- } else if (gran == NSEC_PER_SEC) {
- t.tv_nsec = 0;
- } else if (gran > 1 && gran < NSEC_PER_SEC) {
- t.tv_nsec -= t.tv_nsec % gran;
- } else {
- WARN(1, "illegal file time granularity: %u", gran);
- }
- return t;
-}
-EXPORT_SYMBOL(timespec_trunc);
-
/*
* mktime64 - Converts date to seconds.
* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
@@ -884,10 +863,10 @@ int put_timespec64(const struct timespec64 *ts,
}
EXPORT_SYMBOL_GPL(put_timespec64);
-int __compat_get_timespec64(struct timespec64 *ts64,
- const struct compat_timespec __user *cts)
+static int __get_old_timespec32(struct timespec64 *ts64,
+ const struct old_timespec32 __user *cts)
{
- struct compat_timespec ts;
+ struct old_timespec32 ts;
int ret;
ret = copy_from_user(&ts, cts, sizeof(ts));
@@ -900,33 +879,33 @@ int __compat_get_timespec64(struct timespec64 *ts64,
return 0;
}
-int __compat_put_timespec64(const struct timespec64 *ts64,
- struct compat_timespec __user *cts)
+static int __put_old_timespec32(const struct timespec64 *ts64,
+ struct old_timespec32 __user *cts)
{
- struct compat_timespec ts = {
+ struct old_timespec32 ts = {
.tv_sec = ts64->tv_sec,
.tv_nsec = ts64->tv_nsec
};
return copy_to_user(cts, &ts, sizeof(ts)) ? -EFAULT : 0;
}
-int compat_get_timespec64(struct timespec64 *ts, const void __user *uts)
+int get_old_timespec32(struct timespec64 *ts, const void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
else
- return __compat_get_timespec64(ts, uts);
+ return __get_old_timespec32(ts, uts);
}
-EXPORT_SYMBOL_GPL(compat_get_timespec64);
+EXPORT_SYMBOL_GPL(get_old_timespec32);
-int compat_put_timespec64(const struct timespec64 *ts, void __user *uts)
+int put_old_timespec32(const struct timespec64 *ts, void __user *uts)
{
if (COMPAT_USE_64BIT_TIME)
return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
else
- return __compat_put_timespec64(ts, uts);
+ return __put_old_timespec32(ts, uts);
}
-EXPORT_SYMBOL_GPL(compat_put_timespec64);
+EXPORT_SYMBOL_GPL(put_old_timespec32);
int get_itimerspec64(struct itimerspec64 *it,
const struct __kernel_itimerspec __user *uit)
@@ -958,23 +937,23 @@ int put_itimerspec64(const struct itimerspec64 *it,
}
EXPORT_SYMBOL_GPL(put_itimerspec64);
-int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits)
+int get_old_itimerspec32(struct itimerspec64 *its,
+ const struct old_itimerspec32 __user *uits)
{
- if (__compat_get_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_get_timespec64(&its->it_value, &uits->it_value))
+ if (__get_old_timespec32(&its->it_interval, &uits->it_interval) ||
+ __get_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL_GPL(get_compat_itimerspec64);
+EXPORT_SYMBOL_GPL(get_old_itimerspec32);
-int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits)
+int put_old_itimerspec32(const struct itimerspec64 *its,
+ struct old_itimerspec32 __user *uits)
{
- if (__compat_put_timespec64(&its->it_interval, &uits->it_interval) ||
- __compat_put_timespec64(&its->it_value, &uits->it_value))
+ if (__put_old_timespec32(&its->it_interval, &uits->it_interval) ||
+ __put_old_timespec32(&its->it_value, &uits->it_value))
return -EFAULT;
return 0;
}
-EXPORT_SYMBOL_GPL(put_compat_itimerspec64);
+EXPORT_SYMBOL_GPL(put_old_itimerspec32);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f3b22f456fac..2d110c948805 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1212,22 +1212,6 @@ int get_device_system_crosststamp(int (*get_time_fn)
EXPORT_SYMBOL_GPL(get_device_system_crosststamp);
/**
- * do_gettimeofday - Returns the time of day in a timeval
- * @tv: pointer to the timeval to be set
- *
- * NOTE: Users should be converted to using getnstimeofday()
- */
-void do_gettimeofday(struct timeval *tv)
-{
- struct timespec64 now;
-
- getnstimeofday64(&now);
- tv->tv_sec = now.tv_sec;
- tv->tv_usec = now.tv_nsec/1000;
-}
-EXPORT_SYMBOL(do_gettimeofday);
-
-/**
* do_settimeofday64 - Sets the time of day.
* @ts: pointer to the timespec64 variable containing the new time
*
@@ -2174,14 +2158,6 @@ void getboottime64(struct timespec64 *ts)
}
EXPORT_SYMBOL_GPL(getboottime64);
-unsigned long get_seconds(void)
-{
- struct timekeeper *tk = &tk_core.timekeeper;
-
- return tk->xtime_sec;
-}
-EXPORT_SYMBOL(get_seconds);
-
void ktime_get_coarse_real_ts64(struct timespec64 *ts)
{
struct timekeeper *tk = &tk_core.timekeeper;
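
Remaining callers of the two helpers removed above are expected to move to the y2038-safe replacements; a hedged sketch (foo_show_time is hypothetical):

/* Hedged sketch: in-tree replacements for the removed helpers. */
#include <linux/kernel.h>
#include <linux/timekeeping.h>

static void foo_show_time(void)
{
	struct timespec64 ts;
	time64_t secs;

	ktime_get_real_ts64(&ts);		/* replaces do_gettimeofday() */
	secs = ktime_get_real_seconds();	/* replaces get_seconds() */

	pr_info("now: %lld.%09ld (secs=%lld)\n",
		(long long)ts.tv_sec, ts.tv_nsec, (long long)secs);
}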
diff --git a/lib/Kconfig b/lib/Kconfig
index a3928d4438b5..d82f20609939 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -621,3 +621,6 @@ config GENERIC_LIB_CMPDI2
config GENERIC_LIB_UCMPDI2
bool
+
+config GENERIC_LIB_UMODDI3
+ bool
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8d24f4ed66fd..04adfc3b185e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1965,6 +1965,14 @@ config TEST_DEBUG_VIRTUAL
If unsure, say N.
+config TEST_MEMCAT_P
+ tristate "Test memcat_p() helper function"
+ help
+ Test the memcat_p() helper for correctly merging two
+ pointer arrays together.
+
+ If unsure, say N.
+
endif # RUNTIME_TESTING_MENU
config MEMTEST
diff --git a/lib/Makefile b/lib/Makefile
index 423876446810..fa3eb1b4c0e3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -24,7 +24,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
- nmi_backtrace.o nodemask.o win_minmax.o
+ nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_MMU) += ioremap.o
@@ -71,6 +71,7 @@ obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
obj-$(CONFIG_TEST_DEBUG_VIRTUAL) += test_debug_virtual.o
+obj-$(CONFIG_TEST_MEMCAT_P) += test_memcat_p.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -270,3 +271,4 @@ obj-$(CONFIG_GENERIC_LIB_LSHRDI3) += lshrdi3.o
obj-$(CONFIG_GENERIC_LIB_MULDI3) += muldi3.o
obj-$(CONFIG_GENERIC_LIB_CMPDI2) += cmpdi2.o
obj-$(CONFIG_GENERIC_LIB_UCMPDI2) += ucmpdi2.o
+obj-$(CONFIG_GENERIC_LIB_UMODDI3) += umoddi3.o udivmoddi4.o
diff --git a/lib/chacha20.c b/lib/chacha20.c
index c1cc50fb68c9..d907fec6a9ed 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,9 +16,9 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-void chacha20_block(u32 *state, u32 *stream)
+void chacha20_block(u32 *state, u8 *stream)
{
- u32 x[16], *out = stream;
+ u32 x[16];
int i;
for (i = 0; i < ARRAY_SIZE(x); i++)
@@ -67,7 +67,7 @@ void chacha20_block(u32 *state, u32 *stream)
}
for (i = 0; i < ARRAY_SIZE(x); i++)
- out[i] = cpu_to_le32(x[i] + state[i]);
+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]);
state[12]++;
}
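
Since the stream is now filled byte-wise via put_unaligned_le32(), callers no longer need a word-aligned output buffer. A hedged call sketch (foo_keystream is hypothetical):

/* Hedged sketch: keystream extraction with the byte-oriented API. */
#include <crypto/chacha20.h>

static void foo_keystream(u32 state[16], u8 out[CHACHA20_BLOCK_SIZE])
{
	/* 'out' may be unaligned; the call also advances the block
	 * counter in state[12] */
	chacha20_block(state, out);
}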
diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c
index 1ad33e555805..4d0d47c1ffbd 100644
--- a/lib/crc-t10dif.c
+++ b/lib/crc-t10dif.c
@@ -14,10 +14,47 @@
#include <linux/err.h>
#include <linux/init.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include <linux/static_key.h>
+#include <linux/notifier.h>
-static struct crypto_shash *crct10dif_tfm;
+static struct crypto_shash __rcu *crct10dif_tfm;
static struct static_key crct10dif_fallback __read_mostly;
+static DEFINE_MUTEX(crc_t10dif_mutex);
+
+static int crc_t10dif_rehash(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct crypto_alg *alg = data;
+ struct crypto_shash *new, *old;
+
+ if (val != CRYPTO_MSG_ALG_LOADED ||
+ static_key_false(&crct10dif_fallback) ||
+ strncmp(alg->cra_name, CRC_T10DIF_STRING, strlen(CRC_T10DIF_STRING)))
+ return 0;
+
+ mutex_lock(&crc_t10dif_mutex);
+ old = rcu_dereference_protected(crct10dif_tfm,
+ lockdep_is_held(&crc_t10dif_mutex));
+ if (!old) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ new = crypto_alloc_shash("crct10dif", 0, 0);
+ if (IS_ERR(new)) {
+ mutex_unlock(&crc_t10dif_mutex);
+ return 0;
+ }
+ rcu_assign_pointer(crct10dif_tfm, new);
+ mutex_unlock(&crc_t10dif_mutex);
+
+ synchronize_rcu();
+ crypto_free_shash(old);
+ return 0;
+}
+
+static struct notifier_block crc_t10dif_nb = {
+ .notifier_call = crc_t10dif_rehash,
+};
__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
{
@@ -30,11 +67,14 @@ __u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len)
if (static_key_false(&crct10dif_fallback))
return crc_t10dif_generic(crc, buffer, len);
- desc.shash.tfm = crct10dif_tfm;
+ rcu_read_lock();
+ desc.shash.tfm = rcu_dereference(crct10dif_tfm);
desc.shash.flags = 0;
*(__u16 *)desc.ctx = crc;
err = crypto_shash_update(&desc.shash, buffer, len);
+ rcu_read_unlock();
+
BUG_ON(err);
return *(__u16 *)desc.ctx;
@@ -49,6 +89,7 @@ EXPORT_SYMBOL(crc_t10dif);
static int __init crc_t10dif_mod_init(void)
{
+ crypto_register_notifier(&crc_t10dif_nb);
crct10dif_tfm = crypto_alloc_shash("crct10dif", 0, 0);
if (IS_ERR(crct10dif_tfm)) {
static_key_slow_inc(&crct10dif_fallback);
@@ -59,12 +100,24 @@ static int __init crc_t10dif_mod_init(void)
static void __exit crc_t10dif_mod_fini(void)
{
+ crypto_unregister_notifier(&crc_t10dif_nb);
crypto_free_shash(crct10dif_tfm);
}
module_init(crc_t10dif_mod_init);
module_exit(crc_t10dif_mod_fini);
+static int crc_t10dif_transform_show(char *buffer, const struct kernel_param *kp)
+{
+ if (static_key_false(&crct10dif_fallback))
+ return sprintf(buffer, "fallback\n");
+
+ return sprintf(buffer, "%s\n",
+ crypto_tfm_alg_driver_name(crypto_shash_tfm(crct10dif_tfm)));
+}
+
+module_param_call(transform, NULL, crc_t10dif_transform_show, NULL, 0644);
+
MODULE_DESCRIPTION("T10 DIF CRC calculation");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: crct10dif");
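
None of this changes the calling convention; the new module parameter only reports which transform ended up selected. A hedged usage sketch (foo_check is hypothetical):

/* Hedged sketch: callers are unchanged by the rehash support. */
#include <linux/crc-t10dif.h>

static void foo_check(const unsigned char *buf, size_t len)
{
	__u16 crc = crc_t10dif(buf, len);		/* one-shot */

	crc = crc_t10dif_update(crc, buf, len);		/* incremental */
	(void)crc;
}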
diff --git a/lib/memcat_p.c b/lib/memcat_p.c
new file mode 100644
index 000000000000..b810fbc66962
--- /dev/null
+++ b/lib/memcat_p.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/slab.h>
+
+/*
+ * Merge two NULL-terminated pointer arrays into a newly allocated
+ * array, which is also NULL-terminated. Nomenclature is inspired by
+ * memset_p() and memcat() found elsewhere in the kernel source tree.
+ */
+void **__memcat_p(void **a, void **b)
+{
+ void **p = a, **new;
+ int nr;
+
+ /* count the elements in both arrays */
+ for (nr = 0, p = a; *p; nr++, p++)
+ ;
+ for (p = b; *p; nr++, p++)
+ ;
+ /* one for the NULL-terminator */
+ nr++;
+
+ new = kmalloc_array(nr, sizeof(void *), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ /* nr -> last index; p points to NULL in b[] */
+ for (nr--; nr >= 0; nr--, p = p == b ? &a[nr] : p - 1)
+ new[nr] = *p;
+
+ return new;
+}
+EXPORT_SYMBOL_GPL(__memcat_p);
+
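The backward fill is the subtle part: p starts at b's NULL terminator and walks down through b, and the moment it reaches &b[0] it jumps to the matching tail index of a, so a single loop copies both arrays plus the terminator. A trace for two small hypothetical inputs:

/* Trace sketch for a = {A0, A1, NULL}, b = {B0, NULL}; after counting,
 * nr == 4 and p == &b[1] (b's NULL):
 *
 *   nr = 3: new[3] = NULL  (p = &b[1], the terminator)
 *   nr = 2: new[2] = B0    (p = &b[0])
 *   nr = 1: new[1] = A1    (p == b, so p jumps to &a[1])
 *   nr = 0: new[0] = A0    (p = &a[0])
 *
 * Callers are expected to go through the type-preserving memcat_p()
 * wrapper (used by lib/test_memcat_p.c below) and to kfree() the result.
 */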
diff --git a/lib/string.c b/lib/string.c
index 2c0900a5d51a..38e4ca08e757 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -27,6 +27,7 @@
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/errno.h>
+#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>
diff --git a/lib/test_memcat_p.c b/lib/test_memcat_p.c
new file mode 100644
index 000000000000..849c477d49d0
--- /dev/null
+++ b/lib/test_memcat_p.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for memcat_p() in lib/memcat_p.c
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+struct test_struct {
+ int num;
+ unsigned int magic;
+};
+
+#define MAGIC 0xf00ff00f
+/* Size of each of the NULL-terminated input arrays */
+#define INPUT_MAX 128
+/* Expected number of non-NULL elements in the output array */
+#define EXPECT (INPUT_MAX * 2 - 2)
+
+static int __init test_memcat_p_init(void)
+{
+ struct test_struct **in0, **in1, **out, **p;
+ int err = -ENOMEM, i, r, total = 0;
+
+ in0 = kcalloc(INPUT_MAX, sizeof(*in0), GFP_KERNEL);
+ if (!in0)
+ return err;
+
+ in1 = kcalloc(INPUT_MAX, sizeof(*in1), GFP_KERNEL);
+ if (!in1)
+ goto err_free_in0;
+
+ for (i = 0, r = 1; i < INPUT_MAX - 1; i++) {
+ in0[i] = kmalloc(sizeof(**in0), GFP_KERNEL);
+ if (!in0[i])
+ goto err_free_elements;
+
+ in1[i] = kmalloc(sizeof(**in1), GFP_KERNEL);
+ if (!in1[i]) {
+ kfree(in0[i]);
+ goto err_free_elements;
+ }
+
+ /* lifted from test_sort.c */
+ r = (r * 725861) % 6599;
+ in0[i]->num = r;
+ in1[i]->num = -r;
+ in0[i]->magic = MAGIC;
+ in1[i]->magic = MAGIC;
+ }
+
+ in0[i] = in1[i] = NULL;
+
+ out = memcat_p(in0, in1);
+ if (!out)
+ goto err_free_all_elements;
+
+ err = -EINVAL;
+ for (i = 0, p = out; *p && (i < INPUT_MAX * 2 - 1); p++, i++) {
+ total += (*p)->num;
+
+ if ((*p)->magic != MAGIC) {
+ pr_err("test failed: wrong magic at %d: %u\n", i,
+ (*p)->magic);
+ goto err_free_out;
+ }
+ }
+
+ if (total) {
+ pr_err("test failed: expected zero total, got %d\n", total);
+ goto err_free_out;
+ }
+
+ if (i != EXPECT) {
+ pr_err("test failed: expected output size %d, got %d\n",
+ EXPECT, i);
+ goto err_free_out;
+ }
+
+ for (i = 0; i < INPUT_MAX - 1; i++)
+ if (out[i] != in0[i] || out[i + INPUT_MAX - 1] != in1[i]) {
+ pr_err("test failed: wrong element order at %d\n", i);
+ goto err_free_out;
+ }
+
+ err = 0;
+ pr_info("test passed\n");
+
+err_free_out:
+ kfree(out);
+err_free_all_elements:
+ i = INPUT_MAX;
+err_free_elements:
+ for (i--; i >= 0; i--) {
+ kfree(in1[i]);
+ kfree(in0[i]);
+ }
+
+ kfree(in1);
+err_free_in0:
+ kfree(in0);
+
+ return err;
+}
+
+static void __exit test_memcat_p_exit(void)
+{
+}
+
+module_init(test_memcat_p_init);
+module_exit(test_memcat_p_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/udivmoddi4.c b/lib/udivmoddi4.c
new file mode 100644
index 000000000000..c08bc8a5f1cf
--- /dev/null
+++ b/lib/udivmoddi4.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
+ */
+
+#include <linux/libgcc.h>
+
+#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clz(X))
+
+#define W_TYPE_SIZE 32
+
+#define __ll_B ((unsigned long) 1 << (W_TYPE_SIZE / 2))
+#define __ll_lowpart(t) ((unsigned long) (t) & (__ll_B - 1))
+#define __ll_highpart(t) ((unsigned long) (t) >> (W_TYPE_SIZE / 2))
+
+/* If we still don't have umul_ppmm, define it using plain C. */
+#if !defined(umul_ppmm)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ unsigned long __x0, __x1, __x2, __x3; \
+ unsigned short __ul, __vl, __uh, __vh; \
+ \
+ __ul = __ll_lowpart(u); \
+ __uh = __ll_highpart(u); \
+ __vl = __ll_lowpart(v); \
+ __vh = __ll_highpart(v); \
+ \
+ __x0 = (unsigned long) __ul * __vl; \
+ __x1 = (unsigned long) __ul * __vh; \
+ __x2 = (unsigned long) __uh * __vl; \
+ __x3 = (unsigned long) __uh * __vh; \
+ \
+ __x1 += __ll_highpart(__x0); \
+ __x1 += __x2; \
+ if (__x1 < __x2) \
+ __x3 += __ll_B; \
+ \
+ (w1) = __x3 + __ll_highpart(__x1); \
+ (w0) = __ll_lowpart(__x1) * __ll_B + __ll_lowpart(__x0);\
+ } while (0)
+#endif
+
+#if !defined(sub_ddmmss)
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+ do { \
+ unsigned long __x; \
+ __x = (al) - (bl); \
+ (sh) = (ah) - (bh) - (__x > (al)); \
+ (sl) = __x; \
+ } while (0)
+#endif
+
+/* Define this unconditionally, so it can be used for debugging. */
+#define __udiv_qrnnd_c(q, r, n1, n0, d) \
+ do { \
+ unsigned long __d1, __d0, __q1, __q0; \
+ unsigned long __r1, __r0, __m; \
+ __d1 = __ll_highpart(d); \
+ __d0 = __ll_lowpart(d); \
+ \
+ __r1 = (n1) % __d1; \
+ __q1 = (n1) / __d1; \
+ __m = (unsigned long) __q1 * __d0; \
+ __r1 = __r1 * __ll_B | __ll_highpart(n0); \
+ if (__r1 < __m) { \
+ __q1--, __r1 += (d); \
+ if (__r1 >= (d)) \
+ if (__r1 < __m) \
+ __q1--, __r1 += (d); \
+ } \
+ __r1 -= __m; \
+ \
+ __r0 = __r1 % __d1; \
+ __q0 = __r1 / __d1; \
+ __m = (unsigned long) __q0 * __d0; \
+ __r0 = __r0 * __ll_B | __ll_lowpart(n0); \
+ if (__r0 < __m) { \
+ __q0--, __r0 += (d); \
+ if (__r0 >= (d)) \
+ if (__r0 < __m) \
+ __q0--, __r0 += (d); \
+ } \
+ __r0 -= __m; \
+ \
+ (q) = (unsigned long) __q1 * __ll_B | __q0; \
+ (r) = __r0; \
+ } while (0)
+
+/* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */
+#if !defined(udiv_qrnnd)
+#define UDIV_NEEDS_NORMALIZATION 1
+#define udiv_qrnnd __udiv_qrnnd_c
+#endif
+
+unsigned long long __udivmoddi4(unsigned long long u, unsigned long long v,
+ unsigned long long *rp)
+{
+ const DWunion nn = {.ll = u };
+ const DWunion dd = {.ll = v };
+ DWunion rr, ww;
+ unsigned long d0, d1, n0, n1, n2;
+ unsigned long q0 = 0, q1 = 0;
+ unsigned long b, bm;
+
+ d0 = dd.s.low;
+ d1 = dd.s.high;
+ n0 = nn.s.low;
+ n1 = nn.s.high;
+
+#if !UDIV_NEEDS_NORMALIZATION
+
+ if (d1 == 0) {
+ if (d0 > n1) {
+ /* 0q = nn / 0D */
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0. */
+ } else {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ /* Divide intentionally by zero. */
+ d0 = 1 / d0;
+
+ udiv_qrnnd(q1, n1, 0, n1, d0);
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+
+ /* Remainder in n0. */
+ }
+
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+
+#else /* UDIV_NEEDS_NORMALIZATION */
+
+ if (d1 == 0) {
+ if (d0 > n1) {
+ /* 0q = nn / 0D */
+
+ count_leading_zeros(bm, d0);
+
+ if (bm != 0) {
+ /*
+ * Normalize, i.e. make the most significant bit
+ * of the denominator set.
+ */
+
+ d0 = d0 << bm;
+ n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
+ n0 = n0 << bm;
+ }
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+ q1 = 0;
+
+ /* Remainder in n0 >> bm. */
+ } else {
+ /* qq = NN / 0d */
+
+ if (d0 == 0)
+ /* Divide intentionally by zero. */
+ d0 = 1 / d0;
+
+ count_leading_zeros(bm, d0);
+
+ if (bm == 0) {
+ /*
+ * From (n1 >= d0) /\ (the most significant bit
+ * of d0 is set), conclude (the most significant
+ * bit of n1 is set) /\ (theleading quotient
+ * bit of n1 is set) /\ (the leading quotient
+ * digit q1 = 1).
+ *
+ * This special case is necessary, not an
+ * optimization. (Shifts counts of W_TYPE_SIZE
+ * are undefined.)
+ */
+
+ n1 -= d0;
+ q1 = 1;
+ } else {
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd(q1, n1, n2, n1, d0);
+ }
+
+ /* n1 != d0... */
+
+ udiv_qrnnd(q0, n0, n1, n0, d0);
+
+ /* Remainder in n0 >> bm. */
+ }
+
+ if (rp != 0) {
+ rr.s.low = n0 >> bm;
+ rr.s.high = 0;
+ *rp = rr.ll;
+ }
+
+#endif /* UDIV_NEEDS_NORMALIZATION */
+
+ } else {
+ if (d1 > n1) {
+ /* 00 = nn / DD */
+
+ q0 = 0;
+ q1 = 0;
+
+ /* Remainder in n1n0. */
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ } else {
+ /* 0q = NN / dd */
+
+ count_leading_zeros(bm, d1);
+ if (bm == 0) {
+ /*
+ * From (n1 >= d1) /\ (the most significant bit
+ * of d1 is set), conclude (the most significant
+ * bit of n1 is set) /\ (the quotient digit q0 =
+ * 0 or 1).
+ *
+ * This special case is necessary, not an
+ * optimization.
+ */
+
+ /*
+ * The condition on the next line takes
+ * advantage of that n1 >= d1 (true due to
+ * program flow).
+ */
+ if (n1 > d1 || n0 >= d0) {
+ q0 = 1;
+ sub_ddmmss(n1, n0, n1, n0, d1, d0);
+ } else {
+ q0 = 0;
+ }
+
+ q1 = 0;
+
+ if (rp != 0) {
+ rr.s.low = n0;
+ rr.s.high = n1;
+ *rp = rr.ll;
+ }
+ } else {
+ unsigned long m1, m0;
+ /* Normalize. */
+
+ b = W_TYPE_SIZE - bm;
+
+ d1 = (d1 << bm) | (d0 >> b);
+ d0 = d0 << bm;
+ n2 = n1 >> b;
+ n1 = (n1 << bm) | (n0 >> b);
+ n0 = n0 << bm;
+
+ udiv_qrnnd(q0, n1, n2, n1, d1);
+ umul_ppmm(m1, m0, q0, d0);
+
+ if (m1 > n1 || (m1 == n1 && m0 > n0)) {
+ q0--;
+ sub_ddmmss(m1, m0, m1, m0, d1, d0);
+ }
+
+ q1 = 0;
+
+ /* Remainder in (n1n0 - m1m0) >> bm. */
+ if (rp != 0) {
+ sub_ddmmss(n1, n0, n1, n0, m1, m0);
+ rr.s.low = (n1 << b) | (n0 >> bm);
+ rr.s.high = n1 >> bm;
+ *rp = rr.ll;
+ }
+ }
+ }
+ }
+
+ ww.s.low = q0;
+ ww.s.high = q1;
+
+ return ww.ll;
+}
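Both normalization branches shift divisor and dividend left by the divisor's leading-zero count so the divisor's top bit is set, which __udiv_qrnnd_c needs for its quotient-digit estimates to be nearly exact; the remainder is then shifted back down by the same amount. A condensed single-word sketch of the idea, reusing the file-local macros above (div_one_norm() itself is hypothetical):

/* Sketch: normalize, divide a two-word numerator by one word, then
 * de-normalize the remainder. Assumes d != 0 and n1 < d. */
static unsigned long div_one_norm(unsigned long n1, unsigned long n0,
				  unsigned long d, unsigned long *rem)
{
	unsigned long q, r, bm;

	count_leading_zeros(bm, d);
	d <<= bm;
	if (bm) {
		n1 = (n1 << bm) | (n0 >> (W_TYPE_SIZE - bm));
		n0 <<= bm;
	}
	udiv_qrnnd(q, r, n1, n0, d);	/* requires MSB of d to be set */
	*rem = r >> bm;			/* undo the scaling */
	return q;
}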
diff --git a/arch/nios2/boot/linked_dtb.S b/lib/umoddi3.c
index 071f922db338..d7bbf0f85197 100644
--- a/arch/nios2/boot/linked_dtb.S
+++ b/lib/umoddi3.c
@@ -1,6 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+
/*
- * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
- *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -12,8 +12,21 @@
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.
*/
-.section .dtb.init.rodata,"a"
-.incbin "arch/nios2/boot/system.dtb"
+
+#include <linux/module.h>
+#include <linux/libgcc.h>
+
+extern unsigned long long __udivmoddi4(unsigned long long u,
+ unsigned long long v,
+ unsigned long long *rp);
+
+unsigned long long __umoddi3(unsigned long long u, unsigned long long v)
+{
+ unsigned long long w;
+ (void)__udivmoddi4(u, v, &w);
+ return w;
+}
+EXPORT_SYMBOL(__umoddi3);
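__umoddi3 above is a thin wrapper over __udivmoddi4(): the compiler emits calls to these libgcc symbols on 32-bit targets without native 64-bit division, so the kernel must provide them when it does not link libgcc. The quotient-side counterpart would look like the following (shown as an assumption; only the modulo wrapper appears in this diff):

/* Sketch: the matching quotient wrapper (assumed lib/udivdi3.c shape). */
#include <linux/module.h>
#include <linux/libgcc.h>

extern unsigned long long __udivmoddi4(unsigned long long u,
				       unsigned long long v,
				       unsigned long long *rp);

unsigned long long __udivdi3(unsigned long long u, unsigned long long v)
{
	return __udivmoddi4(u, v, NULL);	/* discard the remainder */
}
EXPORT_SYMBOL(__udivdi3);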
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 376de10929b3..37a54a6dd594 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -613,6 +613,109 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
}
static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+/* Make pointers available for printing early in the boot sequence. */
+static int debug_boot_weak_hash __ro_after_init;
+
+static int __init debug_boot_weak_hash_enable(char *str)
+{
+ debug_boot_weak_hash = 1;
+ pr_info("debug_boot_weak_hash enabled\n");
+ return 0;
+}
+early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
+
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
+static siphash_key_t ptr_key __read_mostly;
+
+static void enable_ptr_key_workfn(struct work_struct *work)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /* Needs to run from preemptible context */
+ static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ /* This may be in an interrupt handler. */
+ queue_work(system_unbound_wq, &enable_ptr_key_work);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int key_size = sizeof(ptr_key);
+ int ret;
+
+ /* Use hw RNG if available. */
+ if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
+ static_branch_disable(&not_filled_random_ptr_key);
+ return 0;
+ }
+
+ ret = add_random_ready_callback(&random_ready);
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ /* This is in preemptible context */
+ enable_ptr_key_workfn(&enable_ptr_key_work);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
+ unsigned long hashval;
+
+ /* When debugging early boot use non-cryptographically secure hash. */
+ if (unlikely(debug_boot_weak_hash)) {
+ hashval = hash_long((unsigned long)ptr, 32);
+ return pointer_string(buf, end, (const void *)hashval, spec);
+ }
+
+ if (static_branch_unlikely(&not_filled_random_ptr_key)) {
+ spec.field_width = 2 * sizeof(ptr);
+ /* string length must be less than default_width */
+ return string(buf, end, str, spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+ return pointer_string(buf, end, (const void *)hashval, spec);
+}
+
+static noinline_for_stack
char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
const char *fmt)
{
@@ -1357,20 +1460,6 @@ char *uuid_string(char *buf, char *end, const u8 *addr,
return string(buf, end, uuid, spec);
}
-static noinline_for_stack
-char *pointer_string(char *buf, char *end, const void *ptr,
- struct printf_spec spec)
-{
- spec.base = 16;
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = 2 * sizeof(ptr);
- spec.flags |= ZEROPAD;
- }
-
- return number(buf, end, (unsigned long int)ptr, spec);
-}
-
int kptr_restrict __read_mostly;
static noinline_for_stack
@@ -1421,7 +1510,8 @@ char *restricted_pointer(char *buf, char *end, const void *ptr,
}
static noinline_for_stack
-char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
+char *netdev_bits(char *buf, char *end, const void *addr,
+ struct printf_spec spec, const char *fmt)
{
unsigned long long num;
int size;
@@ -1432,9 +1522,7 @@ char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
size = sizeof(netdev_features_t);
break;
default:
- num = (unsigned long)addr;
- size = sizeof(unsigned long);
- break;
+ return ptr_to_id(buf, end, addr, spec);
}
return special_hex_number(buf, end, num, size);
@@ -1474,7 +1562,7 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
#ifdef CONFIG_COMMON_CLK
return string(buf, end, __clk_get_name(clk), spec);
#else
- return special_hex_number(buf, end, (unsigned long)clk, sizeof(unsigned long));
+ return ptr_to_id(buf, end, clk, spec);
#endif
}
}
@@ -1596,6 +1684,7 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
fmt = "f";
for (pass = false; strspn(fmt,"fnpPFcC"); fmt++, pass = true) {
+ int precision;
if (pass) {
if (buf < end)
*buf = ':';
@@ -1607,7 +1696,11 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
buf = device_node_gen_full_name(dn, buf, end);
break;
case 'n': /* name */
- buf = string(buf, end, dn->name, str_spec);
+ p = kbasename(of_node_full_name(dn));
+ precision = str_spec.precision;
+ str_spec.precision = strchrnul(p, '@') - p;
+ buf = string(buf, end, p, str_spec);
+ str_spec.precision = precision;
break;
case 'p': /* phandle */
buf = number(buf, end, (unsigned int)dn->phandle, num_spec);
@@ -1651,94 +1744,6 @@ char *device_node_string(char *buf, char *end, struct device_node *dn,
return widen_string(buf, buf - buf_start, end, spec);
}
-/* Make pointers available for printing early in the boot sequence. */
-static int debug_boot_weak_hash __ro_after_init;
-
-static int __init debug_boot_weak_hash_enable(char *str)
-{
- debug_boot_weak_hash = 1;
- pr_info("debug_boot_weak_hash enabled\n");
- return 0;
-}
-early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-
-static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
-static siphash_key_t ptr_key __read_mostly;
-
-static void enable_ptr_key_workfn(struct work_struct *work)
-{
- get_random_bytes(&ptr_key, sizeof(ptr_key));
- /* Needs to run from preemptible context */
- static_branch_disable(&not_filled_random_ptr_key);
-}
-
-static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
-
-static void fill_random_ptr_key(struct random_ready_callback *unused)
-{
- /* This may be in an interrupt handler. */
- queue_work(system_unbound_wq, &enable_ptr_key_work);
-}
-
-static struct random_ready_callback random_ready = {
- .func = fill_random_ptr_key
-};
-
-static int __init initialize_ptr_random(void)
-{
- int key_size = sizeof(ptr_key);
- int ret;
-
- /* Use hw RNG if available. */
- if (get_random_bytes_arch(&ptr_key, key_size) == key_size) {
- static_branch_disable(&not_filled_random_ptr_key);
- return 0;
- }
-
- ret = add_random_ready_callback(&random_ready);
- if (!ret) {
- return 0;
- } else if (ret == -EALREADY) {
- /* This is in preemptible context */
- enable_ptr_key_workfn(&enable_ptr_key_work);
- return 0;
- }
-
- return ret;
-}
-early_initcall(initialize_ptr_random);
-
-/* Maps a pointer to a 32 bit unique identifier. */
-static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
-{
- const char *str = sizeof(ptr) == 8 ? "(____ptrval____)" : "(ptrval)";
- unsigned long hashval;
-
- /* When debugging early boot use non-cryptographically secure hash. */
- if (unlikely(debug_boot_weak_hash)) {
- hashval = hash_long((unsigned long)ptr, 32);
- return pointer_string(buf, end, (const void *)hashval, spec);
- }
-
- if (static_branch_unlikely(&not_filled_random_ptr_key)) {
- spec.field_width = 2 * sizeof(ptr);
- /* string length must be less than default_width */
- return string(buf, end, str, spec);
- }
-
-#ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
- /*
- * Mask off the first 32 bits, this makes explicit that we have
- * modified the address (and 32 bits is plenty for a unique ID).
- */
- hashval = hashval & 0xffffffff;
-#else
- hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
-#endif
- return pointer_string(buf, end, (const void *)hashval, spec);
-}
-
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@@ -1942,7 +1947,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
break;
return restricted_pointer(buf, end, ptr, spec);
case 'N':
- return netdev_bits(buf, end, ptr, fmt);
+ return netdev_bits(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, fmt);
case 'd':
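Beyond moving pointer_string() and ptr_to_id() above their new callers, the functional change is that the fallback cases in netdev_bits() and clock() now print the hashed per-boot ID instead of the raw address, closing two %p-style leaks. A usage-level sketch of what callers observe (the printed values are purely illustrative):

/* Sketch: the fallback paths now behave like plain %p (hashed),
 * not like %px. */
#include <linux/printk.h>
#include <linux/clk.h>

static void demo(struct clk *clk)
{
	pr_info("clk %pC\n", clk);	/* no COMMON_CLK: e.g. "clk 000000006acbd871" */
	pr_info("raw %px\n", clk);	/* explicit opt-out: the real address */
}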
diff --git a/mm/util.c b/mm/util.c
index 9e3ebd2ef65f..470f5cd80b64 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -15,17 +15,10 @@
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
-#include <asm/sections.h>
#include <linux/uaccess.h>
#include "internal.h"
-static inline int is_kernel_rodata(unsigned long addr)
-{
- return addr >= (unsigned long)__start_rodata &&
- addr < (unsigned long)__end_rodata;
-}
-
/**
* kfree_const - conditionally free memory
* @x: pointer to the memory
diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c
index 00deacdcb51c..cfd83c5521ae 100644
--- a/net/bluetooth/bnep/sock.c
+++ b/net/bluetooth/bnep/sock.c
@@ -49,18 +49,17 @@ static int bnep_sock_release(struct socket *sock)
return 0;
}
-static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_bnep_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
struct bnep_connlist_req cl;
struct bnep_connadd_req ca;
struct bnep_conndel_req cd;
struct bnep_conninfo ci;
struct socket *nsock;
- void __user *argp = (void __user *)arg;
__u32 supp_feat = BIT(BNEP_SETUP_RESPONSE);
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case BNEPCONNADD:
@@ -134,16 +133,22 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return 0;
}
+static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_bnep_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
if (cmd == BNEPGETCONNLIST) {
struct bnep_connlist_req cl;
+ unsigned __user *p = argp;
u32 uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -153,13 +158,13 @@ static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = bnep_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
}
- return bnep_sock_ioctl(sock, cmd, arg);
+ return do_bnep_sock_ioctl(sock, cmd, argp);
}
#endif
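The pattern here, repeated for cmtp and hidp below, splits the ioctl body into a helper taking void __user *, so the native path passes the cast arg and the compat path passes compat_ptr(arg) instead of reinterpreting a raw unsigned long. A condensed sketch (the foo_* names are hypothetical):

#include <linux/compat.h>
#include <linux/net.h>

static int do_foo_ioctl(struct socket *sock, unsigned int cmd,
			void __user *argp)
{
	/* all user copies go through argp, so both ABIs share this body */
	return 0;
}

static int foo_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	return do_foo_ioctl(sock, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static int foo_compat_ioctl(struct socket *sock, unsigned int cmd,
			    unsigned long arg)
{
	return do_foo_ioctl(sock, cmd, compat_ptr(arg));
}
#endif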
diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c
index e08f28fadd65..defdd4871919 100644
--- a/net/bluetooth/cmtp/sock.c
+++ b/net/bluetooth/cmtp/sock.c
@@ -63,17 +63,16 @@ static int cmtp_sock_release(struct socket *sock)
return 0;
}
-static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
struct cmtp_connadd_req ca;
struct cmtp_conndel_req cd;
struct cmtp_connlist_req cl;
struct cmtp_conninfo ci;
struct socket *nsock;
- void __user *argp = (void __user *)arg;
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case CMTPCONNADD:
@@ -137,16 +136,22 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return -EINVAL;
}
+static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_cmtp_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
if (cmd == CMTPGETCONNLIST) {
struct cmtp_connlist_req cl;
+ u32 __user *p = argp;
u32 uci;
int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -156,13 +161,13 @@ static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = cmtp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
}
- return cmtp_sock_ioctl(sock, cmd, arg);
+ return do_cmtp_sock_ioctl(sock, cmd, argp);
}
#endif
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 3734dc1788b4..a442e21f3894 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -649,7 +649,7 @@ static void hidp_process_transmit(struct hidp_session *session,
}
static int hidp_setup_input(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
struct input_dev *input;
int i;
@@ -748,7 +748,7 @@ EXPORT_SYMBOL_GPL(hidp_hid_driver);
/* This function sets up the hid device. It does not add it
to the HID system. That is done in hidp_add_connection(). */
static int hidp_setup_hid(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
struct hid_device *hid;
int err;
@@ -807,7 +807,7 @@ fault:
/* initialize session devices */
static int hidp_session_dev_init(struct hidp_session *session,
- struct hidp_connadd_req *req)
+ const struct hidp_connadd_req *req)
{
int ret;
@@ -906,7 +906,7 @@ static void hidp_session_dev_work(struct work_struct *work)
static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr,
struct socket *ctrl_sock,
struct socket *intr_sock,
- struct hidp_connadd_req *req,
+ const struct hidp_connadd_req *req,
struct l2cap_conn *conn)
{
struct hidp_session *session;
@@ -1338,7 +1338,7 @@ static int hidp_verify_sockets(struct socket *ctrl_sock,
return 0;
}
-int hidp_connection_add(struct hidp_connadd_req *req,
+int hidp_connection_add(const struct hidp_connadd_req *req,
struct socket *ctrl_sock,
struct socket *intr_sock)
{
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index 8798492a6e99..6ef88d0a1919 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -122,7 +122,7 @@ struct hidp_connlist_req {
struct hidp_conninfo __user *ci;
};
-int hidp_connection_add(struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
+int hidp_connection_add(const struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock);
int hidp_connection_del(struct hidp_conndel_req *req);
int hidp_get_connlist(struct hidp_connlist_req *req);
int hidp_get_conninfo(struct hidp_conninfo *ci);
diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c
index 1eaac01f85de..9f85a1943be9 100644
--- a/net/bluetooth/hidp/sock.c
+++ b/net/bluetooth/hidp/sock.c
@@ -46,9 +46,8 @@ static int hidp_sock_release(struct socket *sock)
return 0;
}
-static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+static int do_hidp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp)
{
- void __user *argp = (void __user *) arg;
struct hidp_connadd_req ca;
struct hidp_conndel_req cd;
struct hidp_connlist_req cl;
@@ -57,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
struct socket *isock;
int err;
- BT_DBG("cmd %x arg %lx", cmd, arg);
+ BT_DBG("cmd %x arg %p", cmd, argp);
switch (cmd) {
case HIDPCONNADD:
@@ -122,6 +121,11 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long
return -EINVAL;
}
+static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return do_hidp_sock_ioctl(sock, cmd, (void __user *)arg);
+}
+
#ifdef CONFIG_COMPAT
struct compat_hidp_connadd_req {
int ctrl_sock; /* Connected control socket */
@@ -141,13 +145,15 @@ struct compat_hidp_connadd_req {
static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
+ void __user *argp = compat_ptr(arg);
+ int err;
+
if (cmd == HIDPGETCONNLIST) {
struct hidp_connlist_req cl;
+ u32 __user *p = argp;
u32 uci;
- int err;
- if (get_user(cl.cnum, (u32 __user *) arg) ||
- get_user(uci, (u32 __user *) (arg + 4)))
+ if (get_user(cl.cnum, p) || get_user(uci, p + 1))
return -EFAULT;
cl.ci = compat_ptr(uci);
@@ -157,39 +163,54 @@ static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigne
err = hidp_get_connlist(&cl);
- if (!err && put_user(cl.cnum, (u32 __user *) arg))
+ if (!err && put_user(cl.cnum, p))
err = -EFAULT;
return err;
} else if (cmd == HIDPCONNADD) {
- struct compat_hidp_connadd_req ca;
- struct hidp_connadd_req __user *uca;
+ struct compat_hidp_connadd_req ca32;
+ struct hidp_connadd_req ca;
+ struct socket *csock;
+ struct socket *isock;
- uca = compat_alloc_user_space(sizeof(*uca));
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
- if (copy_from_user(&ca, (void __user *) arg, sizeof(ca)))
+ if (copy_from_user(&ca32, (void __user *) arg, sizeof(ca32)))
return -EFAULT;
- if (put_user(ca.ctrl_sock, &uca->ctrl_sock) ||
- put_user(ca.intr_sock, &uca->intr_sock) ||
- put_user(ca.parser, &uca->parser) ||
- put_user(ca.rd_size, &uca->rd_size) ||
- put_user(compat_ptr(ca.rd_data), &uca->rd_data) ||
- put_user(ca.country, &uca->country) ||
- put_user(ca.subclass, &uca->subclass) ||
- put_user(ca.vendor, &uca->vendor) ||
- put_user(ca.product, &uca->product) ||
- put_user(ca.version, &uca->version) ||
- put_user(ca.flags, &uca->flags) ||
- put_user(ca.idle_to, &uca->idle_to) ||
- copy_to_user(&uca->name[0], &ca.name[0], 128))
- return -EFAULT;
+ ca.ctrl_sock = ca32.ctrl_sock;
+ ca.intr_sock = ca32.intr_sock;
+ ca.parser = ca32.parser;
+ ca.rd_size = ca32.rd_size;
+ ca.rd_data = compat_ptr(ca32.rd_data);
+ ca.country = ca32.country;
+ ca.subclass = ca32.subclass;
+ ca.vendor = ca32.vendor;
+ ca.product = ca32.product;
+ ca.version = ca32.version;
+ ca.flags = ca32.flags;
+ ca.idle_to = ca32.idle_to;
+ memcpy(ca.name, ca32.name, 128);
+
+ csock = sockfd_lookup(ca.ctrl_sock, &err);
+ if (!csock)
+ return err;
- arg = (unsigned long) uca;
+ isock = sockfd_lookup(ca.intr_sock, &err);
+ if (!isock) {
+ sockfd_put(csock);
+ return err;
+ }
- /* Fall through. We don't actually write back any _changes_
- to the structure anyway, so there's no need to copy back
- into the original compat version */
+ err = hidp_connection_add(&ca, csock, isock);
+ if (!err && copy_to_user(argp, &ca32, sizeof(ca32)))
+ err = -EFAULT;
+
+ sockfd_put(csock);
+ sockfd_put(isock);
+
+ return err;
}
return hidp_sock_ioctl(sock, cmd, arg);
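For HIDPCONNADD the compat path no longer round-trips through compat_alloc_user_space(); it converts the 32-bit layout to the native struct in kernel memory and calls hidp_connection_add() directly, which is why that function and its callees now take a const request. The general shape (foo_* names and structs are hypothetical):

/* Sketch: compat request handled by in-kernel struct conversion,
 * replacing the old compat_alloc_user_space() round trip. */
#include <linux/compat.h>
#include <linux/uaccess.h>

struct foo_req32 { u32 size; u32 data_ptr; };		/* 32-bit user ABI */
struct foo_req { u32 size; void __user *data; };	/* native form */

static int foo_add(const struct foo_req *req);		/* native handler */

static int foo_compat_add(void __user *argp)
{
	struct foo_req32 r32;
	struct foo_req r;

	if (copy_from_user(&r32, argp, sizeof(r32)))
		return -EFAULT;
	r.size = r32.size;
	r.data = compat_ptr(r32.data_ptr);	/* widen the user pointer */
	return foo_add(&r);	/* no second trip through user memory */
}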
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 02172c408ff2..5d6724cee38f 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -46,9 +46,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
goto fail;
}
- /* crypto_alloc_skcipher() allocates with GFP_KERNEL */
+ /* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
- key->tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+ key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(key->tfm)) {
ret = PTR_ERR(key->tfm);
@@ -56,7 +56,7 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
goto fail;
}
- ret = crypto_skcipher_setkey(key->tfm, key->key, key->len);
+ ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
if (ret)
goto fail;
@@ -136,7 +136,7 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
if (key) {
kfree(key->key);
key->key = NULL;
- crypto_free_skcipher(key->tfm);
+ crypto_free_sync_skcipher(key->tfm);
key->tfm = NULL;
}
}
@@ -216,7 +216,7 @@ static void teardown_sgtable(struct sg_table *sgt)
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
char iv[AES_BLOCK_SIZE] __aligned(8);
@@ -232,7 +232,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
return ret;
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
- skcipher_request_set_tfm(req, key->tfm);
+ skcipher_request_set_sync_tfm(req, key->tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
diff --git a/net/ceph/crypto.h b/net/ceph/crypto.h
index bb45c7d43739..96ef4d860bc9 100644
--- a/net/ceph/crypto.h
+++ b/net/ceph/crypto.h
@@ -13,7 +13,7 @@ struct ceph_crypto_key {
struct ceph_timespec created;
int len;
void *key;
- struct crypto_skcipher *tfm;
+ struct crypto_sync_skcipher *tfm;
};
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
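This ceph conversion is the template the mac802154 and rxrpc hunks below repeat: a crypto_sync_skcipher guarantees a synchronous implementation, so the on-stack request is safe and masking with CRYPTO_ALG_ASYNC at allocation time is no longer needed. A condensed sketch of the converted call pattern (encrypt_buf() is hypothetical):

#include <crypto/skcipher.h>

static int encrypt_buf(struct crypto_sync_skcipher *tfm,
		       struct scatterlist *sg, unsigned int len, u8 *iv)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_sync_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, sg, sg, len, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);	/* wipe keys/IV from the stack */
	return err;
}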
diff --git a/net/compat.c b/net/compat.c
index 3b2105f6549d..47a614b370cd 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -812,21 +812,21 @@ COMPAT_SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, buf, compat_size_t, len
static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
- struct compat_timespec __user *timeout)
+ struct old_timespec32 __user *timeout)
{
int datagrams;
- struct timespec ktspec;
+ struct timespec64 ktspec;
if (timeout == NULL)
return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, NULL);
- if (compat_get_timespec(&ktspec, timeout))
+ if (compat_get_timespec64(&ktspec, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
flags | MSG_CMSG_COMPAT, &ktspec);
- if (datagrams > 0 && compat_put_timespec(&ktspec, timeout))
+ if (datagrams > 0 && compat_put_timespec64(&ktspec, timeout))
datagrams = -EFAULT;
return datagrams;
@@ -834,7 +834,7 @@ static int __compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
- struct compat_timespec __user *, timeout)
+ struct old_timespec32 __user *, timeout)
{
return __compat_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
}
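old_timespec32 is the ABI-neutral rename of compat_timespec, and the conversion to timespec64 now happens once at the syscall boundary so __sys_recvmmsg() never handles a y2038-unsafe type. A sketch of that boundary step (fetch_user_timeout() is hypothetical):

/* Sketch: read an optional 32-bit user timespec into a timespec64. */
#include <linux/compat.h>
#include <linux/time64.h>

static int fetch_user_timeout(struct old_timespec32 __user *uts,
			      struct timespec64 *ts, bool *have_timeout)
{
	*have_timeout = uts != NULL;
	if (!uts)
		return 0;
	return compat_get_timespec64(ts, uts) ? -EFAULT : 0;
}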
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 5e4f04004a49..7bf833598615 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -106,6 +106,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
iterate_fd(p->files, 0, update_classid_sock,
(void *)(unsigned long)cs->classid);
task_unlock(p);
+ cond_resched();
}
css_task_iter_end(&it);
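write_classid() can iterate every task attached to a cgroup, so the added cond_resched() bounds scheduling latency on non-preemptible kernels; the iteration context here already allows sleeping. The general shape for long sleepable walks (struct item and process_item() are hypothetical):

#include <linux/list.h>
#include <linux/sched.h>

struct item { struct list_head node; };

static void process_item(struct item *it);

static void walk_all(struct list_head *item_list)
{
	struct item *it;

	list_for_each_entry(it, item_list, node) {
		process_item(it);	/* possibly expensive per entry */
		cond_resched();		/* avoid soft lockups on long lists */
	}
}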
diff --git a/net/mac802154/llsec.c b/net/mac802154/llsec.c
index 2fb703d70803..7e29f88dbf6a 100644
--- a/net/mac802154/llsec.c
+++ b/net/mac802154/llsec.c
@@ -146,18 +146,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
goto err_tfm;
}
- key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+ key->tfm0 = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
if (IS_ERR(key->tfm0))
goto err_tfm;
- if (crypto_skcipher_setkey(key->tfm0, template->key,
+ if (crypto_sync_skcipher_setkey(key->tfm0, template->key,
IEEE802154_LLSEC_KEY_SIZE))
goto err_tfm0;
return key;
err_tfm0:
- crypto_free_skcipher(key->tfm0);
+ crypto_free_sync_skcipher(key->tfm0);
err_tfm:
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
if (key->tfm[i])
@@ -177,7 +177,7 @@ static void llsec_key_release(struct kref *ref)
for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
crypto_free_aead(key->tfm[i]);
- crypto_free_skcipher(key->tfm0);
+ crypto_free_sync_skcipher(key->tfm0);
kzfree(key);
}
@@ -622,7 +622,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
{
u8 iv[16];
struct scatterlist src;
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err, datalen;
unsigned char *data;
@@ -632,7 +632,7 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
datalen = skb_tail_pointer(skb) - data;
sg_init_one(&src, data, datalen);
- skcipher_request_set_tfm(req, key->tfm0);
+ skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
err = crypto_skcipher_encrypt(req);
@@ -840,7 +840,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
unsigned char *data;
int datalen;
struct scatterlist src;
- SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
int err;
llsec_geniv(iv, dev_addr, &hdr->sec);
@@ -849,7 +849,7 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
sg_init_one(&src, data, datalen);
- skcipher_request_set_tfm(req, key->tfm0);
+ skcipher_request_set_sync_tfm(req, key->tfm0);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &src, &src, datalen, iv);
diff --git a/net/mac802154/llsec.h b/net/mac802154/llsec.h
index 6f3b658e3279..8be46d74dc39 100644
--- a/net/mac802154/llsec.h
+++ b/net/mac802154/llsec.h
@@ -29,7 +29,7 @@ struct mac802154_llsec_key {
/* one tfm for each authsize (4/8/16) */
struct crypto_aead *tfm[3];
- struct crypto_skcipher *tfm0;
+ struct crypto_sync_skcipher *tfm0;
struct kref ref;
};
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 0a7c49e8e053..382196e57a26 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -435,7 +435,7 @@ struct rxrpc_connection {
struct sk_buff_head rx_queue; /* received conn-level packets */
const struct rxrpc_security *security; /* applied security module */
struct key *server_key; /* security for this service */
- struct crypto_skcipher *cipher; /* encryption handle */
+ struct crypto_sync_skcipher *cipher; /* encryption handle */
struct rxrpc_crypt csum_iv; /* packet checksum base */
unsigned long flags;
unsigned long events;
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index cea16838d588..cbef9ea43dec 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -46,7 +46,7 @@ struct rxkad_level2_hdr {
* alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
* packets
*/
-static struct crypto_skcipher *rxkad_ci;
+static struct crypto_sync_skcipher *rxkad_ci;
static DEFINE_MUTEX(rxkad_ci_mutex);
/*
@@ -54,7 +54,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
*/
static int rxkad_init_connection_security(struct rxrpc_connection *conn)
{
- struct crypto_skcipher *ci;
+ struct crypto_sync_skcipher *ci;
struct rxrpc_key_token *token;
int ret;
@@ -63,14 +63,14 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
token = conn->params.key->payload.data[0];
conn->security_ix = token->security_index;
- ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+ ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
if (IS_ERR(ci)) {
_debug("no cipher");
ret = PTR_ERR(ci);
goto error;
}
- if (crypto_skcipher_setkey(ci, token->kad->session_key,
+ if (crypto_sync_skcipher_setkey(ci, token->kad->session_key,
sizeof(token->kad->session_key)) < 0)
BUG();
@@ -104,7 +104,7 @@ error:
static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
{
struct rxrpc_key_token *token;
- SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
struct scatterlist sg;
struct rxrpc_crypt iv;
__be32 *tmpbuf;
@@ -128,7 +128,7 @@ static int rxkad_prime_packet_security(struct rxrpc_connection *conn)
tmpbuf[3] = htonl(conn->security_ix);
sg_init_one(&sg, tmpbuf, tmpsize);
- skcipher_request_set_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, tmpsize, iv.x);
crypto_skcipher_encrypt(req);
@@ -167,7 +167,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
memset(&iv, 0, sizeof(iv));
sg_init_one(&sg, sechdr, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -212,7 +212,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
memcpy(&iv, token->kad->session_key, sizeof(iv));
sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg[0], &sg[0], sizeof(rxkhdr), iv.x);
crypto_skcipher_encrypt(req);
@@ -250,7 +250,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
void *sechdr)
{
struct rxrpc_skb_priv *sp;
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg;
u32 x, y;
@@ -279,7 +279,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -352,7 +352,7 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
crypto_skcipher_decrypt(req);
@@ -450,7 +450,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
token = call->conn->params.key->payload.data[0];
memcpy(&iv, token->kad->session_key, sizeof(iv));
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, iv.x);
crypto_skcipher_decrypt(req);
@@ -506,7 +506,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
unsigned int offset, unsigned int len,
rxrpc_seq_t seq, u16 expected_cksum)
{
- SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg;
bool aborted;
@@ -529,7 +529,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
call->crypto_buf[1] = htonl(x);
sg_init_one(&sg, call->crypto_buf, 8);
- skcipher_request_set_tfm(req, call->conn->cipher);
+ skcipher_request_set_sync_tfm(req, call->conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, &sg, &sg, 8, iv.x);
crypto_skcipher_encrypt(req);
@@ -755,7 +755,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
struct rxkad_response *resp,
const struct rxkad_key *s2)
{
- SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
struct rxrpc_crypt iv;
struct scatterlist sg[1];
@@ -764,7 +764,7 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
sg_init_table(sg, 1);
sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
- skcipher_request_set_tfm(req, conn->cipher);
+ skcipher_request_set_sync_tfm(req, conn->cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
crypto_skcipher_encrypt(req);
@@ -1021,7 +1021,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
struct rxkad_response *resp,
const struct rxrpc_crypt *session_key)
{
- SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
struct scatterlist sg[1];
struct rxrpc_crypt iv;
@@ -1031,7 +1031,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
ASSERT(rxkad_ci != NULL);
mutex_lock(&rxkad_ci_mutex);
- if (crypto_skcipher_setkey(rxkad_ci, session_key->x,
+ if (crypto_sync_skcipher_setkey(rxkad_ci, session_key->x,
sizeof(*session_key)) < 0)
BUG();
@@ -1039,7 +1039,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
sg_init_table(sg, 1);
sg_set_buf(sg, &resp->encrypted, sizeof(resp->encrypted));
- skcipher_request_set_tfm(req, rxkad_ci);
+ skcipher_request_set_sync_tfm(req, rxkad_ci);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
crypto_skcipher_decrypt(req);
@@ -1218,7 +1218,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
_enter("");
if (conn->cipher)
- crypto_free_skcipher(conn->cipher);
+ crypto_free_sync_skcipher(conn->cipher);
}
/*
@@ -1228,7 +1228,7 @@ static int rxkad_init(void)
{
/* pin the cipher we need so that the crypto layer doesn't invoke
* keventd to go get it */
- rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+ rxkad_ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
return PTR_ERR_OR_ZERO(rxkad_ci);
}
@@ -1238,7 +1238,7 @@ static int rxkad_init(void)
static void rxkad_exit(void)
{
if (rxkad_ci)
- crypto_free_skcipher(rxkad_ci);
+ crypto_free_sync_skcipher(rxkad_ci);
}
/*
diff --git a/net/socket.c b/net/socket.c
index b68801c7d0ab..99c96851469f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2342,7 +2342,7 @@ SYSCALL_DEFINE3(recvmsg, int, fd, struct user_msghdr __user *, msg,
*/
int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
- unsigned int flags, struct timespec *timeout)
+ unsigned int flags, struct timespec64 *timeout)
{
int fput_needed, err, datagrams;
struct socket *sock;
@@ -2407,8 +2407,7 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
if (timeout) {
ktime_get_ts64(&timeout64);
- *timeout = timespec64_to_timespec(
- timespec64_sub(end_time, timeout64));
+ *timeout = timespec64_sub(end_time, timeout64);
if (timeout->tv_sec < 0) {
timeout->tv_sec = timeout->tv_nsec = 0;
break;
@@ -2454,10 +2453,10 @@ out_put:
static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
- struct timespec __user *timeout)
+ struct __kernel_timespec __user *timeout)
{
int datagrams;
- struct timespec timeout_sys;
+ struct timespec64 timeout_sys;
if (flags & MSG_CMSG_COMPAT)
return -EINVAL;
@@ -2465,13 +2464,12 @@ static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
if (!timeout)
return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
- if (copy_from_user(&timeout_sys, timeout, sizeof(timeout_sys)))
+ if (get_timespec64(&timeout_sys, timeout))
return -EFAULT;
datagrams = __sys_recvmmsg(fd, mmsg, vlen, flags, &timeout_sys);
- if (datagrams > 0 &&
- copy_to_user(timeout, &timeout_sys, sizeof(timeout_sys)))
+ if (datagrams > 0 && put_timespec64(&timeout_sys, timeout))
datagrams = -EFAULT;
return datagrams;
@@ -2479,7 +2477,7 @@ static int do_sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg,
unsigned int, vlen, unsigned int, flags,
- struct timespec __user *, timeout)
+ struct __kernel_timespec __user *, timeout)
{
return do_sys_recvmmsg(fd, mmsg, vlen, flags, timeout);
}
@@ -2603,7 +2601,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
break;
case SYS_RECVMMSG:
err = do_sys_recvmmsg(a0, (struct mmsghdr __user *)a1, a[2],
- a[3], (struct timespec __user *)a[4]);
+ a[3], (struct __kernel_timespec __user *)a[4]);
break;
case SYS_ACCEPT4:
err = __sys_accept4(a0, (struct sockaddr __user *)a1,
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 305ecea92170..ad8ead738981 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -30,10 +30,9 @@ struct rpc_cred_cache {
static unsigned int auth_hashbits = RPC_CREDCACHE_DEFAULT_HASHBITS;
-static DEFINE_SPINLOCK(rpc_authflavor_lock);
-static const struct rpc_authops *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
- &authnull_ops, /* AUTH_NULL */
- &authunix_ops, /* AUTH_UNIX */
+static const struct rpc_authops __rcu *auth_flavors[RPC_AUTH_MAXFLAVOR] = {
+ [RPC_AUTH_NULL] = (const struct rpc_authops __force __rcu *)&authnull_ops,
+ [RPC_AUTH_UNIX] = (const struct rpc_authops __force __rcu *)&authunix_ops,
NULL, /* others can be loadable modules */
};
@@ -93,39 +92,65 @@ pseudoflavor_to_flavor(u32 flavor) {
int
rpcauth_register(const struct rpc_authops *ops)
{
+ const struct rpc_authops *old;
rpc_authflavor_t flavor;
- int ret = -EPERM;
if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
return -EINVAL;
- spin_lock(&rpc_authflavor_lock);
- if (auth_flavors[flavor] == NULL) {
- auth_flavors[flavor] = ops;
- ret = 0;
- }
- spin_unlock(&rpc_authflavor_lock);
- return ret;
+ old = cmpxchg((const struct rpc_authops ** __force)&auth_flavors[flavor], NULL, ops);
+ if (old == NULL || old == ops)
+ return 0;
+ return -EPERM;
}
EXPORT_SYMBOL_GPL(rpcauth_register);
int
rpcauth_unregister(const struct rpc_authops *ops)
{
+ const struct rpc_authops *old;
rpc_authflavor_t flavor;
- int ret = -EPERM;
if ((flavor = ops->au_flavor) >= RPC_AUTH_MAXFLAVOR)
return -EINVAL;
- spin_lock(&rpc_authflavor_lock);
- if (auth_flavors[flavor] == ops) {
- auth_flavors[flavor] = NULL;
- ret = 0;
- }
- spin_unlock(&rpc_authflavor_lock);
- return ret;
+
+ old = cmpxchg((const struct rpc_authops ** __force)&auth_flavors[flavor], ops, NULL);
+ if (old == ops || old == NULL)
+ return 0;
+ return -EPERM;
}
EXPORT_SYMBOL_GPL(rpcauth_unregister);
+static const struct rpc_authops *
+rpcauth_get_authops(rpc_authflavor_t flavor)
+{
+ const struct rpc_authops *ops;
+
+ if (flavor >= RPC_AUTH_MAXFLAVOR)
+ return NULL;
+
+ rcu_read_lock();
+ ops = rcu_dereference(auth_flavors[flavor]);
+ if (ops == NULL) {
+ rcu_read_unlock();
+ request_module("rpc-auth-%u", flavor);
+ rcu_read_lock();
+ ops = rcu_dereference(auth_flavors[flavor]);
+ if (ops == NULL)
+ goto out;
+ }
+ if (!try_module_get(ops->owner))
+ ops = NULL;
+out:
+ rcu_read_unlock();
+ return ops;
+}
+
+static void
+rpcauth_put_authops(const struct rpc_authops *ops)
+{
+ module_put(ops->owner);
+}
+
/**
* rpcauth_get_pseudoflavor - check if security flavor is supported
* @flavor: a security flavor
@@ -138,25 +163,16 @@ EXPORT_SYMBOL_GPL(rpcauth_unregister);
rpc_authflavor_t
rpcauth_get_pseudoflavor(rpc_authflavor_t flavor, struct rpcsec_gss_info *info)
{
- const struct rpc_authops *ops;
+ const struct rpc_authops *ops = rpcauth_get_authops(flavor);
rpc_authflavor_t pseudoflavor;
- ops = auth_flavors[flavor];
- if (ops == NULL)
- request_module("rpc-auth-%u", flavor);
- spin_lock(&rpc_authflavor_lock);
- ops = auth_flavors[flavor];
- if (ops == NULL || !try_module_get(ops->owner)) {
- spin_unlock(&rpc_authflavor_lock);
+ if (!ops)
return RPC_AUTH_MAXFLAVOR;
- }
- spin_unlock(&rpc_authflavor_lock);
-
pseudoflavor = flavor;
if (ops->info2flavor != NULL)
pseudoflavor = ops->info2flavor(info);
- module_put(ops->owner);
+ rpcauth_put_authops(ops);
return pseudoflavor;
}
EXPORT_SYMBOL_GPL(rpcauth_get_pseudoflavor);
@@ -176,25 +192,15 @@ rpcauth_get_gssinfo(rpc_authflavor_t pseudoflavor, struct rpcsec_gss_info *info)
const struct rpc_authops *ops;
int result;
- if (flavor >= RPC_AUTH_MAXFLAVOR)
- return -EINVAL;
-
- ops = auth_flavors[flavor];
+ ops = rpcauth_get_authops(flavor);
if (ops == NULL)
- request_module("rpc-auth-%u", flavor);
- spin_lock(&rpc_authflavor_lock);
- ops = auth_flavors[flavor];
- if (ops == NULL || !try_module_get(ops->owner)) {
- spin_unlock(&rpc_authflavor_lock);
return -ENOENT;
- }
- spin_unlock(&rpc_authflavor_lock);
result = -ENOENT;
if (ops->flavor2info != NULL)
result = ops->flavor2info(pseudoflavor, info);
- module_put(ops->owner);
+ rpcauth_put_authops(ops);
return result;
}
EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
@@ -212,15 +218,13 @@ EXPORT_SYMBOL_GPL(rpcauth_get_gssinfo);
int
rpcauth_list_flavors(rpc_authflavor_t *array, int size)
{
- rpc_authflavor_t flavor;
- int result = 0;
+ const struct rpc_authops *ops;
+ rpc_authflavor_t flavor, pseudos[4];
+ int i, len, result = 0;
- spin_lock(&rpc_authflavor_lock);
+ rcu_read_lock();
for (flavor = 0; flavor < RPC_AUTH_MAXFLAVOR; flavor++) {
- const struct rpc_authops *ops = auth_flavors[flavor];
- rpc_authflavor_t pseudos[4];
- int i, len;
-
+ ops = rcu_dereference(auth_flavors[flavor]);
if (result >= size) {
result = -ENOMEM;
break;
@@ -245,7 +249,7 @@ rpcauth_list_flavors(rpc_authflavor_t *array, int size)
array[result++] = pseudos[i];
}
}
- spin_unlock(&rpc_authflavor_lock);
+ rcu_read_unlock();
dprintk("RPC: %s returns %d\n", __func__, result);
return result;
@@ -255,25 +259,17 @@ EXPORT_SYMBOL_GPL(rpcauth_list_flavors);
struct rpc_auth *
rpcauth_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
- struct rpc_auth *auth;
+ struct rpc_auth *auth = ERR_PTR(-EINVAL);
const struct rpc_authops *ops;
- u32 flavor = pseudoflavor_to_flavor(args->pseudoflavor);
+ u32 flavor = pseudoflavor_to_flavor(args->pseudoflavor);
- auth = ERR_PTR(-EINVAL);
- if (flavor >= RPC_AUTH_MAXFLAVOR)
+ ops = rpcauth_get_authops(flavor);
+ if (ops == NULL)
goto out;
- if ((ops = auth_flavors[flavor]) == NULL)
- request_module("rpc-auth-%u", flavor);
- spin_lock(&rpc_authflavor_lock);
- ops = auth_flavors[flavor];
- if (ops == NULL || !try_module_get(ops->owner)) {
- spin_unlock(&rpc_authflavor_lock);
- goto out;
- }
- spin_unlock(&rpc_authflavor_lock);
auth = ops->create(args, clnt);
- module_put(ops->owner);
+
+ rpcauth_put_authops(ops);
if (IS_ERR(auth))
return auth;
if (clnt->cl_auth)
@@ -288,32 +284,37 @@ EXPORT_SYMBOL_GPL(rpcauth_create);
void
rpcauth_release(struct rpc_auth *auth)
{
- if (!atomic_dec_and_test(&auth->au_count))
+ if (!refcount_dec_and_test(&auth->au_count))
return;
auth->au_ops->destroy(auth);
}
static DEFINE_SPINLOCK(rpc_credcache_lock);
-static void
+/*
+ * On success, the caller is responsible for freeing the reference
+ * held by the hashtable
+ */
+static bool
rpcauth_unhash_cred_locked(struct rpc_cred *cred)
{
+ if (!test_and_clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
+ return false;
hlist_del_rcu(&cred->cr_hash);
- smp_mb__before_atomic();
- clear_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
+ return true;
}
-static int
+static bool
rpcauth_unhash_cred(struct rpc_cred *cred)
{
spinlock_t *cache_lock;
- int ret;
+ bool ret;
+ if (!test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
+ return false;
cache_lock = &cred->cr_auth->au_credcache->lock;
spin_lock(cache_lock);
- ret = atomic_read(&cred->cr_count) == 0;
- if (ret)
- rpcauth_unhash_cred_locked(cred);
+ ret = rpcauth_unhash_cred_locked(cred);
spin_unlock(cache_lock);
return ret;
}
@@ -392,6 +393,44 @@ void rpcauth_destroy_credlist(struct list_head *head)
}
}
+static void
+rpcauth_lru_add_locked(struct rpc_cred *cred)
+{
+ if (!list_empty(&cred->cr_lru))
+ return;
+ number_cred_unused++;
+ list_add_tail(&cred->cr_lru, &cred_unused);
+}
+
+static void
+rpcauth_lru_add(struct rpc_cred *cred)
+{
+ if (!list_empty(&cred->cr_lru))
+ return;
+ spin_lock(&rpc_credcache_lock);
+ rpcauth_lru_add_locked(cred);
+ spin_unlock(&rpc_credcache_lock);
+}
+
+static void
+rpcauth_lru_remove_locked(struct rpc_cred *cred)
+{
+ if (list_empty(&cred->cr_lru))
+ return;
+ number_cred_unused--;
+ list_del_init(&cred->cr_lru);
+}
+
+static void
+rpcauth_lru_remove(struct rpc_cred *cred)
+{
+ if (list_empty(&cred->cr_lru))
+ return;
+ spin_lock(&rpc_credcache_lock);
+ rpcauth_lru_remove_locked(cred);
+ spin_unlock(&rpc_credcache_lock);
+}
+
/*
* Clear the RPC credential cache, and delete those credentials
* that are not referenced.
@@ -411,13 +450,10 @@ rpcauth_clear_credcache(struct rpc_cred_cache *cache)
head = &cache->hashtable[i];
while (!hlist_empty(head)) {
cred = hlist_entry(head->first, struct rpc_cred, cr_hash);
- get_rpccred(cred);
- if (!list_empty(&cred->cr_lru)) {
- list_del(&cred->cr_lru);
- number_cred_unused--;
- }
- list_add_tail(&cred->cr_lru, &free);
rpcauth_unhash_cred_locked(cred);
+ /* Note: We now hold a reference to cred */
+ rpcauth_lru_remove_locked(cred);
+ list_add_tail(&cred->cr_lru, &free);
}
}
spin_unlock(&cache->lock);
@@ -451,7 +487,6 @@ EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
static long
rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
{
- spinlock_t *cache_lock;
struct rpc_cred *cred, *next;
unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
long freed = 0;
@@ -460,32 +495,24 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
if (nr_to_scan-- == 0)
break;
+ if (refcount_read(&cred->cr_count) > 1) {
+ rpcauth_lru_remove_locked(cred);
+ continue;
+ }
/*
* Enforce a 60 second garbage collection moratorium
* Note that the cred_unused list must be time-ordered.
*/
- if (time_in_range(cred->cr_expire, expired, jiffies) &&
- test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
- freed = SHRINK_STOP;
- break;
- }
-
- list_del_init(&cred->cr_lru);
- number_cred_unused--;
- freed++;
- if (atomic_read(&cred->cr_count) != 0)
+ if (!time_in_range(cred->cr_expire, expired, jiffies))
+ continue;
+ if (!rpcauth_unhash_cred(cred))
continue;
- cache_lock = &cred->cr_auth->au_credcache->lock;
- spin_lock(cache_lock);
- if (atomic_read(&cred->cr_count) == 0) {
- get_rpccred(cred);
- list_add_tail(&cred->cr_lru, free);
- rpcauth_unhash_cred_locked(cred);
- }
- spin_unlock(cache_lock);
+ rpcauth_lru_remove_locked(cred);
+ freed++;
+ list_add_tail(&cred->cr_lru, free);
}
- return freed;
+ return freed ? freed : SHRINK_STOP;
}
static unsigned long
@@ -561,19 +588,15 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
if (flags & RPCAUTH_LOOKUP_RCU) {
- if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) &&
- !test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags))
- cred = entry;
+ if (test_bit(RPCAUTH_CRED_NEW, &entry->cr_flags) ||
+ refcount_read(&entry->cr_count) == 0)
+ continue;
+ cred = entry;
break;
}
- spin_lock(&cache->lock);
- if (test_bit(RPCAUTH_CRED_HASHED, &entry->cr_flags) == 0) {
- spin_unlock(&cache->lock);
- continue;
- }
cred = get_rpccred(entry);
- spin_unlock(&cache->lock);
- break;
+ if (cred)
+ break;
}
rcu_read_unlock();
@@ -594,11 +617,13 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
if (!entry->cr_ops->crmatch(acred, entry, flags))
continue;
cred = get_rpccred(entry);
- break;
+ if (cred)
+ break;
}
if (cred == NULL) {
cred = new;
set_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags);
+ refcount_inc(&cred->cr_count);
hlist_add_head_rcu(&cred->cr_hash, &cache->hashtable[nr]);
} else
list_add_tail(&new->cr_lru, &free);
@@ -645,7 +670,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
{
INIT_HLIST_NODE(&cred->cr_hash);
INIT_LIST_HEAD(&cred->cr_lru);
- atomic_set(&cred->cr_count, 1);
+ refcount_set(&cred->cr_count, 1);
cred->cr_auth = auth;
cred->cr_ops = ops;
cred->cr_expire = jiffies;
@@ -713,36 +738,29 @@ put_rpccred(struct rpc_cred *cred)
{
if (cred == NULL)
return;
- /* Fast path for unhashed credentials */
- if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) == 0) {
- if (atomic_dec_and_test(&cred->cr_count))
- cred->cr_ops->crdestroy(cred);
- return;
- }
-
- if (!atomic_dec_and_lock(&cred->cr_count, &rpc_credcache_lock))
- return;
- if (!list_empty(&cred->cr_lru)) {
- number_cred_unused--;
- list_del_init(&cred->cr_lru);
- }
- if (test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags) != 0) {
- if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
- cred->cr_expire = jiffies;
- list_add_tail(&cred->cr_lru, &cred_unused);
- number_cred_unused++;
- goto out_nodestroy;
- }
- if (!rpcauth_unhash_cred(cred)) {
- /* We were hashed and someone looked us up... */
- goto out_nodestroy;
- }
+ rcu_read_lock();
+ if (refcount_dec_and_test(&cred->cr_count))
+ goto destroy;
+ if (refcount_read(&cred->cr_count) != 1 ||
+ !test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags))
+ goto out;
+ if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0) {
+ cred->cr_expire = jiffies;
+ rpcauth_lru_add(cred);
+ /* Race breaker */
+ if (unlikely(!test_bit(RPCAUTH_CRED_HASHED, &cred->cr_flags)))
+ rpcauth_lru_remove(cred);
+ } else if (rpcauth_unhash_cred(cred)) {
+ rpcauth_lru_remove(cred);
+ if (refcount_dec_and_test(&cred->cr_count))
+ goto destroy;
}
- spin_unlock(&rpc_credcache_lock);
- cred->cr_ops->crdestroy(cred);
+out:
+ rcu_read_unlock();
return;
-out_nodestroy:
- spin_unlock(&rpc_credcache_lock);
+destroy:
+ rcu_read_unlock();
+ cred->cr_ops->crdestroy(cred);
}
EXPORT_SYMBOL_GPL(put_rpccred);
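put_rpccred() now drops the cache's reference with refcount_dec_and_test() under rcu_read_lock(), and the lookup side fails once the count has reached zero, which is why get_rpccred() can return NULL in the hunks above. A small, runnable C11 sketch of that inc-not-zero idiom (struct obj and the helpers are illustrative stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy object: hypothetical stand-in for struct rpc_cred. */
struct obj {
	atomic_uint refcount;
};

/* Model of refcount_inc_not_zero(): take a reference only while the
 * object is still live (count > 0); a zero count means a concurrent
 * final put has begun teardown, so the lookup must fail. */
static bool obj_get(struct obj *o)
{
	unsigned int old = atomic_load(&o->refcount);

	do {
		if (old == 0)
			return false;	/* already being destroyed */
	} while (!atomic_compare_exchange_weak(&o->refcount, &old, old + 1));
	return true;
}

/* Model of refcount_dec_and_test(): the last put wins teardown. */
static bool obj_put(struct obj *o)
{
	return atomic_fetch_sub(&o->refcount, 1) == 1;
}

int main(void)
{
	struct obj o;

	atomic_init(&o.refcount, 1);
	printf("lookup: %s\n", obj_get(&o) ? "hit" : "miss");	/* hit */
	obj_put(&o);
	if (obj_put(&o))					/* final put */
		printf("destroy\n");
	printf("lookup: %s\n", obj_get(&o) ? "hit" : "miss");	/* miss */
	return 0;
}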
@@ -817,6 +835,16 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
}
+bool
+rpcauth_xmit_need_reencode(struct rpc_task *task)
+{
+ struct rpc_cred *cred = task->tk_rqstp->rq_cred;
+
+ if (!cred || !cred->cr_ops->crneed_reencode)
+ return false;
+ return cred->cr_ops->crneed_reencode(task);
+}
+
int
rpcauth_refreshcred(struct rpc_task *task)
{
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
index f1df9837f1ac..d8831b988b1e 100644
--- a/net/sunrpc/auth_generic.c
+++ b/net/sunrpc/auth_generic.c
@@ -274,7 +274,7 @@ static const struct rpc_authops generic_auth_ops = {
static struct rpc_auth generic_auth = {
.au_ops = &generic_auth_ops,
- .au_count = ATOMIC_INIT(0),
+ .au_count = REFCOUNT_INIT(1),
};
static bool generic_key_to_expire(struct rpc_cred *cred)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 21c0aa0a0d1d..30f970cdc7f6 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1058,7 +1058,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
auth->au_flavor = flavor;
if (gss_pseudoflavor_to_datatouch(gss_auth->mech, flavor))
auth->au_flags |= RPCAUTH_AUTH_DATATOUCH;
- atomic_set(&auth->au_count, 1);
+ refcount_set(&auth->au_count, 1);
kref_init(&gss_auth->kref);
err = rpcauth_init_credcache(auth);
@@ -1187,7 +1187,7 @@ gss_auth_find_or_add_hashed(const struct rpc_auth_create_args *args,
if (strcmp(gss_auth->target_name, args->target_name))
continue;
}
- if (!atomic_inc_not_zero(&gss_auth->rpc_auth.au_count))
+ if (!refcount_inc_not_zero(&gss_auth->rpc_auth.au_count))
continue;
goto out;
}
@@ -1984,6 +1984,46 @@ gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
return decode(rqstp, &xdr, obj);
}
+static bool
+gss_seq_is_newer(u32 new, u32 old)
+{
+ return (s32)(new - old) > 0;
+}
+
+static bool
+gss_xmit_need_reencode(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_cred *cred = req->rq_cred;
+ struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
+ u32 win, seq_xmit;
+ bool ret = true;
+
+ if (!ctx)
+ return true;
+
+ if (gss_seq_is_newer(req->rq_seqno, READ_ONCE(ctx->gc_seq)))
+ goto out;
+
+ seq_xmit = READ_ONCE(ctx->gc_seq_xmit);
+ while (gss_seq_is_newer(req->rq_seqno, seq_xmit)) {
+ u32 tmp = seq_xmit;
+
+ seq_xmit = cmpxchg(&ctx->gc_seq_xmit, tmp, req->rq_seqno);
+ if (seq_xmit == tmp) {
+ ret = false;
+ goto out;
+ }
+ }
+
+ win = ctx->gc_win;
+ if (win > 0)
+ ret = !gss_seq_is_newer(req->rq_seqno, seq_xmit - win);
+out:
+ gss_put_ctx(ctx);
+ return ret;
+}
+
static int
gss_unwrap_resp(struct rpc_task *task,
kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
@@ -2052,6 +2092,7 @@ static const struct rpc_credops gss_credops = {
.crunwrap_resp = gss_unwrap_resp,
.crkey_timeout = gss_key_timeout,
.crstringify_acceptor = gss_stringify_acceptor,
+ .crneed_reencode = gss_xmit_need_reencode,
};
static const struct rpc_credops gss_nullops = {
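gss_seq_is_newer() above relies on the classic signed-difference trick: subtract in unsigned arithmetic, then reinterpret the 32-bit result as signed, so the comparison stays correct when the sequence counter wraps. A standalone, runnable check:

#include <stdint.h>
#include <stdio.h>

/* Signed-difference comparison, as in gss_seq_is_newer(): "new" is
 * ahead of "old" iff the 32-bit distance, read as signed, is
 * positive. This survives wraparound past UINT32_MAX. */
static int seq_is_newer(uint32_t new, uint32_t old)
{
	return (int32_t)(new - old) > 0;
}

int main(void)
{
	printf("%d\n", seq_is_newer(5, 3));		/* 1 */
	printf("%d\n", seq_is_newer(3, 5));		/* 0 */
	/* Wraparound: 0x00000002 is newer than 0xfffffffe. */
	printf("%d\n", seq_is_newer(2, 0xfffffffeu));	/* 1 */
	return 0;
}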
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0220e1ca5280..4f43383971ba 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -53,7 +53,7 @@
u32
krb5_encrypt(
- struct crypto_skcipher *tfm,
+ struct crypto_sync_skcipher *tfm,
void * iv,
void * in,
void * out,
@@ -62,24 +62,24 @@ krb5_encrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- if (length % crypto_skcipher_blocksize(tfm) != 0)
+ if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+ if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
- crypto_skcipher_ivsize(tfm));
+ crypto_sync_skcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -92,7 +92,7 @@ out:
u32
krb5_decrypt(
- struct crypto_skcipher *tfm,
+ struct crypto_sync_skcipher *tfm,
void * iv,
void * in,
void * out,
@@ -101,23 +101,23 @@ krb5_decrypt(
u32 ret = -EINVAL;
struct scatterlist sg[1];
u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- if (length % crypto_skcipher_blocksize(tfm) != 0)
+ if (length % crypto_sync_skcipher_blocksize(tfm) != 0)
goto out;
- if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+ if (crypto_sync_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
- crypto_skcipher_ivsize(tfm));
+ crypto_sync_skcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_sync_skcipher_ivsize(tfm));
memcpy(out, in, length);
sg_init_one(sg, out, length);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, length, local_iv);
@@ -466,7 +466,8 @@ encryptor(struct scatterlist *sg, void *data)
{
struct encryptor_desc *desc = data;
struct xdr_buf *outbuf = desc->outbuf;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+ struct crypto_sync_skcipher *tfm =
+ crypto_sync_skcipher_reqtfm(desc->req);
struct page *in_page;
int thislen = desc->fraglen + sg->length;
int fraglen, ret;
@@ -492,7 +493,7 @@ encryptor(struct scatterlist *sg, void *data)
desc->fraglen += sg->length;
desc->pos += sg->length;
- fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+ fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -526,16 +527,16 @@ encryptor(struct scatterlist *sg, void *data)
}
int
-gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
int offset, struct page **pages)
{
int ret;
struct encryptor_desc desc;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+ BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
memset(desc.iv, 0, sizeof(desc.iv));
@@ -567,7 +568,8 @@ decryptor(struct scatterlist *sg, void *data)
{
struct decryptor_desc *desc = data;
int thislen = desc->fraglen + sg->length;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
+ struct crypto_sync_skcipher *tfm =
+ crypto_sync_skcipher_reqtfm(desc->req);
int fraglen, ret;
/* Worst case is 4 fragments: head, end of page 1, start
@@ -578,7 +580,7 @@ decryptor(struct scatterlist *sg, void *data)
desc->fragno++;
desc->fraglen += sg->length;
- fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
+ fraglen = thislen & (crypto_sync_skcipher_blocksize(tfm) - 1);
thislen -= fraglen;
if (thislen == 0)
@@ -608,17 +610,17 @@ decryptor(struct scatterlist *sg, void *data)
}
int
-gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *buf,
int offset)
{
int ret;
struct decryptor_desc desc;
- SKCIPHER_REQUEST_ON_STACK(req, tfm);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
/* XXXJBF: */
- BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+ BUG_ON((buf->len - offset) % crypto_sync_skcipher_blocksize(tfm) != 0);
- skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_sync_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
memset(desc.iv, 0, sizeof(desc.iv));
@@ -672,12 +674,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
}
static u32
-gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_sync_skcipher *cipher, struct xdr_buf *buf,
u32 offset, u8 *iv, struct page **pages, int encrypt)
{
u32 ret;
struct scatterlist sg[1];
- SKCIPHER_REQUEST_ON_STACK(req, cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, cipher);
u8 *data;
struct page **save_pages;
u32 len = buf->len - offset;
@@ -706,7 +708,7 @@ gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
sg_init_one(sg, data, len);
- skcipher_request_set_tfm(req, cipher);
+ skcipher_request_set_sync_tfm(req, cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg, sg, len, iv);
@@ -735,7 +737,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_netobj hmac;
u8 *cksumkey;
u8 *ecptr;
- struct crypto_skcipher *cipher, *aux_cipher;
+ struct crypto_sync_skcipher *cipher, *aux_cipher;
int blocksize;
struct page **save_pages;
int nblocks, nbytes;
@@ -754,7 +756,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
cksumkey = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
}
- blocksize = crypto_skcipher_blocksize(cipher);
+ blocksize = crypto_sync_skcipher_blocksize(cipher);
/* hide the gss token header and insert the confounder */
offset += GSS_KRB5_TOK_HDR_LEN;
@@ -807,7 +809,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
- SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
desc.fragno = 0;
@@ -816,7 +818,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
desc.outbuf = buf;
desc.req = req;
- skcipher_request_set_tfm(req, aux_cipher);
+ skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.infrags, 4);
@@ -855,7 +857,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
struct xdr_buf subbuf;
u32 ret = 0;
u8 *cksum_key;
- struct crypto_skcipher *cipher, *aux_cipher;
+ struct crypto_sync_skcipher *cipher, *aux_cipher;
struct xdr_netobj our_hmac_obj;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -874,7 +876,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
cksum_key = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
}
- blocksize = crypto_skcipher_blocksize(cipher);
+ blocksize = crypto_sync_skcipher_blocksize(cipher);
/* create a segment skipping the header and leaving out the checksum */
@@ -891,13 +893,13 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
- SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+ SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.fragno = 0;
desc.fraglen = 0;
desc.req = req;
- skcipher_request_set_tfm(req, aux_cipher);
+ skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.frags, 4);
@@ -946,7 +948,8 @@ out_err:
* Set the key of the given cipher.
*/
int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
+ struct crypto_sync_skcipher *cipher,
unsigned char *cksum)
{
struct crypto_shash *hmac;
@@ -994,7 +997,7 @@ krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
if (err)
goto out_err;
- err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+ err = crypto_sync_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
if (err)
goto out_err;
@@ -1012,7 +1015,8 @@ out_err:
* Set the key of cipher kctx->enc.
*/
int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
+ struct crypto_sync_skcipher *cipher,
s32 seqnum)
{
struct crypto_shash *hmac;
@@ -1069,7 +1073,8 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
if (err)
goto out_err;
- err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+ err = crypto_sync_skcipher_setkey(cipher, Kcrypt,
+ kctx->gk5e->keylength);
if (err)
goto out_err;
diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c
index f7fe2d2b851f..550fdf18d3b3 100644
--- a/net/sunrpc/auth_gss/gss_krb5_keys.c
+++ b/net/sunrpc/auth_gss/gss_krb5_keys.c
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
size_t blocksize, keybytes, keylength, n;
unsigned char *inblockdata, *outblockdata, *rawkey;
struct xdr_netobj inblock, outblock;
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
u32 ret = EINVAL;
blocksize = gk5e->blocksize;
@@ -157,11 +157,10 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
if ((inkey->len != keylength) || (outkey->len != keylength))
goto err_return;
- cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
goto err_return;
- if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len))
+ if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
goto err_return;
/* allocate and set up buffers */
@@ -238,7 +237,7 @@ err_free_in:
memset(inblockdata, 0, blocksize);
kfree(inblockdata);
err_free_cipher:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
err_return:
return ret;
}
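All of the krb5 call sites in this series move from crypto_skcipher allocated with the async mask to the dedicated crypto_sync_skcipher API, which guarantees a synchronous implementation and so makes the on-stack request pattern safe. A condensed kernel-style sketch of the converted call shape (the function and buffer names are illustrative, error handling is trimmed, and for cbc(aes) the length must be a multiple of the block size):

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_encrypt(const u8 *key, unsigned int keylen,
			   u8 *buf, unsigned int len, u8 *iv)
{
	struct crypto_sync_skcipher *tfm;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	{
		/* The request lives on the stack: safe because the
		 * sync API never completes asynchronously. */
		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_sync_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		sg_init_one(&sg, buf, len);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}
out:
	crypto_free_sync_skcipher(tfm);
	return err;
}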
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 7bb2514aadd9..7f0424dfa8f6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -218,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
static inline const void *
get_key(const void *p, const void *end,
- struct krb5_ctx *ctx, struct crypto_skcipher **res)
+ struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
{
struct xdr_netobj key;
int alg;
@@ -246,15 +246,14 @@ get_key(const void *p, const void *end,
if (IS_ERR(p))
goto out_err;
- *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ *res = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(*res)) {
printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
*res = NULL;
goto out_err_free_key;
}
- if (crypto_skcipher_setkey(*res, key.data, key.len)) {
+ if (crypto_sync_skcipher_setkey(*res, key.data, key.len)) {
printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", ctx->gk5e->encrypt_name);
goto out_err_free_tfm;
@@ -264,7 +263,7 @@ get_key(const void *p, const void *end,
return p;
out_err_free_tfm:
- crypto_free_skcipher(*res);
+ crypto_free_sync_skcipher(*res);
out_err_free_key:
kfree(key.data);
p = ERR_PTR(-EINVAL);
@@ -336,30 +335,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
return 0;
out_err_free_key2:
- crypto_free_skcipher(ctx->seq);
+ crypto_free_sync_skcipher(ctx->seq);
out_err_free_key1:
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
out_err_free_mech:
kfree(ctx->mech_used.data);
out_err:
return PTR_ERR(p);
}
-static struct crypto_skcipher *
+static struct crypto_sync_skcipher *
context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
{
- struct crypto_skcipher *cp;
+ struct crypto_sync_skcipher *cp;
- cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
+ cp = crypto_alloc_sync_skcipher(cname, 0, 0);
if (IS_ERR(cp)) {
dprintk("gss_kerberos_mech: unable to initialize "
"crypto algorithm %s\n", cname);
return NULL;
}
- if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+ if (crypto_sync_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
dprintk("gss_kerberos_mech: error setting key for "
"crypto algorithm %s\n", cname);
- crypto_free_skcipher(cp);
+ crypto_free_sync_skcipher(cp);
return NULL;
}
return cp;
@@ -413,9 +412,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
return 0;
out_free_enc:
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
out_free_seq:
- crypto_free_skcipher(ctx->seq);
+ crypto_free_sync_skcipher(ctx->seq);
out_err:
return -EINVAL;
}
@@ -469,17 +468,15 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
/*
* allocate hash, and skciphers for data and seqnum encryption
*/
- ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ ctx->enc = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(ctx->enc)) {
err = PTR_ERR(ctx->enc);
goto out_err_free_hmac;
}
- ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ ctx->seq = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(ctx->seq)) {
- crypto_free_skcipher(ctx->enc);
+ crypto_free_sync_skcipher(ctx->enc);
err = PTR_ERR(ctx->seq);
goto out_err_free_hmac;
}
@@ -591,7 +588,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
context_v2_alloc_cipher(ctx, "cbc(aes)",
ctx->acceptor_seal);
if (ctx->acceptor_enc_aux == NULL) {
- crypto_free_skcipher(ctx->initiator_enc_aux);
+ crypto_free_sync_skcipher(ctx->initiator_enc_aux);
goto out_free_acceptor_enc;
}
}
@@ -599,9 +596,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
return 0;
out_free_acceptor_enc:
- crypto_free_skcipher(ctx->acceptor_enc);
+ crypto_free_sync_skcipher(ctx->acceptor_enc);
out_free_initiator_enc:
- crypto_free_skcipher(ctx->initiator_enc);
+ crypto_free_sync_skcipher(ctx->initiator_enc);
out_err:
return -EINVAL;
}
@@ -713,12 +710,12 @@ static void
gss_delete_sec_context_kerberos(void *internal_ctx) {
struct krb5_ctx *kctx = internal_ctx;
- crypto_free_skcipher(kctx->seq);
- crypto_free_skcipher(kctx->enc);
- crypto_free_skcipher(kctx->acceptor_enc);
- crypto_free_skcipher(kctx->initiator_enc);
- crypto_free_skcipher(kctx->acceptor_enc_aux);
- crypto_free_skcipher(kctx->initiator_enc_aux);
+ crypto_free_sync_skcipher(kctx->seq);
+ crypto_free_sync_skcipher(kctx->enc);
+ crypto_free_sync_skcipher(kctx->acceptor_enc);
+ crypto_free_sync_skcipher(kctx->initiator_enc);
+ crypto_free_sync_skcipher(kctx->acceptor_enc_aux);
+ crypto_free_sync_skcipher(kctx->initiator_enc_aux);
kfree(kctx->mech_used.data);
kfree(kctx);
}
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index eaad9bc7a0bd..b4adeb06660b 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -63,13 +63,12 @@
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/crypto.h>
+#include <linux/atomic.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-DEFINE_SPINLOCK(krb5_seq_lock);
-
static void *
setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
{
@@ -124,6 +123,30 @@ setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
return krb5_hdr;
}
+u32
+gss_seq_send_fetch_and_inc(struct krb5_ctx *ctx)
+{
+ u32 old, seq_send = READ_ONCE(ctx->seq_send);
+
+ do {
+ old = seq_send;
+ seq_send = cmpxchg(&ctx->seq_send, old, old + 1);
+ } while (old != seq_send);
+ return seq_send;
+}
+
+u64
+gss_seq_send64_fetch_and_inc(struct krb5_ctx *ctx)
+{
+ u64 old, seq_send = READ_ONCE(ctx->seq_send64);
+
+ do {
+ old = seq_send;
+ seq_send = cmpxchg64(&ctx->seq_send64, old, old + 1);
+ } while (old != seq_send);
+ return seq_send;
+}
+
static u32
gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
@@ -154,9 +177,7 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
- spin_lock(&krb5_seq_lock);
- seq_send = ctx->seq_send++;
- spin_unlock(&krb5_seq_lock);
+ seq_send = gss_seq_send_fetch_and_inc(ctx);
if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff,
seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
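gss_seq_send_fetch_and_inc() above replaces the global krb5_seq_lock with a compare-and-swap loop; the loop is open-coded because seq_send is a plain u32 embedded in an existing structure rather than an atomic type. The same semantics in portable, runnable C11:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Model of gss_seq_send_fetch_and_inc(): atomically return the
 * current value and advance it by one, without a spinlock. On a
 * declared-atomic field this would collapse to a single fetch_add. */
static uint32_t fetch_and_inc(_Atomic uint32_t *seq)
{
	uint32_t old = atomic_load(seq);

	while (!atomic_compare_exchange_weak(seq, &old, old + 1))
		;	/* 'old' is reloaded by the failed CAS */
	return old;
}

int main(void)
{
	_Atomic uint32_t seq = 7;

	printf("%u\n", fetch_and_inc(&seq));	/* 7 */
	printf("%u\n", fetch_and_inc(&seq));	/* 8 */
	printf("%u\n", atomic_load(&seq));	/* 9 */
	return 0;
}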
@@ -174,7 +195,6 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
.data = cksumdata};
void *krb5_hdr;
s32 now;
- u64 seq_send;
u8 *cksumkey;
unsigned int cksum_usage;
__be64 seq_send_be64;
@@ -185,11 +205,7 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
/* Set up the sequence number. Now 64-bits in clear
* text and w/o direction indicator */
- spin_lock(&krb5_seq_lock);
- seq_send = ctx->seq_send64++;
- spin_unlock(&krb5_seq_lock);
-
- seq_send_be64 = cpu_to_be64(seq_send);
+ seq_send_be64 = cpu_to_be64(gss_seq_send64_fetch_and_inc(ctx));
memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
if (ctx->initiate) {
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c8b9082f4a9d..fb6656295204 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,13 +43,12 @@ static s32
krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -68,12 +67,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
return code;
}
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
- struct crypto_skcipher *key,
+ struct crypto_sync_skcipher *key,
int direction,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
@@ -101,13 +100,12 @@ static s32
krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, s32 *seqnum)
{
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
unsigned char plain[8];
s32 code;
dprintk("RPC: %s:\n", __func__);
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
return PTR_ERR(cipher);
@@ -130,7 +128,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
(plain[2] << 8) | (plain[3]));
out:
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
return code;
}
@@ -142,7 +140,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
{
s32 code;
unsigned char plain[8];
- struct crypto_skcipher *key = kctx->seq;
+ struct crypto_sync_skcipher *key = kctx->seq;
dprintk("RPC: krb5_get_seq_num:\n");
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 39a2e672900b..962fa84e6db1 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
now = get_seconds();
- blocksize = crypto_skcipher_blocksize(kctx->enc);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
gss_krb5_add_padding(buf, offset, blocksize);
BUG_ON((buf->len - offset) % blocksize);
plainlen = conflen + buf->len - offset;
@@ -228,9 +228,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);
- spin_lock(&krb5_seq_lock);
- seq_send = kctx->seq_send++;
- spin_unlock(&krb5_seq_lock);
+ seq_send = gss_seq_send_fetch_and_inc(kctx);
/* XXX would probably be more efficient to compute checksum
* and encrypt at the same time: */
@@ -239,10 +237,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
return GSS_S_FAILURE;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
int err;
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+ 0, 0);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
@@ -250,7 +248,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
err = gss_encrypt_xdr_buf(cipher, buf,
offset + headlen - conflen, pages);
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
if (err)
return GSS_S_FAILURE;
} else {
@@ -327,18 +325,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
return GSS_S_BAD_SIG;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
- struct crypto_skcipher *cipher;
+ struct crypto_sync_skcipher *cipher;
int err;
- cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
- CRYPTO_ALG_ASYNC);
+ cipher = crypto_alloc_sync_skcipher(kctx->gk5e->encrypt_name,
+ 0, 0);
if (IS_ERR(cipher))
return GSS_S_FAILURE;
krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
- crypto_free_skcipher(cipher);
+ crypto_free_sync_skcipher(cipher);
if (err)
return GSS_S_DEFECTIVE_TOKEN;
} else {
@@ -371,7 +369,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
/* Copy the data back to the right position. XXX: Would probably be
* better to copy and encrypt at the same time. */
- blocksize = crypto_skcipher_blocksize(kctx->enc);
+ blocksize = crypto_sync_skcipher_blocksize(kctx->enc);
data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
conflen;
orig_start = buf->head[0].iov_base + offset;
@@ -477,9 +475,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
*be16ptr++ = 0;
be64ptr = (__be64 *)be16ptr;
- spin_lock(&krb5_seq_lock);
- *be64ptr = cpu_to_be64(kctx->seq_send64++);
- spin_unlock(&krb5_seq_lock);
+ *be64ptr = cpu_to_be64(gss_seq_send64_fetch_and_inc(kctx));
err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, pages);
if (err)
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 5fec3abbe19b..16ac0f4cb7d8 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -117,7 +117,7 @@ int gss_mech_register(struct gss_api_mech *gm)
if (status)
return status;
spin_lock(&registered_mechs_lock);
- list_add(&gm->gm_list, &registered_mechs);
+ list_add_rcu(&gm->gm_list, &registered_mechs);
spin_unlock(&registered_mechs_lock);
dprintk("RPC: registered gss mechanism %s\n", gm->gm_name);
return 0;
@@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(gss_mech_register);
void gss_mech_unregister(struct gss_api_mech *gm)
{
spin_lock(&registered_mechs_lock);
- list_del(&gm->gm_list);
+ list_del_rcu(&gm->gm_list);
spin_unlock(&registered_mechs_lock);
dprintk("RPC: unregistered gss mechanism %s\n", gm->gm_name);
gss_mech_free(gm);
@@ -151,15 +151,15 @@ _gss_mech_get_by_name(const char *name)
{
struct gss_api_mech *pos, *gm = NULL;
- spin_lock(&registered_mechs_lock);
- list_for_each_entry(pos, &registered_mechs, gm_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &registered_mechs, gm_list) {
if (0 == strcmp(name, pos->gm_name)) {
if (try_module_get(pos->gm_owner))
gm = pos;
break;
}
}
- spin_unlock(&registered_mechs_lock);
+ rcu_read_unlock();
return gm;
}
@@ -186,8 +186,8 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
dprintk("RPC: %s(%s)\n", __func__, buf);
request_module("rpc-auth-gss-%s", buf);
- spin_lock(&registered_mechs_lock);
- list_for_each_entry(pos, &registered_mechs, gm_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &registered_mechs, gm_list) {
if (obj->len == pos->gm_oid.len) {
if (0 == memcmp(obj->data, pos->gm_oid.data, obj->len)) {
if (try_module_get(pos->gm_owner))
@@ -196,7 +196,7 @@ struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj)
}
}
}
- spin_unlock(&registered_mechs_lock);
+ rcu_read_unlock();
return gm;
}
@@ -216,15 +216,15 @@ static struct gss_api_mech *_gss_mech_get_by_pseudoflavor(u32 pseudoflavor)
{
struct gss_api_mech *gm = NULL, *pos;
- spin_lock(&registered_mechs_lock);
- list_for_each_entry(pos, &registered_mechs, gm_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &registered_mechs, gm_list) {
if (!mech_supports_pseudoflavor(pos, pseudoflavor))
continue;
if (try_module_get(pos->gm_owner))
gm = pos;
break;
}
- spin_unlock(&registered_mechs_lock);
+ rcu_read_unlock();
return gm;
}
@@ -257,8 +257,8 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
struct gss_api_mech *pos = NULL;
int j, i = 0;
- spin_lock(&registered_mechs_lock);
- list_for_each_entry(pos, &registered_mechs, gm_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(pos, &registered_mechs, gm_list) {
for (j = 0; j < pos->gm_pf_num; j++) {
if (i >= size) {
- spin_unlock(&registered_mechs_lock);
+ rcu_read_unlock();
@@ -267,7 +267,7 @@ int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr, int size)
array_ptr[i++] = pos->gm_pfs[j].pseudoflavor;
}
}
- spin_unlock(&registered_mechs_lock);
+ rcu_read_unlock();
return i;
}
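The lookup paths in gss_mech_switch.c now walk registered_mechs under rcu_read_lock() instead of the registration spinlock, pinning a match with try_module_get() before leaving the read-side section. A condensed kernel-style reader following that shape (my_mechs and mech_matches() are illustrative stand-ins):

#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/sunrpc/gss_api.h>

/* Sketch of the converted reader pattern: traverse an RCU-protected
 * list without the writers' spinlock, and take a module reference on
 * the matching entry before dropping the RCU read lock, so the entry
 * cannot be freed out from under the caller. */
static struct gss_api_mech *mech_lookup(struct list_head *my_mechs,
		bool (*mech_matches)(struct gss_api_mech *))
{
	struct gss_api_mech *pos, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(pos, my_mechs, gm_list) {
		if (!mech_matches(pos))
			continue;
		if (try_module_get(pos->gm_owner))
			found = pos;
		break;
	}
	rcu_read_unlock();
	return found;
}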
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 444380f968f1..006062ad5f58 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -784,6 +784,7 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
xdr_inline_pages(&req->rq_rcv_buf,
PAGE_SIZE/2 /* pretty arbitrary */,
arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
+ req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES;
done:
if (err)
dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 4b48228ee8c7..2694a1bc026b 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -21,7 +21,7 @@ static struct rpc_cred null_cred;
static struct rpc_auth *
nul_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
- atomic_inc(&null_auth.au_count);
+ refcount_inc(&null_auth.au_count);
return &null_auth;
}
@@ -119,7 +119,7 @@ struct rpc_auth null_auth = {
.au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authnull_ops,
.au_flavor = RPC_AUTH_NULL,
- .au_count = ATOMIC_INIT(0),
+ .au_count = REFCOUNT_INIT(1),
};
static
@@ -138,6 +138,6 @@ struct rpc_cred null_cred = {
.cr_lru = LIST_HEAD_INIT(null_cred.cr_lru),
.cr_auth = &null_auth,
.cr_ops = &null_credops,
- .cr_count = ATOMIC_INIT(1),
+ .cr_count = REFCOUNT_INIT(2),
.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE,
};
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 185e56d4f9ae..4c1c7e56288f 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -34,7 +34,7 @@ unx_create(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
{
dprintk("RPC: creating UNIX authenticator for client %p\n",
clnt);
- atomic_inc(&unix_auth.au_count);
+ refcount_inc(&unix_auth.au_count);
return &unix_auth;
}
@@ -239,7 +239,7 @@ struct rpc_auth unix_auth = {
.au_flags = RPCAUTH_AUTH_NO_CRKEY_TIMEOUT,
.au_ops = &authunix_ops,
.au_flavor = RPC_AUTH_UNIX,
- .au_count = ATOMIC_INIT(0),
+ .au_count = REFCOUNT_INIT(1),
};
static
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 3c15a99b9700..fa5ba6ed3197 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -91,7 +91,6 @@ struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
return NULL;
req->rq_xprt = xprt;
- INIT_LIST_HEAD(&req->rq_list);
INIT_LIST_HEAD(&req->rq_bc_list);
/* Preallocate one XDR receive buffer */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8ea2f5fadd96..ae3b8145da35 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -61,6 +61,7 @@ static void call_start(struct rpc_task *task);
static void call_reserve(struct rpc_task *task);
static void call_reserveresult(struct rpc_task *task);
static void call_allocate(struct rpc_task *task);
+static void call_encode(struct rpc_task *task);
static void call_decode(struct rpc_task *task);
static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
@@ -1137,10 +1138,10 @@ EXPORT_SYMBOL_GPL(rpc_call_async);
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
{
struct rpc_task *task;
- struct xdr_buf *xbufp = &req->rq_snd_buf;
struct rpc_task_setup task_setup_data = {
.callback_ops = &rpc_default_ops,
- .flags = RPC_TASK_SOFTCONN,
+ .flags = RPC_TASK_SOFTCONN |
+ RPC_TASK_NO_RETRANS_TIMEOUT,
};
dprintk("RPC: rpc_run_bc_task req= %p\n", req);
@@ -1148,14 +1149,7 @@ struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req)
* Create an rpc_task to send the data
*/
task = rpc_new_task(&task_setup_data);
- task->tk_rqstp = req;
-
- /*
- * Set up the xdr_buf length.
- * This also indicates that the buffer is XDR encoded already.
- */
- xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
- xbufp->tail[0].iov_len;
+ xprt_init_bc_request(req, task);
task->tk_action = call_bc_transmit;
atomic_inc(&task->tk_count);
@@ -1558,7 +1552,6 @@ call_reserveresult(struct rpc_task *task)
task->tk_status = 0;
if (status >= 0) {
if (task->tk_rqstp) {
- xprt_request_init(task);
task->tk_action = call_refresh;
return;
}
@@ -1680,7 +1673,7 @@ call_allocate(struct rpc_task *task)
dprint_status(task);
task->tk_status = 0;
- task->tk_action = call_bind;
+ task->tk_action = call_encode;
if (req->rq_buffer)
return;
@@ -1721,22 +1714,15 @@ call_allocate(struct rpc_task *task)
rpc_exit(task, -ERESTARTSYS);
}
-static inline int
+static int
rpc_task_need_encode(struct rpc_task *task)
{
- return task->tk_rqstp->rq_snd_buf.len == 0;
+ return test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) == 0 &&
+ (!(task->tk_flags & RPC_TASK_SENT) ||
+ !(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT) ||
+ xprt_request_need_retransmit(task));
}
-static inline void
-rpc_task_force_reencode(struct rpc_task *task)
-{
- task->tk_rqstp->rq_snd_buf.len = 0;
- task->tk_rqstp->rq_bytes_sent = 0;
-}
-
-/*
- * 3. Encode arguments of an RPC call
- */
static void
rpc_xdr_encode(struct rpc_task *task)
{
@@ -1752,6 +1738,7 @@ rpc_xdr_encode(struct rpc_task *task)
xdr_buf_init(&req->rq_rcv_buf,
req->rq_rbuffer,
req->rq_rcvsize);
+ req->rq_bytes_sent = 0;
p = rpc_encode_header(task);
if (p == NULL) {
@@ -1766,6 +1753,36 @@ rpc_xdr_encode(struct rpc_task *task)
task->tk_status = rpcauth_wrap_req(task, encode, req, p,
task->tk_msg.rpc_argp);
+ if (task->tk_status == 0)
+ xprt_request_prepare(req);
+}
+
+/*
+ * 3. Encode arguments of an RPC call
+ */
+static void
+call_encode(struct rpc_task *task)
+{
+ if (!rpc_task_need_encode(task))
+ goto out;
+ /* Encode here so that rpcsec_gss can use correct sequence number. */
+ rpc_xdr_encode(task);
+ /* Did the encode result in an error condition? */
+ if (task->tk_status != 0) {
+ /* Was the error nonfatal? */
+ if (task->tk_status == -EAGAIN || task->tk_status == -ENOMEM)
+ rpc_delay(task, HZ >> 4);
+ else
+ rpc_exit(task, task->tk_status);
+ return;
+ }
+
+ /* Add task to reply queue before transmission to avoid races */
+ if (rpc_reply_expected(task))
+ xprt_request_enqueue_receive(task);
+ xprt_request_enqueue_transmit(task);
+out:
+ task->tk_action = call_bind;
}
/*
@@ -1947,43 +1964,16 @@ call_connect_status(struct rpc_task *task)
static void
call_transmit(struct rpc_task *task)
{
- int is_retrans = RPC_WAS_SENT(task);
-
dprint_status(task);
- task->tk_action = call_status;
- if (task->tk_status < 0)
- return;
- if (!xprt_prepare_transmit(task))
- return;
- task->tk_action = call_transmit_status;
- /* Encode here so that rpcsec_gss can use correct sequence number. */
- if (rpc_task_need_encode(task)) {
- rpc_xdr_encode(task);
- /* Did the encode result in an error condition? */
- if (task->tk_status != 0) {
- /* Was the error nonfatal? */
- if (task->tk_status == -EAGAIN)
- rpc_delay(task, HZ >> 4);
- else
- rpc_exit(task, task->tk_status);
+ task->tk_status = 0;
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+ if (!xprt_prepare_transmit(task))
return;
- }
+ xprt_transmit(task);
}
- xprt_transmit(task);
- if (task->tk_status < 0)
- return;
- if (is_retrans)
- task->tk_client->cl_stats->rpcretrans++;
- /*
- * On success, ensure that we call xprt_end_transmit() before sleeping
- * in order to allow access to the socket to other RPC requests.
- */
- call_transmit_status(task);
- if (rpc_reply_expected(task))
- return;
- task->tk_action = rpc_exit_task;
- rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
+ task->tk_action = call_transmit_status;
+ xprt_end_transmit(task);
}
/*
@@ -1999,19 +1989,17 @@ call_transmit_status(struct rpc_task *task)
* test first.
*/
if (task->tk_status == 0) {
- xprt_end_transmit(task);
- rpc_task_force_reencode(task);
+ xprt_request_wait_receive(task);
return;
}
switch (task->tk_status) {
- case -EAGAIN:
- case -ENOBUFS:
- break;
default:
dprint_status(task);
- xprt_end_transmit(task);
- rpc_task_force_reencode(task);
+ break;
+ case -EBADMSG:
+ task->tk_status = 0;
+ task->tk_action = call_encode;
break;
/*
* Special cases: if we've been waiting on the
@@ -2019,6 +2007,14 @@ call_transmit_status(struct rpc_task *task)
* socket just returned a connection error,
* then hold onto the transport lock.
*/
+ case -ENOBUFS:
+ rpc_delay(task, HZ>>2);
+ /* fall through */
+ case -EBADSLT:
+ case -EAGAIN:
+ task->tk_action = call_transmit;
+ task->tk_status = 0;
+ break;
case -ECONNREFUSED:
case -EHOSTDOWN:
case -ENETDOWN:
@@ -2026,7 +2022,6 @@ call_transmit_status(struct rpc_task *task)
case -ENETUNREACH:
case -EPERM:
if (RPC_IS_SOFTCONN(task)) {
- xprt_end_transmit(task);
if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt,
task->tk_status);
@@ -2039,7 +2034,7 @@ call_transmit_status(struct rpc_task *task)
case -EADDRINUSE:
case -ENOTCONN:
case -EPIPE:
- rpc_task_force_reencode(task);
+ break;
}
}
@@ -2053,6 +2048,11 @@ call_bc_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
+ if (rpc_task_need_encode(task))
+ xprt_request_enqueue_transmit(task);
+ if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ goto out_wakeup;
+
if (!xprt_prepare_transmit(task))
goto out_retry;
@@ -2061,14 +2061,9 @@ call_bc_transmit(struct rpc_task *task)
"error: %d\n", task->tk_status);
goto out_done;
}
- if (req->rq_connect_cookie != req->rq_xprt->connect_cookie)
- req->rq_bytes_sent = 0;
xprt_transmit(task);
- if (task->tk_status == -EAGAIN)
- goto out_nospace;
-
xprt_end_transmit(task);
dprint_status(task);
switch (task->tk_status) {
@@ -2084,6 +2079,8 @@ call_bc_transmit(struct rpc_task *task)
case -ENOTCONN:
case -EPIPE:
break;
+ case -EAGAIN:
+ goto out_retry;
case -ETIMEDOUT:
/*
* Problem reaching the server. Disconnect and let the
@@ -2107,12 +2104,11 @@ call_bc_transmit(struct rpc_task *task)
"error: %d\n", task->tk_status);
break;
}
+out_wakeup:
rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
out_done:
task->tk_action = rpc_exit_task;
return;
-out_nospace:
- req->rq_connect_cookie = req->rq_xprt->connect_cookie;
out_retry:
task->tk_status = 0;
}
@@ -2125,15 +2121,11 @@ static void
call_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- struct rpc_rqst *req = task->tk_rqstp;
int status;
if (!task->tk_msg.rpc_proc->p_proc)
trace_xprt_ping(task->tk_xprt, task->tk_status);
- if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
- task->tk_status = req->rq_reply_bytes_recvd;
-
dprint_status(task);
status = task->tk_status;
@@ -2173,13 +2165,8 @@ call_status(struct rpc_task *task)
/* fall through */
case -EPIPE:
case -ENOTCONN:
- task->tk_action = call_bind;
- break;
- case -ENOBUFS:
- rpc_delay(task, HZ>>2);
- /* fall through */
case -EAGAIN:
- task->tk_action = call_transmit;
+ task->tk_action = call_encode;
break;
case -EIO:
/* shutdown or soft timeout */
@@ -2244,7 +2231,7 @@ call_timeout(struct rpc_task *task)
rpcauth_invalcred(task);
retry:
- task->tk_action = call_bind;
+ task->tk_action = call_encode;
task->tk_status = 0;
}
@@ -2261,6 +2248,11 @@ call_decode(struct rpc_task *task)
dprint_status(task);
+ if (!decode) {
+ task->tk_action = rpc_exit_task;
+ return;
+ }
+
if (task->tk_flags & RPC_CALL_MAJORSEEN) {
if (clnt->cl_chatty) {
printk(KERN_NOTICE "%s: server %s OK\n",
@@ -2283,7 +2275,7 @@ call_decode(struct rpc_task *task)
if (req->rq_rcv_buf.len < 12) {
if (!RPC_IS_SOFT(task)) {
- task->tk_action = call_bind;
+ task->tk_action = call_encode;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
@@ -2298,13 +2290,11 @@ call_decode(struct rpc_task *task)
goto out_retry;
return;
}
-
task->tk_action = rpc_exit_task;
- if (decode) {
- task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
- task->tk_msg.rpc_resp);
- }
+ task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
+ task->tk_msg.rpc_resp);
+
dprintk("RPC: %5u call_decode result %d\n", task->tk_pid,
task->tk_status);
return;
@@ -2416,7 +2406,7 @@ rpc_verify_header(struct rpc_task *task)
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retry garbled creds\n",
task->tk_pid, __func__);
- task->tk_action = call_bind;
+ task->tk_action = call_encode;
goto out_retry;
case RPC_AUTH_TOOWEAK:
printk(KERN_NOTICE "RPC: server %s requires stronger "
@@ -2485,7 +2475,7 @@ out_garbage:
task->tk_garb_retry--;
dprintk("RPC: %5u %s: retrying\n",
task->tk_pid, __func__);
- task->tk_action = call_bind;
+ task->tk_action = call_encode;
out_retry:
return ERR_PTR(-EAGAIN);
}
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3fe5d60ab0e2..57ca5bead1cb 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -99,65 +99,79 @@ __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
-static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue)
-{
- struct list_head *q = &queue->tasks[queue->priority];
- struct rpc_task *task;
-
- if (!list_empty(q)) {
- task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
- if (task->tk_owner == queue->owner)
- list_move_tail(&task->u.tk_wait.list, q);
- }
-}
-
static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
if (queue->priority != priority) {
- /* Fairness: rotate the list when changing priority */
- rpc_rotate_queue_owner(queue);
queue->priority = priority;
+ queue->nr = 1U << priority;
}
}
-static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
-{
- queue->owner = pid;
- queue->nr = RPC_BATCH_COUNT;
-}
-
static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
- rpc_set_waitqueue_owner(queue, 0);
}
/*
- * Add new request to a priority queue.
+ * Add a request to a queue list
*/
-static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
- struct rpc_task *task,
- unsigned char queue_priority)
+static void
+__rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
{
- struct list_head *q;
struct rpc_task *t;
- INIT_LIST_HEAD(&task->u.tk_wait.links);
- if (unlikely(queue_priority > queue->maxpriority))
- queue_priority = queue->maxpriority;
- if (queue_priority > queue->priority)
- rpc_set_waitqueue_priority(queue, queue_priority);
- q = &queue->tasks[queue_priority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
- list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
+ list_add_tail(&task->u.tk_wait.links,
+ &t->u.tk_wait.links);
+ /* Cache the queue head in task->u.tk_wait.list */
+ task->u.tk_wait.list.next = q;
+ task->u.tk_wait.list.prev = NULL;
return;
}
}
+ INIT_LIST_HEAD(&task->u.tk_wait.links);
list_add_tail(&task->u.tk_wait.list, q);
}
/*
+ * Remove request from a queue list
+ */
+static void
+__rpc_list_dequeue_task(struct rpc_task *task)
+{
+ struct list_head *q;
+ struct rpc_task *t;
+
+ if (task->u.tk_wait.list.prev == NULL) {
+ list_del(&task->u.tk_wait.links);
+ return;
+ }
+ if (!list_empty(&task->u.tk_wait.links)) {
+ t = list_first_entry(&task->u.tk_wait.links,
+ struct rpc_task,
+ u.tk_wait.links);
+ /* Assume __rpc_list_enqueue_task() cached the queue head */
+ q = t->u.tk_wait.list.next;
+ list_add_tail(&t->u.tk_wait.list, q);
+ list_del(&task->u.tk_wait.links);
+ }
+ list_del(&task->u.tk_wait.list);
+}
+
+/*
+ * Add new request to a priority queue.
+ */
+static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
+ struct rpc_task *task,
+ unsigned char queue_priority)
+{
+ if (unlikely(queue_priority > queue->maxpriority))
+ queue_priority = queue->maxpriority;
+ __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
+}
+
+/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
@@ -194,13 +208,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
- struct rpc_task *t;
-
- if (!list_empty(&task->u.tk_wait.links)) {
- t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
- list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
- list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
- }
+ __rpc_list_dequeue_task(task);
}
/*
@@ -212,7 +220,8 @@ static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_tas
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
- list_del(&task->u.tk_wait.list);
+ else
+ list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
@@ -440,14 +449,28 @@ static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
/*
* Wake up a queued task while the queue lock is being held
*/
-static void rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
- struct rpc_wait_queue *queue, struct rpc_task *task)
+static struct rpc_task *
+rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue, struct rpc_task *task,
+ bool (*action)(struct rpc_task *, void *), void *data)
{
if (RPC_IS_QUEUED(task)) {
smp_rmb();
- if (task->tk_waitqueue == queue)
- __rpc_do_wake_up_task_on_wq(wq, queue, task);
+ if (task->tk_waitqueue == queue) {
+ if (action == NULL || action(task, data)) {
+ __rpc_do_wake_up_task_on_wq(wq, queue, task);
+ return task;
+ }
+ }
}
+ return NULL;
+}
+
+static void
+rpc_wake_up_task_on_wq_queue_locked(struct workqueue_struct *wq,
+ struct rpc_wait_queue *queue, struct rpc_task *task)
+{
+ rpc_wake_up_task_on_wq_queue_action_locked(wq, queue, task, NULL, NULL);
}
/*
@@ -465,6 +488,8 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
struct rpc_wait_queue *queue,
struct rpc_task *task)
{
+ if (!RPC_IS_QUEUED(task))
+ return;
spin_lock_bh(&queue->lock);
rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
spin_unlock_bh(&queue->lock);
@@ -475,12 +500,48 @@ void rpc_wake_up_queued_task_on_wq(struct workqueue_struct *wq,
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
+ if (!RPC_IS_QUEUED(task))
+ return;
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
+static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
+{
+ task->tk_status = *(int *)status;
+ return true;
+}
+
+static void
+rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
+ struct rpc_task *task, int status)
+{
+ rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
+ task, rpc_task_action_set_status, &status);
+}
+
+/**
+ * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
+ * @queue: pointer to rpc_wait_queue
+ * @task: pointer to rpc_task
+ * @status: integer error value
+ *
+ * If @task is queued on @queue, then it is woken up, and @task->tk_status is
+ * set to the value of @status.
+ */
+void
+rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
+ struct rpc_task *task, int status)
+{
+ if (!RPC_IS_QUEUED(task))
+ return;
+ spin_lock_bh(&queue->lock);
+ rpc_wake_up_task_queue_set_status_locked(queue, task, status);
+ spin_unlock_bh(&queue->lock);
+}
+
/*
* Wake up the next task on a priority queue.
*/
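rpc_wake_up_task_on_wq_queue_action_locked() threads an optional callback through the wake-up path so that a predicate, and any side effect such as setting tk_status, runs atomically with the wake-up under the queue lock. A toy, runnable model of that shape (the types are stand-ins for the rpc_task machinery):

#include <stdbool.h>
#include <stdio.h>

struct task { int status; bool queued; };

/* As rpc_task_action_set_status(): the side effect happens while the
 * (modeled) queue lock is held, so no wake-up can slip in between
 * setting the status and dequeueing the task. */
static bool action_set_status(struct task *t, void *data)
{
	t->status = *(int *)data;
	return true;			/* true means: do wake the task */
}

static struct task *wake_task_locked(struct task *t,
		bool (*action)(struct task *, void *), void *data)
{
	if (!t->queued)
		return NULL;
	if (action && !action(t, data))
		return NULL;
	t->queued = false;		/* the actual wake-up */
	return t;
}

int main(void)
{
	struct task t = { .status = 0, .queued = true };
	int err = -110;			/* -ETIMEDOUT */

	if (wake_task_locked(&t, action_set_status, &err))
		printf("woken, status %d\n", t.status);	/* -110 */
	return 0;
}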
@@ -493,17 +554,9 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
- if (!list_empty(q)) {
- task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
- if (queue->owner == task->tk_owner) {
- if (--queue->nr)
- goto out;
- list_move_tail(&task->u.tk_wait.list, q);
- }
- /*
- * Check if we need to switch queues.
- */
- goto new_owner;
+ if (!list_empty(q) && --queue->nr) {
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
+ goto out;
}
/*
@@ -515,7 +568,7 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
else
q = q - 1;
if (!list_empty(q)) {
- task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
+ task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
@@ -525,8 +578,6 @@ static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *q
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
-new_owner:
- rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
return task;
}
@@ -553,12 +604,9 @@ struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
task = __rpc_find_next_queued(queue);
- if (task != NULL) {
- if (func(task, data))
- rpc_wake_up_task_on_wq_queue_locked(wq, queue, task);
- else
- task = NULL;
- }
+ if (task != NULL)
+ task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
+ task, func, data);
spin_unlock_bh(&queue->lock);
return task;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index f217c348b341..9062967575c4 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -26,7 +26,8 @@
* Possibly called several times to iterate over an sk_buff and copy
* data out of it.
*/
-size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
+static size_t
+xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
{
if (len > desc->count)
len = desc->count;
@@ -36,7 +37,6 @@ size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
desc->offset += len;
return len;
}
-EXPORT_SYMBOL_GPL(xdr_skb_read_bits);
/**
* xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
@@ -69,7 +69,8 @@ static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to,
* @copy_actor: virtual method for copying data
*
*/
-ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
+static ssize_t
+xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
{
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
@@ -104,7 +105,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
/* ACL likes to be lazy in allocating pages - ACLs
* are small by default but can get huge. */
- if (unlikely(*ppage == NULL)) {
+ if ((xdr->flags & XDRBUF_SPARSE_PAGES) && *ppage == NULL) {
*ppage = alloc_page(GFP_ATOMIC);
if (unlikely(*ppage == NULL)) {
if (copied == 0)
@@ -140,7 +141,6 @@ copy_tail:
out:
return copied;
}
-EXPORT_SYMBOL_GPL(xdr_partial_copy_from_skb);
/**
* csum_partial_copy_to_xdr - checksum and copy data
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 5185efb9027b..87533fbb96cf 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -171,7 +171,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
mutex_init(&xprt->xpt_mutex);
spin_lock_init(&xprt->xpt_lock);
set_bit(XPT_BUSY, &xprt->xpt_flags);
- rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
xprt->xpt_net = get_net(net);
strcpy(xprt->xpt_remotebuf, "uninitialized");
}
@@ -895,7 +894,6 @@ int svc_send(struct svc_rqst *rqstp)
else
len = xprt->xpt_ops->xpo_sendto(rqstp);
mutex_unlock(&xprt->xpt_mutex);
- rpc_wake_up(&xprt->xpt_bc_pending);
trace_svc_send(rqstp, len);
svc_xprt_release(rqstp);
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 5445145e639c..db8bb6b3a2b0 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1004,7 +1004,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (!bc_xprt)
return -EAGAIN;
- spin_lock(&bc_xprt->recv_lock);
+ spin_lock(&bc_xprt->queue_lock);
req = xprt_lookup_rqst(bc_xprt, xid);
if (!req)
goto unlock_notfound;
@@ -1022,7 +1022,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
memcpy(dst->iov_base, src->iov_base, src->iov_len);
xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
rqstp->rq_arg.len = 0;
- spin_unlock(&bc_xprt->recv_lock);
+ spin_unlock(&bc_xprt->queue_lock);
return 0;
unlock_notfound:
printk(KERN_NOTICE
@@ -1031,7 +1031,7 @@ unlock_notfound:
__func__, ntohl(calldir),
bc_xprt, ntohl(xid));
unlock_eagain:
- spin_unlock(&bc_xprt->recv_lock);
+ spin_unlock(&bc_xprt->queue_lock);
return -EAGAIN;
}
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 30afbd236656..2bbb8d38d2bf 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
+#include <linux/bvec.h>
/*
* XDR functions for basic NFS types
@@ -128,6 +129,39 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
+size_t
+xdr_buf_pagecount(struct xdr_buf *buf)
+{
+ if (!buf->page_len)
+ return 0;
+ return (buf->page_base + buf->page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
+int
+xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp)
+{
+ size_t i, n = xdr_buf_pagecount(buf);
+
+ if (n != 0 && buf->bvec == NULL) {
+ buf->bvec = kmalloc_array(n, sizeof(buf->bvec[0]), gfp);
+ if (!buf->bvec)
+ return -ENOMEM;
+ for (i = 0; i < n; i++) {
+ buf->bvec[i].bv_page = buf->pages[i];
+ buf->bvec[i].bv_len = PAGE_SIZE;
+ buf->bvec[i].bv_offset = 0;
+ }
+ }
+ return 0;
+}
+
+void
+xdr_free_bvec(struct xdr_buf *buf)
+{
+ kfree(buf->bvec);
+ buf->bvec = NULL;
+}
+
void
xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
struct page **pages, unsigned int base, unsigned int len)
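
xdr_alloc_bvec() publishes the receive pages of an xdr_buf as a bio_vec array, so socket transports can feed them to iov_iter-based send and receive paths; xdr_free_bvec() is its teardown. The page count rounds the byte span starting at page_base up to a whole number of pages. Just that arithmetic, as a standalone program:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Mirror of xdr_buf_pagecount(): pages spanned by page_len bytes
     * that begin at offset page_base into the first page. */
    static size_t pagecount(size_t page_base, size_t page_len)
    {
        if (page_len == 0)
            return 0;
        return (page_base + page_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    }

    int main(void)
    {
        /* 100 bytes at offset 0 span 1 page; at offset 4090, 2 pages */
        printf("%zu %zu\n", pagecount(0, 100), pagecount(4090, 100));
        return 0;
    }

Note that every bio_vec is filled with bv_len = PAGE_SIZE and bv_offset = 0; the actual byte offsets are presumably applied later by whatever iov_iter walks the array.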
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index a8db2e3f8904..86bea4520c4d 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -68,8 +68,6 @@
static void xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32 xprt_alloc_xid(struct rpc_xprt *xprt);
static void xprt_connect_status(struct rpc_task *task);
-static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
-static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *);
static void xprt_destroy(struct rpc_xprt *xprt);
static DEFINE_SPINLOCK(xprt_list_lock);
@@ -171,6 +169,17 @@ out:
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
+static void xprt_clear_locked(struct rpc_xprt *xprt)
+{
+ xprt->snd_task = NULL;
+ if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
+ smp_mb__before_atomic();
+ clear_bit(XPRT_LOCKED, &xprt->state);
+ smp_mb__after_atomic();
+ } else
+ queue_work(xprtiod_workqueue, &xprt->task_cleanup);
+}
+
/**
* xprt_reserve_xprt - serialize write access to transports
* @task: task that is requesting access to the transport
@@ -183,44 +192,53 @@ EXPORT_SYMBOL_GPL(xprt_load_transport);
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- int priority;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
return 1;
goto out_sleep;
}
+ if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
+ goto out_unlock;
xprt->snd_task = task;
- if (req != NULL)
- req->rq_ntrans++;
return 1;
+out_unlock:
+ xprt_clear_locked(xprt);
out_sleep:
dprintk("RPC: %5u failed to lock transport %p\n",
task->tk_pid, xprt);
- task->tk_timeout = 0;
+ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
task->tk_status = -EAGAIN;
- if (req == NULL)
- priority = RPC_PRIORITY_LOW;
- else if (!req->rq_ntrans)
- priority = RPC_PRIORITY_NORMAL;
- else
- priority = RPC_PRIORITY_HIGH;
- rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+ rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
-static void xprt_clear_locked(struct rpc_xprt *xprt)
+static bool
+xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
- xprt->snd_task = NULL;
- if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
- smp_mb__before_atomic();
- clear_bit(XPRT_LOCKED, &xprt->state);
- smp_mb__after_atomic();
- } else
- queue_work(xprtiod_workqueue, &xprt->task_cleanup);
+ return test_bit(XPRT_CWND_WAIT, &xprt->state);
+}
+
+static void
+xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
+{
+ if (!list_empty(&xprt->xmit_queue)) {
+ /* Peek at head of queue to see if it can make progress */
+ if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
+ rq_xmit)->rq_cong)
+ return;
+ }
+ set_bit(XPRT_CWND_WAIT, &xprt->state);
+}
+
+static void
+xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
+{
+ if (!RPCXPRT_CONGESTED(xprt))
+ clear_bit(XPRT_CWND_WAIT, &xprt->state);
}
/*
@@ -230,11 +248,11 @@ static void xprt_clear_locked(struct rpc_xprt *xprt)
* Same as xprt_reserve_xprt, but Van Jacobson congestion control is
* integrated into the decision of whether a request is allowed to be
* woken up and given access to the transport.
+ * Note that the lock is only granted if we know there are free slots.
*/
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
- int priority;
if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
if (task == xprt->snd_task)
@@ -245,25 +263,19 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
xprt->snd_task = task;
return 1;
}
- if (__xprt_get_cong(xprt, task)) {
+ if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
+ goto out_unlock;
+ if (!xprt_need_congestion_window_wait(xprt)) {
xprt->snd_task = task;
- req->rq_ntrans++;
return 1;
}
+out_unlock:
xprt_clear_locked(xprt);
out_sleep:
- if (req)
- __xprt_put_cong(xprt, req);
dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
- task->tk_timeout = 0;
+ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
task->tk_status = -EAGAIN;
- if (req == NULL)
- priority = RPC_PRIORITY_LOW;
- else if (!req->rq_ntrans)
- priority = RPC_PRIORITY_NORMAL;
- else
- priority = RPC_PRIORITY_HIGH;
- rpc_sleep_on_priority(&xprt->sending, task, NULL, priority);
+ rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -272,6 +284,8 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
int retval;
+ if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
+ return 1;
spin_lock_bh(&xprt->transport_lock);
retval = xprt->ops->reserve_xprt(xprt, task);
spin_unlock_bh(&xprt->transport_lock);
@@ -281,12 +295,8 @@ static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
struct rpc_xprt *xprt = data;
- struct rpc_rqst *req;
- req = task->tk_rqstp;
xprt->snd_task = task;
- if (req)
- req->rq_ntrans++;
return true;
}
@@ -294,53 +304,30 @@ static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
-
+ if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
+ goto out_unlock;
if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
__xprt_lock_write_func, xprt))
return;
+out_unlock:
xprt_clear_locked(xprt);
}
-static bool __xprt_lock_write_cong_func(struct rpc_task *task, void *data)
-{
- struct rpc_xprt *xprt = data;
- struct rpc_rqst *req;
-
- req = task->tk_rqstp;
- if (req == NULL) {
- xprt->snd_task = task;
- return true;
- }
- if (__xprt_get_cong(xprt, task)) {
- xprt->snd_task = task;
- req->rq_ntrans++;
- return true;
- }
- return false;
-}
-
static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
return;
- if (RPCXPRT_CONGESTED(xprt))
+ if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
+ goto out_unlock;
+ if (xprt_need_congestion_window_wait(xprt))
goto out_unlock;
if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
- __xprt_lock_write_cong_func, xprt))
+ __xprt_lock_write_func, xprt))
return;
out_unlock:
xprt_clear_locked(xprt);
}
-static void xprt_task_clear_bytes_sent(struct rpc_task *task)
-{
- if (task != NULL) {
- struct rpc_rqst *req = task->tk_rqstp;
- if (req != NULL)
- req->rq_bytes_sent = 0;
- }
-}
-
/**
* xprt_release_xprt - allow other requests to use a transport
* @xprt: transport with other tasks potentially waiting
@@ -351,7 +338,6 @@ static void xprt_task_clear_bytes_sent(struct rpc_task *task)
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- xprt_task_clear_bytes_sent(task);
xprt_clear_locked(xprt);
__xprt_lock_write_next(xprt);
}
@@ -369,7 +355,6 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt);
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
if (xprt->snd_task == task) {
- xprt_task_clear_bytes_sent(task);
xprt_clear_locked(xprt);
__xprt_lock_write_next_cong(xprt);
}
@@ -378,6 +363,8 @@ EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
+ if (xprt->snd_task != task)
+ return;
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
spin_unlock_bh(&xprt->transport_lock);
@@ -388,16 +375,16 @@ static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *ta
* overflowed. Put the task to sleep if this is the case.
*/
static int
-__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
+__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
-
if (req->rq_cong)
return 1;
dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
- task->tk_pid, xprt->cong, xprt->cwnd);
- if (RPCXPRT_CONGESTED(xprt))
+ req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
+ if (RPCXPRT_CONGESTED(xprt)) {
+ xprt_set_congestion_window_wait(xprt);
return 0;
+ }
req->rq_cong = 1;
xprt->cong += RPC_CWNDSCALE;
return 1;
@@ -414,10 +401,32 @@ __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
return;
req->rq_cong = 0;
xprt->cong -= RPC_CWNDSCALE;
+ xprt_test_and_clear_congestion_window_wait(xprt);
__xprt_lock_write_next_cong(xprt);
}
/**
+ * xprt_request_get_cong - Request congestion control credits
+ * @xprt: pointer to transport
+ * @req: pointer to RPC request
+ *
+ * Useful for transports that require congestion control.
+ */
+bool
+xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+ bool ret = false;
+
+ if (req->rq_cong)
+ return true;
+ spin_lock_bh(&xprt->transport_lock);
+ ret = __xprt_get_cong(xprt, req) != 0;
+ spin_unlock_bh(&xprt->transport_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xprt_request_get_cong);
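
Transports that need congestion control now take a credit explicitly before transmitting. The RDMA backchannel hunk later in this patch is the in-tree example:

    if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
        return -EBADSLT;        /* no credit; the send fails for now */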
+
+/**
* xprt_release_rqst_cong - housekeeping when request is complete
* @task: RPC request that recently completed
*
@@ -431,6 +440,20 @@ void xprt_release_rqst_cong(struct rpc_task *task)
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
+/*
+ * Clear the congestion window wait flag and wake up the next
+ * entry on xprt->sending
+ */
+static void
+xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
+{
+ if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
+ spin_lock_bh(&xprt->transport_lock);
+ __xprt_lock_write_next_cong(xprt);
+ spin_unlock_bh(&xprt->transport_lock);
+ }
+}
+
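Between __xprt_get_cong(), __xprt_put_cong() and the new XPRT_CWND_WAIT bit, this amounts to a credit scheme: each request in flight holds RPC_CWNDSCALE units of xprt->cong, and once cong reaches cwnd new senders set the wait bit and queue until a credit is returned. A compilable userspace model of the accounting (the constants mirror RPC_CWNDSHIFT/RPC_CWNDSCALE but are assumptions here, not authoritative):

    #include <stdbool.h>
    #include <stdio.h>

    #define CWNDSHIFT 8U
    #define CWNDSCALE (1U << CWNDSHIFT)       /* one credit */

    struct xprt_sim { unsigned long cong, cwnd; bool cwnd_wait; };

    static bool congested(const struct xprt_sim *x)
    {
        return x->cong >= x->cwnd;            /* cf. RPCXPRT_CONGESTED() */
    }

    static bool get_cong(struct xprt_sim *x)
    {
        if (congested(x)) {
            x->cwnd_wait = true;              /* new senders must queue */
            return false;
        }
        x->cong += CWNDSCALE;
        return true;
    }

    static void put_cong(struct xprt_sim *x)
    {
        x->cong -= CWNDSCALE;
        if (!congested(x))
            x->cwnd_wait = false;             /* next sender may proceed */
    }

    int main(void)
    {
        struct xprt_sim x = { .cong = 0, .cwnd = 2 * CWNDSCALE };

        printf("%d %d %d\n", get_cong(&x), get_cong(&x), get_cong(&x));
        put_cong(&x);                         /* one credit back */
        printf("wait=%d\n", (int)x.cwnd_wait);
        return 0;
    }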
/**
* xprt_adjust_cwnd - adjust transport congestion window
* @xprt: pointer to xprt
@@ -488,39 +511,46 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
/**
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
- * @task: task to be put to sleep
- * @action: function pointer to be executed after wait
+ * @xprt: transport
*
* Note that we only set the timer for the case of RPC_IS_SOFT(), since
* we don't in general want to force a socket disconnection due to
* an incomplete RPC call transmission.
*/
-void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
+void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
- struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_xprt *xprt = req->rq_xprt;
-
- task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
- rpc_sleep_on(&xprt->pending, task, action);
+ set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
+static bool
+xprt_clear_write_space_locked(struct rpc_xprt *xprt)
+{
+ if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
+ __xprt_lock_write_next(xprt);
+ dprintk("RPC: write space: waking waiting task on "
+ "xprt %p\n", xprt);
+ return true;
+ }
+ return false;
+}
+
/**
* xprt_write_space - wake the task waiting for transport output buffer space
* @xprt: transport with waiting tasks
*
* Can be called in a soft IRQ context, so xprt_write_space never sleeps.
*/
-void xprt_write_space(struct rpc_xprt *xprt)
+bool xprt_write_space(struct rpc_xprt *xprt)
{
+ bool ret;
+
+ if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
+ return false;
spin_lock_bh(&xprt->transport_lock);
- if (xprt->snd_task) {
- dprintk("RPC: write space: waking waiting task on "
- "xprt %p\n", xprt);
- rpc_wake_up_queued_task_on_wq(xprtiod_workqueue,
- &xprt->pending, xprt->snd_task);
- }
+ ret = xprt_clear_write_space_locked(xprt);
spin_unlock_bh(&xprt->transport_lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);
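
The waiting side and the waking side now meet only at the XPRT_WRITE_SPACE bit: the send path records that it ran out of socket buffer, and the socket's write-space callback clears the bit and wakes the next queued sender. Schematically (a sketch in kernel style, not verbatim transport code; the example_ names are hypothetical):

    /* transport send path, on a full socket buffer: */
    static int example_send_path(struct rpc_xprt *xprt, int sent)
    {
        if (sent == -EAGAIN)
            xprt_wait_for_buffer_space(xprt); /* just sets the bit */
        return sent;
    }

    /* socket ->sk_write_space callback, once space frees up: */
    static void example_write_space(struct rpc_xprt *xprt)
    {
        if (xprt_write_space(xprt))
            return;  /* the bit was set; a queued sender gets woken */
    }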
@@ -631,6 +661,7 @@ void xprt_disconnect_done(struct rpc_xprt *xprt)
dprintk("RPC: disconnected transport %p\n", xprt);
spin_lock_bh(&xprt->transport_lock);
xprt_clear_connected(xprt);
+ xprt_clear_write_space_locked(xprt);
xprt_wake_pending_tasks(xprt, -EAGAIN);
spin_unlock_bh(&xprt->transport_lock);
}
@@ -654,6 +685,22 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
+static unsigned int
+xprt_connect_cookie(struct rpc_xprt *xprt)
+{
+ return READ_ONCE(xprt->connect_cookie);
+}
+
+static bool
+xprt_request_retransmit_after_disconnect(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
+ !xprt_connected(xprt);
+}
+
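The connect cookie gives retransmit detection without per-request byte counters: the transport bumps connect_cookie on every (re)connect, each request records the cookie it was sent under, and a mismatch (or a transport that is simply down) means the request must go out again. As a userspace model:

    #include <stdbool.h>
    #include <stdio.h>

    struct sim_xprt { unsigned int connect_cookie; bool connected; };
    struct sim_req  { unsigned int rq_connect_cookie; };

    static bool need_retransmit(const struct sim_req *req,
                                const struct sim_xprt *x)
    {
        return req->rq_connect_cookie != x->connect_cookie ||
               !x->connected;
    }

    int main(void)
    {
        struct sim_xprt x = { .connect_cookie = 7, .connected = true };
        struct sim_req  r = { .rq_connect_cookie = 7 };

        printf("%d\n", need_retransmit(&r, &x));  /* 0: same epoch */
        x.connect_cookie++;                       /* reconnected */
        printf("%d\n", need_retransmit(&r, &x));  /* 1: stale cookie */
        return 0;
    }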
/**
* xprt_conditional_disconnect - force a transport to disconnect
* @xprt: transport to disconnect
@@ -692,7 +739,7 @@ static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
__must_hold(&xprt->transport_lock)
{
- if (list_empty(&xprt->recv) && xprt_has_timer(xprt))
+ if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}
@@ -702,7 +749,7 @@ xprt_init_autodisconnect(struct timer_list *t)
struct rpc_xprt *xprt = from_timer(xprt, t, timer);
spin_lock(&xprt->transport_lock);
- if (!list_empty(&xprt->recv))
+ if (!RB_EMPTY_ROOT(&xprt->recv_queue))
goto out_abort;
/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
xprt->last_used = jiffies;
@@ -726,7 +773,6 @@ bool xprt_lock_connect(struct rpc_xprt *xprt,
goto out;
if (xprt->snd_task != task)
goto out;
- xprt_task_clear_bytes_sent(task);
xprt->snd_task = cookie;
ret = true;
out:
@@ -772,7 +818,6 @@ void xprt_connect(struct rpc_task *task)
xprt->ops->close(xprt);
if (!xprt_connected(xprt)) {
- task->tk_rqstp->rq_bytes_sent = 0;
task->tk_timeout = task->tk_rqstp->rq_timeout;
task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
@@ -789,17 +834,11 @@ void xprt_connect(struct rpc_task *task)
static void xprt_connect_status(struct rpc_task *task)
{
- struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
-
- if (task->tk_status == 0) {
- xprt->stat.connect_count++;
- xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
+ switch (task->tk_status) {
+ case 0:
dprintk("RPC: %5u xprt_connect_status: connection established\n",
task->tk_pid);
- return;
- }
-
- switch (task->tk_status) {
+ break;
case -ECONNREFUSED:
case -ECONNRESET:
case -ECONNABORTED:
@@ -816,28 +855,97 @@ static void xprt_connect_status(struct rpc_task *task)
default:
dprintk("RPC: %5u xprt_connect_status: error %d connecting to "
"server %s\n", task->tk_pid, -task->tk_status,
- xprt->servername);
+ task->tk_rqstp->rq_xprt->servername);
task->tk_status = -EIO;
}
}
+enum xprt_xid_rb_cmp {
+ XID_RB_EQUAL,
+ XID_RB_LEFT,
+ XID_RB_RIGHT,
+};
+static enum xprt_xid_rb_cmp
+xprt_xid_cmp(__be32 xid1, __be32 xid2)
+{
+ if (xid1 == xid2)
+ return XID_RB_EQUAL;
+ if ((__force u32)xid1 < (__force u32)xid2)
+ return XID_RB_LEFT;
+ return XID_RB_RIGHT;
+}
+
+static struct rpc_rqst *
+xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
+{
+ struct rb_node *n = xprt->recv_queue.rb_node;
+ struct rpc_rqst *req;
+
+ while (n != NULL) {
+ req = rb_entry(n, struct rpc_rqst, rq_recv);
+ switch (xprt_xid_cmp(xid, req->rq_xid)) {
+ case XID_RB_LEFT:
+ n = n->rb_left;
+ break;
+ case XID_RB_RIGHT:
+ n = n->rb_right;
+ break;
+ case XID_RB_EQUAL:
+ return req;
+ }
+ }
+ return NULL;
+}
+
+static void
+xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
+{
+ struct rb_node **p = &xprt->recv_queue.rb_node;
+ struct rb_node *n = NULL;
+ struct rpc_rqst *req;
+
+ while (*p != NULL) {
+ n = *p;
+ req = rb_entry(n, struct rpc_rqst, rq_recv);
+ switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
+ case XID_RB_LEFT:
+ p = &n->rb_left;
+ break;
+ case XID_RB_RIGHT:
+ p = &n->rb_right;
+ break;
+ case XID_RB_EQUAL:
+ WARN_ON_ONCE(new != req);
+ return;
+ }
+ }
+ rb_link_node(&new->rq_recv, n, p);
+ rb_insert_color(&new->rq_recv, &xprt->recv_queue);
+}
+
+static void
+xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
+{
+ rb_erase(&req->rq_recv, &xprt->recv_queue);
+}
+
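The receive queue becomes an rb-tree keyed on XID, so matching a reply drops from a linear scan to O(log n) when many requests are outstanding. The key is just the big-endian XID compared as a raw u32; the ordering carries no meaning beyond being total. The same three-way comparator drives an ordinary binary search:

    #include <stdio.h>
    #include <stdint.h>

    enum cmp { EQ, LEFT, RIGHT };

    static enum cmp xid_cmp(uint32_t a, uint32_t b)
    {
        if (a == b)
            return EQ;
        return a < b ? LEFT : RIGHT;
    }

    static int find(const uint32_t *sorted, int n, uint32_t xid)
    {
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
            int mid = lo + (hi - lo) / 2;

            switch (xid_cmp(xid, sorted[mid])) {
            case EQ:    return mid;
            case LEFT:  hi = mid - 1; break;
            case RIGHT: lo = mid + 1; break;
            }
        }
        return -1;
    }

    int main(void)
    {
        uint32_t xids[] = { 3, 17, 42, 99 };

        printf("%d %d\n", find(xids, 4, 42), find(xids, 4, 5)); /* 2 -1 */
        return 0;
    }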
/**
* xprt_lookup_rqst - find an RPC request corresponding to an XID
* @xprt: transport on which the original request was transmitted
* @xid: RPC XID of incoming reply
*
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
*/
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
struct rpc_rqst *entry;
- list_for_each_entry(entry, &xprt->recv, rq_list)
- if (entry->rq_xid == xid) {
- trace_xprt_lookup_rqst(xprt, xid, 0);
- entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
- return entry;
- }
+ entry = xprt_request_rb_find(xprt, xid);
+ if (entry != NULL) {
+ trace_xprt_lookup_rqst(xprt, xid, 0);
+ entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
+ return entry;
+ }
dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
ntohl(xid));
@@ -847,16 +955,22 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
+static bool
+xprt_is_pinned_rqst(struct rpc_rqst *req)
+{
+ return atomic_read(&req->rq_pin) != 0;
+}
+
/**
* xprt_pin_rqst - Pin a request on the transport receive list
* @req: Request to pin
*
* Caller must ensure this is atomic with the call to xprt_lookup_rqst()
- * so should be holding the xprt transport lock.
+ * so should be holding the xprt receive lock.
*/
void xprt_pin_rqst(struct rpc_rqst *req)
{
- set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate);
+ atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);
@@ -864,38 +978,87 @@ EXPORT_SYMBOL_GPL(xprt_pin_rqst);
* xprt_unpin_rqst - Unpin a request on the transport receive list
* @req: Request to unpin
*
- * Caller should be holding the xprt transport lock.
+ * Caller should be holding the xprt receive lock.
*/
void xprt_unpin_rqst(struct rpc_rqst *req)
{
- struct rpc_task *task = req->rq_task;
-
- clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate);
- if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate))
- wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV);
+ if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
+ atomic_dec(&req->rq_pin);
+ return;
+ }
+ if (atomic_dec_and_test(&req->rq_pin))
+ wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
-__must_hold(&req->rq_xprt->recv_lock)
{
- struct rpc_task *task = req->rq_task;
+ wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
+}
- if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
- spin_unlock(&req->rq_xprt->recv_lock);
- set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
- wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
- TASK_UNINTERRUPTIBLE);
- clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
- spin_lock(&req->rq_xprt->recv_lock);
- }
+static bool
+xprt_request_data_received(struct rpc_task *task)
+{
+ return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
+ READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
+}
+
+static bool
+xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
+{
+ return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
+ READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
+}
+
+/**
+ * xprt_request_enqueue_receive - Add a request to the receive queue
+ * @task: RPC task
+ *
+ */
+void
+xprt_request_enqueue_receive(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (!xprt_request_need_enqueue_receive(task, req))
+ return;
+ spin_lock(&xprt->queue_lock);
+
+ /* Update the softirq receive buffer */
+ memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+ sizeof(req->rq_private_buf));
+
+ /* Add request to the receive list */
+ xprt_request_rb_insert(xprt, req);
+ set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
+ spin_unlock(&xprt->queue_lock);
+
+ xprt_reset_majortimeo(req);
+ /* Turn off autodisconnect */
+ del_singleshot_timer_sync(&xprt->timer);
+}
+
+/**
+ * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
+ * @task: RPC task
+ *
+ * Caller must hold xprt->queue_lock.
+ */
+static void
+xprt_request_dequeue_receive_locked(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
+ xprt_request_rb_remove(req->rq_xprt, req);
}
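
Pinning also changes representation: the single RPC_TASK_MSG_RECV bit becomes the counter rq_pin, so multiple paths can hold the request across queue_lock drops at the same time, and only the final unpin wakes a task parked in xprt_wait_on_pinned_rqst(). Modelled with C11 atomics (the kernel sleeps in wait_var_event()/wake_up_var(); the spin below is only for illustration):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sim_req { atomic_int rq_pin; };

    static void pin(struct sim_req *r)
    {
        atomic_fetch_add(&r->rq_pin, 1);
    }

    /* Returns true when the last pin is released; the kernel would
     * call wake_up_var(&rq_pin) at that point. */
    static bool unpin(struct sim_req *r)
    {
        return atomic_fetch_sub(&r->rq_pin, 1) == 1;
    }

    static void wait_unpinned(struct sim_req *r)
    {
        while (atomic_load(&r->rq_pin) != 0)
            ;   /* kernel: wait_var_event(&rq_pin, pin count == 0) */
    }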
/**
* xprt_update_rtt - Update RPC RTT statistics
* @task: RPC request that recently completed
*
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
*/
void xprt_update_rtt(struct rpc_task *task)
{
@@ -917,7 +1080,7 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
* @task: RPC request that recently completed
* @copied: actual number of bytes received from the transport
*
- * Caller holds xprt->recv_lock.
+ * Caller holds xprt->queue_lock.
*/
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
@@ -930,12 +1093,12 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
xprt->stat.recvs++;
- list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
/* Ensure all writes are done before we update */
/* req->rq_reply_bytes_recvd */
smp_wmb();
req->rq_reply_bytes_recvd = copied;
+ xprt_request_dequeue_receive_locked(task);
rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
@@ -957,6 +1120,172 @@ static void xprt_timer(struct rpc_task *task)
}
/**
+ * xprt_request_wait_receive - wait for the reply to an RPC request
+ * @task: RPC task about to send a request
+ *
+ */
+void xprt_request_wait_receive(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
+ return;
+ /*
+ * Sleep on the pending queue if we're expecting a reply.
+ * The spinlock ensures atomicity between the test of
+ * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
+ */
+ spin_lock(&xprt->queue_lock);
+ if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
+ xprt->ops->set_retrans_timeout(task);
+ rpc_sleep_on(&xprt->pending, task, xprt_timer);
+ /*
+ * Send an extra queue wakeup call if the
+ * connection was dropped in case the call to
+ * rpc_sleep_on() raced.
+ */
+ if (xprt_request_retransmit_after_disconnect(task))
+ rpc_wake_up_queued_task_set_status(&xprt->pending,
+ task, -ENOTCONN);
+ }
+ spin_unlock(&xprt->queue_lock);
+}
+
+static bool
+xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
+{
+ return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
+}
+
+/**
+ * xprt_request_enqueue_transmit - queue a task for transmission
+ * @task: pointer to rpc_task
+ *
+ * Add a task to the transmission queue.
+ */
+void
+xprt_request_enqueue_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *pos, *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (xprt_request_need_enqueue_transmit(task, req)) {
+ spin_lock(&xprt->queue_lock);
+ /*
+ * Requests that carry congestion control credits are added
+ * to the head of the list to avoid starvation issues.
+ */
+ if (req->rq_cong) {
+ xprt_clear_congestion_window_wait(xprt);
+ list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+ if (pos->rq_cong)
+ continue;
+ /* Note: req is added _before_ pos */
+ list_add_tail(&req->rq_xmit, &pos->rq_xmit);
+ INIT_LIST_HEAD(&req->rq_xmit2);
+ goto out;
+ }
+ } else if (RPC_IS_SWAPPER(task)) {
+ list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+ if (pos->rq_cong || pos->rq_bytes_sent)
+ continue;
+ if (RPC_IS_SWAPPER(pos->rq_task))
+ continue;
+ /* Note: req is added _before_ pos */
+ list_add_tail(&req->rq_xmit, &pos->rq_xmit);
+ INIT_LIST_HEAD(&req->rq_xmit2);
+ goto out;
+ }
+ } else {
+ list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
+ if (pos->rq_task->tk_owner != task->tk_owner)
+ continue;
+ list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
+ INIT_LIST_HEAD(&req->rq_xmit);
+ goto out;
+ }
+ }
+ list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
+ INIT_LIST_HEAD(&req->rq_xmit2);
+out:
+ set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
+ spin_unlock(&xprt->queue_lock);
+ }
+}
+
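Enqueueing applies three placement policies before falling back to plain FIFO: requests holding a congestion credit go ahead of creditless ones, swapper (memory-reclaim) requests go ahead of ordinary traffic, and requests sharing a tk_owner batch behind their group's head via the secondary rq_xmit2 list. The decision on its own (the enum and helper are hypothetical, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    enum xmit_pos {
        XMIT_BEFORE_CREDITLESS,  /* holds a congestion credit */
        XMIT_BEFORE_ORDINARY,    /* swap-out I/O */
        XMIT_BEHIND_GROUP_HEAD,  /* same tk_owner already queued */
        XMIT_TAIL,               /* plain FIFO */
    };

    static enum xmit_pos classify(bool has_cong_credit, bool is_swapper,
                                  bool owner_already_queued)
    {
        if (has_cong_credit)
            return XMIT_BEFORE_CREDITLESS;
        if (is_swapper)
            return XMIT_BEFORE_ORDINARY;
        if (owner_already_queued)
            return XMIT_BEHIND_GROUP_HEAD;
        return XMIT_TAIL;
    }

    int main(void)
    {
        printf("%d\n", classify(false, false, true));  /* 2 */
        return 0;
    }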
+/**
+ * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
+ * @task: pointer to rpc_task
+ *
+ * Remove a task from the transmission queue
+ * Caller must hold xprt->queue_lock
+ */
+static void
+xprt_request_dequeue_transmit_locked(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ return;
+ if (!list_empty(&req->rq_xmit)) {
+ list_del(&req->rq_xmit);
+ if (!list_empty(&req->rq_xmit2)) {
+ struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
+ struct rpc_rqst, rq_xmit2);
+ list_del(&req->rq_xmit2);
+ list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
+ }
+ } else
+ list_del(&req->rq_xmit2);
+}
+
+/**
+ * xprt_request_dequeue_transmit - remove a task from the transmission queue
+ * @task: pointer to rpc_task
+ *
+ * Remove a task from the transmission queue
+ */
+static void
+xprt_request_dequeue_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ spin_lock(&xprt->queue_lock);
+ xprt_request_dequeue_transmit_locked(task);
+ spin_unlock(&xprt->queue_lock);
+}
+
+/**
+ * xprt_request_prepare - prepare an encoded request for transport
+ * @req: pointer to rpc_rqst
+ *
+ * Calls into the transport layer to do whatever is needed to prepare
+ * the request for transmission or receive.
+ */
+void
+xprt_request_prepare(struct rpc_rqst *req)
+{
+ struct rpc_xprt *xprt = req->rq_xprt;
+
+ if (xprt->ops->prepare_request)
+ xprt->ops->prepare_request(req);
+}
+
+/**
+ * xprt_request_need_retransmit - Test if a task needs retransmission
+ * @task: pointer to rpc_task
+ *
+ * Test for whether a connection breakage requires the task to retransmit
+ */
+bool
+xprt_request_need_retransmit(struct rpc_task *task)
+{
+ return xprt_request_retransmit_after_disconnect(task);
+}
+
+/**
* xprt_prepare_transmit - reserve the transport before sending a request
* @task: RPC task about to send a request
*
@@ -965,32 +1294,18 @@ bool xprt_prepare_transmit(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- bool ret = false;
dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
- spin_lock_bh(&xprt->transport_lock);
- if (!req->rq_bytes_sent) {
- if (req->rq_reply_bytes_recvd) {
- task->tk_status = req->rq_reply_bytes_recvd;
- goto out_unlock;
- }
- if ((task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
- && xprt_connected(xprt)
- && req->rq_connect_cookie == xprt->connect_cookie) {
- xprt->ops->set_retrans_timeout(task);
- rpc_sleep_on(&xprt->pending, task, xprt_timer);
- goto out_unlock;
- }
- }
- if (!xprt->ops->reserve_xprt(xprt, task)) {
- task->tk_status = -EAGAIN;
- goto out_unlock;
+ if (!xprt_lock_write(xprt, task)) {
+ /* Race breaker: someone may have transmitted us */
+ if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ rpc_wake_up_queued_task_set_status(&xprt->sending,
+ task, 0);
+ return false;
+
}
- ret = true;
-out_unlock:
- spin_unlock_bh(&xprt->transport_lock);
- return ret;
+ return true;
}
void xprt_end_transmit(struct rpc_task *task)
@@ -999,54 +1314,62 @@ void xprt_end_transmit(struct rpc_task *task)
}
/**
- * xprt_transmit - send an RPC request on a transport
- * @task: controlling RPC task
+ * xprt_request_transmit - send an RPC request on a transport
+ * @req: pointer to request to transmit
+ * @snd_task: RPC task that owns the transport lock
*
- * We have to copy the iovec because sendmsg fiddles with its contents.
+ * This performs the transmission of a single request.
+ * Note that if the request is not the same as snd_task, then it
+ * does need to be pinned.
+ * Returns '0' on success.
*/
-void xprt_transmit(struct rpc_task *task)
+static int
+xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
- struct rpc_rqst *req = task->tk_rqstp;
- struct rpc_xprt *xprt = req->rq_xprt;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ struct rpc_task *task = req->rq_task;
unsigned int connect_cookie;
+ int is_retrans = RPC_WAS_SENT(task);
int status;
dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
- if (!req->rq_reply_bytes_recvd) {
- if (list_empty(&req->rq_list) && rpc_reply_expected(task)) {
- /*
- * Add to the list only if we're expecting a reply
- */
- /* Update the softirq receive buffer */
- memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
- sizeof(req->rq_private_buf));
- /* Add request to the receive list */
- spin_lock(&xprt->recv_lock);
- list_add_tail(&req->rq_list, &xprt->recv);
- spin_unlock(&xprt->recv_lock);
- xprt_reset_majortimeo(req);
- /* Turn off autodisconnect */
- del_singleshot_timer_sync(&xprt->timer);
+ if (!req->rq_bytes_sent) {
+ if (xprt_request_data_received(task)) {
+ status = 0;
+ goto out_dequeue;
}
- } else if (!req->rq_bytes_sent)
- return;
+ /* Verify that our message lies in the RPCSEC_GSS window */
+ if (rpcauth_xmit_need_reencode(task)) {
+ status = -EBADMSG;
+ goto out_dequeue;
+ }
+ }
+
+ /*
+ * Update req->rq_ntrans before transmitting to avoid races with
+ * xprt_update_rtt(), which needs to know that it is recording a
+ * reply to the first transmission.
+ */
+ req->rq_ntrans++;
connect_cookie = xprt->connect_cookie;
- status = xprt->ops->send_request(task);
+ status = xprt->ops->send_request(req);
trace_xprt_transmit(xprt, req->rq_xid, status);
if (status != 0) {
- task->tk_status = status;
- return;
+ req->rq_ntrans--;
+ return status;
}
+
+ if (is_retrans)
+ task->tk_client->cl_stats->rpcretrans++;
+
xprt_inject_disconnect(xprt);
dprintk("RPC: %5u xmit complete\n", task->tk_pid);
task->tk_flags |= RPC_TASK_SENT;
spin_lock_bh(&xprt->transport_lock);
- xprt->ops->set_retrans_timeout(task);
-
xprt->stat.sends++;
xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
xprt->stat.bklog_u += xprt->backlog.qlen;
@@ -1055,25 +1378,49 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
req->rq_connect_cookie = connect_cookie;
- if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) {
- /*
- * Sleep on the pending queue if we're expecting a reply.
- * The spinlock ensures atomicity between the test of
- * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
- */
- spin_lock(&xprt->recv_lock);
- if (!req->rq_reply_bytes_recvd) {
- rpc_sleep_on(&xprt->pending, task, xprt_timer);
- /*
- * Send an extra queue wakeup call if the
- * connection was dropped in case the call to
- * rpc_sleep_on() raced.
- */
- if (!xprt_connected(xprt))
- xprt_wake_pending_tasks(xprt, -ENOTCONN);
- }
- spin_unlock(&xprt->recv_lock);
+out_dequeue:
+ xprt_request_dequeue_transmit(task);
+ rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
+ return status;
+}
+
+/**
+ * xprt_transmit - send an RPC request on a transport
+ * @task: controlling RPC task
+ *
+ * Attempts to drain the transmit queue. On exit, either the transport
+ * signalled an error that needs to be handled before transmission can
+ * resume, or @task finished transmitting and detected that it already
+ * received a reply.
+ */
+void
+xprt_transmit(struct rpc_task *task)
+{
+ struct rpc_rqst *next, *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
+ int status;
+
+ spin_lock(&xprt->queue_lock);
+ while (!list_empty(&xprt->xmit_queue)) {
+ next = list_first_entry(&xprt->xmit_queue,
+ struct rpc_rqst, rq_xmit);
+ xprt_pin_rqst(next);
+ spin_unlock(&xprt->queue_lock);
+ status = xprt_request_transmit(next, task);
+ if (status == -EBADMSG && next != req)
+ status = 0;
+ cond_resched();
+ spin_lock(&xprt->queue_lock);
+ xprt_unpin_rqst(next);
+ if (status == 0) {
+ if (!xprt_request_data_received(task) ||
+ test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ continue;
+ } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+ task->tk_status = status;
+ break;
}
+ spin_unlock(&xprt->queue_lock);
}
static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
@@ -1170,20 +1517,6 @@ out_init_req:
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
-void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
-{
- /* Note: grabbing the xprt_lock_write() ensures that we throttle
- * new slot allocation if the transport is congested (i.e. when
- * reconnecting a stream transport or when out of socket write
- * buffer space).
- */
- if (xprt_lock_write(xprt, task)) {
- xprt_alloc_slot(xprt, task);
- xprt_release_write(xprt, task);
- }
-}
-EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
-
void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
spin_lock(&xprt->reserve_lock);
@@ -1250,6 +1583,60 @@ void xprt_free(struct rpc_xprt *xprt)
}
EXPORT_SYMBOL_GPL(xprt_free);
+static void
+xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
+{
+ req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
+}
+
+static __be32
+xprt_alloc_xid(struct rpc_xprt *xprt)
+{
+ __be32 xid;
+
+ spin_lock(&xprt->reserve_lock);
+ xid = (__force __be32)xprt->xid++;
+ spin_unlock(&xprt->reserve_lock);
+ return xid;
+}
+
+static void
+xprt_init_xid(struct rpc_xprt *xprt)
+{
+ xprt->xid = prandom_u32();
+}
+
+static void
+xprt_request_init(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt = task->tk_xprt;
+ struct rpc_rqst *req = task->tk_rqstp;
+
+ req->rq_timeout = task->tk_client->cl_timeout->to_initval;
+ req->rq_task = task;
+ req->rq_xprt = xprt;
+ req->rq_buffer = NULL;
+ req->rq_xid = xprt_alloc_xid(xprt);
+ xprt_init_connect_cookie(req, xprt);
+ req->rq_bytes_sent = 0;
+ req->rq_snd_buf.len = 0;
+ req->rq_snd_buf.buflen = 0;
+ req->rq_rcv_buf.len = 0;
+ req->rq_rcv_buf.buflen = 0;
+ req->rq_release_snd_buf = NULL;
+ xprt_reset_majortimeo(req);
+ dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
+ req, ntohl(req->rq_xid));
+}
+
+static void
+xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ xprt->ops->alloc_slot(xprt, task);
+ if (task->tk_rqstp != NULL)
+ xprt_request_init(task);
+}
+
/**
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
@@ -1269,7 +1656,7 @@ void xprt_reserve(struct rpc_task *task)
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (!xprt_throttle_congested(xprt, task))
- xprt->ops->alloc_slot(xprt, task);
+ xprt_do_reserve(xprt, task);
}
/**
@@ -1291,45 +1678,29 @@ void xprt_retry_reserve(struct rpc_task *task)
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
- xprt->ops->alloc_slot(xprt, task);
-}
-
-static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
-{
- __be32 xid;
-
- spin_lock(&xprt->reserve_lock);
- xid = (__force __be32)xprt->xid++;
- spin_unlock(&xprt->reserve_lock);
- return xid;
+ xprt_do_reserve(xprt, task);
}
-static inline void xprt_init_xid(struct rpc_xprt *xprt)
-{
- xprt->xid = prandom_u32();
-}
-
-void xprt_request_init(struct rpc_task *task)
+static void
+xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
- struct rpc_xprt *xprt = task->tk_xprt;
- struct rpc_rqst *req = task->tk_rqstp;
+ struct rpc_xprt *xprt = req->rq_xprt;
- INIT_LIST_HEAD(&req->rq_list);
- req->rq_timeout = task->tk_client->cl_timeout->to_initval;
- req->rq_task = task;
- req->rq_xprt = xprt;
- req->rq_buffer = NULL;
- req->rq_xid = xprt_alloc_xid(xprt);
- req->rq_connect_cookie = xprt->connect_cookie - 1;
- req->rq_bytes_sent = 0;
- req->rq_snd_buf.len = 0;
- req->rq_snd_buf.buflen = 0;
- req->rq_rcv_buf.len = 0;
- req->rq_rcv_buf.buflen = 0;
- req->rq_release_snd_buf = NULL;
- xprt_reset_majortimeo(req);
- dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
- req, ntohl(req->rq_xid));
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
+ test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
+ xprt_is_pinned_rqst(req)) {
+ spin_lock(&xprt->queue_lock);
+ xprt_request_dequeue_transmit_locked(task);
+ xprt_request_dequeue_receive_locked(task);
+ while (xprt_is_pinned_rqst(req)) {
+ set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+ spin_unlock(&xprt->queue_lock);
+ xprt_wait_on_pinned_rqst(req);
+ spin_lock(&xprt->queue_lock);
+ clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
+ }
+ spin_unlock(&xprt->queue_lock);
+ }
}
/**
@@ -1345,8 +1716,7 @@ void xprt_release(struct rpc_task *task)
if (req == NULL) {
if (task->tk_client) {
xprt = task->tk_xprt;
- if (xprt->snd_task == task)
- xprt_release_write(xprt, task);
+ xprt_release_write(xprt, task);
}
return;
}
@@ -1356,12 +1726,7 @@ void xprt_release(struct rpc_task *task)
task->tk_ops->rpc_count_stats(task, task->tk_calldata);
else if (task->tk_client)
rpc_count_iostats(task, task->tk_client->cl_metrics);
- spin_lock(&xprt->recv_lock);
- if (!list_empty(&req->rq_list)) {
- list_del_init(&req->rq_list);
- xprt_wait_on_pinned_rqst(req);
- }
- spin_unlock(&xprt->recv_lock);
+ xprt_request_dequeue_all(task, req);
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
if (xprt->ops->release_request)
@@ -1372,6 +1737,7 @@ void xprt_release(struct rpc_task *task)
if (req->rq_buffer)
xprt->ops->buf_free(task);
xprt_inject_disconnect(xprt);
+ xdr_free_bvec(&req->rq_rcv_buf);
if (req->rq_cred != NULL)
put_rpccred(req->rq_cred);
task->tk_rqstp = NULL;
@@ -1385,16 +1751,36 @@ void xprt_release(struct rpc_task *task)
xprt_free_bc_request(req);
}
+#ifdef CONFIG_SUNRPC_BACKCHANNEL
+void
+xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
+{
+ struct xdr_buf *xbufp = &req->rq_snd_buf;
+
+ task->tk_rqstp = req;
+ req->rq_task = task;
+ xprt_init_connect_cookie(req, req->rq_xprt);
+ /*
+ * Set up the xdr_buf length.
+ * This also indicates that the buffer is XDR encoded already.
+ */
+ xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
+ xbufp->tail[0].iov_len;
+ req->rq_bytes_sent = 0;
+}
+#endif
+
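xprt_init_bc_request() marks a backchannel reply as already XDR-encoded by setting the xdr_buf's total length to the sum of its three regions. The rule in isolation:

    #include <stdio.h>

    struct kvec_sim { size_t iov_len; };

    struct xdr_buf_sim {
        struct kvec_sim head[1], tail[1];
        size_t page_len, len;
    };

    /* len = head + pages + tail; setting it marks the buffer encoded */
    static void set_encoded_len(struct xdr_buf_sim *b)
    {
        b->len = b->head[0].iov_len + b->page_len + b->tail[0].iov_len;
    }

    int main(void)
    {
        struct xdr_buf_sim b = {
            .head = { { 120 } }, .page_len = 4096, .tail = { { 8 } },
        };

        set_encoded_len(&b);
        printf("%zu\n", b.len);   /* 4224 */
        return 0;
    }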
static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
kref_init(&xprt->kref);
spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->reserve_lock);
- spin_lock_init(&xprt->recv_lock);
+ spin_lock_init(&xprt->queue_lock);
INIT_LIST_HEAD(&xprt->free);
- INIT_LIST_HEAD(&xprt->recv);
+ xprt->recv_queue = RB_ROOT;
+ INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
spin_lock_init(&xprt->bc_pa_lock);
INIT_LIST_HEAD(&xprt->bc_pa_list);
@@ -1407,7 +1793,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
rpc_init_wait_queue(&xprt->binding, "xprt_binding");
rpc_init_wait_queue(&xprt->pending, "xprt_pending");
- rpc_init_priority_wait_queue(&xprt->sending, "xprt_sending");
+ rpc_init_wait_queue(&xprt->sending, "xprt_sending");
rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");
xprt_init_xid(xprt);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 90adeff4c06b..e5b367a3e517 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -51,12 +51,11 @@ static int rpcrdma_bc_setup_reqs(struct rpcrdma_xprt *r_xprt,
rqst = &req->rl_slot;
rqst->rq_xprt = xprt;
- INIT_LIST_HEAD(&rqst->rq_list);
INIT_LIST_HEAD(&rqst->rq_bc_list);
__set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
- spin_lock_bh(&xprt->bc_pa_lock);
+ spin_lock(&xprt->bc_pa_lock);
list_add(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
- spin_unlock_bh(&xprt->bc_pa_lock);
+ spin_unlock(&xprt->bc_pa_lock);
size = r_xprt->rx_data.inline_rsize;
rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
@@ -201,6 +200,9 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
if (!xprt_connected(rqst->rq_xprt))
goto drop_connection;
+ if (!xprt_request_get_cong(rqst->rq_xprt, rqst))
+ return -EBADSLT;
+
rc = rpcrdma_bc_marshal_reply(rqst);
if (rc < 0)
goto failed_marshal;
@@ -228,16 +230,16 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpc_rqst *rqst, *tmp;
- spin_lock_bh(&xprt->bc_pa_lock);
+ spin_lock(&xprt->bc_pa_lock);
list_for_each_entry_safe(rqst, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
list_del(&rqst->rq_bc_pa_list);
- spin_unlock_bh(&xprt->bc_pa_lock);
+ spin_unlock(&xprt->bc_pa_lock);
rpcrdma_bc_free_rqst(r_xprt, rqst);
- spin_lock_bh(&xprt->bc_pa_lock);
+ spin_lock(&xprt->bc_pa_lock);
}
- spin_unlock_bh(&xprt->bc_pa_lock);
+ spin_unlock(&xprt->bc_pa_lock);
}
/**
@@ -255,9 +257,9 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
rpcrdma_recv_buffer_put(req->rl_reply);
req->rl_reply = NULL;
- spin_lock_bh(&xprt->bc_pa_lock);
+ spin_lock(&xprt->bc_pa_lock);
list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
- spin_unlock_bh(&xprt->bc_pa_lock);
+ spin_unlock(&xprt->bc_pa_lock);
}
/**
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 0f7c465d9a5a..7f5632cd5a48 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -49,46 +49,7 @@ fmr_is_supported(struct rpcrdma_ia *ia)
return true;
}
-static int
-fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
-{
- static struct ib_fmr_attr fmr_attr = {
- .max_pages = RPCRDMA_MAX_FMR_SGES,
- .max_maps = 1,
- .page_shift = PAGE_SHIFT
- };
-
- mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(u64), GFP_KERNEL);
- if (!mr->fmr.fm_physaddrs)
- goto out_free;
-
- mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
- sizeof(*mr->mr_sg), GFP_KERNEL);
- if (!mr->mr_sg)
- goto out_free;
-
- sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);
-
- mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
- &fmr_attr);
- if (IS_ERR(mr->fmr.fm_mr))
- goto out_fmr_err;
-
- INIT_LIST_HEAD(&mr->mr_list);
- return 0;
-
-out_fmr_err:
- dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
- PTR_ERR(mr->fmr.fm_mr));
-
-out_free:
- kfree(mr->mr_sg);
- kfree(mr->fmr.fm_physaddrs);
- return -ENOMEM;
-}
-
-static int
+static void
__fmr_unmap(struct rpcrdma_mr *mr)
{
LIST_HEAD(l);
@@ -97,13 +58,16 @@ __fmr_unmap(struct rpcrdma_mr *mr)
list_add(&mr->fmr.fm_mr->list, &l);
rc = ib_unmap_fmr(&l);
list_del(&mr->fmr.fm_mr->list);
- return rc;
+ if (rc)
+ pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
+ mr, rc);
}
+/* Release an MR.
+ */
static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
- LIST_HEAD(unmap_list);
int rc;
kfree(mr->fmr.fm_physaddrs);
@@ -112,10 +76,7 @@ fmr_op_release_mr(struct rpcrdma_mr *mr)
/* In case this one was left mapped, try to unmap it
* to prevent dealloc_fmr from failing with EBUSY
*/
- rc = __fmr_unmap(mr);
- if (rc)
- pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
- mr, rc);
+ __fmr_unmap(mr);
rc = ib_dealloc_fmr(mr->fmr.fm_mr);
if (rc)
@@ -125,40 +86,68 @@ fmr_op_release_mr(struct rpcrdma_mr *mr)
kfree(mr);
}
-/* Reset of a single FMR.
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
*/
static void
-fmr_op_recover_mr(struct rpcrdma_mr *mr)
+fmr_mr_recycle_worker(struct work_struct *work)
{
+ struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- int rc;
- /* ORDER: invalidate first */
- rc = __fmr_unmap(mr);
- if (rc)
- goto out_release;
-
- /* ORDER: then DMA unmap */
- rpcrdma_mr_unmap_and_put(mr);
+ trace_xprtrdma_mr_recycle(mr);
- r_xprt->rx_stats.mrs_recovered++;
- return;
-
-out_release:
- pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
- r_xprt->rx_stats.mrs_orphaned++;
-
- trace_xprtrdma_dma_unmap(mr);
+ trace_xprtrdma_mr_unmap(mr);
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
spin_lock(&r_xprt->rx_buf.rb_mrlock);
list_del(&mr->mr_all);
+ r_xprt->rx_stats.mrs_recycled++;
spin_unlock(&r_xprt->rx_buf.rb_mrlock);
-
fmr_op_release_mr(mr);
}
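
Recycling replaces the old recover-and-reuse path: MRs are now allocated on demand, so a broken MR is simply unmapped, unlinked and freed from workqueue context, where sleeping is allowed, and a replacement gets created later if needed. The shape of the pattern (a sketch; rpcrdma_mr_recycle() presumably queues the work with something like schedule_work()):

    /* once, when the MR is created: */
    INIT_WORK(&mr->mr_recycle, fmr_mr_recycle_worker);

    /* from a completion path that must not sleep: */
    schedule_work(&mr->mr_recycle);   /* worker unmaps, unlinks, frees */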
+static int
+fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
+{
+ static struct ib_fmr_attr fmr_attr = {
+ .max_pages = RPCRDMA_MAX_FMR_SGES,
+ .max_maps = 1,
+ .page_shift = PAGE_SHIFT
+ };
+
+ mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(u64), GFP_KERNEL);
+ if (!mr->fmr.fm_physaddrs)
+ goto out_free;
+
+ mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
+ sizeof(*mr->mr_sg), GFP_KERNEL);
+ if (!mr->mr_sg)
+ goto out_free;
+
+ sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);
+
+ mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
+ &fmr_attr);
+ if (IS_ERR(mr->fmr.fm_mr))
+ goto out_fmr_err;
+
+ INIT_LIST_HEAD(&mr->mr_list);
+ INIT_WORK(&mr->mr_recycle, fmr_mr_recycle_worker);
+ return 0;
+
+out_fmr_err:
+ dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
+ PTR_ERR(mr->fmr.fm_mr));
+
+out_free:
+ kfree(mr->mr_sg);
+ kfree(mr->fmr.fm_physaddrs);
+ return -ENOMEM;
+}
+
/* On success, sets:
* ep->rep_attr.cap.max_send_wr
* ep->rep_attr.cap.max_recv_wr
@@ -187,6 +176,7 @@ fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
RPCRDMA_MAX_FMR_SGES);
+ ia->ri_max_segs += 2; /* segments for head and tail buffers */
return 0;
}
@@ -244,7 +234,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr->mr_sg, i, mr->mr_dir);
if (!mr->mr_nents)
goto out_dmamap_err;
- trace_xprtrdma_dma_map(mr);
+ trace_xprtrdma_mr_map(mr);
for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
@@ -305,13 +295,13 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
list_for_each_entry(mr, mrs, mr_list) {
dprintk("RPC: %s: unmapping fmr %p\n",
__func__, &mr->fmr);
- trace_xprtrdma_localinv(mr);
+ trace_xprtrdma_mr_localinv(mr);
list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
}
r_xprt->rx_stats.local_inv_needed++;
rc = ib_unmap_fmr(&unmap_list);
if (rc)
- goto out_reset;
+ goto out_release;
/* ORDER: Now DMA unmap all of the req's MRs, and return
* them to the free MW list.
@@ -324,13 +314,13 @@ fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
return;
-out_reset:
+out_release:
pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);
while (!list_empty(mrs)) {
mr = rpcrdma_mr_pop(mrs);
list_del(&mr->fmr.fm_mr->list);
- fmr_op_recover_mr(mr);
+ rpcrdma_mr_recycle(mr);
}
}
@@ -338,7 +328,6 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
.ro_map = fmr_op_map,
.ro_send = fmr_op_send,
.ro_unmap_sync = fmr_op_unmap_sync,
- .ro_recover_mr = fmr_op_recover_mr,
.ro_open = fmr_op_open,
.ro_maxpages = fmr_op_maxpages,
.ro_init_mr = fmr_op_init_mr,
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 1bb00dd6ccdb..fc6378cc0c1c 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -97,6 +97,44 @@ out_not_supported:
return false;
}
+static void
+frwr_op_release_mr(struct rpcrdma_mr *mr)
+{
+ int rc;
+
+ rc = ib_dereg_mr(mr->frwr.fr_mr);
+ if (rc)
+ pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
+ mr, rc);
+ kfree(mr->mr_sg);
+ kfree(mr);
+}
+
+/* MRs are dynamically allocated, so simply clean up and release the MR.
+ * A replacement MR will subsequently be allocated on demand.
+ */
+static void
+frwr_mr_recycle_worker(struct work_struct *work)
+{
+ struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
+ enum rpcrdma_frwr_state state = mr->frwr.fr_state;
+ struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
+
+ trace_xprtrdma_mr_recycle(mr);
+
+ if (state != FRWR_FLUSHED_LI) {
+ trace_xprtrdma_mr_unmap(mr);
+ ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
+ mr->mr_sg, mr->mr_nents, mr->mr_dir);
+ }
+
+ spin_lock(&r_xprt->rx_buf.rb_mrlock);
+ list_del(&mr->mr_all);
+ r_xprt->rx_stats.mrs_recycled++;
+ spin_unlock(&r_xprt->rx_buf.rb_mrlock);
+ frwr_op_release_mr(mr);
+}
+
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
@@ -113,6 +151,7 @@ frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
goto out_list_err;
INIT_LIST_HEAD(&mr->mr_list);
+ INIT_WORK(&mr->mr_recycle, frwr_mr_recycle_worker);
sg_init_table(mr->mr_sg, depth);
init_completion(&frwr->fr_linv_done);
return 0;
@@ -131,79 +170,6 @@ out_list_err:
return rc;
}
-static void
-frwr_op_release_mr(struct rpcrdma_mr *mr)
-{
- int rc;
-
- rc = ib_dereg_mr(mr->frwr.fr_mr);
- if (rc)
- pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
- mr, rc);
- kfree(mr->mr_sg);
- kfree(mr);
-}
-
-static int
-__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
-{
- struct rpcrdma_frwr *frwr = &mr->frwr;
- int rc;
-
- rc = ib_dereg_mr(frwr->fr_mr);
- if (rc) {
- pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
- rc, mr);
- return rc;
- }
-
- frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
- ia->ri_max_frwr_depth);
- if (IS_ERR(frwr->fr_mr)) {
- pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
- PTR_ERR(frwr->fr_mr), mr);
- return PTR_ERR(frwr->fr_mr);
- }
-
- dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
- frwr->fr_state = FRWR_IS_INVALID;
- return 0;
-}
-
-/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
- */
-static void
-frwr_op_recover_mr(struct rpcrdma_mr *mr)
-{
- enum rpcrdma_frwr_state state = mr->frwr.fr_state;
- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- struct rpcrdma_ia *ia = &r_xprt->rx_ia;
- int rc;
-
- rc = __frwr_mr_reset(ia, mr);
- if (state != FRWR_FLUSHED_LI) {
- trace_xprtrdma_dma_unmap(mr);
- ib_dma_unmap_sg(ia->ri_device,
- mr->mr_sg, mr->mr_nents, mr->mr_dir);
- }
- if (rc)
- goto out_release;
-
- rpcrdma_mr_put(mr);
- r_xprt->rx_stats.mrs_recovered++;
- return;
-
-out_release:
- pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
- r_xprt->rx_stats.mrs_orphaned++;
-
- spin_lock(&r_xprt->rx_buf.rb_mrlock);
- list_del(&mr->mr_all);
- spin_unlock(&r_xprt->rx_buf.rb_mrlock);
-
- frwr_op_release_mr(mr);
-}
-
/* On success, sets:
* ep->rep_attr.cap.max_send_wr
* ep->rep_attr.cap.max_recv_wr
@@ -276,6 +242,7 @@ frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
ia->ri_max_frwr_depth);
+ ia->ri_max_segs += 2; /* segments for head and tail buffers */
return 0;
}
@@ -384,7 +351,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr = NULL;
do {
if (mr)
- rpcrdma_mr_defer_recovery(mr);
+ rpcrdma_mr_recycle(mr);
mr = rpcrdma_mr_get(r_xprt);
if (!mr)
return ERR_PTR(-EAGAIN);
@@ -417,7 +384,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
if (!mr->mr_nents)
goto out_dmamap_err;
- trace_xprtrdma_dma_map(mr);
+ trace_xprtrdma_mr_map(mr);
ibmr = frwr->fr_mr;
n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
@@ -451,7 +418,7 @@ out_dmamap_err:
out_mapmr_err:
pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
frwr->fr_mr, n, mr->mr_nents);
- rpcrdma_mr_defer_recovery(mr);
+ rpcrdma_mr_recycle(mr);
return ERR_PTR(-EIO);
}
@@ -499,7 +466,7 @@ frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
list_for_each_entry(mr, mrs, mr_list)
if (mr->mr_handle == rep->rr_inv_rkey) {
list_del_init(&mr->mr_list);
- trace_xprtrdma_remoteinv(mr);
+ trace_xprtrdma_mr_remoteinv(mr);
mr->frwr.fr_state = FRWR_IS_INVALID;
rpcrdma_mr_unmap_and_put(mr);
break; /* only one invalidated MR per RPC */
@@ -536,7 +503,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
mr->frwr.fr_state = FRWR_IS_INVALID;
frwr = &mr->frwr;
- trace_xprtrdma_localinv(mr);
+ trace_xprtrdma_mr_localinv(mr);
frwr->fr_cqe.done = frwr_wc_localinv;
last = &frwr->fr_invwr;
@@ -570,7 +537,7 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
if (bad_wr != first)
wait_for_completion(&frwr->fr_linv_done);
if (rc)
- goto reset_mrs;
+ goto out_release;
/* ORDER: Now DMA unmap all of the MRs, and return
* them to the free MR list.
@@ -582,22 +549,21 @@ unmap:
}
return;
-reset_mrs:
+out_release:
pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);
- /* Find and reset the MRs in the LOCAL_INV WRs that did not
+ /* Unmap and release the MRs in the LOCAL_INV WRs that did not
* get posted.
*/
while (bad_wr) {
frwr = container_of(bad_wr, struct rpcrdma_frwr,
fr_invwr);
mr = container_of(frwr, struct rpcrdma_mr, frwr);
-
- __frwr_mr_reset(ia, mr);
-
bad_wr = bad_wr->next;
+
+ list_del(&mr->mr_list);
+ frwr_op_release_mr(mr);
}
- goto unmap;
}
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
@@ -605,7 +571,6 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
.ro_send = frwr_op_send,
.ro_reminv = frwr_op_reminv,
.ro_unmap_sync = frwr_op_unmap_sync,
- .ro_recover_mr = frwr_op_recover_mr,
.ro_open = frwr_op_open,
.ro_maxpages = frwr_op_maxpages,
.ro_init_mr = frwr_op_init_mr,
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index c8ae983c6cc0..9f53e0240035 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -71,7 +71,6 @@ static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Read list size */
- maxsegs += 2; /* segment for head and tail buffers */
size = maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
/* Minimal Read chunk size */
@@ -97,7 +96,6 @@ static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
size = RPCRDMA_HDRLEN_MIN;
/* Maximum Write list size */
- maxsegs += 2; /* segment for head and tail buffers */
size = sizeof(__be32); /* segment count */
size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
size += sizeof(__be32); /* list discriminator */
@@ -805,7 +803,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
struct rpcrdma_mr *mr;
mr = rpcrdma_mr_pop(&req->rl_registered);
- rpcrdma_mr_defer_recovery(mr);
+ rpcrdma_mr_recycle(mr);
}
/* This implementation supports the following combinations
@@ -866,7 +864,7 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
out_err:
switch (ret) {
case -EAGAIN:
- xprt_wait_for_buffer_space(rqst->rq_task, NULL);
+ xprt_wait_for_buffer_space(rqst->rq_xprt);
break;
case -ENOBUFS:
break;
@@ -1216,7 +1214,6 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rpc_rqst *rqst = rep->rr_rqst;
- unsigned long cwnd;
int status;
xprt->reestablish_timeout = 0;
@@ -1238,15 +1235,10 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
goto out_badheader;
out:
- spin_lock(&xprt->recv_lock);
- cwnd = xprt->cwnd;
- xprt->cwnd = r_xprt->rx_buf.rb_credits << RPC_CWNDSHIFT;
- if (xprt->cwnd > cwnd)
- xprt_release_rqst_cong(rqst->rq_task);
-
+ spin_lock(&xprt->queue_lock);
xprt_complete_rqst(rqst->rq_task, status);
xprt_unpin_rqst(rqst);
- spin_unlock(&xprt->recv_lock);
+ spin_unlock(&xprt->queue_lock);
return;
/* If the incoming reply terminated a pending RPC, the next
@@ -1345,19 +1337,23 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
/* Match incoming rpcrdma_rep to an rpcrdma_req to
* get context for handling any incoming chunks.
*/
- spin_lock(&xprt->recv_lock);
+ spin_lock(&xprt->queue_lock);
rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
if (!rqst)
goto out_norqst;
xprt_pin_rqst(rqst);
+ spin_unlock(&xprt->queue_lock);
if (credits == 0)
credits = 1; /* don't deadlock */
else if (credits > buf->rb_max_requests)
credits = buf->rb_max_requests;
- buf->rb_credits = credits;
-
- spin_unlock(&xprt->recv_lock);
+ if (buf->rb_credits != credits) {
+ spin_lock_bh(&xprt->transport_lock);
+ buf->rb_credits = credits;
+ xprt->cwnd = credits << RPC_CWNDSHIFT;
+ spin_unlock_bh(&xprt->transport_lock);
+ }
req = rpcr_to_rdmar(rqst);
req->rl_reply = rep;
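
With congestion bookkeeping gone from the generic reply path, the transport folds the server's credit grant straight into xprt->cwnd under the transport_lock, and only when the grant actually changed. The clamping rule by itself (RPC_CWNDSHIFT is assumed to be 8 here):

    #include <stdio.h>

    #define RPC_CWNDSHIFT 8U

    static unsigned long credits_to_cwnd(unsigned int credits,
                                         unsigned int max_requests)
    {
        if (credits == 0)
            credits = 1;              /* don't deadlock */
        else if (credits > max_requests)
            credits = max_requests;
        return (unsigned long)credits << RPC_CWNDSHIFT;
    }

    int main(void)
    {
        printf("%lu %lu\n", credits_to_cwnd(0, 128),
               credits_to_cwnd(500, 128));   /* 256 32768 */
        return 0;
    }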
@@ -1378,7 +1374,7 @@ out_badversion:
* is corrupt.
*/
out_norqst:
- spin_unlock(&xprt->recv_lock);
+ spin_unlock(&xprt->queue_lock);
trace_xprtrdma_reply_rqst(rep);
goto repost;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index a68180090554..d3a1a237cee6 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -56,7 +56,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
if (src->iov_len < 24)
goto out_shortreply;
- spin_lock(&xprt->recv_lock);
+ spin_lock(&xprt->queue_lock);
req = xprt_lookup_rqst(xprt, xid);
if (!req)
goto out_notfound;
@@ -86,7 +86,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
rcvbuf->len = 0;
out_unlock:
- spin_unlock(&xprt->recv_lock);
+ spin_unlock(&xprt->queue_lock);
out:
return ret;
@@ -215,9 +215,8 @@ drop_connection:
* connection.
*/
static int
-xprt_rdma_bc_send_request(struct rpc_task *task)
+xprt_rdma_bc_send_request(struct rpc_rqst *rqst)
{
- struct rpc_rqst *rqst = task->tk_rqstp;
struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
struct svcxprt_rdma *rdma;
int ret;
@@ -225,12 +224,7 @@ xprt_rdma_bc_send_request(struct rpc_task *task)
dprintk("svcrdma: sending bc call with xid: %08x\n",
be32_to_cpu(rqst->rq_xid));
- if (!mutex_trylock(&sxprt->xpt_mutex)) {
- rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
- if (!mutex_trylock(&sxprt->xpt_mutex))
- return -EAGAIN;
- rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
- }
+ mutex_lock(&sxprt->xpt_mutex);
ret = -ENOTCONN;
rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
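The backchannel change relies on the same series-wide shift: ->bc_send_request() is now invoked from a context that may sleep, so the trylock-plus-rpc_sleep_on() retry dance collapses into a plain mutex_lock(). An illustrative user-space analogue:

#include <pthread.h>

/* Sketch only: with a sleepable send path, the backchannel can block
 * on the transport mutex directly instead of requeuing the RPC task
 * with -EAGAIN.
 */
static pthread_mutex_t xpt_mutex = PTHREAD_MUTEX_INITIALIZER;

static int bc_send_request(void)
{
        pthread_mutex_lock(&xpt_mutex); /* blocks; no retry dance */
        /* ... marshal and post the reply here ... */
        pthread_mutex_unlock(&xpt_mutex);
        return 0;
}

int main(void)
{
        return bc_send_request();
}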
@@ -248,6 +242,7 @@ static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+ xprt->cwnd = RPC_CWNDSHIFT;
}
static void
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 143ce2579ba9..ae2a83828953 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -225,69 +225,59 @@ xprt_rdma_free_addresses(struct rpc_xprt *xprt)
}
}
-void
-rpcrdma_conn_func(struct rpcrdma_ep *ep)
-{
- schedule_delayed_work(&ep->rep_connect_worker, 0);
-}
-
-void
-rpcrdma_connect_worker(struct work_struct *work)
-{
- struct rpcrdma_ep *ep =
- container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
- struct rpcrdma_xprt *r_xprt =
- container_of(ep, struct rpcrdma_xprt, rx_ep);
- struct rpc_xprt *xprt = &r_xprt->rx_xprt;
-
- spin_lock_bh(&xprt->transport_lock);
- if (ep->rep_connected > 0) {
- if (!xprt_test_and_set_connected(xprt))
- xprt_wake_pending_tasks(xprt, 0);
- } else {
- if (xprt_test_and_clear_connected(xprt))
- xprt_wake_pending_tasks(xprt, -ENOTCONN);
- }
- spin_unlock_bh(&xprt->transport_lock);
-}
-
+/**
+ * xprt_rdma_connect_worker - establish connection in the background
+ * @work: worker thread context
+ *
+ * Requester holds the xprt's send lock to prevent activity on this
+ * transport while a fresh connection is being established. RPC tasks
+ * sleep on the xprt's pending queue waiting for connect to complete.
+ */
static void
xprt_rdma_connect_worker(struct work_struct *work)
{
struct rpcrdma_xprt *r_xprt = container_of(work, struct rpcrdma_xprt,
rx_connect_worker.work);
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
- int rc = 0;
-
- xprt_clear_connected(xprt);
+ int rc;
rc = rpcrdma_ep_connect(&r_xprt->rx_ep, &r_xprt->rx_ia);
- if (rc)
- xprt_wake_pending_tasks(xprt, rc);
-
xprt_clear_connecting(xprt);
+ if (r_xprt->rx_ep.rep_connected > 0) {
+ if (!xprt_test_and_set_connected(xprt)) {
+ xprt->stat.connect_count++;
+ xprt->stat.connect_time += (long)jiffies -
+ xprt->stat.connect_start;
+ xprt_wake_pending_tasks(xprt, -EAGAIN);
+ }
+ } else {
+ if (xprt_test_and_clear_connected(xprt))
+ xprt_wake_pending_tasks(xprt, rc);
+ }
}
+/**
+ * xprt_rdma_inject_disconnect - inject a connection fault
+ * @xprt: transport context
+ *
+ * If @xprt is connected, disconnect it to simulate spurious connection
+ * loss.
+ */
static void
xprt_rdma_inject_disconnect(struct rpc_xprt *xprt)
{
- struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt,
- rx_xprt);
+ struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
trace_xprtrdma_inject_dsc(r_xprt);
rdma_disconnect(r_xprt->rx_ia.ri_id);
}
-/*
- * xprt_rdma_destroy
+/**
+ * xprt_rdma_destroy - Full tear down of transport
+ * @xprt: doomed transport context
*
- * Destroy the xprt.
- * Free all memory associated with the object, including its own.
- * NOTE: none of the *destroy methods free memory for their top-level
- * objects, even though they may have allocated it (they do free
- * private memory). It's up to the caller to handle it. In this
- * case (RDMA transport), all structure memory is inlined with the
- * struct rpcrdma_xprt.
+ * Caller guarantees there will be no more calls to us with
+ * this @xprt.
*/
static void
xprt_rdma_destroy(struct rpc_xprt *xprt)
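The rewritten worker above absorbs the old rpcrdma_connect_worker() wake-up logic and the connect statistics in one place. Its wake-up policy, restated in a small hedged sketch:

#include <assert.h>
#include <errno.h>

/* On success, pending RPCs are woken with -EAGAIN so they re-drive
 * transmission on the fresh connection; on failure they see the
 * connect error itself. User-space restatement only.
 */
static int pending_task_status(int rep_connected, int connect_rc)
{
        return rep_connected > 0 ? -EAGAIN : connect_rc;
}

int main(void)
{
        assert(pending_task_status(1, 0) == -EAGAIN);
        assert(pending_task_status(-ENOTCONN, -ENOTCONN) == -ENOTCONN);
        return 0;
}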
@@ -298,8 +288,6 @@ xprt_rdma_destroy(struct rpc_xprt *xprt)
cancel_delayed_work_sync(&r_xprt->rx_connect_worker);
- xprt_clear_connected(xprt);
-
rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia);
rpcrdma_buffer_destroy(&r_xprt->rx_buf);
rpcrdma_ia_close(&r_xprt->rx_ia);
@@ -442,11 +430,12 @@ out1:
}
/**
- * xprt_rdma_close - Close down RDMA connection
- * @xprt: generic transport to be closed
+ * xprt_rdma_close - close a transport connection
+ * @xprt: transport context
*
- * Called during transport shutdown reconnect, or device
- * removal. Caller holds the transport's write lock.
+ * Called during transport shutdown, reconnect, or device removal.
+ * Caller holds @xprt's send lock to prevent activity on this
+ * transport while the connection is torn down.
*/
static void
xprt_rdma_close(struct rpc_xprt *xprt)
@@ -468,6 +457,12 @@ xprt_rdma_close(struct rpc_xprt *xprt)
xprt->reestablish_timeout = 0;
xprt_disconnect_done(xprt);
rpcrdma_ep_disconnect(ep, ia);
+
+ /* Prepare @xprt for the next connection by reinitializing
+ * its credit grant to one (see RFC 8166, Section 3.3.3).
+ */
+ r_xprt->rx_buf.rb_credits = 1;
+ xprt->cwnd = RPC_CWNDSHIFT;
}
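A disconnected RPC-over-RDMA transport must restart from a credit grant of one (RFC 8166, Section 3.3.3); the close path above resets both the cached grant and the congestion window to match. Restated in user space:

#include <assert.h>

#define RPC_CWNDSHIFT 8UL       /* value from include/linux/sunrpc/xprt.h */

struct fake_xprt { unsigned long cwnd; unsigned long rb_credits; };

/* Mirrors the reset in xprt_rdma_close() above, using stand-in types. */
static void reset_for_next_connection(struct fake_xprt *x)
{
        x->rb_credits = 1;              /* initial credit grant */
        x->cwnd = RPC_CWNDSHIFT;        /* same value the hunk uses */
}

int main(void)
{
        struct fake_xprt x = { 0, 0 };

        reset_for_next_connection(&x);
        assert(x.rb_credits == 1 && x.cwnd == RPC_CWNDSHIFT);
        return 0;
}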
/**
@@ -519,6 +514,12 @@ xprt_rdma_timer(struct rpc_xprt *xprt, struct rpc_task *task)
xprt_force_disconnect(xprt);
}
+/**
+ * xprt_rdma_connect - try to establish a transport connection
+ * @xprt: transport state
+ * @task: RPC scheduler context
+ *
+ */
static void
xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
{
@@ -638,13 +639,6 @@ rpcrdma_get_recvbuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
* 0: Success; rq_buffer points to RPC buffer to use
* ENOMEM: Out of memory, call again later
* EIO: A permanent error occurred, do not retry
- *
- * The RDMA allocate/free functions need the task structure as a place
- * to hide the struct rpcrdma_req, which is necessary for the actual
- * send/recv sequence.
- *
- * xprt_rdma_allocate provides buffers that are already mapped for
- * DMA, and a local DMA lkey is provided for each.
*/
static int
xprt_rdma_allocate(struct rpc_task *task)
@@ -693,7 +687,7 @@ xprt_rdma_free(struct rpc_task *task)
/**
* xprt_rdma_send_request - marshal and send an RPC request
- * @task: RPC task with an RPC message in rq_snd_buf
+ * @rqst: RPC request whose message is in rq_snd_buf
*
* Caller holds the transport's write lock.
*
@@ -706,9 +700,8 @@ xprt_rdma_free(struct rpc_task *task)
* sent. Do not try to send this message again.
*/
static int
-xprt_rdma_send_request(struct rpc_task *task)
+xprt_rdma_send_request(struct rpc_rqst *rqst)
{
- struct rpc_rqst *rqst = task->tk_rqstp;
struct rpc_xprt *xprt = rqst->rq_xprt;
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
@@ -722,6 +715,9 @@ xprt_rdma_send_request(struct rpc_task *task)
if (!xprt_connected(xprt))
goto drop_connection;
+ if (!xprt_request_get_cong(xprt, rqst))
+ return -EBADSLT;
+
rc = rpcrdma_marshal_req(r_xprt, rqst);
if (rc < 0)
goto failed_marshal;
@@ -741,7 +737,7 @@ xprt_rdma_send_request(struct rpc_task *task)
/* An RPC with no reply will throw off credit accounting,
* so drop the connection to reset the credit grant.
*/
- if (!rpc_reply_expected(task))
+ if (!rpc_reply_expected(rqst->rq_task))
goto drop_connection;
return 0;
@@ -766,7 +762,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
0, /* need a local port? */
xprt->stat.bind_count,
xprt->stat.connect_count,
- xprt->stat.connect_time,
+ xprt->stat.connect_time / HZ,
idle_time,
xprt->stat.sends,
xprt->stat.recvs,
@@ -786,7 +782,7 @@ void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
r_xprt->rx_stats.bad_reply_count,
r_xprt->rx_stats.nomsg_call_count);
seq_printf(seq, "%lu %lu %lu %lu %lu %lu\n",
- r_xprt->rx_stats.mrs_recovered,
+ r_xprt->rx_stats.mrs_recycled,
r_xprt->rx_stats.mrs_orphaned,
r_xprt->rx_stats.mrs_allocated,
r_xprt->rx_stats.local_inv_needed,
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 956a5ea47b58..3ddba94c939f 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -108,20 +108,48 @@ rpcrdma_destroy_wq(void)
}
}
+/**
+ * rpcrdma_disconnect_worker - Force a disconnect
+ * @work: endpoint to be disconnected
+ *
+ * Provider callbacks can possibly run in an IRQ context. This function
+ * is invoked in a worker thread to guarantee that disconnect wake-up
+ * calls are always done in process context.
+ */
+static void
+rpcrdma_disconnect_worker(struct work_struct *work)
+{
+ struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
+ rep_disconnect_worker.work);
+ struct rpcrdma_xprt *r_xprt =
+ container_of(ep, struct rpcrdma_xprt, rx_ep);
+
+ xprt_force_disconnect(&r_xprt->rx_xprt);
+}
+
+/**
+ * rpcrdma_qp_event_handler - Handle one QP event (error notification)
+ * @event: details of the event
+ * @context: ep that owns QP where event occurred
+ *
+ * Called from the RDMA provider (device driver) possibly in an interrupt
+ * context.
+ */
static void
-rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
+rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
struct rpcrdma_ep *ep = context;
struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
rx_ep);
- trace_xprtrdma_qp_error(r_xprt, event);
- pr_err("rpcrdma: %s on device %s ep %p\n",
- ib_event_msg(event->event), event->device->name, context);
+ trace_xprtrdma_qp_event(r_xprt, event);
+ pr_err("rpcrdma: %s on device %s connected to %s:%s\n",
+ ib_event_msg(event->event), event->device->name,
+ rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
if (ep->rep_connected == 1) {
ep->rep_connected = -EIO;
- rpcrdma_conn_func(ep);
+ schedule_delayed_work(&ep->rep_disconnect_worker, 0);
wake_up_all(&ep->rep_connect_wait);
}
}
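rpcrdma_disconnect_worker() exists purely to shift work out of a restricted context. A self-contained user-space analogue of the same hand-off pattern, with illustrative names:

#include <pthread.h>
#include <stdio.h>

/* A callback that may fire in an interrupt-like context only records
 * the event and kicks a worker; the worker, in ordinary process
 * context, performs the wake-ups that are unsafe in the callback.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int disconnect_pending;

static void qp_event_callback(void)
{
        pthread_mutex_lock(&lock);
        disconnect_pending = 1;         /* note the event ... */
        pthread_cond_signal(&kick);     /* ... and defer the work */
        pthread_mutex_unlock(&lock);
}

static void *disconnect_worker(void *unused)
{
        pthread_mutex_lock(&lock);
        while (!disconnect_pending)
                pthread_cond_wait(&kick, &lock);
        disconnect_pending = 0;
        pthread_mutex_unlock(&lock);
        puts("forcing disconnect from process context");
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, disconnect_worker, NULL);
        qp_event_callback();
        pthread_join(t, NULL);
        return 0;
}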
@@ -219,38 +247,48 @@ rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
rpcrdma_set_max_header_sizes(r_xprt);
}
+/**
+ * rpcrdma_cm_event_handler - Handle RDMA CM events
+ * @id: rdma_cm_id on which an event has occurred
+ * @event: details of the event
+ *
+ * Called with @id's mutex held. Returns 1 if caller should
+ * destroy @id, otherwise 0.
+ */
static int
-rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
+rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
- struct rpcrdma_xprt *xprt = id->context;
- struct rpcrdma_ia *ia = &xprt->rx_ia;
- struct rpcrdma_ep *ep = &xprt->rx_ep;
- int connstate = 0;
+ struct rpcrdma_xprt *r_xprt = id->context;
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
+ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
+
+ might_sleep();
- trace_xprtrdma_conn_upcall(xprt, event);
+ trace_xprtrdma_cm_event(r_xprt, event);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
case RDMA_CM_EVENT_ROUTE_RESOLVED:
ia->ri_async_rc = 0;
complete(&ia->ri_done);
- break;
+ return 0;
case RDMA_CM_EVENT_ADDR_ERROR:
ia->ri_async_rc = -EPROTO;
complete(&ia->ri_done);
- break;
+ return 0;
case RDMA_CM_EVENT_ROUTE_ERROR:
ia->ri_async_rc = -ENETUNREACH;
complete(&ia->ri_done);
- break;
+ return 0;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
pr_info("rpcrdma: removing device %s for %s:%s\n",
ia->ri_device->name,
- rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
+ rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
ep->rep_connected = -ENODEV;
- xprt_force_disconnect(&xprt->rx_xprt);
+ xprt_force_disconnect(xprt);
wait_for_completion(&ia->ri_remove_done);
ia->ri_id = NULL;
@@ -258,41 +296,40 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
/* Return 1 to ensure the core destroys the id. */
return 1;
case RDMA_CM_EVENT_ESTABLISHED:
- ++xprt->rx_xprt.connect_cookie;
- connstate = 1;
- rpcrdma_update_connect_private(xprt, &event->param.conn);
- goto connected;
+ ++xprt->connect_cookie;
+ ep->rep_connected = 1;
+ rpcrdma_update_connect_private(r_xprt, &event->param.conn);
+ wake_up_all(&ep->rep_connect_wait);
+ break;
case RDMA_CM_EVENT_CONNECT_ERROR:
- connstate = -ENOTCONN;
- goto connected;
+ ep->rep_connected = -ENOTCONN;
+ goto disconnected;
case RDMA_CM_EVENT_UNREACHABLE:
- connstate = -ENETUNREACH;
- goto connected;
+ ep->rep_connected = -ENETUNREACH;
+ goto disconnected;
case RDMA_CM_EVENT_REJECTED:
dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
- rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
+ rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
rdma_reject_msg(id, event->status));
- connstate = -ECONNREFUSED;
+ ep->rep_connected = -ECONNREFUSED;
if (event->status == IB_CM_REJ_STALE_CONN)
- connstate = -EAGAIN;
- goto connected;
+ ep->rep_connected = -EAGAIN;
+ goto disconnected;
case RDMA_CM_EVENT_DISCONNECTED:
- ++xprt->rx_xprt.connect_cookie;
- connstate = -ECONNABORTED;
-connected:
- ep->rep_connected = connstate;
- rpcrdma_conn_func(ep);
+ ++xprt->connect_cookie;
+ ep->rep_connected = -ECONNABORTED;
+disconnected:
+ xprt_force_disconnect(xprt);
wake_up_all(&ep->rep_connect_wait);
- /*FALLTHROUGH*/
+ break;
default:
- dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
- __func__,
- rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
- ia->ri_device->name, ia->ri_ops->ro_displayname,
- ep, rdma_event_msg(event->event));
break;
}
+ dprintk("RPC: %s: %s:%s on %s/%s: %s\n", __func__,
+ rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
+ ia->ri_device->name, ia->ri_ops->ro_displayname,
+ rdma_event_msg(event->event));
return 0;
}
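The CM event handler no longer funnels every outcome through a shared connstate local; each event writes ep->rep_connected itself, and the failure cases share a single disconnected: exit that forces a transport disconnect. The event-to-status mapping, condensed into a sketch:

#include <assert.h>
#include <errno.h>

enum cm_event { ESTABLISHED, CONNECT_ERROR, UNREACHABLE,
                REJECTED, REJECTED_STALE, DISCONNECTED };

/* Restates the mapping in rpcrdma_cm_event_handler() above; event
 * names are abbreviated.
 */
static int rep_connected_for(enum cm_event ev)
{
        switch (ev) {
        case ESTABLISHED:       return 1;
        case CONNECT_ERROR:     return -ENOTCONN;
        case UNREACHABLE:       return -ENETUNREACH;
        case REJECTED:          return -ECONNREFUSED;
        case REJECTED_STALE:    return -EAGAIN;  /* worth retrying */
        case DISCONNECTED:      return -ECONNABORTED;
        }
        return 0;
}

int main(void)
{
        assert(rep_connected_for(REJECTED_STALE) == -EAGAIN);
        return 0;
}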
@@ -308,7 +345,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
init_completion(&ia->ri_done);
init_completion(&ia->ri_remove_done);
- id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall,
+ id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
xprt, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) {
rc = PTR_ERR(id);
@@ -519,7 +556,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
if (rc)
return rc;
- ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
+ ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
ep->rep_attr.qp_context = ep;
ep->rep_attr.srq = NULL;
ep->rep_attr.cap.max_send_sge = max_sge;
@@ -542,7 +579,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
cdata->max_requests >> 2);
ep->rep_send_count = ep->rep_send_batch;
init_waitqueue_head(&ep->rep_connect_wait);
- INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
+ INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
+ rpcrdma_disconnect_worker);
sendcq = ib_alloc_cq(ia->ri_device, NULL,
ep->rep_attr.cap.max_send_wr + 1,
@@ -615,7 +653,7 @@ out1:
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
- cancel_delayed_work_sync(&ep->rep_connect_worker);
+ cancel_delayed_work_sync(&ep->rep_disconnect_worker);
if (ia->ri_id && ia->ri_id->qp) {
rpcrdma_ep_disconnect(ep, ia);
@@ -728,6 +766,7 @@ rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
rx_ia);
+ struct rpc_xprt *xprt = &r_xprt->rx_xprt;
int rc;
retry:
@@ -754,6 +793,8 @@ retry:
}
ep->rep_connected = 0;
+ xprt_clear_connected(xprt);
+
rpcrdma_post_recvs(r_xprt, true);
rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
@@ -877,7 +918,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
}
- buf->rb_flags = 0;
return 0;
@@ -978,39 +1018,6 @@ rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
}
static void
-rpcrdma_mr_recovery_worker(struct work_struct *work)
-{
- struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
- rb_recovery_worker.work);
- struct rpcrdma_mr *mr;
-
- spin_lock(&buf->rb_recovery_lock);
- while (!list_empty(&buf->rb_stale_mrs)) {
- mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
- spin_unlock(&buf->rb_recovery_lock);
-
- trace_xprtrdma_recover_mr(mr);
- mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);
-
- spin_lock(&buf->rb_recovery_lock);
- }
- spin_unlock(&buf->rb_recovery_lock);
-}
-
-void
-rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
-{
- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-
- spin_lock(&buf->rb_recovery_lock);
- rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
- spin_unlock(&buf->rb_recovery_lock);
-
- schedule_delayed_work(&buf->rb_recovery_worker, 0);
-}
-
-static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
@@ -1019,7 +1026,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
LIST_HEAD(free);
LIST_HEAD(all);
- for (count = 0; count < 3; count++) {
+ for (count = 0; count < ia->ri_max_segs; count++) {
struct rpcrdma_mr *mr;
int rc;
@@ -1138,18 +1145,15 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
int i, rc;
+ buf->rb_flags = 0;
buf->rb_max_requests = r_xprt->rx_data.max_requests;
buf->rb_bc_srv_max_requests = 0;
spin_lock_init(&buf->rb_mrlock);
spin_lock_init(&buf->rb_lock);
- spin_lock_init(&buf->rb_recovery_lock);
INIT_LIST_HEAD(&buf->rb_mrs);
INIT_LIST_HEAD(&buf->rb_all);
- INIT_LIST_HEAD(&buf->rb_stale_mrs);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
rpcrdma_mr_refresh_worker);
- INIT_DELAYED_WORK(&buf->rb_recovery_worker,
- rpcrdma_mr_recovery_worker);
rpcrdma_mrs_create(r_xprt);
@@ -1233,7 +1237,6 @@ rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
- cancel_delayed_work_sync(&buf->rb_recovery_worker);
cancel_delayed_work_sync(&buf->rb_refresh_worker);
rpcrdma_sendctxs_destroy(buf);
@@ -1326,7 +1329,7 @@ rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
- trace_xprtrdma_dma_unmap(mr);
+ trace_xprtrdma_mr_unmap(mr);
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mr->mr_sg, mr->mr_nents, mr->mr_dir);
__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
@@ -1518,9 +1521,11 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
struct ib_recv_wr *wr, *bad_wr;
int needed, count, rc;
+ rc = 0;
+ count = 0;
needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
if (buf->rb_posted_receives > needed)
- return;
+ goto out;
needed -= buf->rb_posted_receives;
count = 0;
@@ -1556,7 +1561,7 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
--needed;
}
if (!count)
- return;
+ goto out;
rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
(const struct ib_recv_wr **)&bad_wr);
@@ -1570,5 +1575,6 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
}
}
buf->rb_posted_receives += count;
+out:
trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 2ca14f7c2d51..a13ccb643ce0 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -101,7 +101,7 @@ struct rpcrdma_ep {
wait_queue_head_t rep_connect_wait;
struct rpcrdma_connect_private rep_cm_private;
struct rdma_conn_param rep_remote_cma;
- struct delayed_work rep_connect_worker;
+ struct delayed_work rep_disconnect_worker;
};
/* Pre-allocate extra Work Requests for handling backward receives
@@ -280,6 +280,7 @@ struct rpcrdma_mr {
u32 mr_handle;
u32 mr_length;
u64 mr_offset;
+ struct work_struct mr_recycle;
struct list_head mr_all;
};
@@ -411,9 +412,6 @@ struct rpcrdma_buffer {
u32 rb_bc_max_requests;
- spinlock_t rb_recovery_lock; /* protect rb_stale_mrs */
- struct list_head rb_stale_mrs;
- struct delayed_work rb_recovery_worker;
struct delayed_work rb_refresh_worker;
};
#define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
@@ -452,7 +450,7 @@ struct rpcrdma_stats {
unsigned long hardway_register_count;
unsigned long failed_marshal_count;
unsigned long bad_reply_count;
- unsigned long mrs_recovered;
+ unsigned long mrs_recycled;
unsigned long mrs_orphaned;
unsigned long mrs_allocated;
unsigned long empty_sendctx_q;
@@ -481,7 +479,6 @@ struct rpcrdma_memreg_ops {
struct list_head *mrs);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
struct list_head *);
- void (*ro_recover_mr)(struct rpcrdma_mr *mr);
int (*ro_open)(struct rpcrdma_ia *,
struct rpcrdma_ep *,
struct rpcrdma_create_data_internal *);
@@ -559,7 +556,6 @@ int rpcrdma_ep_create(struct rpcrdma_ep *, struct rpcrdma_ia *,
struct rpcrdma_create_data_internal *);
void rpcrdma_ep_destroy(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_connect(struct rpcrdma_ep *, struct rpcrdma_ia *);
-void rpcrdma_conn_func(struct rpcrdma_ep *ep);
void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
@@ -578,7 +574,12 @@ struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf);
struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
void rpcrdma_mr_put(struct rpcrdma_mr *mr);
void rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr);
-void rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr);
+
+static inline void
+rpcrdma_mr_recycle(struct rpcrdma_mr *mr)
+{
+ schedule_work(&mr->mr_recycle);
+}
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
void rpcrdma_buffer_put(struct rpcrdma_req *);
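With the stale-MR list gone, recycling is driven by a per-MR work item: rpcrdma_mr_recycle() just schedules mr->mr_recycle. A kernel-style sketch, not buildable on its own, of the wiring this inline assumes; the handler and init names are hypothetical stand-ins for code outside this excerpt:

/* Giving each MR a private work item removes the need for the global
 * rb_stale_mrs list and rb_recovery_lock deleted above.
 */
static void example_mr_recycle_worker(struct work_struct *work)
{
        struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr,
                                             mr_recycle);

        /* ... DMA-unmap and destroy @mr, then replenish the MR pool ... */
}

static void example_mr_init(struct rpcrdma_mr *mr)
{
        INIT_WORK(&mr->mr_recycle, example_mr_recycle_worker);
}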
@@ -652,7 +653,6 @@ static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
extern unsigned int xprt_rdma_max_inline_read;
void xprt_rdma_format_addresses(struct rpc_xprt *xprt, struct sockaddr *sap);
void xprt_rdma_free_addresses(struct rpc_xprt *xprt);
-void rpcrdma_connect_worker(struct work_struct *work);
void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq);
int xprt_rdma_init(void);
void xprt_rdma_cleanup(void);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 6b7539c0466e..1b51e04d3566 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -47,13 +47,13 @@
#include <net/checksum.h>
#include <net/udp.h>
#include <net/tcp.h>
+#include <linux/bvec.h>
+#include <linux/uio.h>
#include <trace/events/sunrpc.h>
#include "sunrpc.h"
-#define RPC_TCP_READ_CHUNK_SZ (3*512*1024)
-
static void xs_close(struct rpc_xprt *xprt);
static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt,
struct socket *sock);
@@ -129,7 +129,7 @@ static struct ctl_table xs_tunables_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &xprt_min_resvport_limit,
- .extra2 = &xprt_max_resvport
+ .extra2 = &xprt_max_resvport_limit
},
{
.procname = "max_resvport",
@@ -137,7 +137,7 @@ static struct ctl_table xs_tunables_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &xprt_min_resvport,
+ .extra1 = &xprt_min_resvport_limit,
.extra2 = &xprt_max_resvport_limit
},
{
@@ -325,6 +325,362 @@ static void xs_free_peer_addresses(struct rpc_xprt *xprt)
}
}
+static size_t
+xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
+{
+ size_t i, n;
+
+ if (!(buf->flags & XDRBUF_SPARSE_PAGES))
+ return want;
+ if (want > buf->page_len)
+ want = buf->page_len;
+ n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ for (i = 0; i < n; i++) {
+ if (buf->pages[i])
+ continue;
+ buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
+ if (!buf->pages[i]) {
+ buf->page_len = (i * PAGE_SIZE) - buf->page_base;
+ return buf->page_len;
+ }
+ }
+ return want;
+}
+
+static ssize_t
+xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
+{
+ ssize_t ret;
+ if (seek != 0)
+ iov_iter_advance(&msg->msg_iter, seek);
+ ret = sock_recvmsg(sock, msg, flags);
+ return ret > 0 ? ret + seek : ret;
+}
+
+static ssize_t
+xs_read_kvec(struct socket *sock, struct msghdr *msg, int flags,
+ struct kvec *kvec, size_t count, size_t seek)
+{
+ iov_iter_kvec(&msg->msg_iter, READ | ITER_KVEC, kvec, 1, count);
+ return xs_sock_recvmsg(sock, msg, flags, seek);
+}
+
+static ssize_t
+xs_read_bvec(struct socket *sock, struct msghdr *msg, int flags,
+ struct bio_vec *bvec, unsigned long nr, size_t count,
+ size_t seek)
+{
+ iov_iter_bvec(&msg->msg_iter, READ | ITER_BVEC, bvec, nr, count);
+ return xs_sock_recvmsg(sock, msg, flags, seek);
+}
+
+static ssize_t
+xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
+ size_t count)
+{
+ struct kvec kvec = { 0 };
+ return xs_read_kvec(sock, msg, flags | MSG_TRUNC, &kvec, count, 0);
+}
+
+static ssize_t
+xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
+ struct xdr_buf *buf, size_t count, size_t seek, size_t *read)
+{
+ size_t want, seek_init = seek, offset = 0;
+ ssize_t ret;
+
+ if (seek < buf->head[0].iov_len) {
+ want = min_t(size_t, count, buf->head[0].iov_len);
+ ret = xs_read_kvec(sock, msg, flags, &buf->head[0], want, seek);
+ if (ret <= 0)
+ goto sock_err;
+ offset += ret;
+ if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
+ goto out;
+ if (ret != want)
+ goto eagain;
+ seek = 0;
+ } else {
+ seek -= buf->head[0].iov_len;
+ offset += buf->head[0].iov_len;
+ }
+ if (seek < buf->page_len) {
+ want = xs_alloc_sparse_pages(buf,
+ min_t(size_t, count - offset, buf->page_len),
+ GFP_NOWAIT);
+ ret = xs_read_bvec(sock, msg, flags, buf->bvec,
+ xdr_buf_pagecount(buf),
+ want + buf->page_base,
+ seek + buf->page_base);
+ if (ret <= 0)
+ goto sock_err;
+ offset += ret - buf->page_base;
+ if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
+ goto out;
+ if (ret != want)
+ goto eagain;
+ seek = 0;
+ } else {
+ seek -= buf->page_len;
+ offset += buf->page_len;
+ }
+ if (seek < buf->tail[0].iov_len) {
+ want = min_t(size_t, count - offset, buf->tail[0].iov_len);
+ ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);
+ if (ret <= 0)
+ goto sock_err;
+ offset += ret;
+ if (offset == count || msg->msg_flags & (MSG_EOR|MSG_TRUNC))
+ goto out;
+ if (ret != want)
+ goto eagain;
+ } else
+ offset += buf->tail[0].iov_len;
+ ret = -EMSGSIZE;
+ msg->msg_flags |= MSG_TRUNC;
+out:
+ *read = offset - seek_init;
+ return ret;
+eagain:
+ ret = -EAGAIN;
+ goto out;
+sock_err:
+ offset += seek;
+ goto out;
+}
+
+static void
+xs_read_header(struct sock_xprt *transport, struct xdr_buf *buf)
+{
+ if (!transport->recv.copied) {
+ if (buf->head[0].iov_len >= transport->recv.offset)
+ memcpy(buf->head[0].iov_base,
+ &transport->recv.xid,
+ transport->recv.offset);
+ transport->recv.copied = transport->recv.offset;
+ }
+}
+
+static bool
+xs_read_stream_request_done(struct sock_xprt *transport)
+{
+ return transport->recv.fraghdr & cpu_to_be32(RPC_LAST_STREAM_FRAGMENT);
+}
+
+static ssize_t
+xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
+ int flags, struct rpc_rqst *req)
+{
+ struct xdr_buf *buf = &req->rq_private_buf;
+ size_t want, read;
+ ssize_t ret;
+
+ xs_read_header(transport, buf);
+
+ want = transport->recv.len - transport->recv.offset;
+ ret = xs_read_xdr_buf(transport->sock, msg, flags, buf,
+ transport->recv.copied + want, transport->recv.copied,
+ &read);
+ transport->recv.offset += read;
+ transport->recv.copied += read;
+ if (transport->recv.offset == transport->recv.len) {
+ if (xs_read_stream_request_done(transport))
+ msg->msg_flags |= MSG_EOR;
+ return transport->recv.copied;
+ }
+
+ switch (ret) {
+ case -EMSGSIZE:
+ return transport->recv.copied;
+ case 0:
+ return -ESHUTDOWN;
+ default:
+ if (ret < 0)
+ return ret;
+ }
+ return -EAGAIN;
+}
+
+static size_t
+xs_read_stream_headersize(bool isfrag)
+{
+ if (isfrag)
+ return sizeof(__be32);
+ return 3 * sizeof(__be32);
+}
+
+static ssize_t
+xs_read_stream_header(struct sock_xprt *transport, struct msghdr *msg,
+ int flags, size_t want, size_t seek)
+{
+ struct kvec kvec = {
+ .iov_base = &transport->recv.fraghdr,
+ .iov_len = want,
+ };
+ return xs_read_kvec(transport->sock, msg, flags, &kvec, want, seek);
+}
+
+#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+static ssize_t
+xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
+{
+ struct rpc_xprt *xprt = &transport->xprt;
+ struct rpc_rqst *req;
+ ssize_t ret;
+
+ /* Look up and lock the request corresponding to the given XID */
+ req = xprt_lookup_bc_request(xprt, transport->recv.xid);
+ if (!req) {
+ printk(KERN_WARNING "Callback slot table overflowed\n");
+ return -ESHUTDOWN;
+ }
+
+ ret = xs_read_stream_request(transport, msg, flags, req);
+ if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
+ xprt_complete_bc_request(req, ret);
+
+ return ret;
+}
+#else /* CONFIG_SUNRPC_BACKCHANNEL */
+static ssize_t
+xs_read_stream_call(struct sock_xprt *transport, struct msghdr *msg, int flags)
+{
+ return -ESHUTDOWN;
+}
+#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
+static ssize_t
+xs_read_stream_reply(struct sock_xprt *transport, struct msghdr *msg, int flags)
+{
+ struct rpc_xprt *xprt = &transport->xprt;
+ struct rpc_rqst *req;
+ ssize_t ret = 0;
+
+ /* Look up and lock the request corresponding to the given XID */
+ spin_lock(&xprt->queue_lock);
+ req = xprt_lookup_rqst(xprt, transport->recv.xid);
+ if (!req) {
+ msg->msg_flags |= MSG_TRUNC;
+ goto out;
+ }
+ xprt_pin_rqst(req);
+ spin_unlock(&xprt->queue_lock);
+
+ ret = xs_read_stream_request(transport, msg, flags, req);
+
+ spin_lock(&xprt->queue_lock);
+ if (msg->msg_flags & (MSG_EOR|MSG_TRUNC))
+ xprt_complete_rqst(req->rq_task, ret);
+ xprt_unpin_rqst(req);
+out:
+ spin_unlock(&xprt->queue_lock);
+ return ret;
+}
+
+static ssize_t
+xs_read_stream(struct sock_xprt *transport, int flags)
+{
+ struct msghdr msg = { 0 };
+ size_t want, read = 0;
+ ssize_t ret = 0;
+
+ if (transport->recv.len == 0) {
+ want = xs_read_stream_headersize(transport->recv.copied != 0);
+ ret = xs_read_stream_header(transport, &msg, flags, want,
+ transport->recv.offset);
+ if (ret <= 0)
+ goto out_err;
+ transport->recv.offset = ret;
+ if (ret != want) {
+ ret = -EAGAIN;
+ goto out_err;
+ }
+ transport->recv.len = be32_to_cpu(transport->recv.fraghdr) &
+ RPC_FRAGMENT_SIZE_MASK;
+ transport->recv.offset -= sizeof(transport->recv.fraghdr);
+ read = ret;
+ }
+
+ switch (be32_to_cpu(transport->recv.calldir)) {
+ case RPC_CALL:
+ ret = xs_read_stream_call(transport, &msg, flags);
+ break;
+ case RPC_REPLY:
+ ret = xs_read_stream_reply(transport, &msg, flags);
+ }
+ if (msg.msg_flags & MSG_TRUNC) {
+ transport->recv.calldir = cpu_to_be32(-1);
+ transport->recv.copied = -1;
+ }
+ if (ret < 0)
+ goto out_err;
+ read += ret;
+ if (transport->recv.offset < transport->recv.len) {
+ ret = xs_read_discard(transport->sock, &msg, flags,
+ transport->recv.len - transport->recv.offset);
+ if (ret <= 0)
+ goto out_err;
+ transport->recv.offset += ret;
+ read += ret;
+ if (transport->recv.offset != transport->recv.len)
+ return -EAGAIN;
+ }
+ if (xs_read_stream_request_done(transport)) {
+ trace_xs_stream_read_request(transport);
+ transport->recv.copied = 0;
+ }
+ transport->recv.offset = 0;
+ transport->recv.len = 0;
+ return read;
+out_err:
+ switch (ret) {
+ case 0:
+ case -ESHUTDOWN:
+ xprt_force_disconnect(&transport->xprt);
+ return -ESHUTDOWN;
+ }
+ return ret;
+}
+
+static void xs_stream_data_receive(struct sock_xprt *transport)
+{
+ size_t read = 0;
+ ssize_t ret = 0;
+
+ mutex_lock(&transport->recv_mutex);
+ if (transport->sock == NULL)
+ goto out;
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
+ for (;;) {
+ ret = xs_read_stream(transport, MSG_DONTWAIT);
+ if (ret <= 0)
+ break;
+ read += ret;
+ cond_resched();
+ }
+out:
+ mutex_unlock(&transport->recv_mutex);
+ trace_xs_stream_read_data(&transport->xprt, ret, read);
+}
+
+static void xs_stream_data_receive_workfn(struct work_struct *work)
+{
+ struct sock_xprt *transport =
+ container_of(work, struct sock_xprt, recv_worker);
+ xs_stream_data_receive(transport);
+}
+
+static void
+xs_stream_reset_connect(struct sock_xprt *transport)
+{
+ transport->recv.offset = 0;
+ transport->recv.len = 0;
+ transport->recv.copied = 0;
+ transport->xmit.offset = 0;
+ transport->xprt.stat.connect_count++;
+ transport->xprt.stat.connect_start = jiffies;
+}
+
#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
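These new helpers replace the old skb-parsing state machine with sock_recvmsg() directly into the xdr_buf. The framing is standard RPC record marking: a 4-byte marker whose high bit flags the record's last fragment and whose low 31 bits carry the fragment length, with the XID and call direction following the marker at the start of a record — hence the 3 * sizeof(__be32) in xs_read_stream_headersize(). A user-space restatement of the marker parse:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define RPC_LAST_STREAM_FRAGMENT 0x80000000UL
#define RPC_FRAGMENT_SIZE_MASK   0x7fffffffUL

struct frag { uint32_t len; bool last; };

/* @marker is the record-marking word already converted to host order,
 * as the reader does with be32_to_cpu(transport->recv.fraghdr).
 */
static struct frag parse_marker(uint32_t marker)
{
        struct frag f = {
                .len  = marker & RPC_FRAGMENT_SIZE_MASK,
                .last = !!(marker & RPC_LAST_STREAM_FRAGMENT),
        };
        return f;
}

int main(void)
{
        struct frag f = parse_marker(0x80000064);  /* last frag, 100 bytes */

        assert(f.last && f.len == 100);
        return 0;
}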
@@ -440,28 +796,21 @@ out:
return err;
}
-static void xs_nospace_callback(struct rpc_task *task)
-{
- struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
-
- transport->inet->sk_write_pending--;
-}
-
/**
- * xs_nospace - place task on wait queue if transmit was incomplete
- * @task: task to put to sleep
+ * xs_nospace - handle an incomplete transmission
+ * @req: pointer to RPC request
*
*/
-static int xs_nospace(struct rpc_task *task)
+static int xs_nospace(struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct sock *sk = transport->inet;
int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
- task->tk_pid, req->rq_slen - req->rq_bytes_sent,
+ req->rq_task->tk_pid,
+ req->rq_slen - transport->xmit.offset,
req->rq_slen);
/* Protect against races with write_space */
@@ -471,7 +820,7 @@ static int xs_nospace(struct rpc_task *task)
if (xprt_connected(xprt)) {
/* wait for more buffer space */
sk->sk_write_pending++;
- xprt_wait_for_buffer_space(task, xs_nospace_callback);
+ xprt_wait_for_buffer_space(xprt);
} else
ret = -ENOTCONN;
@@ -491,6 +840,22 @@ static int xs_nospace(struct rpc_task *task)
return ret;
}
+static void
+xs_stream_prepare_request(struct rpc_rqst *req)
+{
+ req->rq_task->tk_status = xdr_alloc_bvec(&req->rq_rcv_buf, GFP_NOIO);
+}
+
+/*
+ * Determine if the previous message in the stream was aborted before it
+ * could complete transmission.
+ */
+static bool
+xs_send_request_was_aborted(struct sock_xprt *transport, struct rpc_rqst *req)
+{
+ return transport->xmit.offset != 0 && req->rq_bytes_sent == 0;
+}
+
/*
* Construct a stream transport record marker in @buf.
*/
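xs_send_request_was_aborted() above captures the stream-corruption test shared by the AF_LOCAL and TCP send paths: queued bytes on the transport (xmit.offset != 0) while this request has sent nothing mean an earlier record was abandoned mid-stream, and the only safe recovery is to close the stream and re-frame. Restated:

#include <stdbool.h>

static bool send_was_aborted(unsigned int xmit_offset,
                             unsigned int rq_bytes_sent)
{
        /* The transport holds a partial record, but not ours: abort. */
        return xmit_offset != 0 && rq_bytes_sent == 0;
}

int main(void)
{
        return send_was_aborted(0, 0);  /* clean stream: returns 0 */
}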
@@ -503,7 +868,7 @@ static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
/**
* xs_local_send_request - write an RPC request to an AF_LOCAL socket
- * @task: RPC task that manages the state of an RPC request
+ * @req: pointer to RPC request
*
* Return values:
* 0: The request has been sent
@@ -512,9 +877,8 @@ static inline void xs_encode_stream_record_marker(struct xdr_buf *buf)
* ENOTCONN: Caller needs to invoke connect logic then call again
* other: Some other error occurred, the request was not sent
*/
-static int xs_local_send_request(struct rpc_task *task)
+static int xs_local_send_request(struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport =
container_of(xprt, struct sock_xprt, xprt);
@@ -522,25 +886,34 @@ static int xs_local_send_request(struct rpc_task *task)
int status;
int sent = 0;
+ /* Close the stream if the previous transmission was incomplete */
+ if (xs_send_request_was_aborted(transport, req)) {
+ xs_close(xprt);
+ return -ENOTCONN;
+ }
+
xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
req->rq_svec->iov_base, req->rq_svec->iov_len);
req->rq_xtime = ktime_get();
- status = xs_sendpages(transport->sock, NULL, 0, xdr, req->rq_bytes_sent,
+ status = xs_sendpages(transport->sock, NULL, 0, xdr,
+ transport->xmit.offset,
true, &sent);
dprintk("RPC: %s(%u) = %d\n",
- __func__, xdr->len - req->rq_bytes_sent, status);
+ __func__, xdr->len - transport->xmit.offset, status);
if (status == -EAGAIN && sock_writeable(transport->inet))
status = -ENOBUFS;
if (likely(sent > 0) || status == 0) {
- req->rq_bytes_sent += sent;
- req->rq_xmit_bytes_sent += sent;
+ transport->xmit.offset += sent;
+ req->rq_bytes_sent = transport->xmit.offset;
if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+ req->rq_xmit_bytes_sent += transport->xmit.offset;
req->rq_bytes_sent = 0;
+ transport->xmit.offset = 0;
return 0;
}
status = -EAGAIN;
@@ -550,7 +923,7 @@ static int xs_local_send_request(struct rpc_task *task)
case -ENOBUFS:
break;
case -EAGAIN:
- status = xs_nospace(task);
+ status = xs_nospace(req);
break;
default:
dprintk("RPC: sendmsg returned unrecognized error %d\n",
@@ -566,7 +939,7 @@ static int xs_local_send_request(struct rpc_task *task)
/**
* xs_udp_send_request - write an RPC request to a UDP socket
- * @task: address of RPC task that manages the state of an RPC request
+ * @req: pointer to RPC request
*
* Return values:
* 0: The request has been sent
@@ -575,9 +948,8 @@ static int xs_local_send_request(struct rpc_task *task)
* ENOTCONN: Caller needs to invoke connect logic then call again
* other: Some other error occurred, the request was not sent
*/
-static int xs_udp_send_request(struct rpc_task *task)
+static int xs_udp_send_request(struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *xdr = &req->rq_snd_buf;
@@ -590,12 +962,16 @@ static int xs_udp_send_request(struct rpc_task *task)
if (!xprt_bound(xprt))
return -ENOTCONN;
+
+ if (!xprt_request_get_cong(xprt, req))
+ return -EBADSLT;
+
req->rq_xtime = ktime_get();
status = xs_sendpages(transport->sock, xs_addr(xprt), xprt->addrlen,
- xdr, req->rq_bytes_sent, true, &sent);
+ xdr, 0, true, &sent);
dprintk("RPC: xs_udp_send_request(%u) = %d\n",
- xdr->len - req->rq_bytes_sent, status);
+ xdr->len, status);
/* firewall is blocking us, don't return -EAGAIN or we end up looping */
if (status == -EPERM)
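UDP keeps congestion control, but the admission check moves into the send path itself: a request that cannot claim part of the window backs out with -EBADSLT instead of sleeping. A hedged sketch of the gate (in the kernel, a successful xprt_request_get_cong() also consumes RPC_CWNDSCALE worth of the window):

#include <stdbool.h>

static bool may_transmit(unsigned long cong, unsigned long cwnd,
                         bool already_holds_slot)
{
        return already_holds_slot || cong < cwnd;
}

int main(void)
{
        return may_transmit(0, 256, false) ? 0 : 1;
}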
@@ -619,7 +995,7 @@ process_status:
/* Should we call xs_close() here? */
break;
case -EAGAIN:
- status = xs_nospace(task);
+ status = xs_nospace(req);
break;
case -ENETUNREACH:
case -ENOBUFS:
@@ -639,7 +1015,7 @@ process_status:
/**
* xs_tcp_send_request - write an RPC request to a TCP socket
- * @task: address of RPC task that manages the state of an RPC request
+ * @req: pointer to RPC request
*
* Return values:
* 0: The request has been sent
@@ -651,9 +1027,8 @@ process_status:
* XXX: In the case of soft timeouts, should we eventually give up
* if sendmsg is not able to make progress?
*/
-static int xs_tcp_send_request(struct rpc_task *task)
+static int xs_tcp_send_request(struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
struct xdr_buf *xdr = &req->rq_snd_buf;
@@ -662,6 +1037,13 @@ static int xs_tcp_send_request(struct rpc_task *task)
int status;
int sent;
+ /* Close the stream if the previous transmission was incomplete */
+ if (xs_send_request_was_aborted(transport, req)) {
+ if (transport->sock != NULL)
+ kernel_sock_shutdown(transport->sock, SHUT_RDWR);
+ return -ENOTCONN;
+ }
+
xs_encode_stream_record_marker(&req->rq_snd_buf);
xs_pktdump("packet data:",
@@ -671,7 +1053,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
* completes while the socket holds a reference to the pages,
* then we may end up resending corrupted data.
*/
- if (task->tk_flags & RPC_TASK_SENT)
+ if (req->rq_task->tk_flags & RPC_TASK_SENT)
zerocopy = false;
if (test_bit(XPRT_SOCK_UPD_TIMEOUT, &transport->sock_state))
@@ -684,17 +1066,20 @@ static int xs_tcp_send_request(struct rpc_task *task)
while (1) {
sent = 0;
status = xs_sendpages(transport->sock, NULL, 0, xdr,
- req->rq_bytes_sent, zerocopy, &sent);
+ transport->xmit.offset,
+ zerocopy, &sent);
dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
- xdr->len - req->rq_bytes_sent, status);
+ xdr->len - transport->xmit.offset, status);
/* If we've sent the entire packet, immediately
* reset the count of bytes sent. */
- req->rq_bytes_sent += sent;
- req->rq_xmit_bytes_sent += sent;
+ transport->xmit.offset += sent;
+ req->rq_bytes_sent = transport->xmit.offset;
if (likely(req->rq_bytes_sent >= req->rq_slen)) {
+ req->rq_xmit_bytes_sent += transport->xmit.offset;
req->rq_bytes_sent = 0;
+ transport->xmit.offset = 0;
return 0;
}
@@ -732,7 +1117,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
/* Should we call xs_close() here? */
break;
case -EAGAIN:
- status = xs_nospace(task);
+ status = xs_nospace(req);
break;
case -ECONNRESET:
case -ECONNREFUSED:
@@ -749,35 +1134,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
return status;
}
-/**
- * xs_tcp_release_xprt - clean up after a tcp transmission
- * @xprt: transport
- * @task: rpc task
- *
- * This cleans up if an error causes us to abort the transmission of a request.
- * In this case, the socket may need to be reset in order to avoid confusing
- * the server.
- */
-static void xs_tcp_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
-{
- struct rpc_rqst *req;
-
- if (task != xprt->snd_task)
- return;
- if (task == NULL)
- goto out_release;
- req = task->tk_rqstp;
- if (req == NULL)
- goto out_release;
- if (req->rq_bytes_sent == 0)
- goto out_release;
- if (req->rq_bytes_sent == req->rq_snd_buf.len)
- goto out_release;
- set_bit(XPRT_CLOSE_WAIT, &xprt->state);
-out_release:
- xprt_release_xprt(xprt, task);
-}
-
static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
{
transport->old_data_ready = sk->sk_data_ready;
@@ -921,114 +1277,6 @@ static void xs_destroy(struct rpc_xprt *xprt)
module_put(THIS_MODULE);
}
-static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
-{
- struct xdr_skb_reader desc = {
- .skb = skb,
- .offset = sizeof(rpc_fraghdr),
- .count = skb->len - sizeof(rpc_fraghdr),
- };
-
- if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
- return -1;
- if (desc.count)
- return -1;
- return 0;
-}
-
-/**
- * xs_local_data_read_skb
- * @xprt: transport
- * @sk: socket
- * @skb: skbuff
- *
- * Currently this assumes we can read the whole reply in a single gulp.
- */
-static void xs_local_data_read_skb(struct rpc_xprt *xprt,
- struct sock *sk,
- struct sk_buff *skb)
-{
- struct rpc_task *task;
- struct rpc_rqst *rovr;
- int repsize, copied;
- u32 _xid;
- __be32 *xp;
-
- repsize = skb->len - sizeof(rpc_fraghdr);
- if (repsize < 4) {
- dprintk("RPC: impossible RPC reply size %d\n", repsize);
- return;
- }
-
- /* Copy the XID from the skb... */
- xp = skb_header_pointer(skb, sizeof(rpc_fraghdr), sizeof(_xid), &_xid);
- if (xp == NULL)
- return;
-
- /* Look up and lock the request corresponding to the given XID */
- spin_lock(&xprt->recv_lock);
- rovr = xprt_lookup_rqst(xprt, *xp);
- if (!rovr)
- goto out_unlock;
- xprt_pin_rqst(rovr);
- spin_unlock(&xprt->recv_lock);
- task = rovr->rq_task;
-
- copied = rovr->rq_private_buf.buflen;
- if (copied > repsize)
- copied = repsize;
-
- if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
- dprintk("RPC: sk_buff copy failed\n");
- spin_lock(&xprt->recv_lock);
- goto out_unpin;
- }
-
- spin_lock(&xprt->recv_lock);
- xprt_complete_rqst(task, copied);
-out_unpin:
- xprt_unpin_rqst(rovr);
- out_unlock:
- spin_unlock(&xprt->recv_lock);
-}
-
-static void xs_local_data_receive(struct sock_xprt *transport)
-{
- struct sk_buff *skb;
- struct sock *sk;
- int err;
-
-restart:
- mutex_lock(&transport->recv_mutex);
- sk = transport->inet;
- if (sk == NULL)
- goto out;
- for (;;) {
- skb = skb_recv_datagram(sk, 0, 1, &err);
- if (skb != NULL) {
- xs_local_data_read_skb(&transport->xprt, sk, skb);
- skb_free_datagram(sk, skb);
- continue;
- }
- if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
- break;
- if (need_resched()) {
- mutex_unlock(&transport->recv_mutex);
- cond_resched();
- goto restart;
- }
- }
-out:
- mutex_unlock(&transport->recv_mutex);
-}
-
-static void xs_local_data_receive_workfn(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, recv_worker);
- xs_local_data_receive(transport);
-}
-
/**
* xs_udp_data_read_skb - receive callback for UDP sockets
* @xprt: transport
@@ -1058,13 +1306,13 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
return;
/* Look up and lock the request corresponding to the given XID */
- spin_lock(&xprt->recv_lock);
+ spin_lock(&xprt->queue_lock);
rovr = xprt_lookup_rqst(xprt, *xp);
if (!rovr)
goto out_unlock;
xprt_pin_rqst(rovr);
xprt_update_rtt(rovr->rq_task);
- spin_unlock(&xprt->recv_lock);
+ spin_unlock(&xprt->queue_lock);
task = rovr->rq_task;
if ((copied = rovr->rq_private_buf.buflen) > repsize)
@@ -1072,7 +1320,7 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
/* Suck it into the iovec, verify checksum if not done by hw. */
if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
- spin_lock(&xprt->recv_lock);
+ spin_lock(&xprt->queue_lock);
__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
goto out_unpin;
}
@@ -1081,13 +1329,13 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
spin_lock_bh(&xprt->transport_lock);
xprt_adjust_cwnd(xprt, task, copied);
spin_unlock_bh(&xprt->transport_lock);
- spin_lock(&xprt->recv_lock);
+ spin_lock(&xprt->queue_lock);
xprt_complete_rqst(task, copied);
__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
out_unpin:
xprt_unpin_rqst(rovr);
out_unlock:
- spin_unlock(&xprt->recv_lock);
+ spin_unlock(&xprt->queue_lock);
}
static void xs_udp_data_receive(struct sock_xprt *transport)
@@ -1096,25 +1344,18 @@ static void xs_udp_data_receive(struct sock_xprt *transport)
struct sock *sk;
int err;
-restart:
mutex_lock(&transport->recv_mutex);
sk = transport->inet;
if (sk == NULL)
goto out;
+ clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
for (;;) {
skb = skb_recv_udp(sk, 0, 1, &err);
- if (skb != NULL) {
- xs_udp_data_read_skb(&transport->xprt, sk, skb);
- consume_skb(skb);
- continue;
- }
- if (!test_and_clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
+ if (skb == NULL)
break;
- if (need_resched()) {
- mutex_unlock(&transport->recv_mutex);
- cond_resched();
- goto restart;
- }
+ xs_udp_data_read_skb(&transport->xprt, sk, skb);
+ consume_skb(skb);
+ cond_resched();
}
out:
mutex_unlock(&transport->recv_mutex);
@@ -1163,263 +1404,7 @@ static void xs_tcp_force_close(struct rpc_xprt *xprt)
xprt_force_disconnect(xprt);
}
-static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
-{
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- size_t len, used;
- char *p;
-
- p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
- len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
- used = xdr_skb_read_bits(desc, p, len);
- transport->tcp_offset += used;
- if (used != len)
- return;
-
- transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
- if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
- transport->tcp_flags |= TCP_RCV_LAST_FRAG;
- else
- transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
- transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
-
- transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
- transport->tcp_offset = 0;
-
- /* Sanity check of the record length */
- if (unlikely(transport->tcp_reclen < 8)) {
- dprintk("RPC: invalid TCP record fragment length\n");
- xs_tcp_force_close(xprt);
- return;
- }
- dprintk("RPC: reading TCP record fragment of length %d\n",
- transport->tcp_reclen);
-}
-
-static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
-{
- if (transport->tcp_offset == transport->tcp_reclen) {
- transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
- transport->tcp_offset = 0;
- if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- transport->tcp_flags |= TCP_RCV_COPY_XID;
- transport->tcp_copied = 0;
- }
- }
-}
-
-static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
-{
- size_t len, used;
- char *p;
-
- len = sizeof(transport->tcp_xid) - transport->tcp_offset;
- dprintk("RPC: reading XID (%zu bytes)\n", len);
- p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
- used = xdr_skb_read_bits(desc, p, len);
- transport->tcp_offset += used;
- if (used != len)
- return;
- transport->tcp_flags &= ~TCP_RCV_COPY_XID;
- transport->tcp_flags |= TCP_RCV_READ_CALLDIR;
- transport->tcp_copied = 4;
- dprintk("RPC: reading %s XID %08x\n",
- (transport->tcp_flags & TCP_RPC_REPLY) ? "reply for"
- : "request with",
- ntohl(transport->tcp_xid));
- xs_tcp_check_fraghdr(transport);
-}
-
-static inline void xs_tcp_read_calldir(struct sock_xprt *transport,
- struct xdr_skb_reader *desc)
-{
- size_t len, used;
- u32 offset;
- char *p;
-
- /*
- * We want transport->tcp_offset to be 8 at the end of this routine
- * (4 bytes for the xid and 4 bytes for the call/reply flag).
- * When this function is called for the first time,
- * transport->tcp_offset is 4 (after having already read the xid).
- */
- offset = transport->tcp_offset - sizeof(transport->tcp_xid);
- len = sizeof(transport->tcp_calldir) - offset;
- dprintk("RPC: reading CALL/REPLY flag (%zu bytes)\n", len);
- p = ((char *) &transport->tcp_calldir) + offset;
- used = xdr_skb_read_bits(desc, p, len);
- transport->tcp_offset += used;
- if (used != len)
- return;
- transport->tcp_flags &= ~TCP_RCV_READ_CALLDIR;
- /*
- * We don't yet have the XDR buffer, so we will write the calldir
- * out after we get the buffer from the 'struct rpc_rqst'
- */
- switch (ntohl(transport->tcp_calldir)) {
- case RPC_REPLY:
- transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
- transport->tcp_flags |= TCP_RPC_REPLY;
- break;
- case RPC_CALL:
- transport->tcp_flags |= TCP_RCV_COPY_CALLDIR;
- transport->tcp_flags |= TCP_RCV_COPY_DATA;
- transport->tcp_flags &= ~TCP_RPC_REPLY;
- break;
- default:
- dprintk("RPC: invalid request message type\n");
- xs_tcp_force_close(&transport->xprt);
- }
- xs_tcp_check_fraghdr(transport);
-}
-
-static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc,
- struct rpc_rqst *req)
-{
- struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
- struct xdr_buf *rcvbuf;
- size_t len;
- ssize_t r;
-
- rcvbuf = &req->rq_private_buf;
-
- if (transport->tcp_flags & TCP_RCV_COPY_CALLDIR) {
- /*
- * Save the RPC direction in the XDR buffer
- */
- memcpy(rcvbuf->head[0].iov_base + transport->tcp_copied,
- &transport->tcp_calldir,
- sizeof(transport->tcp_calldir));
- transport->tcp_copied += sizeof(transport->tcp_calldir);
- transport->tcp_flags &= ~TCP_RCV_COPY_CALLDIR;
- }
-
- len = desc->count;
- if (len > transport->tcp_reclen - transport->tcp_offset)
- desc->count = transport->tcp_reclen - transport->tcp_offset;
- r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
- desc, xdr_skb_read_bits);
-
- if (desc->count) {
- /* Error when copying to the receive buffer,
- * usually because we weren't able to allocate
- * additional buffer pages. All we can do now
- * is turn off TCP_RCV_COPY_DATA, so the request
- * will not receive any additional updates,
- * and time out.
- * Any remaining data from this record will
- * be discarded.
- */
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- dprintk("RPC: XID %08x truncated request\n",
- ntohl(transport->tcp_xid));
- dprintk("RPC: xprt = %p, tcp_copied = %lu, "
- "tcp_offset = %u, tcp_reclen = %u\n",
- xprt, transport->tcp_copied,
- transport->tcp_offset, transport->tcp_reclen);
- return;
- }
-
- transport->tcp_copied += r;
- transport->tcp_offset += r;
- desc->count = len - r;
-
- dprintk("RPC: XID %08x read %zd bytes\n",
- ntohl(transport->tcp_xid), r);
- dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
- "tcp_reclen = %u\n", xprt, transport->tcp_copied,
- transport->tcp_offset, transport->tcp_reclen);
-
- if (transport->tcp_copied == req->rq_private_buf.buflen)
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- else if (transport->tcp_offset == transport->tcp_reclen) {
- if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- }
-}
-
-/*
- * Finds the request corresponding to the RPC xid and invokes the common
- * tcp read code to read the data.
- */
-static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc)
-{
- struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
- struct rpc_rqst *req;
-
- dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
-
- /* Find and lock the request corresponding to this xid */
- spin_lock(&xprt->recv_lock);
- req = xprt_lookup_rqst(xprt, transport->tcp_xid);
- if (!req) {
- dprintk("RPC: XID %08x request not found!\n",
- ntohl(transport->tcp_xid));
- spin_unlock(&xprt->recv_lock);
- return -1;
- }
- xprt_pin_rqst(req);
- spin_unlock(&xprt->recv_lock);
-
- xs_tcp_read_common(xprt, desc, req);
-
- spin_lock(&xprt->recv_lock);
- if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
- xprt_complete_rqst(req->rq_task, transport->tcp_copied);
- xprt_unpin_rqst(req);
- spin_unlock(&xprt->recv_lock);
- return 0;
-}
-
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
-/*
- * Obtains an rpc_rqst previously allocated and invokes the common
- * tcp read code to read the data. The result is placed in the callback
- * queue.
- * If we're unable to obtain the rpc_rqst we schedule the closing of the
- * connection and return -1.
- */
-static int xs_tcp_read_callback(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc)
-{
- struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
- struct rpc_rqst *req;
-
- /* Look up the request corresponding to the given XID */
- req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
- if (req == NULL) {
- printk(KERN_WARNING "Callback slot table overflowed\n");
- xprt_force_disconnect(xprt);
- return -1;
- }
-
- dprintk("RPC: read callback XID %08x\n", ntohl(req->rq_xid));
- xs_tcp_read_common(xprt, desc, req);
-
- if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
- xprt_complete_bc_request(req, transport->tcp_copied);
-
- return 0;
-}
-
-static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc)
-{
- struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
-
- return (transport->tcp_flags & TCP_RPC_REPLY) ?
- xs_tcp_read_reply(xprt, desc) :
- xs_tcp_read_callback(xprt, desc);
-}
-
static int xs_tcp_bc_up(struct svc_serv *serv, struct net *net)
{
int ret;
@@ -1435,145 +1420,8 @@ static size_t xs_tcp_bc_maxpayload(struct rpc_xprt *xprt)
{
return PAGE_SIZE;
}
-#else
-static inline int _xs_tcp_read_data(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc)
-{
- return xs_tcp_read_reply(xprt, desc);
-}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
-/*
- * Read data off the transport. This can be either an RPC_CALL or an
- * RPC_REPLY. Relay the processing to helper functions.
- */
-static void xs_tcp_read_data(struct rpc_xprt *xprt,
- struct xdr_skb_reader *desc)
-{
- struct sock_xprt *transport =
- container_of(xprt, struct sock_xprt, xprt);
-
- if (_xs_tcp_read_data(xprt, desc) == 0)
- xs_tcp_check_fraghdr(transport);
- else {
- /*
- * The transport_lock protects the request handling.
- * There's no need to hold it to update the tcp_flags.
- */
- transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
- }
-}
-
-static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
-{
- size_t len;
-
- len = transport->tcp_reclen - transport->tcp_offset;
- if (len > desc->count)
- len = desc->count;
- desc->count -= len;
- desc->offset += len;
- transport->tcp_offset += len;
- dprintk("RPC: discarded %zu bytes\n", len);
- xs_tcp_check_fraghdr(transport);
-}
-
-static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
-{
- struct rpc_xprt *xprt = rd_desc->arg.data;
- struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
- struct xdr_skb_reader desc = {
- .skb = skb,
- .offset = offset,
- .count = len,
- };
- size_t ret;
-
- dprintk("RPC: xs_tcp_data_recv started\n");
- do {
- trace_xs_tcp_data_recv(transport);
- /* Read in a new fragment marker if necessary */
- /* Can we ever really expect to get completely empty fragments? */
- if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
- xs_tcp_read_fraghdr(xprt, &desc);
- continue;
- }
- /* Read in the xid if necessary */
- if (transport->tcp_flags & TCP_RCV_COPY_XID) {
- xs_tcp_read_xid(transport, &desc);
- continue;
- }
- /* Read in the call/reply flag */
- if (transport->tcp_flags & TCP_RCV_READ_CALLDIR) {
- xs_tcp_read_calldir(transport, &desc);
- continue;
- }
- /* Read in the request data */
- if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
- xs_tcp_read_data(xprt, &desc);
- continue;
- }
- /* Skip over any trailing bytes on short reads */
- xs_tcp_read_discard(transport, &desc);
- } while (desc.count);
- ret = len - desc.count;
- if (ret < rd_desc->count)
- rd_desc->count -= ret;
- else
- rd_desc->count = 0;
- trace_xs_tcp_data_recv(transport);
- dprintk("RPC: xs_tcp_data_recv done\n");
- return ret;
-}
-
-static void xs_tcp_data_receive(struct sock_xprt *transport)
-{
- struct rpc_xprt *xprt = &transport->xprt;
- struct sock *sk;
- read_descriptor_t rd_desc = {
- .arg.data = xprt,
- };
- unsigned long total = 0;
- int read = 0;
-
-restart:
- mutex_lock(&transport->recv_mutex);
- sk = transport->inet;
- if (sk == NULL)
- goto out;
-
- /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
- for (;;) {
- rd_desc.count = RPC_TCP_READ_CHUNK_SZ;
- lock_sock(sk);
- read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
- if (rd_desc.count != 0 || read < 0) {
- clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
- release_sock(sk);
- break;
- }
- release_sock(sk);
- total += read;
- if (need_resched()) {
- mutex_unlock(&transport->recv_mutex);
- cond_resched();
- goto restart;
- }
- }
- if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
- queue_work(xprtiod_workqueue, &transport->recv_worker);
-out:
- mutex_unlock(&transport->recv_mutex);
- trace_xs_tcp_data_ready(xprt, read, total);
-}
-
-static void xs_tcp_data_receive_workfn(struct work_struct *work)
-{
- struct sock_xprt *transport =
- container_of(work, struct sock_xprt, recv_worker);
- xs_tcp_data_receive(transport);
-}
-
/**
* xs_tcp_state_change - callback to handle TCP socket state changes
* @sk: socket whose state has changed
@@ -1600,17 +1448,13 @@ static void xs_tcp_state_change(struct sock *sk)
case TCP_ESTABLISHED:
spin_lock(&xprt->transport_lock);
if (!xprt_test_and_set_connected(xprt)) {
-
- /* Reset TCP record info */
- transport->tcp_offset = 0;
- transport->tcp_reclen = 0;
- transport->tcp_copied = 0;
- transport->tcp_flags =
- TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
xprt->connect_cookie++;
clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
xprt_clear_connecting(xprt);
+ xprt->stat.connect_count++;
+ xprt->stat.connect_time += (long)jiffies -
+ xprt->stat.connect_start;
xprt_wake_pending_tasks(xprt, -EAGAIN);
}
spin_unlock(&xprt->transport_lock);
@@ -1675,7 +1519,8 @@ static void xs_write_space(struct sock *sk)
if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
goto out;
- xprt_write_space(xprt);
+ if (xprt_write_space(xprt))
+ sk->sk_write_pending--;
out:
rcu_read_unlock();
}
@@ -1773,11 +1618,17 @@ static void xs_udp_timer(struct rpc_xprt *xprt, struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
-static unsigned short xs_get_random_port(void)
+static int xs_get_random_port(void)
{
- unsigned short range = xprt_max_resvport - xprt_min_resvport + 1;
- unsigned short rand = (unsigned short) prandom_u32() % range;
- return rand + xprt_min_resvport;
+ unsigned short min = xprt_min_resvport, max = xprt_max_resvport;
+ unsigned short range;
+ unsigned short rand;
+
+ if (max < min)
+ return -EADDRINUSE;
+ range = max - min + 1;
+ rand = (unsigned short) prandom_u32() % range;
+ return rand + min;
}
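
The helper now widens its return type so an inverted port range (possible once the module-parameter clamping later in this patch is relaxed) is reported as -EADDRINUSE instead of wrapping the unsigned arithmetic. A minimal userspace sketch of the same range logic, with prandom_u32() stubbed by rand():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int get_random_port(unsigned short min, unsigned short max)
{
	unsigned short range, r;

	if (max < min)
		return -EADDRINUSE;	/* inverted range: no usable port */
	range = max - min + 1;
	r = (unsigned short)(rand() % range);
	return r + min;
}

int main(void)
{
	printf("%d\n", get_random_port(665, 1023));	/* a port in [665, 1023] */
	printf("%d\n", get_random_port(1023, 665));	/* -EADDRINUSE */
	return 0;
}
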
/**
@@ -1833,9 +1684,9 @@ static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock)
transport->srcport = xs_sock_getport(sock);
}
-static unsigned short xs_get_srcport(struct sock_xprt *transport)
+static int xs_get_srcport(struct sock_xprt *transport)
{
- unsigned short port = transport->srcport;
+ int port = transport->srcport;
if (port == 0 && transport->xprt.resvport)
port = xs_get_random_port();
@@ -1856,7 +1707,7 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
{
struct sockaddr_storage myaddr;
int err, nloop = 0;
- unsigned short port = xs_get_srcport(transport);
+ int port = xs_get_srcport(transport);
unsigned short last;
/*
@@ -1874,8 +1725,8 @@ static int xs_bind(struct sock_xprt *transport, struct socket *sock)
* transport->xprt.resvport == 1) xs_get_srcport above will
* ensure that port is non-zero and we will bind as needed.
*/
- if (port == 0)
- return 0;
+ if (port <= 0)
+ return port;
memcpy(&myaddr, &transport->srcaddr, transport->xprt.addrlen);
do {
@@ -2028,9 +1879,8 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
write_unlock_bh(&sk->sk_callback_lock);
}
- /* Tell the socket layer to start connecting... */
- xprt->stat.connect_count++;
- xprt->stat.connect_start = jiffies;
+ xs_stream_reset_connect(transport);
+
return kernel_connect(sock, xs_addr(xprt), xprt->addrlen, 0);
}
@@ -2062,6 +1912,9 @@ static int xs_local_setup_socket(struct sock_xprt *transport)
case 0:
dprintk("RPC: xprt %p connected to %s\n",
xprt, xprt->address_strings[RPC_DISPLAY_ADDR]);
+ xprt->stat.connect_count++;
+ xprt->stat.connect_time += (long)jiffies -
+ xprt->stat.connect_start;
xprt_set_connected(xprt);
case -ENOBUFS:
break;
@@ -2386,9 +2239,10 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
xs_set_memalloc(xprt);
+ /* Reset TCP record info */
+ xs_stream_reset_connect(transport);
+
/* Tell the socket layer to start connecting... */
- xprt->stat.connect_count++;
- xprt->stat.connect_start = jiffies;
set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
switch (ret) {
@@ -2561,7 +2415,7 @@ static void xs_local_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
"%llu %llu %lu %llu %llu\n",
xprt->stat.bind_count,
xprt->stat.connect_count,
- xprt->stat.connect_time,
+ xprt->stat.connect_time / HZ,
idle_time,
xprt->stat.sends,
xprt->stat.recvs,
@@ -2616,7 +2470,7 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
transport->srcport,
xprt->stat.bind_count,
xprt->stat.connect_count,
- xprt->stat.connect_time,
+ xprt->stat.connect_time / HZ,
idle_time,
xprt->stat.sends,
xprt->stat.recvs,
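
Both print paths now divide by HZ because connect_time is accumulated in raw jiffies at connect time (see the TCP_ESTABLISHED hunk earlier) and converted to seconds only for display. A toy illustration with an assumed tick rate:

#include <stdio.h>

#define HZ 250	/* illustrative tick rate, not the kernel's configured value */

int main(void)
{
	unsigned long connect_time = 0, start = 1000, now = 2500;

	connect_time += now - start;		/* accumulated in jiffies */
	printf("%lus\n", connect_time / HZ);	/* rendered as 6s */
	return 0;
}
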
@@ -2704,9 +2558,8 @@ static int bc_sendto(struct rpc_rqst *req)
/*
* The send routine. Borrows from svc_send
*/
-static int bc_send_request(struct rpc_task *task)
+static int bc_send_request(struct rpc_rqst *req)
{
- struct rpc_rqst *req = task->tk_rqstp;
struct svc_xprt *xprt;
int len;
@@ -2720,12 +2573,7 @@ static int bc_send_request(struct rpc_task *task)
* Grab the mutex to serialize data as the connection is shared
* with the fore channel
*/
- if (!mutex_trylock(&xprt->xpt_mutex)) {
- rpc_sleep_on(&xprt->xpt_bc_pending, task, NULL);
- if (!mutex_trylock(&xprt->xpt_mutex))
- return -EAGAIN;
- rpc_wake_up_queued_task(&xprt->xpt_bc_pending, task);
- }
+ mutex_lock(&xprt->xpt_mutex);
if (test_bit(XPT_DEAD, &xprt->xpt_flags))
len = -ENOTCONN;
else
@@ -2761,7 +2609,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
static const struct rpc_xprt_ops xs_local_ops = {
.reserve_xprt = xprt_reserve_xprt,
- .release_xprt = xs_tcp_release_xprt,
+ .release_xprt = xprt_release_xprt,
.alloc_slot = xprt_alloc_slot,
.free_slot = xprt_free_slot,
.rpcbind = xs_local_rpcbind,
@@ -2769,6 +2617,7 @@ static const struct rpc_xprt_ops xs_local_ops = {
.connect = xs_local_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
+ .prepare_request = xs_stream_prepare_request,
.send_request = xs_local_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
.close = xs_close,
@@ -2803,14 +2652,15 @@ static const struct rpc_xprt_ops xs_udp_ops = {
static const struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
- .release_xprt = xs_tcp_release_xprt,
- .alloc_slot = xprt_lock_and_alloc_slot,
+ .release_xprt = xprt_release_xprt,
+ .alloc_slot = xprt_alloc_slot,
.free_slot = xprt_free_slot,
.rpcbind = rpcb_getport_async,
.set_port = xs_set_port,
.connect = xs_connect,
.buf_alloc = rpc_malloc,
.buf_free = rpc_free,
+ .prepare_request = xs_stream_prepare_request,
.send_request = xs_tcp_send_request,
.set_retrans_timeout = xprt_set_retrans_timeout_def,
.close = xs_tcp_shutdown,
@@ -2952,9 +2802,8 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
xprt->ops = &xs_local_ops;
xprt->timeout = &xs_local_default_timeout;
- INIT_WORK(&transport->recv_worker, xs_local_data_receive_workfn);
- INIT_DELAYED_WORK(&transport->connect_worker,
- xs_dummy_setup_socket);
+ INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
+ INIT_DELAYED_WORK(&transport->connect_worker, xs_dummy_setup_socket);
switch (sun->sun_family) {
case AF_LOCAL:
@@ -3106,7 +2955,7 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
xprt->connect_timeout = xprt->timeout->to_initval *
(xprt->timeout->to_retries + 1);
- INIT_WORK(&transport->recv_worker, xs_tcp_data_receive_workfn);
+ INIT_WORK(&transport->recv_worker, xs_stream_data_receive_workfn);
INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_setup_socket);
switch (addr->sa_family) {
@@ -3317,12 +3166,8 @@ static int param_set_uint_minmax(const char *val,
static int param_set_portnr(const char *val, const struct kernel_param *kp)
{
- if (kp->arg == &xprt_min_resvport)
- return param_set_uint_minmax(val, kp,
- RPC_MIN_RESVPORT,
- xprt_max_resvport);
return param_set_uint_minmax(val, kp,
- xprt_min_resvport,
+ RPC_MIN_RESVPORT,
RPC_MAX_RESVPORT);
}
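
With the cross-coupled bounds gone, each sysctl endpoint is validated only against the absolute RPC port limits, so xprt_min_resvport may transiently exceed xprt_max_resvport; xs_get_random_port() above catches that case at bind time. A userspace sketch of the relaxed check (set_portnr and the constant values are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define RPC_MIN_RESVPORT 1u
#define RPC_MAX_RESVPORT 65535u

static int set_portnr(const char *val, unsigned int *out)
{
	char *end;
	unsigned long v = strtoul(val, &end, 10);

	if (*end || v < RPC_MIN_RESVPORT || v > RPC_MAX_RESVPORT)
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int min, max;

	/* Both succeed even though min > max; the transport copes later. */
	printf("%d %d\n", set_portnr("900", &min), set_portnr("700", &max));
	return 0;
}
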
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 57d0d871dcf7..33e67bd1dc34 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -370,7 +370,7 @@ static uint32_t amt_host_if_call(struct amt_host_if *acmd,
unsigned int expected_sz)
{
uint32_t in_buf_sz;
- uint32_t out_buf_sz;
+ ssize_t out_buf_sz;
ssize_t written;
uint32_t status;
struct amt_host_if_resp_header *msg_hdr;
diff --git a/scripts/Makefile b/scripts/Makefile
index 61affa300d25..ece52ff20171 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -39,8 +39,7 @@ build_unifdef: $(obj)/unifdef
subdir-$(CONFIG_MODVERSIONS) += genksyms
subdir-y += mod
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
-subdir-$(CONFIG_DTC) += dtc
subdir-$(CONFIG_GDB_SCRIPTS) += gdb
# Let clean descend into subdirs
-subdir- += basic kconfig package gcc-plugins
+subdir- += basic dtc kconfig package gcc-plugins
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 61e596650ed3..8fe4468f9bda 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -283,7 +283,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb FORCE
quiet_cmd_dtc = DTC $@
cmd_dtc = mkdir -p $(dir ${dtc-tmp}) ; \
- $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
+ $(HOSTCC) -E $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
$(DTC) -O dtb -o $@ -b 0 \
$(addprefix -i,$(dir $<) $(DTC_INCLUDE)) $(DTC_FLAGS) \
-d $(depfile).dtc.tmp $(dtc-tmp) ; \
diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile
index 1c943e03eaf2..056d5da6c477 100644
--- a/scripts/dtc/Makefile
+++ b/scripts/dtc/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
# scripts/dtc makefile
-hostprogs-y := dtc
+hostprogs-$(CONFIG_DTC) := dtc
always := $(hostprogs-y)
dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \
@@ -11,6 +11,13 @@ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o
# Source files need to get at the userspace version of libfdt_env.h to compile
HOST_EXTRACFLAGS := -I$(src)/libfdt
+ifeq ($(wildcard /usr/include/yaml.h),)
+HOST_EXTRACFLAGS += -DNO_YAML
+else
+dtc-objs += yamltree.o
+HOSTLDLIBS_dtc := -lyaml
+endif
+
# Generated files need one more search path to include headers in source tree
HOSTCFLAGS_dtc-lexer.lex.o := -I$(src)
HOSTCFLAGS_dtc-parser.tab.o := -I$(src)
diff --git a/scripts/dtc/Makefile.dtc b/scripts/dtc/Makefile.dtc
index bece49b35535..d4375630a7f7 100644
--- a/scripts/dtc/Makefile.dtc
+++ b/scripts/dtc/Makefile.dtc
@@ -14,5 +14,9 @@ DTC_SRCS = \
treesource.c \
util.c
+ifneq ($(NO_YAML),1)
+DTC_SRCS += yamltree.c
+endif
+
DTC_GEN_SRCS = dtc-lexer.lex.c dtc-parser.tab.c
DTC_OBJS = $(DTC_SRCS:%.c=%.o) $(DTC_GEN_SRCS:%.c=%.o)
diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c
index a2cc1036c915..9c9b0c328af6 100644
--- a/scripts/dtc/checks.c
+++ b/scripts/dtc/checks.c
@@ -962,6 +962,143 @@ static void check_simple_bus_reg(struct check *c, struct dt_info *dti, struct no
}
WARNING(simple_bus_reg, check_simple_bus_reg, NULL, &reg_format, &simple_bus_bridge);
+static const struct bus_type i2c_bus = {
+ .name = "i2c-bus",
+};
+
+static void check_i2c_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
+{
+ if (strprefixeq(node->name, node->basenamelen, "i2c-bus") ||
+ strprefixeq(node->name, node->basenamelen, "i2c-arb")) {
+ node->bus = &i2c_bus;
+ } else if (strprefixeq(node->name, node->basenamelen, "i2c")) {
+ struct node *child;
+ for_each_child(node, child) {
+ if (strprefixeq(child->name, node->basenamelen, "i2c-bus"))
+ return;
+ }
+ node->bus = &i2c_bus;
+ } else
+ return;
+
+ if (!node->children)
+ return;
+
+ if (node_addr_cells(node) != 1)
+ FAIL(c, dti, node, "incorrect #address-cells for I2C bus");
+ if (node_size_cells(node) != 0)
+ FAIL(c, dti, node, "incorrect #size-cells for I2C bus");
+
+}
+WARNING(i2c_bus_bridge, check_i2c_bus_bridge, NULL, &addr_size_cells);
+
+static void check_i2c_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ const char *unitname = get_unitname(node);
+ char unit_addr[17];
+ uint32_t reg = 0;
+ int len;
+ cell_t *cells = NULL;
+
+ if (!node->parent || (node->parent->bus != &i2c_bus))
+ return;
+
+ prop = get_property(node, "reg");
+ if (prop)
+ cells = (cell_t *)prop->val.val;
+
+ if (!cells) {
+ FAIL(c, dti, node, "missing or empty reg property");
+ return;
+ }
+
+ reg = fdt32_to_cpu(*cells);
+ snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
+ if (!streq(unitname, unit_addr))
+ FAIL(c, dti, node, "I2C bus unit address format error, expected \"%s\"",
+ unit_addr);
+
+ for (len = prop->val.len; len > 0; len -= 4) {
+ reg = fdt32_to_cpu(*(cells++));
+ if (reg > 0x3ff)
+ FAIL_PROP(c, dti, node, prop, "I2C address must be less than 10-bits, got \"0x%x\"",
+ reg);
+
+ }
+}
+WARNING(i2c_bus_reg, check_i2c_bus_reg, NULL, &reg_format, &i2c_bus_bridge);
+
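
The new check insists that a child's unit address match its first reg cell rendered as bare lowercase hex, and that every address fit in 10 bits. A standalone sketch of the comparison (unit_addr_matches is a hypothetical name):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int unit_addr_matches(const char *unitname, uint32_t reg)
{
	char buf[17];

	snprintf(buf, sizeof(buf), "%x", reg);	/* bare lowercase hex, no 0x */
	return strcmp(unitname, buf) == 0;
}

int main(void)
{
	printf("%d\n", unit_addr_matches("1a", 0x1a));		/* 1: ok      */
	printf("%d\n", unit_addr_matches("0x1a", 0x1a));	/* 0: flagged */
	return 0;
}
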
+static const struct bus_type spi_bus = {
+ .name = "spi-bus",
+};
+
+static void check_spi_bus_bridge(struct check *c, struct dt_info *dti, struct node *node)
+{
+
+ if (strprefixeq(node->name, node->basenamelen, "spi")) {
+ node->bus = &spi_bus;
+ } else {
+ /* Try to detect SPI buses which don't have proper node name */
+ struct node *child;
+
+ if (node_addr_cells(node) != 1 || node_size_cells(node) != 0)
+ return;
+
+ for_each_child(node, child) {
+ struct property *prop;
+ for_each_property(child, prop) {
+ if (strprefixeq(prop->name, 4, "spi-")) {
+ node->bus = &spi_bus;
+ break;
+ }
+ }
+ if (node->bus == &spi_bus)
+ break;
+ }
+
+ if (node->bus == &spi_bus && get_property(node, "reg"))
+ FAIL(c, dti, node, "node name for SPI buses should be 'spi'");
+ }
+ if (node->bus != &spi_bus || !node->children)
+ return;
+
+ if (node_addr_cells(node) != 1)
+ FAIL(c, dti, node, "incorrect #address-cells for SPI bus");
+ if (node_size_cells(node) != 0)
+ FAIL(c, dti, node, "incorrect #size-cells for SPI bus");
+
+}
+WARNING(spi_bus_bridge, check_spi_bus_bridge, NULL, &addr_size_cells);
+
+static void check_spi_bus_reg(struct check *c, struct dt_info *dti, struct node *node)
+{
+ struct property *prop;
+ const char *unitname = get_unitname(node);
+ char unit_addr[9];
+ uint32_t reg = 0;
+ cell_t *cells = NULL;
+
+ if (!node->parent || (node->parent->bus != &spi_bus))
+ return;
+
+ prop = get_property(node, "reg");
+ if (prop)
+ cells = (cell_t *)prop->val.val;
+
+ if (!cells) {
+ FAIL(c, dti, node, "missing or empty reg property");
+ return;
+ }
+
+ reg = fdt32_to_cpu(*cells);
+ snprintf(unit_addr, sizeof(unit_addr), "%x", reg);
+ if (!streq(unitname, unit_addr))
+ FAIL(c, dti, node, "SPI bus unit address format error, expected \"%s\"",
+ unit_addr);
+}
+WARNING(spi_bus_reg, check_spi_bus_reg, NULL, &reg_format, &spi_bus_bridge);
+
static void check_unit_address_format(struct check *c, struct dt_info *dti,
struct node *node)
{
@@ -1582,6 +1719,12 @@ static struct check *check_table[] = {
&simple_bus_bridge,
&simple_bus_reg,
+ &i2c_bus_bridge,
+ &i2c_bus_reg,
+
+ &spi_bus_bridge,
+ &spi_bus_reg,
+
&avoid_default_addr_size,
&avoid_unnecessary_addr_size,
&unique_unit_address,
diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c
index aa37a16c8891..4a204145cc7b 100644
--- a/scripts/dtc/data.c
+++ b/scripts/dtc/data.c
@@ -74,7 +74,8 @@ struct data data_copy_escape_string(const char *s, int len)
struct data d;
char *q;
- d = data_grow_for(empty_data, len + 1);
+ d = data_add_marker(empty_data, TYPE_STRING, NULL);
+ d = data_grow_for(d, len + 1);
q = d.val;
while (i < len) {
@@ -94,6 +95,7 @@ struct data data_copy_file(FILE *f, size_t maxlen)
{
struct data d = empty_data;
+ d = data_add_marker(d, TYPE_NONE, NULL);
while (!feof(f) && (d.len < maxlen)) {
size_t chunksize, ret;
diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y
index 011a5b25539a..dd70ebf386f4 100644
--- a/scripts/dtc/dtc-parser.y
+++ b/scripts/dtc/dtc-parser.y
@@ -287,6 +287,7 @@ propdata:
}
| propdataprefix DT_REF
{
+ $1 = data_add_marker($1, TYPE_STRING, $2);
$$ = data_add_marker($1, REF_PATH, $2);
}
| propdataprefix DT_INCBIN '(' DT_STRING ',' integer_prim ',' integer_prim ')'
@@ -340,22 +341,27 @@ arrayprefix:
DT_BITS DT_LITERAL '<'
{
unsigned long long bits;
+ enum markertype type = TYPE_UINT32;
bits = $2;
- if ((bits != 8) && (bits != 16) &&
- (bits != 32) && (bits != 64)) {
+ switch (bits) {
+ case 8: type = TYPE_UINT8; break;
+ case 16: type = TYPE_UINT16; break;
+ case 32: type = TYPE_UINT32; break;
+ case 64: type = TYPE_UINT64; break;
+ default:
ERROR(&@2, "Array elements must be"
" 8, 16, 32 or 64-bits");
bits = 32;
}
- $$.data = empty_data;
+ $$.data = data_add_marker(empty_data, type, NULL);
$$.bits = bits;
}
| '<'
{
- $$.data = empty_data;
+ $$.data = data_add_marker(empty_data, TYPE_UINT32, NULL);
$$.bits = 32;
}
| arrayprefix integer_prim
@@ -499,7 +505,7 @@ integer_unary:
bytestring:
/* empty */
{
- $$ = empty_data;
+ $$ = data_add_marker(empty_data, TYPE_UINT8, NULL);
}
| bytestring DT_BYTE
{
diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c
index c36994e6eac5..64134aadb997 100644
--- a/scripts/dtc/dtc.c
+++ b/scripts/dtc/dtc.c
@@ -95,6 +95,9 @@ static const char * const usage_opts_help[] = {
"\n\tOutput formats are:\n"
"\t\tdts - device tree source text\n"
"\t\tdtb - device tree blob\n"
+#ifndef NO_YAML
+ "\t\tyaml - device tree encoded as YAML\n"
+#endif
"\t\tasm - assembler source",
"\n\tBlob version to produce, defaults to "stringify(DEFAULT_FDT_VERSION)" (for dtb and asm output)",
"\n\tOutput dependency file",
@@ -128,6 +131,8 @@ static const char *guess_type_by_name(const char *fname, const char *fallback)
return fallback;
if (!strcasecmp(s, ".dts"))
return "dts";
+ if (!strcasecmp(s, ".yaml"))
+ return "yaml";
if (!strcasecmp(s, ".dtb"))
return "dtb";
return fallback;
@@ -350,6 +355,12 @@ int main(int argc, char *argv[])
if (streq(outform, "dts")) {
dt_to_source(outf, dti);
+#ifndef NO_YAML
+ } else if (streq(outform, "yaml")) {
+ if (!streq(inform, "dts"))
+ die("YAML output format requires dts input format\n");
+ dt_to_yaml(outf, dti);
+#endif
} else if (streq(outform, "dtb")) {
dt_to_blob(outf, dti, outversion);
} else if (streq(outform, "asm")) {
diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h
index 6d667701ab6a..cbe541525c2c 100644
--- a/scripts/dtc/dtc.h
+++ b/scripts/dtc/dtc.h
@@ -74,10 +74,17 @@ typedef uint32_t cell_t;
/* Data blobs */
enum markertype {
+ TYPE_NONE,
REF_PHANDLE,
REF_PATH,
LABEL,
+ TYPE_UINT8,
+ TYPE_UINT16,
+ TYPE_UINT32,
+ TYPE_UINT64,
+ TYPE_STRING,
};
+extern const char *markername(enum markertype markertype);
struct marker {
enum markertype type;
@@ -101,6 +108,8 @@ struct data {
for_each_marker(m) \
if ((m)->type == (t))
+size_t type_marker_length(struct marker *m);
+
void data_free(struct data d);
struct data data_grow_for(struct data d, int xlen);
@@ -290,6 +299,10 @@ struct dt_info *dt_from_blob(const char *fname);
void dt_to_source(FILE *f, struct dt_info *dti);
struct dt_info *dt_from_source(const char *f);
+/* YAML source */
+
+void dt_to_yaml(FILE *f, struct dt_info *dti);
+
/* FS trees */
struct dt_info *dt_from_fs(const char *dirname);
diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c
index 8d268fb785db..851ea87dbc0f 100644
--- a/scripts/dtc/flattree.c
+++ b/scripts/dtc/flattree.c
@@ -393,7 +393,7 @@ void dt_to_blob(FILE *f, struct dt_info *dti, int version)
padlen = 0;
if (quiet < 1)
fprintf(stderr,
- "Warning: blob size %d >= minimum size %d\n",
+ "Warning: blob size %"PRIu32" >= minimum size %d\n",
fdt32_to_cpu(fdt.totalsize), minsize);
}
}
diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c
index 7855a1787763..ae03b1112961 100644
--- a/scripts/dtc/libfdt/fdt.c
+++ b/scripts/dtc/libfdt/fdt.c
@@ -55,7 +55,12 @@
#include "libfdt_internal.h"
-int fdt_check_header(const void *fdt)
+/*
+ * Minimal sanity check for a read-only tree. fdt_ro_probe_() checks
+ * that the given buffer contains what appears to be a flattened
+ * device tree with sane information in its header.
+ */
+int fdt_ro_probe_(const void *fdt)
{
if (fdt_magic(fdt) == FDT_MAGIC) {
/* Complete tree */
@@ -74,6 +79,78 @@ int fdt_check_header(const void *fdt)
return 0;
}
+static int check_off_(uint32_t hdrsize, uint32_t totalsize, uint32_t off)
+{
+ return (off >= hdrsize) && (off <= totalsize);
+}
+
+static int check_block_(uint32_t hdrsize, uint32_t totalsize,
+ uint32_t base, uint32_t size)
+{
+ if (!check_off_(hdrsize, totalsize, base))
+ return 0; /* block start out of bounds */
+ if ((base + size) < base)
+ return 0; /* overflow */
+ if (!check_off_(hdrsize, totalsize, base + size))
+ return 0; /* block end out of bounds */
+ return 1;
+}
+
+size_t fdt_header_size_(uint32_t version)
+{
+ if (version <= 1)
+ return FDT_V1_SIZE;
+ else if (version <= 2)
+ return FDT_V2_SIZE;
+ else if (version <= 3)
+ return FDT_V3_SIZE;
+ else if (version <= 16)
+ return FDT_V16_SIZE;
+ else
+ return FDT_V17_SIZE;
+}
+
+int fdt_check_header(const void *fdt)
+{
+ size_t hdrsize;
+
+ if (fdt_magic(fdt) != FDT_MAGIC)
+ return -FDT_ERR_BADMAGIC;
+ hdrsize = fdt_header_size(fdt);
+ if ((fdt_version(fdt) < FDT_FIRST_SUPPORTED_VERSION)
+ || (fdt_last_comp_version(fdt) > FDT_LAST_SUPPORTED_VERSION))
+ return -FDT_ERR_BADVERSION;
+ if (fdt_version(fdt) < fdt_last_comp_version(fdt))
+ return -FDT_ERR_BADVERSION;
+
+ if ((fdt_totalsize(fdt) < hdrsize)
+ || (fdt_totalsize(fdt) > INT_MAX))
+ return -FDT_ERR_TRUNCATED;
+
+ /* Bounds check memrsv block */
+ if (!check_off_(hdrsize, fdt_totalsize(fdt), fdt_off_mem_rsvmap(fdt)))
+ return -FDT_ERR_TRUNCATED;
+
+ /* Bounds check structure block */
+ if (fdt_version(fdt) < 17) {
+ if (!check_off_(hdrsize, fdt_totalsize(fdt),
+ fdt_off_dt_struct(fdt)))
+ return -FDT_ERR_TRUNCATED;
+ } else {
+ if (!check_block_(hdrsize, fdt_totalsize(fdt),
+ fdt_off_dt_struct(fdt),
+ fdt_size_dt_struct(fdt)))
+ return -FDT_ERR_TRUNCATED;
+ }
+
+ /* Bounds check strings block */
+ if (!check_block_(hdrsize, fdt_totalsize(fdt),
+ fdt_off_dt_strings(fdt), fdt_size_dt_strings(fdt)))
+ return -FDT_ERR_TRUNCATED;
+
+ return 0;
+}
+
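
check_block_() rejects wrap-around before testing the end offset; without the (base + size) < base guard, a block whose 32-bit sum overflows could land back inside the valid window and pass. A compile-and-run sketch of that failure mode:

#include <stdio.h>
#include <stdint.h>

static int check_off(uint32_t hdr, uint32_t total, uint32_t off)
{
	return off >= hdr && off <= total;
}

static int check_block(uint32_t hdr, uint32_t total, uint32_t base, uint32_t size)
{
	if (!check_off(hdr, total, base))
		return 0;
	if (base + size < base)		/* unsigned wrap-around */
		return 0;
	return check_off(hdr, total, base + size);
}

int main(void)
{
	/* 0xffffff00 + 0x200 wraps to 0x100, which would be "in bounds"
	 * without the guard; with it, the block is rejected. */
	printf("%d\n", check_block(40, 0xfffffffeu, 0xffffff00u, 0x200));	/* 0 */
	printf("%d\n", check_block(40, 0x1000, 0x100, 0x200));			/* 1 */
	return 0;
}
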
const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len)
{
unsigned absoffset = offset + fdt_off_dt_struct(fdt);
@@ -244,7 +321,7 @@ const char *fdt_find_string_(const char *strtab, int tabsize, const char *s)
int fdt_move(const void *fdt, void *buf, int bufsize)
{
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
if (fdt_totalsize(fdt) > bufsize)
return -FDT_ERR_NOSPACE;
diff --git a/scripts/dtc/libfdt/fdt_addresses.c b/scripts/dtc/libfdt/fdt_addresses.c
index eff4dbcc729d..49537b578d03 100644
--- a/scripts/dtc/libfdt/fdt_addresses.c
+++ b/scripts/dtc/libfdt/fdt_addresses.c
@@ -1,6 +1,7 @@
/*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2014 David Gibson <david@gibson.dropbear.id.au>
+ * Copyright (C) 2018 embedded brains GmbH
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
@@ -55,42 +56,32 @@
#include "libfdt_internal.h"
-int fdt_address_cells(const void *fdt, int nodeoffset)
+static int fdt_cells(const void *fdt, int nodeoffset, const char *name)
{
- const fdt32_t *ac;
+ const fdt32_t *c;
int val;
int len;
- ac = fdt_getprop(fdt, nodeoffset, "#address-cells", &len);
- if (!ac)
+ c = fdt_getprop(fdt, nodeoffset, name, &len);
+ if (!c)
return 2;
- if (len != sizeof(*ac))
+ if (len != sizeof(*c))
return -FDT_ERR_BADNCELLS;
- val = fdt32_to_cpu(*ac);
+ val = fdt32_to_cpu(*c);
if ((val <= 0) || (val > FDT_MAX_NCELLS))
return -FDT_ERR_BADNCELLS;
return val;
}
-int fdt_size_cells(const void *fdt, int nodeoffset)
+int fdt_address_cells(const void *fdt, int nodeoffset)
{
- const fdt32_t *sc;
- int val;
- int len;
-
- sc = fdt_getprop(fdt, nodeoffset, "#size-cells", &len);
- if (!sc)
- return 2;
-
- if (len != sizeof(*sc))
- return -FDT_ERR_BADNCELLS;
-
- val = fdt32_to_cpu(*sc);
- if ((val < 0) || (val > FDT_MAX_NCELLS))
- return -FDT_ERR_BADNCELLS;
+ return fdt_cells(fdt, nodeoffset, "#address-cells");
+}
- return val;
+int fdt_size_cells(const void *fdt, int nodeoffset)
+{
+ return fdt_cells(fdt, nodeoffset, "#size-cells");
}
diff --git a/scripts/dtc/libfdt/fdt_overlay.c b/scripts/dtc/libfdt/fdt_overlay.c
index bf75388ec9a2..5fdab6c6371d 100644
--- a/scripts/dtc/libfdt/fdt_overlay.c
+++ b/scripts/dtc/libfdt/fdt_overlay.c
@@ -697,7 +697,7 @@ static int get_path_len(const void *fdt, int nodeoffset)
int len = 0, namelen;
const char *name;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
for (;;) {
name = fdt_get_name(fdt, nodeoffset, &namelen);
@@ -866,8 +866,8 @@ int fdt_overlay_apply(void *fdt, void *fdto)
uint32_t delta = fdt_get_max_phandle(fdt);
int ret;
- FDT_CHECK_HEADER(fdt);
- FDT_CHECK_HEADER(fdto);
+ FDT_RO_PROBE(fdt);
+ FDT_RO_PROBE(fdto);
ret = overlay_adjust_local_phandles(fdto, delta);
if (ret)
diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c
index dfb3236da388..eafc14282892 100644
--- a/scripts/dtc/libfdt/fdt_ro.c
+++ b/scripts/dtc/libfdt/fdt_ro.c
@@ -76,17 +76,72 @@ static int fdt_nodename_eq_(const void *fdt, int offset,
return 0;
}
+const char *fdt_get_string(const void *fdt, int stroffset, int *lenp)
+{
+ uint32_t absoffset = stroffset + fdt_off_dt_strings(fdt);
+ size_t len;
+ int err;
+ const char *s, *n;
+
+ err = fdt_ro_probe_(fdt);
+ if (err != 0)
+ goto fail;
+
+ err = -FDT_ERR_BADOFFSET;
+ if (absoffset >= fdt_totalsize(fdt))
+ goto fail;
+ len = fdt_totalsize(fdt) - absoffset;
+
+ if (fdt_magic(fdt) == FDT_MAGIC) {
+ if (stroffset < 0)
+ goto fail;
+ if (fdt_version(fdt) >= 17) {
+ if (stroffset >= fdt_size_dt_strings(fdt))
+ goto fail;
+ if ((fdt_size_dt_strings(fdt) - stroffset) < len)
+ len = fdt_size_dt_strings(fdt) - stroffset;
+ }
+ } else if (fdt_magic(fdt) == FDT_SW_MAGIC) {
+ if ((stroffset >= 0)
+ || (stroffset < -fdt_size_dt_strings(fdt)))
+ goto fail;
+ if ((-stroffset) < len)
+ len = -stroffset;
+ } else {
+ err = -FDT_ERR_INTERNAL;
+ goto fail;
+ }
+
+ s = (const char *)fdt + absoffset;
+ n = memchr(s, '\0', len);
+ if (!n) {
+ /* missing terminating NULL */
+ err = -FDT_ERR_TRUNCATED;
+ goto fail;
+ }
+
+ if (lenp)
+ *lenp = n - s;
+ return s;
+
+fail:
+ if (lenp)
+ *lenp = err;
+ return NULL;
+}
+
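
fdt_string() used to hand back an unchecked pointer into the blob; the new accessor bounds-checks the offset and reports failures through *lenp. A hypothetical wrapper showing the calling convention:

#include <libfdt.h>

static const char *safe_string(const void *fdt, int stroffset)
{
	int len;
	const char *s = fdt_get_string(fdt, stroffset, &len);

	if (!s)
		return "<bad string offset>";	/* len now holds a -FDT_ERR_* code */
	return s;
}
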
const char *fdt_string(const void *fdt, int stroffset)
{
- return (const char *)fdt + fdt_off_dt_strings(fdt) + stroffset;
+ return fdt_get_string(fdt, stroffset, NULL);
}
static int fdt_string_eq_(const void *fdt, int stroffset,
const char *s, int len)
{
- const char *p = fdt_string(fdt, stroffset);
+ int slen;
+ const char *p = fdt_get_string(fdt, stroffset, &slen);
- return (strlen(p) == len) && (memcmp(p, s, len) == 0);
+ return p && (slen == len) && (memcmp(p, s, len) == 0);
}
uint32_t fdt_get_max_phandle(const void *fdt)
@@ -115,21 +170,42 @@ uint32_t fdt_get_max_phandle(const void *fdt)
return 0;
}
+static const struct fdt_reserve_entry *fdt_mem_rsv(const void *fdt, int n)
+{
+ int offset = n * sizeof(struct fdt_reserve_entry);
+ int absoffset = fdt_off_mem_rsvmap(fdt) + offset;
+
+ if (absoffset < fdt_off_mem_rsvmap(fdt))
+ return NULL;
+ if (absoffset > fdt_totalsize(fdt) - sizeof(struct fdt_reserve_entry))
+ return NULL;
+ return fdt_mem_rsv_(fdt, n);
+}
+
int fdt_get_mem_rsv(const void *fdt, int n, uint64_t *address, uint64_t *size)
{
- FDT_CHECK_HEADER(fdt);
- *address = fdt64_to_cpu(fdt_mem_rsv_(fdt, n)->address);
- *size = fdt64_to_cpu(fdt_mem_rsv_(fdt, n)->size);
+ const struct fdt_reserve_entry *re;
+
+ FDT_RO_PROBE(fdt);
+ re = fdt_mem_rsv(fdt, n);
+ if (!re)
+ return -FDT_ERR_BADOFFSET;
+
+ *address = fdt64_ld(&re->address);
+ *size = fdt64_ld(&re->size);
return 0;
}
int fdt_num_mem_rsv(const void *fdt)
{
- int i = 0;
+ int i;
+ const struct fdt_reserve_entry *re;
- while (fdt64_to_cpu(fdt_mem_rsv_(fdt, i)->size) != 0)
- i++;
- return i;
+ for (i = 0; (re = fdt_mem_rsv(fdt, i)) != NULL; i++) {
+ if (fdt64_ld(&re->size) == 0)
+ return i;
+ }
+ return -FDT_ERR_TRUNCATED;
}
static int nextprop_(const void *fdt, int offset)
@@ -161,7 +237,7 @@ int fdt_subnode_offset_namelen(const void *fdt, int offset,
{
int depth;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
for (depth = 0;
(offset >= 0) && (depth >= 0);
@@ -187,7 +263,7 @@ int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen)
const char *p = path;
int offset = 0;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
/* see if we have an alias */
if (*path != '/') {
@@ -237,7 +313,7 @@ const char *fdt_get_name(const void *fdt, int nodeoffset, int *len)
const char *nameptr;
int err;
- if (((err = fdt_check_header(fdt)) != 0)
+ if (((err = fdt_ro_probe_(fdt)) != 0)
|| ((err = fdt_check_node_offset_(fdt, nodeoffset)) < 0))
goto fail;
@@ -303,7 +379,7 @@ static const struct fdt_property *fdt_get_property_by_offset_(const void *fdt,
prop = fdt_offset_ptr_(fdt, offset);
if (lenp)
- *lenp = fdt32_to_cpu(prop->len);
+ *lenp = fdt32_ld(&prop->len);
return prop;
}
@@ -340,7 +416,7 @@ static const struct fdt_property *fdt_get_property_namelen_(const void *fdt,
offset = -FDT_ERR_INTERNAL;
break;
}
- if (fdt_string_eq_(fdt, fdt32_to_cpu(prop->nameoff),
+ if (fdt_string_eq_(fdt, fdt32_ld(&prop->nameoff),
name, namelen)) {
if (poffset)
*poffset = offset;
@@ -393,7 +469,7 @@ const void *fdt_getprop_namelen(const void *fdt, int nodeoffset,
/* Handle realignment */
if (fdt_version(fdt) < 0x10 && (poffset + sizeof(*prop)) % 8 &&
- fdt32_to_cpu(prop->len) >= 8)
+ fdt32_ld(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
@@ -406,12 +482,22 @@ const void *fdt_getprop_by_offset(const void *fdt, int offset,
prop = fdt_get_property_by_offset_(fdt, offset, lenp);
if (!prop)
return NULL;
- if (namep)
- *namep = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ if (namep) {
+ const char *name;
+ int namelen;
+ name = fdt_get_string(fdt, fdt32_ld(&prop->nameoff),
+ &namelen);
+ if (!name) {
+ if (lenp)
+ *lenp = namelen;
+ return NULL;
+ }
+ *namep = name;
+ }
/* Handle realignment */
if (fdt_version(fdt) < 0x10 && (offset + sizeof(*prop)) % 8 &&
- fdt32_to_cpu(prop->len) >= 8)
+ fdt32_ld(&prop->len) >= 8)
return prop->data + 4;
return prop->data;
}
@@ -436,7 +522,7 @@ uint32_t fdt_get_phandle(const void *fdt, int nodeoffset)
return 0;
}
- return fdt32_to_cpu(*php);
+ return fdt32_ld(php);
}
const char *fdt_get_alias_namelen(const void *fdt,
@@ -462,7 +548,7 @@ int fdt_get_path(const void *fdt, int nodeoffset, char *buf, int buflen)
int offset, depth, namelen;
const char *name;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
if (buflen < 2)
return -FDT_ERR_NOSPACE;
@@ -514,7 +600,7 @@ int fdt_supernode_atdepth_offset(const void *fdt, int nodeoffset,
int offset, depth;
int supernodeoffset = -FDT_ERR_INTERNAL;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
if (supernodedepth < 0)
return -FDT_ERR_NOTFOUND;
@@ -573,7 +659,7 @@ int fdt_node_offset_by_prop_value(const void *fdt, int startoffset,
const void *val;
int len;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_getprop(), then if that didn't
@@ -599,7 +685,7 @@ int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle)
if ((phandle == 0) || (phandle == -1))
return -FDT_ERR_BADPHANDLE;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we
* potentially scan each property of a node in
@@ -752,7 +838,7 @@ int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
{
int offset, err;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
/* FIXME: The algorithm here is pretty horrible: we scan each
* property of a node in fdt_node_check_compatible(), then if
@@ -771,3 +857,66 @@ int fdt_node_offset_by_compatible(const void *fdt, int startoffset,
return offset; /* error from fdt_next_node() */
}
+
+int fdt_check_full(const void *fdt, size_t bufsize)
+{
+ int err;
+ int num_memrsv;
+ int offset, nextoffset = 0;
+ uint32_t tag;
+ unsigned depth = 0;
+ const void *prop;
+ const char *propname;
+
+ if (bufsize < FDT_V1_SIZE)
+ return -FDT_ERR_TRUNCATED;
+ err = fdt_check_header(fdt);
+ if (err != 0)
+ return err;
+ if (bufsize < fdt_totalsize(fdt))
+ return -FDT_ERR_TRUNCATED;
+
+ num_memrsv = fdt_num_mem_rsv(fdt);
+ if (num_memrsv < 0)
+ return num_memrsv;
+
+ while (1) {
+ offset = nextoffset;
+ tag = fdt_next_tag(fdt, offset, &nextoffset);
+
+ if (nextoffset < 0)
+ return nextoffset;
+
+ switch (tag) {
+ case FDT_NOP:
+ break;
+
+ case FDT_END:
+ if (depth != 0)
+ return -FDT_ERR_BADSTRUCTURE;
+ return 0;
+
+ case FDT_BEGIN_NODE:
+ depth++;
+ if (depth > INT_MAX)
+ return -FDT_ERR_BADSTRUCTURE;
+ break;
+
+ case FDT_END_NODE:
+ if (depth == 0)
+ return -FDT_ERR_BADSTRUCTURE;
+ depth--;
+ break;
+
+ case FDT_PROP:
+ prop = fdt_getprop_by_offset(fdt, offset, &propname,
+ &err);
+ if (!prop)
+ return err;
+ break;
+
+ default:
+ return -FDT_ERR_INTERNAL;
+ }
+ }
+}
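
fdt_check_full() extends the header check with a walk of the whole structure block, so untrusted input can be vetted once before any other accessor runs. A usage sketch:

#include <libfdt.h>
#include <stddef.h>

/* Returns the blob if it survives full validation, NULL otherwise. */
static const void *validate_blob(const void *buf, size_t size)
{
	if (fdt_check_full(buf, size) != 0)
		return NULL;	/* truncated, misordered, or corrupt */
	return buf;
}
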
diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c
index 9b829051e444..2e49855d7cf8 100644
--- a/scripts/dtc/libfdt/fdt_rw.c
+++ b/scripts/dtc/libfdt/fdt_rw.c
@@ -67,9 +67,9 @@ static int fdt_blocks_misordered_(const void *fdt,
(fdt_off_dt_strings(fdt) + fdt_size_dt_strings(fdt)));
}
-static int fdt_rw_check_header_(void *fdt)
+static int fdt_rw_probe_(void *fdt)
{
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
if (fdt_version(fdt) < 17)
return -FDT_ERR_BADVERSION;
@@ -82,10 +82,10 @@ static int fdt_rw_check_header_(void *fdt)
return 0;
}
-#define FDT_RW_CHECK_HEADER(fdt) \
+#define FDT_RW_PROBE(fdt) \
{ \
int err_; \
- if ((err_ = fdt_rw_check_header_(fdt)) != 0) \
+ if ((err_ = fdt_rw_probe_(fdt)) != 0) \
return err_; \
}
@@ -176,7 +176,7 @@ int fdt_add_mem_rsv(void *fdt, uint64_t address, uint64_t size)
struct fdt_reserve_entry *re;
int err;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
re = fdt_mem_rsv_w_(fdt, fdt_num_mem_rsv(fdt));
err = fdt_splice_mem_rsv_(fdt, re, 0, 1);
@@ -192,7 +192,7 @@ int fdt_del_mem_rsv(void *fdt, int n)
{
struct fdt_reserve_entry *re = fdt_mem_rsv_w_(fdt, n);
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
if (n >= fdt_num_mem_rsv(fdt))
return -FDT_ERR_NOTFOUND;
@@ -252,7 +252,7 @@ int fdt_set_name(void *fdt, int nodeoffset, const char *name)
int oldlen, newlen;
int err;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
namep = (char *)(uintptr_t)fdt_get_name(fdt, nodeoffset, &oldlen);
if (!namep)
@@ -275,7 +275,7 @@ int fdt_setprop_placeholder(void *fdt, int nodeoffset, const char *name,
struct fdt_property *prop;
int err;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
err = fdt_resize_property_(fdt, nodeoffset, name, len, &prop);
if (err == -FDT_ERR_NOTFOUND)
@@ -308,7 +308,7 @@ int fdt_appendprop(void *fdt, int nodeoffset, const char *name,
struct fdt_property *prop;
int err, oldlen, newlen;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
prop = fdt_get_property_w(fdt, nodeoffset, name, &oldlen);
if (prop) {
@@ -334,7 +334,7 @@ int fdt_delprop(void *fdt, int nodeoffset, const char *name)
struct fdt_property *prop;
int len, proplen;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
prop = fdt_get_property_w(fdt, nodeoffset, name, &len);
if (!prop)
@@ -354,7 +354,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset,
uint32_t tag;
fdt32_t *endtag;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
offset = fdt_subnode_offset_namelen(fdt, parentoffset, name, namelen);
if (offset >= 0)
@@ -394,7 +394,7 @@ int fdt_del_node(void *fdt, int nodeoffset)
{
int endoffset;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
endoffset = fdt_node_end_offset_(fdt, nodeoffset);
if (endoffset < 0)
@@ -435,7 +435,7 @@ int fdt_open_into(const void *fdt, void *buf, int bufsize)
const char *fdtend = fdtstart + fdt_totalsize(fdt);
char *tmp;
- FDT_CHECK_HEADER(fdt);
+ FDT_RO_PROBE(fdt);
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
@@ -494,7 +494,7 @@ int fdt_pack(void *fdt)
{
int mem_rsv_size;
- FDT_RW_CHECK_HEADER(fdt);
+ FDT_RW_PROBE(fdt);
mem_rsv_size = (fdt_num_mem_rsv(fdt)+1)
* sizeof(struct fdt_reserve_entry);
diff --git a/scripts/dtc/libfdt/fdt_sw.c b/scripts/dtc/libfdt/fdt_sw.c
index 6d33cc29d022..9fa4a94d83c3 100644
--- a/scripts/dtc/libfdt/fdt_sw.c
+++ b/scripts/dtc/libfdt/fdt_sw.c
@@ -55,21 +55,77 @@
#include "libfdt_internal.h"
-static int fdt_sw_check_header_(void *fdt)
+static int fdt_sw_probe_(void *fdt)
{
- if (fdt_magic(fdt) != FDT_SW_MAGIC)
+ if (fdt_magic(fdt) == FDT_MAGIC)
+ return -FDT_ERR_BADSTATE;
+ else if (fdt_magic(fdt) != FDT_SW_MAGIC)
return -FDT_ERR_BADMAGIC;
- /* FIXME: should check more details about the header state */
return 0;
}
-#define FDT_SW_CHECK_HEADER(fdt) \
+#define FDT_SW_PROBE(fdt) \
+ { \
+ int err; \
+ if ((err = fdt_sw_probe_(fdt)) != 0) \
+ return err; \
+ }
+
+/* 'memrsv' state: Initial state after fdt_create()
+ *
+ * Allowed functions:
+ * fdt_add_reservmap_entry()
+ * fdt_finish_reservemap() [moves to 'struct' state]
+ */
+static int fdt_sw_probe_memrsv_(void *fdt)
+{
+ int err = fdt_sw_probe_(fdt);
+ if (err)
+ return err;
+
+ if (fdt_off_dt_strings(fdt) != 0)
+ return -FDT_ERR_BADSTATE;
+ return 0;
+}
+
+#define FDT_SW_PROBE_MEMRSV(fdt) \
+ { \
+ int err; \
+ if ((err = fdt_sw_probe_memrsv_(fdt)) != 0) \
+ return err; \
+ }
+
+/* 'struct' state: Enter this state after fdt_finish_reservemap()
+ *
+ * Allowed functions:
+ * fdt_begin_node()
+ * fdt_end_node()
+ * fdt_property*()
+ * fdt_finish() [moves to 'complete' state]
+ */
+static int fdt_sw_probe_struct_(void *fdt)
+{
+ int err = fdt_sw_probe_(fdt);
+ if (err)
+ return err;
+
+ if (fdt_off_dt_strings(fdt) != fdt_totalsize(fdt))
+ return -FDT_ERR_BADSTATE;
+ return 0;
+}
+
+#define FDT_SW_PROBE_STRUCT(fdt) \
{ \
int err; \
- if ((err = fdt_sw_check_header_(fdt)) != 0) \
+ if ((err = fdt_sw_probe_struct_(fdt)) != 0) \
return err; \
}
+/* 'complete' state: Enter this state after fdt_finish()
+ *
+ * Allowed functions: none
+ */
+
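
The sequential-write code now encodes its phase in off_dt_strings: 0 while the reservation map is still open, and totalsize once fdt_finish_reservemap() has run. That is why fdt_create() below initializes the field to 0 and fdt_resize() rewrites it only when non-zero. A sketch of the tag decoding (sw_state_of is a hypothetical helper):

#include <libfdt.h>

enum sw_state { SW_MEMRSV, SW_STRUCT, SW_BAD };

static enum sw_state sw_state_of(const void *fdt)
{
	if (fdt_off_dt_strings(fdt) == 0)
		return SW_MEMRSV;	/* still adding reservemap entries */
	if (fdt_off_dt_strings(fdt) == fdt_totalsize(fdt))
		return SW_STRUCT;	/* nodes/properties may be appended */
	return SW_BAD;
}
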
static void *fdt_grab_space_(void *fdt, size_t len)
{
int offset = fdt_size_dt_struct(fdt);
@@ -87,9 +143,11 @@ static void *fdt_grab_space_(void *fdt, size_t len)
int fdt_create(void *buf, int bufsize)
{
+ const size_t hdrsize = FDT_ALIGN(sizeof(struct fdt_header),
+ sizeof(struct fdt_reserve_entry));
void *fdt = buf;
- if (bufsize < sizeof(struct fdt_header))
+ if (bufsize < hdrsize)
return -FDT_ERR_NOSPACE;
memset(buf, 0, bufsize);
@@ -99,10 +157,9 @@ int fdt_create(void *buf, int bufsize)
fdt_set_last_comp_version(fdt, FDT_FIRST_SUPPORTED_VERSION);
fdt_set_totalsize(fdt, bufsize);
- fdt_set_off_mem_rsvmap(fdt, FDT_ALIGN(sizeof(struct fdt_header),
- sizeof(struct fdt_reserve_entry)));
+ fdt_set_off_mem_rsvmap(fdt, hdrsize);
fdt_set_off_dt_struct(fdt, fdt_off_mem_rsvmap(fdt));
- fdt_set_off_dt_strings(fdt, bufsize);
+ fdt_set_off_dt_strings(fdt, 0);
return 0;
}
@@ -112,11 +169,14 @@ int fdt_resize(void *fdt, void *buf, int bufsize)
size_t headsize, tailsize;
char *oldtail, *newtail;
- FDT_SW_CHECK_HEADER(fdt);
+ FDT_SW_PROBE(fdt);
- headsize = fdt_off_dt_struct(fdt);
+ headsize = fdt_off_dt_struct(fdt) + fdt_size_dt_struct(fdt);
tailsize = fdt_size_dt_strings(fdt);
+ if ((headsize + tailsize) > fdt_totalsize(fdt))
+ return -FDT_ERR_INTERNAL;
+
if ((headsize + tailsize) > bufsize)
return -FDT_ERR_NOSPACE;
@@ -133,8 +193,9 @@ int fdt_resize(void *fdt, void *buf, int bufsize)
memmove(buf, fdt, headsize);
}
- fdt_set_off_dt_strings(buf, bufsize);
fdt_set_totalsize(buf, bufsize);
+ if (fdt_off_dt_strings(buf))
+ fdt_set_off_dt_strings(buf, bufsize);
return 0;
}
@@ -144,10 +205,7 @@ int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
struct fdt_reserve_entry *re;
int offset;
- FDT_SW_CHECK_HEADER(fdt);
-
- if (fdt_size_dt_struct(fdt))
- return -FDT_ERR_BADSTATE;
+ FDT_SW_PROBE_MEMRSV(fdt);
offset = fdt_off_dt_struct(fdt);
if ((offset + sizeof(*re)) > fdt_totalsize(fdt))
@@ -164,16 +222,23 @@ int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size)
int fdt_finish_reservemap(void *fdt)
{
- return fdt_add_reservemap_entry(fdt, 0, 0);
+ int err = fdt_add_reservemap_entry(fdt, 0, 0);
+
+ if (err)
+ return err;
+
+ fdt_set_off_dt_strings(fdt, fdt_totalsize(fdt));
+ return 0;
}
int fdt_begin_node(void *fdt, const char *name)
{
struct fdt_node_header *nh;
- int namelen = strlen(name) + 1;
+ int namelen;
- FDT_SW_CHECK_HEADER(fdt);
+ FDT_SW_PROBE_STRUCT(fdt);
+ namelen = strlen(name) + 1;
nh = fdt_grab_space_(fdt, sizeof(*nh) + FDT_TAGALIGN(namelen));
if (! nh)
return -FDT_ERR_NOSPACE;
@@ -187,7 +252,7 @@ int fdt_end_node(void *fdt)
{
fdt32_t *en;
- FDT_SW_CHECK_HEADER(fdt);
+ FDT_SW_PROBE_STRUCT(fdt);
en = fdt_grab_space_(fdt, FDT_TAGSIZE);
if (! en)
@@ -225,7 +290,7 @@ int fdt_property_placeholder(void *fdt, const char *name, int len, void **valp)
struct fdt_property *prop;
int nameoff;
- FDT_SW_CHECK_HEADER(fdt);
+ FDT_SW_PROBE_STRUCT(fdt);
nameoff = fdt_find_add_string_(fdt, name);
if (nameoff == 0)
@@ -262,7 +327,7 @@ int fdt_finish(void *fdt)
uint32_t tag;
int offset, nextoffset;
- FDT_SW_CHECK_HEADER(fdt);
+ FDT_SW_PROBE_STRUCT(fdt);
/* Add terminator */
end = fdt_grab_space_(fdt, sizeof(*end));
diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h
index 1e27780e1185..2bd151dd355f 100644
--- a/scripts/dtc/libfdt/libfdt.h
+++ b/scripts/dtc/libfdt/libfdt.h
@@ -90,8 +90,9 @@
/* Error codes: codes for bad device tree blobs */
#define FDT_ERR_TRUNCATED 8
- /* FDT_ERR_TRUNCATED: Structure block of the given device tree
- * ends without an FDT_END tag. */
+ /* FDT_ERR_TRUNCATED: FDT or a sub-block is improperly
+ * bounded: it overflows, extends outside the allowed
+ * limits, or lacks proper termination. */
#define FDT_ERR_BADMAGIC 9
/* FDT_ERR_BADMAGIC: Given "device tree" appears not to be a
* device tree at all - it is missing the flattened device
@@ -153,6 +154,29 @@ static inline void *fdt_offset_ptr_w(void *fdt, int offset, int checklen)
uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset);
+/*
+ * Alignment helpers:
+ * These helpers access words from a device tree blob. They're
+ * built to work even with unaligned pointers on platforms (like
+ * ARM) that don't like unaligned loads and stores.
+ */
+
+static inline uint32_t fdt32_ld(const fdt32_t *p)
+{
+ fdt32_t v;
+
+ memcpy(&v, p, sizeof(v));
+ return fdt32_to_cpu(v);
+}
+
+static inline uint64_t fdt64_ld(const fdt64_t *p)
+{
+ fdt64_t v;
+
+ memcpy(&v, p, sizeof(v));
+ return fdt64_to_cpu(v);
+}
+
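
The memcpy lets the compiler synthesize a safe access sequence on strict-alignment targets, where a 64-bit value inside the blob may sit on only a 4-byte boundary. A hypothetical caller that loads a u64 property through the helper instead of dereferencing the pointer directly:

#include <libfdt.h>
#include <stdint.h>

/* Returns 0 on absence for brevity; a real caller would distinguish errors. */
static uint64_t prop_u64(const void *fdt, int node, const char *name)
{
	int len;
	const fdt64_t *p = fdt_getprop(fdt, node, name, &len);

	if (!p || len != sizeof(*p))
		return 0;
	return fdt64_ld(p);	/* memcpy-based, alignment-agnostic load */
}
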
/**********************************************************************/
/* Traversal functions */
/**********************************************************************/
@@ -213,7 +237,7 @@ int fdt_next_subnode(const void *fdt, int offset);
/* General functions */
/**********************************************************************/
#define fdt_get_header(fdt, field) \
- (fdt32_to_cpu(((const struct fdt_header *)(fdt))->field))
+ (fdt32_ld(&((const struct fdt_header *)(fdt))->field))
#define fdt_magic(fdt) (fdt_get_header(fdt, magic))
#define fdt_totalsize(fdt) (fdt_get_header(fdt, totalsize))
#define fdt_off_dt_struct(fdt) (fdt_get_header(fdt, off_dt_struct))
@@ -244,18 +268,31 @@ fdt_set_hdr_(size_dt_struct);
#undef fdt_set_hdr_
/**
- * fdt_check_header - sanity check a device tree or possible device tree
+ * fdt_header_size - return the size of the tree's header
+ * @fdt: pointer to a flattened device tree
+ */
+size_t fdt_header_size_(uint32_t version);
+static inline size_t fdt_header_size(const void *fdt)
+{
+ return fdt_header_size_(fdt_version(fdt));
+}
+
+/**
+ * fdt_check_header - sanity check a device tree header
* @fdt: pointer to data which might be a flattened device tree
*
* fdt_check_header() checks that the given buffer contains what
- * appears to be a flattened device tree with sane information in its
- * header.
+ * appears to be a flattened device tree, and that the header contains
+ * valid information (to the extent that can be determined from the
+ * header alone).
*
* returns:
* 0, if the buffer appears to contain a valid device tree
* -FDT_ERR_BADMAGIC,
* -FDT_ERR_BADVERSION,
- * -FDT_ERR_BADSTATE, standard meanings, as above
+ * -FDT_ERR_BADSTATE,
+ * -FDT_ERR_TRUNCATED, standard meanings, as above
*/
int fdt_check_header(const void *fdt);
@@ -284,6 +321,24 @@ int fdt_move(const void *fdt, void *buf, int bufsize);
/* Read-only functions */
/**********************************************************************/
+int fdt_check_full(const void *fdt, size_t bufsize);
+
+/**
+ * fdt_get_string - retrieve a string from the strings block of a device tree
+ * @fdt: pointer to the device tree blob
+ * @stroffset: offset of the string within the strings block (native endian)
+ * @lenp: optional pointer to return the string's length
+ *
+ * fdt_get_string() retrieves a pointer to a single string from the
+ * strings block of the device tree blob at fdt, and optionally also
+ * returns the string's length in *lenp.
+ *
+ * returns:
+ * a pointer to the string, on success
+ * NULL, if stroffset is out of bounds, or doesn't point to a valid string
+ */
+const char *fdt_get_string(const void *fdt, int stroffset, int *lenp);
+
/**
* fdt_string - retrieve a string from the strings block of a device tree
* @fdt: pointer to the device tree blob
@@ -294,7 +349,7 @@ int fdt_move(const void *fdt, void *buf, int bufsize);
*
* returns:
* a pointer to the string, on success
- * NULL, if stroffset is out of bounds
+ * NULL, if stroffset is out of bounds, or doesn't point to a valid string
*/
const char *fdt_string(const void *fdt, int stroffset);
@@ -1090,7 +1145,7 @@ int fdt_address_cells(const void *fdt, int nodeoffset);
*
* returns:
* 0 <= n < FDT_MAX_NCELLS, on success
- * 2, if the node has no #address-cells property
+ * 2, if the node has no #size-cells property
* -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid
* #size-cells property
* -FDT_ERR_BADMAGIC,
@@ -1313,10 +1368,13 @@ static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val)
fdt64_t tmp = cpu_to_fdt64(val);
return fdt_property(fdt, name, &tmp, sizeof(tmp));
}
+
+#ifndef SWIG /* Not available in Python */
static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val)
{
return fdt_property_u32(fdt, name, val);
}
+#endif
/**
* fdt_property_placeholder - add a new property and return a ptr to its value
diff --git a/scripts/dtc/libfdt/libfdt_env.h b/scripts/dtc/libfdt/libfdt_env.h
index bd2474628775..eb2053845c9c 100644
--- a/scripts/dtc/libfdt/libfdt_env.h
+++ b/scripts/dtc/libfdt/libfdt_env.h
@@ -56,6 +56,7 @@
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
+#include <limits.h>
#ifdef __CHECKER__
#define FDT_FORCE __attribute__((force))
diff --git a/scripts/dtc/libfdt/libfdt_internal.h b/scripts/dtc/libfdt/libfdt_internal.h
index 7681e192295b..4109f890ae60 100644
--- a/scripts/dtc/libfdt/libfdt_internal.h
+++ b/scripts/dtc/libfdt/libfdt_internal.h
@@ -55,10 +55,11 @@
#define FDT_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
#define FDT_TAGALIGN(x) (FDT_ALIGN((x), FDT_TAGSIZE))
-#define FDT_CHECK_HEADER(fdt) \
+int fdt_ro_probe_(const void *fdt);
+#define FDT_RO_PROBE(fdt) \
{ \
int err_; \
- if ((err_ = fdt_check_header(fdt)) != 0) \
+ if ((err_ = fdt_ro_probe_(fdt)) != 0) \
return err_; \
}
diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c
index 6e4c367f54b3..4ff0679e0062 100644
--- a/scripts/dtc/livetree.c
+++ b/scripts/dtc/livetree.c
@@ -594,6 +594,7 @@ struct node *get_node_by_ref(struct node *tree, const char *ref)
cell_t get_node_phandle(struct node *root, struct node *node)
{
static cell_t phandle = 1; /* FIXME: ick, static local */
+ struct data d = empty_data;
if ((node->phandle != 0) && (node->phandle != -1))
return node->phandle;
@@ -603,17 +604,16 @@ cell_t get_node_phandle(struct node *root, struct node *node)
node->phandle = phandle;
+ d = data_add_marker(d, TYPE_UINT32, NULL);
+ d = data_append_cell(d, phandle);
+
if (!get_property(node, "linux,phandle")
&& (phandle_format & PHANDLE_LEGACY))
- add_property(node,
- build_property("linux,phandle",
- data_append_cell(empty_data, phandle)));
+ add_property(node, build_property("linux,phandle", d));
if (!get_property(node, "phandle")
&& (phandle_format & PHANDLE_EPAPR))
- add_property(node,
- build_property("phandle",
- data_append_cell(empty_data, phandle)));
+ add_property(node, build_property("phandle", d));
/* If the node *does* have a phandle property, we must
* be dealing with a self-referencing phandle, which will be
diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c
index 2461a3d068a0..f2874f1d1465 100644
--- a/scripts/dtc/treesource.c
+++ b/scripts/dtc/treesource.c
@@ -61,24 +61,14 @@ static bool isstring(char c)
|| strchr("\a\b\t\n\v\f\r", c));
}
-static void write_propval_string(FILE *f, struct data val)
+static void write_propval_string(FILE *f, const char *s, size_t len)
{
- const char *str = val.val;
- int i;
- struct marker *m = val.markers;
-
- assert(str[val.len-1] == '\0');
+ const char *end = s + len - 1;
+ assert(*end == '\0');
- while (m && (m->offset == 0)) {
- if (m->type == LABEL)
- fprintf(f, "%s: ", m->ref);
- m = m->next;
- }
fprintf(f, "\"");
-
- for (i = 0; i < (val.len-1); i++) {
- char c = str[i];
-
+ while (s < end) {
+ char c = *s++;
switch (c) {
case '\a':
fprintf(f, "\\a");
@@ -108,91 +98,78 @@ static void write_propval_string(FILE *f, struct data val)
fprintf(f, "\\\"");
break;
case '\0':
- fprintf(f, "\", ");
- while (m && (m->offset <= (i + 1))) {
- if (m->type == LABEL) {
- assert(m->offset == (i+1));
- fprintf(f, "%s: ", m->ref);
- }
- m = m->next;
- }
- fprintf(f, "\"");
+ fprintf(f, "\\0");
break;
default:
if (isprint((unsigned char)c))
fprintf(f, "%c", c);
else
- fprintf(f, "\\x%02hhx", c);
+ fprintf(f, "\\x%02"PRIx8, c);
}
}
fprintf(f, "\"");
-
- /* Wrap up any labels at the end of the value */
- for_each_marker_of_type(m, LABEL) {
- assert (m->offset == val.len);
- fprintf(f, " %s:", m->ref);
- }
}
-static void write_propval_cells(FILE *f, struct data val)
+static void write_propval_int(FILE *f, const char *p, size_t len, size_t width)
{
- void *propend = val.val + val.len;
- fdt32_t *cp = (fdt32_t *)val.val;
- struct marker *m = val.markers;
-
- fprintf(f, "<");
- for (;;) {
- while (m && (m->offset <= ((char *)cp - val.val))) {
- if (m->type == LABEL) {
- assert(m->offset == ((char *)cp - val.val));
- fprintf(f, "%s: ", m->ref);
- }
- m = m->next;
- }
+ const char *end = p + len;
+ assert(len % width == 0);
- fprintf(f, "0x%x", fdt32_to_cpu(*cp++));
- if ((void *)cp >= propend)
+ for (; p < end; p += width) {
+ switch (width) {
+ case 1:
+ fprintf(f, " %02"PRIx8, *(const uint8_t*)p);
+ break;
+ case 2:
+ fprintf(f, " 0x%02"PRIx16, fdt16_to_cpu(*(const fdt16_t*)p));
+ break;
+ case 4:
+ fprintf(f, " 0x%02"PRIx32, fdt32_to_cpu(*(const fdt32_t*)p));
+ break;
+ case 8:
+ fprintf(f, " 0x%02"PRIx64, fdt64_to_cpu(*(const fdt64_t*)p));
break;
- fprintf(f, " ");
+ }
}
+}
- /* Wrap up any labels at the end of the value */
- for_each_marker_of_type(m, LABEL) {
- assert (m->offset == val.len);
- fprintf(f, " %s:", m->ref);
- }
- fprintf(f, ">");
+static bool has_data_type_information(struct marker *m)
+{
+ return m->type >= TYPE_UINT8;
}
-static void write_propval_bytes(FILE *f, struct data val)
+static struct marker *next_type_marker(struct marker *m)
{
- void *propend = val.val + val.len;
- const char *bp = val.val;
- struct marker *m = val.markers;
-
- fprintf(f, "[");
- for (;;) {
- while (m && (m->offset == (bp-val.val))) {
- if (m->type == LABEL)
- fprintf(f, "%s: ", m->ref);
- m = m->next;
- }
+ while (m && !has_data_type_information(m))
+ m = m->next;
+ return m;
+}
- fprintf(f, "%02hhx", (unsigned char)(*bp++));
- if ((const void *)bp >= propend)
- break;
- fprintf(f, " ");
- }
+size_t type_marker_length(struct marker *m)
+{
+ struct marker *next = next_type_marker(m->next);
- /* Wrap up any labels at the end of the value */
- for_each_marker_of_type(m, LABEL) {
- assert (m->offset == val.len);
- fprintf(f, " %s:", m->ref);
- }
- fprintf(f, "]");
+ if (next)
+ return next->offset - m->offset;
+ return 0;
}
-static void write_propval(FILE *f, struct property *prop)
+static const char *delim_start[] = {
+ [TYPE_UINT8] = "[",
+ [TYPE_UINT16] = "/bits/ 16 <",
+ [TYPE_UINT32] = "<",
+ [TYPE_UINT64] = "/bits/ 64 <",
+ [TYPE_STRING] = "",
+};
+static const char *delim_end[] = {
+ [TYPE_UINT8] = " ]",
+ [TYPE_UINT16] = " >",
+ [TYPE_UINT32] = " >",
+ [TYPE_UINT64] = " >",
+ [TYPE_STRING] = "",
+};
+
+static enum markertype guess_value_type(struct property *prop)
{
int len = prop->val.len;
const char *p = prop->val.val;
@@ -201,11 +178,6 @@ static void write_propval(FILE *f, struct property *prop)
int nnotstringlbl = 0, nnotcelllbl = 0;
int i;
- if (len == 0) {
- fprintf(f, ";\n");
- return;
- }
-
for (i = 0; i < len; i++) {
if (! isstring(p[i]))
nnotstring++;
@@ -220,17 +192,91 @@ static void write_propval(FILE *f, struct property *prop)
nnotcelllbl++;
}
- fprintf(f, " = ");
if ((p[len-1] == '\0') && (nnotstring == 0) && (nnul < (len-nnul))
&& (nnotstringlbl == 0)) {
- write_propval_string(f, prop->val);
+ return TYPE_STRING;
} else if (((len % sizeof(cell_t)) == 0) && (nnotcelllbl == 0)) {
- write_propval_cells(f, prop->val);
- } else {
- write_propval_bytes(f, prop->val);
+ return TYPE_UINT32;
}
- fprintf(f, ";\n");
+ return TYPE_UINT8;
+}
+
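
The old string/cells/bytes probing survives only as a fallback for values that arrive without type markers (e.g. when decompiling a dtb); it feeds a synthetic marker so write_propval() below has a single marker-driven path. A compact standalone sketch of the heuristic:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *guess(const char *p, size_t len)
{
	size_t i, nul = 0, nonstr = 0;

	for (i = 0; i < len; i++) {
		if (p[i] == '\0')
			nul++;
		else if (!isprint((unsigned char)p[i]) &&
			 !strchr("\a\b\t\n\v\f\r", p[i]))
			nonstr++;
	}
	if (len && p[len - 1] == '\0' && !nonstr && nul < len - nul)
		return "string";
	if (len % 4 == 0)
		return "uint32 cells";
	return "bytes";
}

int main(void)
{
	printf("%s\n", guess("okay", 5));		/* string */
	printf("%s\n", guess("\0\0\0\x2a", 4));		/* uint32 cells */
	return 0;
}
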
+static void write_propval(FILE *f, struct property *prop)
+{
+ size_t len = prop->val.len;
+ struct marker *m = prop->val.markers;
+ struct marker dummy_marker;
+ enum markertype emit_type = TYPE_NONE;
+
+ if (len == 0) {
+ fprintf(f, ";\n");
+ return;
+ }
+
+ fprintf(f, " = ");
+
+ if (!next_type_marker(m)) {
+ /* data type information missing, need to guess */
+ dummy_marker.type = guess_value_type(prop);
+ dummy_marker.next = prop->val.markers;
+ dummy_marker.offset = 0;
+ dummy_marker.ref = NULL;
+ m = &dummy_marker;
+ }
+
+ struct marker *m_label = prop->val.markers;
+ for_each_marker(m) {
+ size_t chunk_len;
+ const char *p = &prop->val.val[m->offset];
+
+ if (!has_data_type_information(m))
+ continue;
+
+ chunk_len = type_marker_length(m);
+ if (!chunk_len)
+ chunk_len = len - m->offset;
+
+ if (emit_type != TYPE_NONE)
+ fprintf(f, "%s, ", delim_end[emit_type]);
+ emit_type = m->type;
+
+ for_each_marker_of_type(m_label, LABEL) {
+ if (m_label->offset > m->offset)
+ break;
+ fprintf(f, "%s: ", m_label->ref);
+ }
+
+ fprintf(f, "%s", delim_start[emit_type]);
+
+ if (chunk_len <= 0)
+ continue;
+
+ switch(emit_type) {
+ case TYPE_UINT16:
+ write_propval_int(f, p, chunk_len, 2);
+ break;
+ case TYPE_UINT32:
+ write_propval_int(f, p, chunk_len, 4);
+ break;
+ case TYPE_UINT64:
+ write_propval_int(f, p, chunk_len, 8);
+ break;
+ case TYPE_STRING:
+ write_propval_string(f, p, chunk_len);
+ break;
+ default:
+ write_propval_int(f, p, chunk_len, 1);
+ }
+ }
+
+ /* Wrap up any labels at the end of the value */
+ for_each_marker_of_type(m_label, LABEL) {
+ assert (m_label->offset == len);
+ fprintf(f, " %s:", m_label->ref);
+ }
+
+ fprintf(f, "%s;\n", delim_end[emit_type] ? : "");
}
static void write_tree_source_node(FILE *f, struct node *tree, int level)
@@ -281,4 +327,3 @@ void dt_to_source(FILE *f, struct dt_info *dti)
write_tree_source_node(f, dti->dt, 0);
}
-
diff --git a/scripts/dtc/update-dtc-source.sh b/scripts/dtc/update-dtc-source.sh
index 1a009fd195d0..7dd29a0362b8 100755
--- a/scripts/dtc/update-dtc-source.sh
+++ b/scripts/dtc/update-dtc-source.sh
@@ -32,7 +32,7 @@ DTC_UPSTREAM_PATH=`pwd`/../dtc
DTC_LINUX_PATH=`pwd`/scripts/dtc
DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c \
- srcpos.h treesource.c util.c util.h version_gen.h Makefile.dtc \
+ srcpos.h treesource.c util.c util.h version_gen.h yamltree.c Makefile.dtc \
dtc-lexer.l dtc-parser.y"
LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_addresses.c fdt_empty_tree.c \
fdt_overlay.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c \
diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c
index 9953c32a0244..a69b7a13463d 100644
--- a/scripts/dtc/util.c
+++ b/scripts/dtc/util.c
@@ -227,11 +227,11 @@ char get_escape_char(const char *s, int *i)
return val;
}
-int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len)
+int utilfdt_read_err(const char *filename, char **buffp, size_t *len)
{
int fd = 0; /* assume stdin */
char *buf = NULL;
- off_t bufsize = 1024, offset = 0;
+ size_t bufsize = 1024, offset = 0;
int ret = 0;
*buffp = NULL;
@@ -264,20 +264,15 @@ int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len)
free(buf);
else
*buffp = buf;
- *len = bufsize;
+ if (len)
+ *len = bufsize;
return ret;
}
-int utilfdt_read_err(const char *filename, char **buffp)
-{
- off_t len;
- return utilfdt_read_err_len(filename, buffp, &len);
-}
-
-char *utilfdt_read_len(const char *filename, off_t *len)
+char *utilfdt_read(const char *filename, size_t *len)
{
char *buff;
- int ret = utilfdt_read_err_len(filename, &buff, len);
+ int ret = utilfdt_read_err(filename, &buff, len);
if (ret) {
fprintf(stderr, "Couldn't open blob from '%s': %s\n", filename,
@@ -288,12 +283,6 @@ char *utilfdt_read_len(const char *filename, off_t *len)
return buff;
}
-char *utilfdt_read(const char *filename)
-{
- off_t len;
- return utilfdt_read_len(filename, &len);
-}
-
int utilfdt_write_err(const char *filename, const void *blob)
{
int fd = 1; /* assume stdout */
diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h
index 66fba8ea709b..f6cea8274174 100644
--- a/scripts/dtc/util.h
+++ b/scripts/dtc/util.h
@@ -98,16 +98,10 @@ char get_escape_char(const char *s, int *i);
* stderr.
*
* @param filename The filename to read, or - for stdin
- * @return Pointer to allocated buffer containing fdt, or NULL on error
- */
-char *utilfdt_read(const char *filename);
-
-/**
- * Like utilfdt_read(), but also passes back the size of the file read.
- *
* @param len If non-NULL, the amount of data we managed to read
+ * @return Pointer to allocated buffer containing fdt, or NULL on error
*/
-char *utilfdt_read_len(const char *filename, off_t *len);
+char *utilfdt_read(const char *filename, size_t *len);
/**
* Read a device tree file into a buffer. Does not report errors, but only
@@ -116,16 +110,10 @@ char *utilfdt_read_len(const char *filename, off_t *len);
*
* @param filename The filename to read, or - for stdin
* @param buffp Returns pointer to buffer containing fdt
- * @return 0 if ok, else an errno value representing the error
- */
-int utilfdt_read_err(const char *filename, char **buffp);
-
-/**
- * Like utilfdt_read_err(), but also passes back the size of the file read.
- *
* @param len If non-NULL, the amount of data we managed to read
+ * @return 0 if ok, else an errno value representing the error
*/
-int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len);
+int utilfdt_read_err(const char *filename, char **buffp, size_t *len);
/**
* Write a device tree buffer to a file. This will report any errors on
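With the two *_len variants folded away, utilfdt_read() and utilfdt_read_err() take the optional size_t out-parameter directly, and callers that do not care about the size pass NULL. A minimal caller sketch against the new signatures (hypothetical file name; the buffer is heap-allocated by the helper):

	size_t len;
	char *blob = utilfdt_read("input.dtb", &len);	/* pass NULL to ignore the size */

	if (!blob)
		return 1;	/* the helper already printed the error to stderr */
	/* ... operate on len bytes at blob ... */
	free(blob);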
diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h
index b00f14ff7a17..6d23fd095f16 100644
--- a/scripts/dtc/version_gen.h
+++ b/scripts/dtc/version_gen.h
@@ -1 +1 @@
-#define DTC_VERSION "DTC 1.4.6-g84e414b0"
+#define DTC_VERSION "DTC 1.4.7-gc86da84d"
diff --git a/scripts/dtc/yamltree.c b/scripts/dtc/yamltree.c
new file mode 100644
index 000000000000..a00285a5a9ec
--- /dev/null
+++ b/scripts/dtc/yamltree.c
@@ -0,0 +1,247 @@
+/*
+ * (C) Copyright Linaro, Ltd. 2018
+ * (C) Copyright Arm Holdings. 2017
+ * (C) Copyright David Gibson <dwg@au1.ibm.com>, IBM Corporation. 2005.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#include <stdlib.h>
+#include <yaml.h>
+#include "dtc.h"
+#include "srcpos.h"
+
+char *yaml_error_name[] = {
+ [YAML_NO_ERROR] = "no error",
+ [YAML_MEMORY_ERROR] = "memory error",
+ [YAML_READER_ERROR] = "reader error",
+ [YAML_SCANNER_ERROR] = "scanner error",
+ [YAML_PARSER_ERROR] = "parser error",
+ [YAML_COMPOSER_ERROR] = "composer error",
+ [YAML_WRITER_ERROR] = "writer error",
+ [YAML_EMITTER_ERROR] = "emitter error",
+};
+
+#define yaml_emitter_emit_or_die(emitter, event) ( \
+{ \
+ if (!yaml_emitter_emit(emitter, event)) \
+ die("yaml '%s': %s in %s, line %i\n", \
+ yaml_error_name[(emitter)->error], \
+ (emitter)->problem, __func__, __LINE__); \
+})
+
+static void yaml_propval_int(yaml_emitter_t *emitter, struct marker *markers, char *data, int len, int width)
+{
+ yaml_event_t event;
+ void *tag;
+ int off, start_offset = markers->offset;
+
+ switch(width) {
+ case 1: tag = "!u8"; break;
+ case 2: tag = "!u16"; break;
+ case 4: tag = "!u32"; break;
+ case 8: tag = "!u64"; break;
+ default:
+ die("Invalid width %i", width);
+ }
+ assert(len % width == 0);
+
+ yaml_sequence_start_event_initialize(&event, NULL,
+ (yaml_char_t *)tag, width == 4, YAML_FLOW_SEQUENCE_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+
+ for (off = 0; off < len; off += width) {
+ char buf[32];
+ struct marker *m;
+ bool is_phandle = false;
+
+ switch(width) {
+ case 1:
+ sprintf(buf, "0x%"PRIx8, *(uint8_t*)(data + off));
+ break;
+ case 2:
+ sprintf(buf, "0x%"PRIx16, fdt16_to_cpu(*(fdt16_t*)(data + off)));
+ break;
+ case 4:
+ sprintf(buf, "0x%"PRIx32, fdt32_to_cpu(*(fdt32_t*)(data + off)));
+ m = markers;
+ is_phandle = false;
+ for_each_marker_of_type(m, REF_PHANDLE) {
+ if (m->offset == (start_offset + off)) {
+ is_phandle = true;
+ break;
+ }
+ }
+ break;
+ case 8:
+ sprintf(buf, "0x%"PRIx64, fdt64_to_cpu(*(fdt64_t*)(data + off)));
+ break;
+ }
+
+ if (is_phandle)
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t*)"!phandle", (yaml_char_t *)buf,
+ strlen(buf), 0, 0, YAML_PLAIN_SCALAR_STYLE);
+ else
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t*)YAML_INT_TAG, (yaml_char_t *)buf,
+ strlen(buf), 1, 1, YAML_PLAIN_SCALAR_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+ }
+
+ yaml_sequence_end_event_initialize(&event);
+ yaml_emitter_emit_or_die(emitter, &event);
+}
+
+static void yaml_propval_string(yaml_emitter_t *emitter, char *str, int len)
+{
+ yaml_event_t event;
+ int i;
+
+ assert(str[len-1] == '\0');
+
+ /* Make sure the entire string is in the lower 7-bit ASCII range */
+ for (i = 0; i < len; i++)
+ assert(isascii(str[i]));
+
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG, (yaml_char_t*)str,
+ len-1, 0, 1, YAML_DOUBLE_QUOTED_SCALAR_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+}
+
+static void yaml_propval(yaml_emitter_t *emitter, struct property *prop)
+{
+ yaml_event_t event;
+ int len = prop->val.len;
+ struct marker *m = prop->val.markers;
+
+ /* Emit the property name */
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG, (yaml_char_t*)prop->name,
+ strlen(prop->name), 1, 1, YAML_PLAIN_SCALAR_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+
+ /* Boolean properties are easiest to deal with. Length is zero, so just emit 'true' */
+ if (len == 0) {
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_BOOL_TAG,
+ (yaml_char_t*)"true",
+ strlen("true"), 1, 0, YAML_PLAIN_SCALAR_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+ return;
+ }
+
+ if (!m)
+ die("No markers present in property '%s' value\n", prop->name);
+
+ yaml_sequence_start_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_SEQ_TAG, 1, YAML_FLOW_SEQUENCE_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+
+ for_each_marker(m) {
+ int chunk_len;
+ char *data = &prop->val.val[m->offset];
+
+ if (m->type < TYPE_UINT8)
+ continue;
+
+ chunk_len = type_marker_length(m) ? : len;
+ assert(chunk_len > 0);
+ len -= chunk_len;
+
+ switch(m->type) {
+ case TYPE_UINT16:
+ yaml_propval_int(emitter, m, data, chunk_len, 2);
+ break;
+ case TYPE_UINT32:
+ yaml_propval_int(emitter, m, data, chunk_len, 4);
+ break;
+ case TYPE_UINT64:
+ yaml_propval_int(emitter, m, data, chunk_len, 8);
+ break;
+ case TYPE_STRING:
+ yaml_propval_string(emitter, data, chunk_len);
+ break;
+ default:
+ yaml_propval_int(emitter, m, data, chunk_len, 1);
+ break;
+ }
+ }
+
+ yaml_sequence_end_event_initialize(&event);
+ yaml_emitter_emit_or_die(emitter, &event);
+}
+
+
+static void yaml_tree(struct node *tree, yaml_emitter_t *emitter)
+{
+ struct property *prop;
+ struct node *child;
+ yaml_event_t event;
+
+ if (tree->deleted)
+ return;
+
+ yaml_mapping_start_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_MAP_TAG, 1, YAML_ANY_MAPPING_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+
+ for_each_property(tree, prop)
+ yaml_propval(emitter, prop);
+
+ /* Loop over all the children, emitting them into the map */
+ for_each_child(tree, child) {
+ yaml_scalar_event_initialize(&event, NULL,
+ (yaml_char_t *)YAML_STR_TAG, (yaml_char_t*)child->name,
+ strlen(child->name), 1, 0, YAML_PLAIN_SCALAR_STYLE);
+ yaml_emitter_emit_or_die(emitter, &event);
+ yaml_tree(child, emitter);
+ }
+
+ yaml_mapping_end_event_initialize(&event);
+ yaml_emitter_emit_or_die(emitter, &event);
+}
+
+void dt_to_yaml(FILE *f, struct dt_info *dti)
+{
+ yaml_emitter_t emitter;
+ yaml_event_t event;
+
+ yaml_emitter_initialize(&emitter);
+ yaml_emitter_set_output_file(&emitter, f);
+ yaml_stream_start_event_initialize(&event, YAML_UTF8_ENCODING);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_document_start_event_initialize(&event, NULL, NULL, NULL, 0);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_sequence_start_event_initialize(&event, NULL, (yaml_char_t *)YAML_SEQ_TAG, 1, YAML_ANY_SEQUENCE_STYLE);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_tree(dti->dt, &emitter);
+
+ yaml_sequence_end_event_initialize(&event);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_document_end_event_initialize(&event, 0);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_stream_end_event_initialize(&event);
+ yaml_emitter_emit_or_die(&emitter, &event);
+
+ yaml_emitter_delete(&emitter);
+}
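For orientation, here is a hand-worked sketch (not captured tool output) of the mapping the emitter above implements: each node becomes a YAML map inside a top-level sequence, each property value a flow sequence of typed chunks, zero-length properties become true, u32 cells print as plain hex (the !u32 tag is implicit at width 4), and cells marked REF_PHANDLE carry an explicit !phandle tag. So a source fragment like

	node {
		compatible = "a,b";
		reg = <0x1000 0x40>;
		clocks = <&osc 1>;
		wakeup-source;
	};

would come out roughly as

	- node:
	    compatible: ["a,b"]
	    reg: [[0x1000, 0x40]]
	    clocks: [[!phandle 0x1, 0x1]]
	    wakeup-source: true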
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 9bb0a7f2863e..5eacba858e4b 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -26,7 +26,7 @@
static struct key *keyring[INTEGRITY_KEYRING_MAX];
-static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+static const char * const keyring_name[INTEGRITY_KEYRING_MAX] = {
#ifndef CONFIG_INTEGRITY_TRUSTED_KEYRING
"_evm",
"_ima",
@@ -37,12 +37,6 @@ static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
"_module",
};
-#ifdef CONFIG_INTEGRITY_TRUSTED_KEYRING
-static bool init_keyring __initdata = true;
-#else
-static bool init_keyring __initdata;
-#endif
-
#ifdef CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY
#define restrict_link_to_ima restrict_link_by_builtin_and_secondary_trusted
#else
@@ -85,7 +79,7 @@ int __init integrity_init_keyring(const unsigned int id)
struct key_restriction *restriction;
int err = 0;
- if (!init_keyring)
+ if (!IS_ENABLED(CONFIG_INTEGRITY_TRUSTED_KEYRING))
return 0;
restriction = kzalloc(sizeof(struct key_restriction), GFP_KERNEL);
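The pattern above replaces a boolean that only mirrored a Kconfig symbol; IS_ENABLED() folds to a compile-time 0 or 1, so the check costs nothing at runtime and the __initdata variable, together with its stale-after-init hazard, disappears. Schematically (CONFIG_FOO is a placeholder, not a real option):

	/* before: a runtime flag seeded from an #ifdef */
	#ifdef CONFIG_FOO
	static bool foo __initdata = true;
	#else
	static bool foo __initdata;
	#endif

	if (!foo)
		return 0;

	/* after: the compiler sees a constant directly */
	if (!IS_ENABLED(CONFIG_FOO))
		return 0;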
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 8a3905bb02c7..8c25f949ebdb 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -27,7 +27,7 @@
#define EVMKEY "evm-key"
#define MAX_KEY_SIZE 128
static unsigned char evmkey[MAX_KEY_SIZE];
-static int evmkey_len = MAX_KEY_SIZE;
+static const int evmkey_len = MAX_KEY_SIZE;
struct crypto_shash *hmac_tfm;
static struct crypto_shash *evm_tfm[HASH_ALGO__LAST];
@@ -38,7 +38,7 @@ static DEFINE_MUTEX(mutex);
static unsigned long evm_set_key_flags;
-static char * const evm_hmac = "hmac(sha1)";
+static const char evm_hmac[] = "hmac(sha1)";
/**
* evm_set_key() - set EVM HMAC key from the kernel
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 67db9d9454ca..cc12f3449a72 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -88,7 +88,7 @@ struct ima_template_desc {
char *name;
char *fmt;
int num_fields;
- struct ima_template_field **fields;
+ const struct ima_template_field **fields;
};
struct ima_template_entry {
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index a02c5acfd403..99dd1d53fc35 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -51,7 +51,8 @@ int ima_alloc_init_template(struct ima_event_data *event_data,
(*entry)->template_desc = template_desc;
for (i = 0; i < template_desc->num_fields; i++) {
- struct ima_template_field *field = template_desc->fields[i];
+ const struct ima_template_field *field =
+ template_desc->fields[i];
u32 len;
result = field->field_init(event_data,
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 7e7e7e7c250a..d9e7728027c6 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -210,7 +210,7 @@ static int ima_calc_file_hash_atfm(struct file *file,
{
loff_t i_size, offset;
char *rbuf[2] = { NULL, };
- int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
+ int rc, rbuf_len, active = 0, ahash_rc = 0;
struct ahash_request *req;
struct scatterlist sg[1];
struct crypto_wait wait;
@@ -257,11 +257,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
&rbuf_size[1], 0);
}
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
for (offset = 0; offset < i_size; offset += rbuf_len) {
if (!rbuf[1] && offset) {
/* Not using two buffers, and it is not the first
@@ -300,8 +295,6 @@ static int ima_calc_file_hash_atfm(struct file *file,
/* wait for the last update request to complete */
rc = ahash_wait(ahash_rc, &wait);
out3:
- if (read)
- file->f_mode &= ~FMODE_READ;
ima_free_pages(rbuf[0], rbuf_size[0]);
ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
@@ -336,7 +329,7 @@ static int ima_calc_file_hash_tfm(struct file *file,
{
loff_t i_size, offset = 0;
char *rbuf;
- int rc, read = 0;
+ int rc;
SHASH_DESC_ON_STACK(shash, tfm);
shash->tfm = tfm;
@@ -357,11 +350,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
if (!rbuf)
return -ENOMEM;
- if (!(file->f_mode & FMODE_READ)) {
- file->f_mode |= FMODE_READ;
- read = 1;
- }
-
while (offset < i_size) {
int rbuf_len;
@@ -378,8 +366,6 @@ static int ima_calc_file_hash_tfm(struct file *file,
if (rc)
break;
}
- if (read)
- file->f_mode &= ~FMODE_READ;
kfree(rbuf);
out:
if (!rc)
@@ -420,6 +406,8 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
loff_t i_size;
int rc;
+ struct file *f = file;
+ bool new_file_instance = false, modified_flags = false;
/*
* For consistency, fail file's opened with the O_DIRECT flag on
@@ -431,15 +419,41 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
return -EINVAL;
}
- i_size = i_size_read(file_inode(file));
+ /* Open a new file instance in O_RDONLY if we cannot read */
+ if (!(file->f_mode & FMODE_READ)) {
+ int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
+ O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
+ flags |= O_RDONLY;
+ f = dentry_open(&file->f_path, flags, file->f_cred);
+ if (IS_ERR(f)) {
+ /*
+ * Cannot open the file again; let's modify f_flags
+ * of the original and continue
+ */
+ pr_info_ratelimited("Unable to reopen file for reading.\n");
+ f = file;
+ f->f_flags |= FMODE_READ;
+ modified_flags = true;
+ } else {
+ new_file_instance = true;
+ }
+ }
+
+ i_size = i_size_read(file_inode(f));
if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
- rc = ima_calc_file_ahash(file, hash);
+ rc = ima_calc_file_ahash(f, hash);
if (!rc)
- return 0;
+ goto out;
}
- return ima_calc_file_shash(file, hash);
+ rc = ima_calc_file_shash(f, hash);
+out:
+ if (new_file_instance)
+ fput(f);
+ else if (modified_flags)
+ f->f_flags &= ~FMODE_READ;
+ return rc;
}
/*
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index ae9d5c766a3c..3183cc23d0f8 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -42,14 +42,14 @@ static int __init default_canonical_fmt_setup(char *str)
__setup("ima_canonical_fmt", default_canonical_fmt_setup);
static int valid_policy = 1;
-#define TMPBUFLEN 12
+
static ssize_t ima_show_htable_value(char __user *buf, size_t count,
loff_t *ppos, atomic_long_t *val)
{
- char tmpbuf[TMPBUFLEN];
+ char tmpbuf[32]; /* greater than largest 'long' string value */
ssize_t len;
- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
+ len = scnprintf(tmpbuf, sizeof(tmpbuf), "%li\n", atomic_long_read(val));
return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
}
@@ -179,7 +179,8 @@ int ima_measurements_show(struct seq_file *m, void *v)
/* 6th: template specific data */
for (i = 0; i < e->template_desc->num_fields; i++) {
enum ima_show_type show = IMA_SHOW_BINARY;
- struct ima_template_field *field = e->template_desc->fields[i];
+ const struct ima_template_field *field =
+ e->template_desc->fields[i];
if (is_ima_template && strcmp(field->field_id, "d") == 0)
show = IMA_SHOW_BINARY_NO_FIELD_LEN;
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index faac9ecaa0ae..59d834219cd6 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -25,7 +25,7 @@
#include "ima.h"
/* name for boot aggregate entry */
-static const char *boot_aggregate_name = "boot_aggregate";
+static const char boot_aggregate_name[] = "boot_aggregate";
struct tpm_chip *ima_tpm_chip;
/* Add the boot aggregate to the IMA measurement list and extend
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 2d31921fbda4..1b88d58e1325 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -440,7 +440,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id)
return 0;
}
-static int read_idmap[READING_MAX_ID] = {
+static const int read_idmap[READING_MAX_ID] = {
[READING_FIRMWARE] = FIRMWARE_CHECK,
[READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK,
[READING_MODULE] = MODULE_CHECK,
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
index 30db39b23804..b631b8bc7624 100644
--- a/security/integrity/ima/ima_template.c
+++ b/security/integrity/ima/ima_template.c
@@ -32,7 +32,7 @@ static struct ima_template_desc builtin_templates[] = {
static LIST_HEAD(defined_templates);
static DEFINE_SPINLOCK(template_list);
-static struct ima_template_field supported_fields[] = {
+static const struct ima_template_field supported_fields[] = {
{.field_id = "d", .field_init = ima_eventdigest_init,
.field_show = ima_show_template_digest},
{.field_id = "n", .field_init = ima_eventname_init,
@@ -49,7 +49,7 @@ static struct ima_template_field supported_fields[] = {
static struct ima_template_desc *ima_template;
static struct ima_template_desc *lookup_template_desc(const char *name);
static int template_desc_init_fields(const char *template_fmt,
- struct ima_template_field ***fields,
+ const struct ima_template_field ***fields,
int *num_fields);
static int __init ima_template_setup(char *str)
@@ -125,7 +125,8 @@ static struct ima_template_desc *lookup_template_desc(const char *name)
return found ? template_desc : NULL;
}
-static struct ima_template_field *lookup_template_field(const char *field_id)
+static const struct ima_template_field *
+lookup_template_field(const char *field_id)
{
int i;
@@ -153,11 +154,11 @@ static int template_fmt_size(const char *template_fmt)
}
static int template_desc_init_fields(const char *template_fmt,
- struct ima_template_field ***fields,
+ const struct ima_template_field ***fields,
int *num_fields)
{
const char *template_fmt_ptr;
- struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
+ const struct ima_template_field *found_fields[IMA_TEMPLATE_NUM_FIELDS_MAX];
int template_num_fields;
int i, len;
diff --git a/security/loadpin/Kconfig b/security/loadpin/Kconfig
index dd01aa91e521..a0d70d82b98e 100644
--- a/security/loadpin/Kconfig
+++ b/security/loadpin/Kconfig
@@ -10,10 +10,10 @@ config SECURITY_LOADPIN
have a root filesystem backed by a read-only device such as
dm-verity or a CDROM.
-config SECURITY_LOADPIN_ENABLED
+config SECURITY_LOADPIN_ENFORCE
bool "Enforce LoadPin at boot"
depends on SECURITY_LOADPIN
help
If selected, LoadPin will enforce pinning at boot. If not
selected, it can be enabled at boot with the kernel parameter
- "loadpin.enabled=1".
+ "loadpin.enforce=1".
diff --git a/security/loadpin/loadpin.c b/security/loadpin/loadpin.c
index 0716af28808a..48f39631b370 100644
--- a/security/loadpin/loadpin.c
+++ b/security/loadpin/loadpin.c
@@ -44,7 +44,7 @@ static void report_load(const char *origin, struct file *file, char *operation)
kfree(pathname);
}
-static int enabled = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENABLED);
+static int enforce = IS_ENABLED(CONFIG_SECURITY_LOADPIN_ENFORCE);
static struct super_block *pinned_root;
static DEFINE_SPINLOCK(pinned_root_spinlock);
@@ -60,8 +60,8 @@ static struct ctl_path loadpin_sysctl_path[] = {
static struct ctl_table loadpin_sysctl_table[] = {
{
- .procname = "enabled",
- .data = &enabled,
+ .procname = "enforce",
+ .data = &enforce,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
@@ -84,8 +84,11 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
* device, allow sysctl to change modes for testing.
*/
if (mnt_sb->s_bdev) {
+ char bdev[BDEVNAME_SIZE];
+
ro = bdev_read_only(mnt_sb->s_bdev);
- pr_info("dev(%u,%u): %s\n",
+ bdevname(mnt_sb->s_bdev, bdev);
+ pr_info("%s (%u:%u): %s\n", bdev,
MAJOR(mnt_sb->s_bdev->bd_dev),
MINOR(mnt_sb->s_bdev->bd_dev),
ro ? "read-only" : "writable");
@@ -97,7 +100,7 @@ static void check_pinning_enforcement(struct super_block *mnt_sb)
loadpin_sysctl_table))
pr_notice("sysctl registration failed!\n");
else
- pr_info("load pinning can be disabled.\n");
+ pr_info("enforcement can be disabled.\n");
} else
pr_info("load pinning engaged.\n");
}
@@ -128,7 +131,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
/* This handles the older init_module API that has a NULL file. */
if (!file) {
- if (!enabled) {
+ if (!enforce) {
report_load(origin, NULL, "old-api-pinning-ignored");
return 0;
}
@@ -151,7 +154,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
* Unlock now since it's only pinned_root we care about.
* In the worst case, we will (correctly) report pinning
* failures before we have announced that pinning is
- * enabled. This would be purely cosmetic.
+ * being enforced. This would be purely cosmetic.
*/
spin_unlock(&pinned_root_spinlock);
check_pinning_enforcement(pinned_root);
@@ -161,7 +164,7 @@ static int loadpin_read_file(struct file *file, enum kernel_read_file_id id)
}
if (IS_ERR_OR_NULL(pinned_root) || load_root != pinned_root) {
- if (unlikely(!enabled)) {
+ if (unlikely(!enforce)) {
report_load(origin, file, "pinning-ignored");
return 0;
}
@@ -186,10 +189,11 @@ static struct security_hook_list loadpin_hooks[] __lsm_ro_after_init = {
void __init loadpin_add_hooks(void)
{
- pr_info("ready to pin (currently %sabled)", enabled ? "en" : "dis");
+ pr_info("ready to pin (currently %senforcing)\n",
+ enforce ? "" : "not ");
security_add_hooks(loadpin_hooks, ARRAY_SIZE(loadpin_hooks), "loadpin");
}
/* Should not be mutable after boot, so not listed in sysfs (perm == 0). */
-module_param(enabled, int, 0);
-MODULE_PARM_DESC(enabled, "Pin module/firmware loading (default: true)");
+module_param(enforce, int, 0);
+MODULE_PARM_DESC(enforce, "Enforce module/firmware pinning");
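The user-visible knobs follow the rename: the Kconfig option is now SECURITY_LOADPIN_ENFORCE and, per the updated help text, enforcement is requested at boot with

	loadpin.enforce=1

while the sysctl file registered above is likewise called "enforce" instead of "enabled" (its full path comes from loadpin_sysctl_path, which this hunk does not show).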
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 934dabe150fa..81fb4c1631e9 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -421,6 +421,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
struct smk_audit_info ad, *saip = NULL;
struct task_smack *tsp;
struct smack_known *tracer_known;
+ const struct cred *tracercred;
if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
@@ -429,7 +430,8 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
}
rcu_read_lock();
- tsp = __task_cred(tracer)->security;
+ tracercred = __task_cred(tracer);
+ tsp = tracercred->security;
tracer_known = smk_of_task(tsp);
if ((mode & PTRACE_MODE_ATTACH) &&
@@ -439,7 +441,7 @@ static int smk_ptrace_rule_check(struct task_struct *tracer,
rc = 0;
else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
rc = -EACCES;
- else if (capable(CAP_SYS_PTRACE))
+ else if (smack_privileged_cred(CAP_SYS_PTRACE, tracercred))
rc = 0;
else
rc = -EACCES;
@@ -1841,6 +1843,7 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
{
struct smack_known *skp;
struct smack_known *tkp = smk_of_task(tsk->cred->security);
+ const struct cred *tcred;
struct file *file;
int rc;
struct smk_audit_info ad;
@@ -1854,8 +1857,12 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
skp = file->f_security;
rc = smk_access(skp, tkp, MAY_DELIVER, NULL);
rc = smk_bu_note("sigiotask", skp, tkp, MAY_DELIVER, rc);
- if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
+
+ rcu_read_lock();
+ tcred = __task_cred(tsk);
+ if (rc != 0 && smack_privileged_cred(CAP_MAC_OVERRIDE, tcred))
rc = 0;
+ rcu_read_unlock();
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
@@ -3467,7 +3474,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
final = &smack_known_star;
/*
- * No break.
+ * Fall through.
*
* If a smack value has been set we want to use it,
* but since tmpfs isn't giving us the opportunity
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index f6482e53d55a..06b517075ec0 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -2853,7 +2853,6 @@ static const struct file_operations smk_ptrace_ops = {
static int smk_fill_super(struct super_block *sb, void *data, int silent)
{
int rc;
- struct inode *root_inode;
static const struct tree_descr smack_files[] = {
[SMK_LOAD] = {
@@ -2917,8 +2916,6 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
return rc;
}
- root_inode = d_inode(sb->s_root);
-
return 0;
}
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 000b58522106..bd7c5029fc59 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -157,18 +157,19 @@ static int i2sbus_add_dev(struct macio_dev *macio,
struct device_node *child = NULL, *sound = NULL;
struct resource *r;
int i, layout = 0, rlen, ok = force;
- static const char *rnames[] = { "i2sbus: %s (control)",
- "i2sbus: %s (tx)",
- "i2sbus: %s (rx)" };
+ char node_name[6];
+ static const char *rnames[] = { "i2sbus: %pOFn (control)",
+ "i2sbus: %pOFn (tx)",
+ "i2sbus: %pOFn (rx)" };
static irq_handler_t ints[] = {
i2sbus_bus_intr,
i2sbus_tx_intr,
i2sbus_rx_intr
};
- if (strlen(np->name) != 5)
+ if (snprintf(node_name, sizeof(node_name), "%pOFn", np) != 5)
return 0;
- if (strncmp(np->name, "i2s-", 4))
+ if (strncmp(node_name, "i2s-", 4))
return 0;
dev = kzalloc(sizeof(struct i2sbus_dev), GFP_KERNEL);
@@ -228,13 +229,13 @@ static int i2sbus_add_dev(struct macio_dev *macio,
dev->sound.pcmid = -1;
dev->macio = macio;
dev->control = control;
- dev->bus_number = np->name[4] - 'a';
+ dev->bus_number = node_name[4] - 'a';
INIT_LIST_HEAD(&dev->sound.codec_list);
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
dev->interrupts[i] = -1;
snprintf(dev->rnames[i], sizeof(dev->rnames[i]),
- rnames[i], np->name);
+ rnames[i], np);
}
for (i = aoa_resource_i2smmio; i <= aoa_resource_rxdbdma; i++) {
int irq = irq_of_parse_and_map(np, i);
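The conversion uses the %pOFn printf extension, which prints a device_node's name from the pointer itself instead of dereferencing np->name, and the driver leans on snprintf() returning the full untruncated length so that the same call doubles as the old strlen() == 5 check. As a one-line illustration (hypothetical message):

	pr_info("i2sbus: setting up %pOFn\n", np);	/* prints e.g. "i2s-a" */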
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 5fbd47a9177e..28867732a318 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -31,7 +31,6 @@ endif # SND_ARM
config SND_PXA2XX_LIB
tristate
- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
select SND_DMAENGINE_PCM
config SND_PXA2XX_LIB_AC97
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 753d5fc4b284..59a4adc286ed 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -25,6 +25,9 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
#include <sound/memalloc.h>
/*
@@ -82,31 +85,32 @@ EXPORT_SYMBOL(snd_free_pages);
#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
-static void *snd_malloc_dev_pages(struct device *dev, size_t size, dma_addr_t *dma)
+static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
{
- int pg;
gfp_t gfp_flags;
- if (WARN_ON(!dma))
- return NULL;
- pg = get_order(size);
gfp_flags = GFP_KERNEL
| __GFP_COMP /* compound page lets parts be mapped */
| __GFP_NORETRY /* don't trigger OOM-killer */
| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
- return dma_alloc_coherent(dev, PAGE_SIZE << pg, dma, gfp_flags);
+ dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
+ gfp_flags);
+#ifdef CONFIG_X86
+ if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wc((unsigned long)dmab->area,
+ PAGE_ALIGN(size) >> PAGE_SHIFT);
+#endif
}
/* free the coherent DMA pages */
-static void snd_free_dev_pages(struct device *dev, size_t size, void *ptr,
- dma_addr_t dma)
+static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
{
- int pg;
-
- if (ptr == NULL)
- return;
- pg = get_order(size);
- dma_free_coherent(dev, PAGE_SIZE << pg, ptr, dma);
+#ifdef CONFIG_X86
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
+ set_memory_wb((unsigned long)dmab->area,
+ PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
+#endif
+ dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}
#ifdef CONFIG_GENERIC_ALLOCATOR
@@ -199,12 +203,15 @@ int snd_dma_alloc_pages(int type, struct device *device, size_t size,
*/
dmab->dev.type = SNDRV_DMA_TYPE_DEV;
#endif /* CONFIG_GENERIC_ALLOCATOR */
+ /* fall through */
case SNDRV_DMA_TYPE_DEV:
- dmab->area = snd_malloc_dev_pages(device, size, &dmab->addr);
+ case SNDRV_DMA_TYPE_DEV_UC:
+ snd_malloc_dev_pages(dmab, size);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
snd_malloc_sgbuf_pages(device, size, dmab, NULL);
break;
#endif
@@ -275,11 +282,13 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
case SNDRV_DMA_TYPE_DEV:
- snd_free_dev_pages(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
+ case SNDRV_DMA_TYPE_DEV_UC:
+ snd_free_dev_pages(dmab);
break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
case SNDRV_DMA_TYPE_DEV_SG:
+ case SNDRV_DMA_TYPE_DEV_UC_SG:
snd_free_sgbuf_pages(dmab);
break;
#endif
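From a driver's point of view the new types are used exactly like their cached counterparts; a minimal sketch (dev is assumed to be a valid struct device *, size chosen arbitrarily):

	struct snd_dma_buffer buf;
	int err;

	/* coherent pages, remapped write-combined on x86 as shown above */
	err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_UC, dev, 8192, &buf);
	if (err < 0)
		return err;
	/* ... hand buf.area / buf.addr to the PCM engine ... */
	snd_dma_free_pages(&buf);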
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 0391cb1a4f19..141c5f3a9575 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -111,7 +111,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->next) {
if (plugin->dst_frames)
frames = plugin->dst_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
return -ENXIO;
plugin = plugin->next;
err = snd_pcm_plugin_alloc(plugin, frames);
@@ -123,7 +123,7 @@ int snd_pcm_plug_alloc(struct snd_pcm_substream *plug, snd_pcm_uframes_t frames)
while (plugin->prev) {
if (plugin->src_frames)
frames = plugin->src_frames(plugin, frames);
- if (snd_BUG_ON(frames <= 0))
+ if (snd_BUG_ON((snd_pcm_sframes_t)frames <= 0))
return -ENXIO;
plugin = plugin->prev;
err = snd_pcm_plugin_alloc(plugin, frames);
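The casts matter because snd_pcm_uframes_t is unsigned long: a negative value propagated into frames wraps to a huge positive number, so frames <= 0 could only ever catch zero. Illustratively:

	snd_pcm_uframes_t frames = (snd_pcm_uframes_t)-1;	/* wrapped error value */

	/* unsigned compare: false, the bad value slips through */
	bool missed = (frames <= 0);
	/* signed compare: true, snd_BUG_ON() fires as intended */
	bool caught = ((snd_pcm_sframes_t)frames <= 0);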
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 4e6110d778bd..40013b26f671 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -2172,18 +2172,25 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream,
if (err < 0)
goto _end_unlock;
+ runtime->twake = runtime->control->avail_min ? : 1;
+ if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
+ snd_pcm_update_hw_ptr(substream);
+
if (!is_playback &&
- runtime->status->state == SNDRV_PCM_STATE_PREPARED &&
- size >= runtime->start_threshold) {
- err = snd_pcm_start(substream);
- if (err < 0)
+ runtime->status->state == SNDRV_PCM_STATE_PREPARED) {
+ if (size >= runtime->start_threshold) {
+ err = snd_pcm_start(substream);
+ if (err < 0)
+ goto _end_unlock;
+ } else {
+ /* nothing to do */
+ err = 0;
goto _end_unlock;
+ }
}
- runtime->twake = runtime->control->avail_min ? : 1;
- if (runtime->status->state == SNDRV_PCM_STATE_RUNNING)
- snd_pcm_update_hw_ptr(substream);
avail = snd_pcm_avail(substream);
+
while (size > 0) {
snd_pcm_uframes_t frames, appl_ptr, appl_ofs;
snd_pcm_uframes_t cont;
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 08d5662039e3..ee601d7f0926 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -1236,6 +1236,28 @@ int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
}
EXPORT_SYMBOL(snd_rawmidi_transmit);
+/**
+ * snd_rawmidi_proceed - Discard all pending bytes and proceed
+ * @substream: rawmidi substream
+ *
+ * Return: the number of discarded bytes
+ */
+int snd_rawmidi_proceed(struct snd_rawmidi_substream *substream)
+{
+ struct snd_rawmidi_runtime *runtime = substream->runtime;
+ unsigned long flags;
+ int count = 0;
+
+ spin_lock_irqsave(&runtime->lock, flags);
+ if (runtime->avail < runtime->buffer_size) {
+ count = runtime->buffer_size - runtime->avail;
+ __snd_rawmidi_transmit_ack(substream, count);
+ }
+ spin_unlock_irqrestore(&runtime->lock, flags);
+ return count;
+}
+EXPORT_SYMBOL(snd_rawmidi_proceed);
+
static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
const unsigned char __user *userbuf,
const unsigned char *kernelbuf,
diff --git a/sound/core/seq/oss/seq_oss_timer.c b/sound/core/seq/oss/seq_oss_timer.c
index ba127c22539a..0778d28421da 100644
--- a/sound/core/seq/oss/seq_oss_timer.c
+++ b/sound/core/seq/oss/seq_oss_timer.c
@@ -92,7 +92,7 @@ snd_seq_oss_process_timer_event(struct seq_oss_timer *rec, union evrec *ev)
case TMR_WAIT_REL:
parm += rec->cur_tick;
rec->realtime = 0;
- /* fall through and continue to next */
+ /* fall through */
case TMR_WAIT_ABS:
if (parm == 0) {
rec->realtime = 1;
diff --git a/sound/core/seq/seq_system.c b/sound/core/seq/seq_system.c
index 8ce1d0b40dce..0dc5d5a45ecc 100644
--- a/sound/core/seq/seq_system.c
+++ b/sound/core/seq/seq_system.c
@@ -123,6 +123,7 @@ int __init snd_seq_system_client_init(void)
{
struct snd_seq_port_callback pcallbacks;
struct snd_seq_port_info *port;
+ int err;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -134,6 +135,10 @@ int __init snd_seq_system_client_init(void)
/* register client */
sysclient = snd_seq_create_kernel_client(NULL, 0, "System");
+ if (sysclient < 0) {
+ kfree(port);
+ return sysclient;
+ }
/* register timer */
strcpy(port->name, "Timer");
@@ -144,7 +149,10 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_TIMER;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
/* register announcement port */
strcpy(port->name, "Announce");
@@ -154,16 +162,24 @@ int __init snd_seq_system_client_init(void)
port->flags = SNDRV_SEQ_PORT_FLG_GIVEN_PORT;
port->addr.client = sysclient;
port->addr.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
- snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT, port);
+ err = snd_seq_kernel_client_ctl(sysclient, SNDRV_SEQ_IOCTL_CREATE_PORT,
+ port);
+ if (err < 0)
+ goto error_port;
announce_port = port->addr.port;
kfree(port);
return 0;
+
+ error_port:
+ snd_seq_system_client_done();
+ kfree(port);
+ return err;
}
/* unregister our internal client */
-void __exit snd_seq_system_client_done(void)
+void snd_seq_system_client_done(void)
{
int oldsysclient = sysclient;
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index cb988efd1ed0..e5a40795914a 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -149,9 +149,7 @@ static void snd_vmidi_output_work(struct work_struct *work)
/* discard the outputs in dispatch mode unless subscribed */
if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
!(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
- char buf[32];
- while (snd_rawmidi_transmit(substream, buf, sizeof(buf)) > 0)
- ; /* ignored */
+ snd_rawmidi_proceed(substream);
return;
}
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
index 84fffabdd129..c1cfaa01a5cb 100644
--- a/sound/core/sgbuf.c
+++ b/sound/core/sgbuf.c
@@ -23,6 +23,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
+#include <asm/pgtable.h>
#include <sound/memalloc.h>
@@ -43,6 +44,8 @@ int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
dmab->area = NULL;
tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
+ tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
tmpb.dev.dev = sgbuf->dev;
for (i = 0; i < sgbuf->pages; i++) {
if (!(sgbuf->table[i].addr & ~PAGE_MASK))
@@ -72,12 +75,20 @@ void *snd_malloc_sgbuf_pages(struct device *device,
struct snd_dma_buffer tmpb;
struct snd_sg_page *table;
struct page **pgtable;
+ int type = SNDRV_DMA_TYPE_DEV;
+ pgprot_t prot = PAGE_KERNEL;
dmab->area = NULL;
dmab->addr = 0;
dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
if (! sgbuf)
return NULL;
+ if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
+ type = SNDRV_DMA_TYPE_DEV_UC;
+#ifdef pgprot_noncached
+ prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+ }
sgbuf->dev = device;
pages = snd_sgbuf_aligned_pages(size);
sgbuf->tblsize = sgbuf_align_table(pages);
@@ -98,7 +109,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
if (chunk > maxpages)
chunk = maxpages;
chunk <<= PAGE_SHIFT;
- if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device,
+ if (snd_dma_alloc_pages_fallback(type, device,
chunk, &tmpb) < 0) {
if (!sgbuf->pages)
goto _failed;
@@ -125,7 +136,7 @@ void *snd_malloc_sgbuf_pages(struct device *device,
}
sgbuf->size = size;
- dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, PAGE_KERNEL);
+ dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
if (! dmab->area)
goto _failed;
if (res_size)
diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig
index 529d9f405fa9..8a146b039276 100644
--- a/sound/firewire/Kconfig
+++ b/sound/firewire/Kconfig
@@ -147,7 +147,9 @@ config SND_FIREWIRE_MOTU
help
Say Y here to enable support for FireWire devices which MOTU produced:
* 828mk2
+ * Traveler
* 828mk3
+ * Audio Express
To compile this driver as a module, choose M here: the module
will be called snd-firewire-motu.
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index cb9acfe60f6a..fcd965f1d69e 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -140,6 +140,59 @@ const unsigned int amdtp_rate_table[CIP_SFC_COUNT] = {
};
EXPORT_SYMBOL(amdtp_rate_table);
+static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *s = hw_param_interval(params, rule->var);
+ const struct snd_interval *r =
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval t = {
+ .min = s->min, .max = s->max, .integer = 1,
+ };
+ int i;
+
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
+ unsigned int rate = amdtp_rate_table[i];
+ unsigned int step = amdtp_syt_intervals[i];
+
+ if (!snd_interval_test(r, rate))
+ continue;
+
+ t.min = roundup(t.min, step);
+ t.max = rounddown(t.max, step);
+ }
+
+ if (snd_interval_checkempty(&t))
+ return -EINVAL;
+
+ return snd_interval_refine(s, &t);
+}
+
+static int apply_constraint_to_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct snd_interval *r =
+ hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
+ const struct snd_interval *s = hw_param_interval_c(params, rule->deps[0]);
+ struct snd_interval t = {
+ .min = UINT_MAX, .max = 0, .integer = 1,
+ };
+ int i;
+
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
+ unsigned int step = amdtp_syt_intervals[i];
+ unsigned int rate = amdtp_rate_table[i];
+
+ if (s->min % step || s->max % step)
+ continue;
+
+ t.min = min(t.min, rate);
+ t.max = max(t.max, rate);
+ }
+
+ return snd_interval_refine(r, &t);
+}
+
/**
* amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
* @s: the AMDTP stream, which must be initialized.
@@ -194,16 +247,27 @@ int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s,
 * number equals SYT_INTERVAL, so it is 8, 16 or 32, depending on the
 * sampling rate. For accurate period interrupts, it's preferable to
 * align period/buffer sizes to the current SYT_INTERVAL.
- *
- * TODO: These constraints can be improved with proper rules.
- * Currently apply LCM of SYT_INTERVALs.
*/
- err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
+ apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ apply_constraint_to_rate, NULL,
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
+ apply_constraint_to_size, NULL,
+ SNDRV_PCM_HW_PARAM_RATE, -1);
+ if (err < 0)
+ goto end;
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ apply_constraint_to_rate, NULL,
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE, -1);
if (err < 0)
goto end;
- err = snd_pcm_hw_constraint_step(runtime, 0,
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
end:
return err;
}
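To see the two rules in action, assume the exported tables pair 32/44.1/48 kHz with a SYT_INTERVAL of 8 frames, 88.2/96 kHz with 16, and 176.4/192 kHz with 32 (the values in amdtp_syt_intervals). If the rate has already been pinned to 48 kHz, apply_constraint_to_size() refines a requested period range of [100, 1001] frames to [104, 1000], the nearest multiples of 8. Conversely, if the period size is known to be 24 frames, only the step-8 rates divide it evenly, so apply_constraint_to_rate() narrows the rate interval to [32000, 48000]. That is why the coarse multiple-of-32 constraint can be dropped: the step now tracks the actual rate instead of the LCM of all SYT_INTERVALs.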
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 93676354f87f..672d13488454 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -126,23 +126,6 @@ end:
return err;
}
-static void bebob_free(struct snd_bebob *bebob)
-{
- snd_bebob_stream_destroy_duplex(bebob);
- fw_unit_put(bebob->unit);
-
- kfree(bebob->maudio_special_quirk);
-
- mutex_destroy(&bebob->mutex);
- kfree(bebob);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
static void
bebob_card_free(struct snd_card *card)
{
@@ -152,7 +135,7 @@ bebob_card_free(struct snd_card *card)
clear_bit(bebob->card_index, devices_used);
mutex_unlock(&devices_mutex);
- bebob_free(card->private_data);
+ snd_bebob_stream_destroy_duplex(bebob);
}
static const struct snd_bebob_spec *
@@ -192,7 +175,6 @@ do_registration(struct work_struct *work)
return;
mutex_lock(&devices_mutex);
-
for (card_index = 0; card_index < SNDRV_CARDS; card_index++) {
if (!test_bit(card_index, devices_used) && enable[card_index])
break;
@@ -208,6 +190,11 @@ do_registration(struct work_struct *work)
mutex_unlock(&devices_mutex);
return;
}
+ set_bit(card_index, devices_used);
+ mutex_unlock(&devices_mutex);
+
+ bebob->card->private_free = bebob_card_free;
+ bebob->card->private_data = bebob;
err = name_device(bebob);
if (err < 0)
@@ -248,23 +235,10 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- set_bit(card_index, devices_used);
- mutex_unlock(&devices_mutex);
-
- /*
- * After registered, bebob instance can be released corresponding to
- * releasing the sound card instance.
- */
- bebob->card->private_free = bebob_card_free;
- bebob->card->private_data = bebob;
bebob->registered = true;
return;
error:
- mutex_unlock(&devices_mutex);
- snd_bebob_stream_destroy_duplex(bebob);
- kfree(bebob->maudio_special_quirk);
- bebob->maudio_special_quirk = NULL;
snd_card_free(bebob->card);
dev_info(&bebob->unit->device,
"Sound card registration failed: %d\n", err);
@@ -295,15 +269,15 @@ bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
}
/* Allocate this independent of sound card instance. */
- bebob = kzalloc(sizeof(struct snd_bebob), GFP_KERNEL);
- if (bebob == NULL)
+ bebob = devm_kzalloc(&unit->device, sizeof(struct snd_bebob),
+ GFP_KERNEL);
+ if (!bebob)
return -ENOMEM;
-
bebob->unit = fw_unit_get(unit);
- bebob->entry = entry;
- bebob->spec = spec;
dev_set_drvdata(&unit->device, bebob);
+ bebob->entry = entry;
+ bebob->spec = spec;
mutex_init(&bebob->mutex);
spin_lock_init(&bebob->lock);
init_waitqueue_head(&bebob->hwdep_wait);
@@ -379,12 +353,12 @@ static void bebob_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&bebob->dwork);
if (bebob->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(bebob->card);
- } else {
- /* Don't forget this case. */
- bebob_free(bebob);
+ // Block until all ALSA character devices are released.
+ snd_card_free(bebob->card);
}
+
+ mutex_destroy(&bebob->mutex);
+ fw_unit_put(bebob->unit);
}
static const struct snd_bebob_rate_spec normal_rate_spec = {
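This is the first of several conversions with the same shape in this series (dice, digi00x, fireface, fireworks, isight and motu follow): driver private data moves to devm_kzalloc() tied to the fw_unit device, card->private_free is set right after card creation and only tears down streams/transactions, and .remove() blocks in snd_card_free() before dropping the mutex and the unit reference. Reduced to a skeleton (identifiers hypothetical, not the committed code):

	static void foo_card_free(struct snd_card *card)
	{
		struct foo *foo = card->private_data;

		foo_stream_destroy_duplex(foo);	/* streams/transactions only */
	}

	static int foo_probe(struct fw_unit *unit)
	{
		struct foo *foo;

		foo = devm_kzalloc(&unit->device, sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return -ENOMEM;
		foo->unit = fw_unit_get(unit);	/* put again in .remove() */
		dev_set_drvdata(&unit->device, foo);
		return 0;
	}

	static void foo_remove(struct fw_unit *unit)
	{
		struct foo *foo = dev_get_drvdata(&unit->device);

		snd_card_free(foo->card);	/* blocks until chardevs close */
		mutex_destroy(&foo->mutex);
		fw_unit_put(foo->unit);
	}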
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index c266997ad299..51152ca4af57 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -261,8 +261,9 @@ snd_bebob_maudio_special_discover(struct snd_bebob *bebob, bool is1814)
struct special_params *params;
int err;
- params = kzalloc(sizeof(struct special_params), GFP_KERNEL);
- if (params == NULL)
+ params = devm_kzalloc(&bebob->card->card_dev,
+ sizeof(struct special_params), GFP_KERNEL);
+ if (!params)
return -ENOMEM;
mutex_lock(&bebob->mutex);
diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
index 774eb2205668..0f6dbcffe711 100644
--- a/sound/firewire/dice/dice.c
+++ b/sound/firewire/dice/dice.c
@@ -122,25 +122,12 @@ static void dice_card_strings(struct snd_dice *dice)
strcpy(card->mixername, "DICE");
}
-static void dice_free(struct snd_dice *dice)
+static void dice_card_free(struct snd_card *card)
{
+ struct snd_dice *dice = card->private_data;
+
snd_dice_stream_destroy_duplex(dice);
snd_dice_transaction_destroy(dice);
- fw_unit_put(dice->unit);
-
- mutex_destroy(&dice->mutex);
- kfree(dice);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void dice_card_free(struct snd_card *card)
-{
- dice_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -155,6 +142,8 @@ static void do_registration(struct work_struct *work)
&dice->card);
if (err < 0)
return;
+ dice->card->private_free = dice_card_free;
+ dice->card->private_data = dice;
err = snd_dice_transaction_init(dice);
if (err < 0)
@@ -192,19 +181,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, dice instance can be released corresponding to
- * releasing the sound card instance.
- */
- dice->card->private_free = dice_card_free;
- dice->card->private_data = dice;
dice->registered = true;
return;
error:
- snd_dice_stream_destroy_duplex(dice);
- snd_dice_transaction_destroy(dice);
- snd_dice_stream_destroy_duplex(dice);
snd_card_free(dice->card);
dev_info(&dice->unit->device,
"Sound card registration failed: %d\n", err);
@@ -223,10 +203,9 @@ static int dice_probe(struct fw_unit *unit,
}
/* Allocate this independent of sound card instance. */
- dice = kzalloc(sizeof(struct snd_dice), GFP_KERNEL);
- if (dice == NULL)
+ dice = devm_kzalloc(&unit->device, sizeof(struct snd_dice), GFP_KERNEL);
+ if (!dice)
return -ENOMEM;
-
dice->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, dice);
@@ -263,10 +242,10 @@ static void dice_remove(struct fw_unit *unit)
if (dice->registered) {
/* No need to wait for releasing card object in this context. */
snd_card_free_when_closed(dice->card);
- } else {
- /* Don't forget this case. */
- dice_free(dice);
}
+
+ mutex_destroy(&dice->mutex);
+ fw_unit_put(dice->unit);
}
static void dice_bus_reset(struct fw_unit *unit)
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index ef689997d6a5..6c6ea149ef6b 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -41,20 +41,12 @@ static int name_card(struct snd_dg00x *dg00x)
return 0;
}
-static void dg00x_free(struct snd_dg00x *dg00x)
+static void dg00x_card_free(struct snd_card *card)
{
+ struct snd_dg00x *dg00x = card->private_data;
+
snd_dg00x_stream_destroy_duplex(dg00x);
snd_dg00x_transaction_unregister(dg00x);
-
- fw_unit_put(dg00x->unit);
-
- mutex_destroy(&dg00x->mutex);
- kfree(dg00x);
-}
-
-static void dg00x_card_free(struct snd_card *card)
-{
- dg00x_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -70,6 +62,8 @@ static void do_registration(struct work_struct *work)
&dg00x->card);
if (err < 0)
return;
+ dg00x->card->private_free = dg00x_card_free;
+ dg00x->card->private_data = dg00x;
err = name_card(dg00x);
if (err < 0)
@@ -101,14 +95,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- dg00x->card->private_free = dg00x_card_free;
- dg00x->card->private_data = dg00x;
dg00x->registered = true;
return;
error:
- snd_dg00x_transaction_unregister(dg00x);
- snd_dg00x_stream_destroy_duplex(dg00x);
snd_card_free(dg00x->card);
dev_info(&dg00x->unit->device,
"Sound card registration failed: %d\n", err);
@@ -120,8 +110,9 @@ static int snd_dg00x_probe(struct fw_unit *unit,
struct snd_dg00x *dg00x;
/* Allocate this independent of sound card instance. */
- dg00x = kzalloc(sizeof(struct snd_dg00x), GFP_KERNEL);
- if (dg00x == NULL)
+ dg00x = devm_kzalloc(&unit->device, sizeof(struct snd_dg00x),
+ GFP_KERNEL);
+ if (!dg00x)
return -ENOMEM;
dg00x->unit = fw_unit_get(unit);
@@ -173,12 +164,12 @@ static void snd_dg00x_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&dg00x->dwork);
if (dg00x->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(dg00x->card);
- } else {
- /* Don't forget this case. */
- dg00x_free(dg00x);
+ // Block until all ALSA character devices are released.
+ snd_card_free(dg00x->card);
}
+
+ mutex_destroy(&dg00x->mutex);
+ fw_unit_put(dg00x->unit);
}
static const struct ieee1394_device_id snd_dg00x_id_table[] = {
diff --git a/sound/firewire/fireface/ff.c b/sound/firewire/fireface/ff.c
index 4974bc7980e9..3f61cfeace69 100644
--- a/sound/firewire/fireface/ff.c
+++ b/sound/firewire/fireface/ff.c
@@ -27,20 +27,12 @@ static void name_card(struct snd_ff *ff)
dev_name(&ff->unit->device), 100 << fw_dev->max_speed);
}
-static void ff_free(struct snd_ff *ff)
+static void ff_card_free(struct snd_card *card)
{
+ struct snd_ff *ff = card->private_data;
+
snd_ff_stream_destroy_duplex(ff);
snd_ff_transaction_unregister(ff);
-
- fw_unit_put(ff->unit);
-
- mutex_destroy(&ff->mutex);
- kfree(ff);
-}
-
-static void ff_card_free(struct snd_card *card)
-{
- ff_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -55,6 +47,8 @@ static void do_registration(struct work_struct *work)
&ff->card);
if (err < 0)
return;
+ ff->card->private_free = ff_card_free;
+ ff->card->private_data = ff;
err = snd_ff_transaction_register(ff);
if (err < 0)
@@ -84,14 +78,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- ff->card->private_free = ff_card_free;
- ff->card->private_data = ff;
ff->registered = true;
return;
error:
- snd_ff_transaction_unregister(ff);
- snd_ff_stream_destroy_duplex(ff);
snd_card_free(ff->card);
dev_info(&ff->unit->device,
"Sound card registration failed: %d\n", err);
@@ -102,11 +92,9 @@ static int snd_ff_probe(struct fw_unit *unit,
{
struct snd_ff *ff;
- ff = kzalloc(sizeof(struct snd_ff), GFP_KERNEL);
- if (ff == NULL)
+ ff = devm_kzalloc(&unit->device, sizeof(struct snd_ff), GFP_KERNEL);
+ if (!ff)
return -ENOMEM;
-
- /* initialize myself */
ff->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, ff);
@@ -149,12 +137,12 @@ static void snd_ff_remove(struct fw_unit *unit)
cancel_work_sync(&ff->dwork.work);
if (ff->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(ff->card);
- } else {
- /* Don't forget this case. */
- ff_free(ff);
+ // Block until all ALSA character devices are released.
+ snd_card_free(ff->card);
}
+
+ mutex_destroy(&ff->mutex);
+ fw_unit_put(ff->unit);
}
static const struct snd_ff_spec spec_ff400 = {
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index f2d073365cf6..faf0e001c4c5 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -184,36 +184,17 @@ end:
return err;
}
-static void efw_free(struct snd_efw *efw)
-{
- snd_efw_stream_destroy_duplex(efw);
- snd_efw_transaction_remove_instance(efw);
- fw_unit_put(efw->unit);
-
- kfree(efw->resp_buf);
-
- mutex_destroy(&efw->mutex);
- kfree(efw);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
static void
efw_card_free(struct snd_card *card)
{
struct snd_efw *efw = card->private_data;
- if (efw->card_index >= 0) {
- mutex_lock(&devices_mutex);
- clear_bit(efw->card_index, devices_used);
- mutex_unlock(&devices_mutex);
- }
+ mutex_lock(&devices_mutex);
+ clear_bit(efw->card_index, devices_used);
+ mutex_unlock(&devices_mutex);
- efw_free(card->private_data);
+ snd_efw_stream_destroy_duplex(efw);
+ snd_efw_transaction_remove_instance(efw);
}
static void
@@ -226,9 +207,8 @@ do_registration(struct work_struct *work)
if (efw->registered)
return;
- mutex_lock(&devices_mutex);
-
/* check registered cards */
+ mutex_lock(&devices_mutex);
for (card_index = 0; card_index < SNDRV_CARDS; ++card_index) {
if (!test_bit(card_index, devices_used) && enable[card_index])
break;
@@ -244,12 +224,18 @@ do_registration(struct work_struct *work)
mutex_unlock(&devices_mutex);
return;
}
+ set_bit(card_index, devices_used);
+ mutex_unlock(&devices_mutex);
+
+ efw->card->private_free = efw_card_free;
+ efw->card->private_data = efw;
/* prepare response buffer */
snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size,
SND_EFW_RESPONSE_MAXIMUM_BYTES, 4096U);
- efw->resp_buf = kzalloc(snd_efw_resp_buf_size, GFP_KERNEL);
- if (efw->resp_buf == NULL) {
+ efw->resp_buf = devm_kzalloc(&efw->card->card_dev,
+ snd_efw_resp_buf_size, GFP_KERNEL);
+ if (!efw->resp_buf) {
err = -ENOMEM;
goto error;
}
@@ -284,25 +270,11 @@ do_registration(struct work_struct *work)
if (err < 0)
goto error;
- set_bit(card_index, devices_used);
- mutex_unlock(&devices_mutex);
-
- /*
- * After registered, efw instance can be released corresponding to
- * releasing the sound card instance.
- */
- efw->card->private_free = efw_card_free;
- efw->card->private_data = efw;
efw->registered = true;
return;
error:
- mutex_unlock(&devices_mutex);
- snd_efw_transaction_remove_instance(efw);
- snd_efw_stream_destroy_duplex(efw);
snd_card_free(efw->card);
- kfree(efw->resp_buf);
- efw->resp_buf = NULL;
dev_info(&efw->unit->device,
"Sound card registration failed: %d\n", err);
}
@@ -312,10 +284,9 @@ efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry)
{
struct snd_efw *efw;
- efw = kzalloc(sizeof(struct snd_efw), GFP_KERNEL);
+ efw = devm_kzalloc(&unit->device, sizeof(struct snd_efw), GFP_KERNEL);
if (efw == NULL)
return -ENOMEM;
-
efw->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, efw);
@@ -363,12 +334,12 @@ static void efw_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&efw->dwork);
if (efw->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(efw->card);
- } else {
- /* Don't forget this case. */
- efw_free(efw);
+ // Block until all ALSA character devices are released.
+ snd_card_free(efw->card);
}
+
+ mutex_destroy(&efw->mutex);
+ fw_unit_put(efw->unit);
}
static const struct ieee1394_device_id efw_id_table[] = {
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 30957477e005..9ebe510ea26b 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -602,8 +602,6 @@ static void isight_card_free(struct snd_card *card)
struct isight *isight = card->private_data;
fw_iso_resources_destroy(&isight->resources);
- fw_unit_put(isight->unit);
- mutex_destroy(&isight->mutex);
}
static u64 get_unit_base(struct fw_unit *unit)
@@ -640,7 +638,7 @@ static int isight_probe(struct fw_unit *unit,
if (!isight->audio_base) {
dev_err(&unit->device, "audio unit base not found\n");
err = -ENXIO;
- goto err_unit;
+ goto error;
}
fw_iso_resources_init(&isight->resources, unit);
@@ -669,12 +667,12 @@ static int isight_probe(struct fw_unit *unit,
dev_set_drvdata(&unit->device, isight);
return 0;
-
-err_unit:
- fw_unit_put(isight->unit);
- mutex_destroy(&isight->mutex);
error:
snd_card_free(card);
+
+ mutex_destroy(&isight->mutex);
+ fw_unit_put(isight->unit);
+
return err;
}
@@ -703,7 +701,11 @@ static void isight_remove(struct fw_unit *unit)
isight_stop_streaming(isight);
mutex_unlock(&isight->mutex);
- snd_card_free_when_closed(isight->card);
+ // Block until all ALSA character devices are released.
+ snd_card_free(isight->card);
+
+ mutex_destroy(&isight->mutex);
+ fw_unit_put(isight->unit);
}
static const struct ieee1394_device_id isight_id_table[] = {
diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
index 300d31b6f191..220e61926ea4 100644
--- a/sound/firewire/motu/motu.c
+++ b/sound/firewire/motu/motu.c
@@ -52,26 +52,12 @@ static void name_card(struct snd_motu *motu)
dev_name(&motu->unit->device), 100 << fw_dev->max_speed);
}
-static void motu_free(struct snd_motu *motu)
+static void motu_card_free(struct snd_card *card)
{
- snd_motu_transaction_unregister(motu);
+ struct snd_motu *motu = card->private_data;
+ snd_motu_transaction_unregister(motu);
snd_motu_stream_destroy_duplex(motu);
- fw_unit_put(motu->unit);
-
- mutex_destroy(&motu->mutex);
- kfree(motu);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void motu_card_free(struct snd_card *card)
-{
- motu_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -86,6 +72,8 @@ static void do_registration(struct work_struct *work)
&motu->card);
if (err < 0)
return;
+ motu->card->private_free = motu_card_free;
+ motu->card->private_data = motu;
name_card(motu);
@@ -120,18 +108,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, motu instance can be released corresponding to
- * releasing the sound card instance.
- */
- motu->card->private_free = motu_card_free;
- motu->card->private_data = motu;
motu->registered = true;
return;
error:
- snd_motu_transaction_unregister(motu);
- snd_motu_stream_destroy_duplex(motu);
snd_card_free(motu->card);
dev_info(&motu->unit->device,
"Sound card registration failed: %d\n", err);
@@ -143,14 +123,13 @@ static int motu_probe(struct fw_unit *unit,
struct snd_motu *motu;
/* Allocate this independently of sound card instance. */
- motu = kzalloc(sizeof(struct snd_motu), GFP_KERNEL);
- if (motu == NULL)
+ motu = devm_kzalloc(&unit->device, sizeof(struct snd_motu), GFP_KERNEL);
+ if (!motu)
return -ENOMEM;
-
- motu->spec = (const struct snd_motu_spec *)entry->driver_data;
motu->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, motu);
+ motu->spec = (const struct snd_motu_spec *)entry->driver_data;
mutex_init(&motu->mutex);
spin_lock_init(&motu->lock);
init_waitqueue_head(&motu->hwdep_wait);
@@ -174,12 +153,12 @@ static void motu_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&motu->dwork);
if (motu->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(motu->card);
- } else {
- /* Don't forget this case. */
- motu_free(motu);
+		// Block until all ALSA character devices are released.
+ snd_card_free(motu->card);
}
+
+ mutex_destroy(&motu->mutex);
+ fw_unit_put(motu->unit);
}
static void motu_bus_update(struct fw_unit *unit)
diff --git a/sound/firewire/oxfw/oxfw-scs1x.c b/sound/firewire/oxfw/oxfw-scs1x.c
index f33497cdc706..9d9545880a28 100644
--- a/sound/firewire/oxfw/oxfw-scs1x.c
+++ b/sound/firewire/oxfw/oxfw-scs1x.c
@@ -372,8 +372,9 @@ int snd_oxfw_scs1x_add(struct snd_oxfw *oxfw)
struct fw_scs1x *scs;
int err;
- scs = kzalloc(sizeof(struct fw_scs1x), GFP_KERNEL);
- if (scs == NULL)
+ scs = devm_kzalloc(&oxfw->card->card_dev, sizeof(struct fw_scs1x),
+ GFP_KERNEL);
+ if (!scs)
return -ENOMEM;
scs->fw_dev = fw_parent_device(oxfw->unit);
oxfw->spec = scs;
diff --git a/sound/firewire/oxfw/oxfw-spkr.c b/sound/firewire/oxfw/oxfw-spkr.c
index cb905af0660d..66d4b1f73f0f 100644
--- a/sound/firewire/oxfw/oxfw-spkr.c
+++ b/sound/firewire/oxfw/oxfw-spkr.c
@@ -270,8 +270,9 @@ int snd_oxfw_add_spkr(struct snd_oxfw *oxfw, bool is_lacie)
unsigned int i, first_ch;
int err;
- spkr = kzalloc(sizeof(struct fw_spkr), GFP_KERNEL);
- if (spkr == NULL)
+ spkr = devm_kzalloc(&oxfw->card->card_dev, sizeof(struct fw_spkr),
+ GFP_KERNEL);
+ if (!spkr)
return -ENOMEM;
oxfw->spec = spkr;
diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c
index d9361f352133..f230a9e44c3c 100644
--- a/sound/firewire/oxfw/oxfw-stream.c
+++ b/sound/firewire/oxfw/oxfw-stream.c
@@ -517,8 +517,9 @@ assume_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir,
if (err < 0)
goto end;
- formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
- if (formats[eid] == NULL) {
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, *len,
+ GFP_KERNEL);
+ if (!formats[eid]) {
err = -ENOMEM;
goto end;
}
@@ -535,7 +536,8 @@ assume_stream_formats(struct snd_oxfw *oxfw, enum avc_general_plug_dir dir,
continue;
eid++;
- formats[eid] = kmemdup(buf, *len, GFP_KERNEL);
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, *len,
+ GFP_KERNEL);
if (formats[eid] == NULL) {
err = -ENOMEM;
goto end;
@@ -597,8 +599,9 @@ static int fill_stream_formats(struct snd_oxfw *oxfw,
if (err < 0)
break;
- formats[eid] = kmemdup(buf, len, GFP_KERNEL);
- if (formats[eid] == NULL) {
+ formats[eid] = devm_kmemdup(&oxfw->card->card_dev, buf, len,
+ GFP_KERNEL);
+ if (!formats[eid]) {
err = -ENOMEM;
break;
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 2ea8be6c8584..afb78d90384b 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -113,35 +113,13 @@ end:
return err;
}
-static void oxfw_free(struct snd_oxfw *oxfw)
+static void oxfw_card_free(struct snd_card *card)
{
- unsigned int i;
+ struct snd_oxfw *oxfw = card->private_data;
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
if (oxfw->has_output)
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
-
- fw_unit_put(oxfw->unit);
-
- for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
- kfree(oxfw->tx_stream_formats[i]);
- kfree(oxfw->rx_stream_formats[i]);
- }
-
- kfree(oxfw->spec);
- mutex_destroy(&oxfw->mutex);
- kfree(oxfw);
-}
-
-/*
- * This module releases the FireWire unit data after all ALSA character devices
- * are released by applications. This is for releasing stream data or finishing
- * transactions safely. Thus at returning from .remove(), this module still keep
- * references for the unit.
- */
-static void oxfw_card_free(struct snd_card *card)
-{
- oxfw_free(card->private_data);
}
static int detect_quirks(struct snd_oxfw *oxfw)
@@ -208,7 +186,6 @@ static int detect_quirks(struct snd_oxfw *oxfw)
static void do_registration(struct work_struct *work)
{
struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
- int i;
int err;
if (oxfw->registered)
@@ -218,6 +195,8 @@ static void do_registration(struct work_struct *work)
&oxfw->card);
if (err < 0)
return;
+ oxfw->card->private_free = oxfw_card_free;
+ oxfw->card->private_data = oxfw;
err = name_card(oxfw);
if (err < 0)
@@ -258,28 +237,11 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, oxfw instance can be released corresponding to
- * releasing the sound card instance.
- */
- oxfw->card->private_free = oxfw_card_free;
- oxfw->card->private_data = oxfw;
oxfw->registered = true;
return;
error:
- snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
- if (oxfw->has_output)
- snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
- for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
- kfree(oxfw->tx_stream_formats[i]);
- oxfw->tx_stream_formats[i] = NULL;
- kfree(oxfw->rx_stream_formats[i]);
- oxfw->rx_stream_formats[i] = NULL;
- }
snd_card_free(oxfw->card);
- kfree(oxfw->spec);
- oxfw->spec = NULL;
dev_info(&oxfw->unit->device,
"Sound card registration failed: %d\n", err);
}
@@ -293,14 +255,13 @@ static int oxfw_probe(struct fw_unit *unit,
return -ENODEV;
 	/* Allocate this independently of sound card instance. */
- oxfw = kzalloc(sizeof(struct snd_oxfw), GFP_KERNEL);
- if (oxfw == NULL)
+ oxfw = devm_kzalloc(&unit->device, sizeof(struct snd_oxfw), GFP_KERNEL);
+ if (!oxfw)
return -ENOMEM;
-
- oxfw->entry = entry;
oxfw->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, oxfw);
+ oxfw->entry = entry;
mutex_init(&oxfw->mutex);
spin_lock_init(&oxfw->lock);
init_waitqueue_head(&oxfw->hwdep_wait);
@@ -347,12 +308,12 @@ static void oxfw_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&oxfw->dwork);
if (oxfw->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(oxfw->card);
- } else {
- /* Don't forget this case. */
- oxfw_free(oxfw);
+		// Block until all ALSA character devices are released.
+ snd_card_free(oxfw->card);
}
+
+ mutex_destroy(&oxfw->mutex);
+ fw_unit_put(oxfw->unit);
}
static const struct compat_info griffin_firewave = {
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index d3fdc463a884..ef57fa4db323 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -85,20 +85,12 @@ static int identify_model(struct snd_tscm *tscm)
return 0;
}
-static void tscm_free(struct snd_tscm *tscm)
+static void tscm_card_free(struct snd_card *card)
{
+ struct snd_tscm *tscm = card->private_data;
+
snd_tscm_transaction_unregister(tscm);
snd_tscm_stream_destroy_duplex(tscm);
-
- fw_unit_put(tscm->unit);
-
- mutex_destroy(&tscm->mutex);
- kfree(tscm);
-}
-
-static void tscm_card_free(struct snd_card *card)
-{
- tscm_free(card->private_data);
}
static void do_registration(struct work_struct *work)
@@ -110,6 +102,8 @@ static void do_registration(struct work_struct *work)
&tscm->card);
if (err < 0)
return;
+ tscm->card->private_free = tscm_card_free;
+ tscm->card->private_data = tscm;
err = identify_model(tscm);
if (err < 0)
@@ -141,18 +135,10 @@ static void do_registration(struct work_struct *work)
if (err < 0)
goto error;
- /*
- * After registered, tscm instance can be released corresponding to
- * releasing the sound card instance.
- */
- tscm->card->private_free = tscm_card_free;
- tscm->card->private_data = tscm;
tscm->registered = true;
return;
error:
- snd_tscm_transaction_unregister(tscm);
- snd_tscm_stream_destroy_duplex(tscm);
snd_card_free(tscm->card);
dev_info(&tscm->unit->device,
"Sound card registration failed: %d\n", err);
@@ -164,11 +150,9 @@ static int snd_tscm_probe(struct fw_unit *unit,
struct snd_tscm *tscm;
 	/* Allocate this independently of sound card instance. */
- tscm = kzalloc(sizeof(struct snd_tscm), GFP_KERNEL);
- if (tscm == NULL)
+ tscm = devm_kzalloc(&unit->device, sizeof(struct snd_tscm), GFP_KERNEL);
+ if (!tscm)
return -ENOMEM;
-
- /* initialize myself */
tscm->unit = fw_unit_get(unit);
dev_set_drvdata(&unit->device, tscm);
@@ -216,12 +200,12 @@ static void snd_tscm_remove(struct fw_unit *unit)
cancel_delayed_work_sync(&tscm->dwork);
if (tscm->registered) {
- /* No need to wait for releasing card object in this context. */
- snd_card_free_when_closed(tscm->card);
- } else {
- /* Don't forget this case. */
- tscm_free(tscm);
+		// Block until all ALSA character devices are released.
+ snd_card_free(tscm->card);
}
+
+ mutex_destroy(&tscm->mutex);
+ fw_unit_put(tscm->unit);
}
static const struct ieee1394_device_id snd_tscm_id_table[] = {
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index 5bc4a1d587d4..60cb00fd0c69 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -48,9 +48,11 @@ void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *bus, bool enable)
}
if (enable)
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_GPROCEN);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_GPROCEN, AZX_PPCTL_GPROCEN);
else
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_GPROCEN, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_GPROCEN, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_enable);
@@ -68,9 +70,11 @@ void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *bus, bool enable)
}
if (enable)
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, 0, AZX_PPCTL_PIE);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_PIE, AZX_PPCTL_PIE);
else
- snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL, AZX_PPCTL_PIE, 0);
+ snd_hdac_updatel(bus->ppcap, AZX_REG_PP_PPCTL,
+ AZX_PPCTL_PIE, 0);
}
EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_ppcap_int_enable);
@@ -194,7 +198,8 @@ static int check_hdac_link_power_active(struct hdac_ext_link *link, bool enable)
*/
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link)
{
- snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
+ snd_hdac_updatel(link->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, AZX_MLCTL_SPA);
return check_hdac_link_power_active(link, true);
}
@@ -222,8 +227,8 @@ int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus)
int ret;
list_for_each_entry(hlink, &bus->hlink_list, list) {
- snd_hdac_updatel(hlink->ml_addr,
- AZX_REG_ML_LCTL, 0, AZX_MLCTL_SPA);
+ snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, AZX_MLCTL_SPA);
ret = check_hdac_link_power_active(hlink, true);
if (ret < 0)
return ret;
@@ -243,7 +248,8 @@ int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus)
int ret;
list_for_each_entry(hlink, &bus->hlink_list, list) {
- snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL, AZX_MLCTL_SPA, 0);
+ snd_hdac_updatel(hlink->ml_addr, AZX_REG_ML_LCTL,
+ AZX_MLCTL_SPA, 0);
ret = check_hdac_link_power_active(hlink, false);
if (ret < 0)
return ret;
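
For context, snd_hdac_updatel(addr, reg, mask, val) is a read-modify-write helper: read the register, clear the mask bits, OR in val, write back. Passing the bit as both mask and value when setting, as the converted calls do, makes the set form symmetric with the clear form (mask = bit, val = 0); the old (0, bit) form only worked because nothing had to be cleared first. The idiom behind it, sketched as a function (the in-tree helper is a macro over readl()/writel()):

    #include <linux/types.h>

    static inline u32 update_bits(u32 old, u32 mask, u32 val)
    {
            /* Masking val is defensive here; callers are expected to keep
             * val within mask, as the calls above now do. */
            return (old & ~mask) | (val & mask);
    }
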
diff --git a/sound/i2c/cs8427.c b/sound/i2c/cs8427.c
index 2647309bc675..8afa2f888466 100644
--- a/sound/i2c/cs8427.c
+++ b/sound/i2c/cs8427.c
@@ -118,7 +118,7 @@ static int snd_cs8427_send_corudata(struct snd_i2c_device *device,
struct cs8427 *chip = device->private_data;
char *hw_data = udata ?
chip->playback.hw_udata : chip->playback.hw_status;
- char data[32];
+ unsigned char data[32];
int err, idx;
if (!memcmp(hw_data, ndata, count))
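
The buffer type change above matters because plain char has implementation-defined signedness: on a signed-char target, a channel-status byte such as 0xC0 turns negative when promoted to int, breaking comparisons and bit arithmetic on the IEC958 data. A standalone illustration of the hazard:

    #include <stdio.h>

    int main(void)
    {
            char s = (char)0xC0;            /* may promote to -64 */
            unsigned char u = 0xC0;         /* always promotes to 192 */

            /* Prints "-64 192" on a signed-char ABI: one byte value,
             * two different ints. */
            printf("%d %d\n", s, u);
            return 0;
    }
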
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index ac0ab6eb40f0..47e0b2820ace 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -389,7 +389,8 @@ static int snd_opti9xx_configure(struct snd_opti9xx *chip,
case OPTi9XX_HW_82C931:
/* disable 3D sound (set GPIO1 as output, low) */
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(20), 0x04, 0x0c);
- case OPTi9XX_HW_82C933: /* FALL THROUGH */
+ /* fall through */
+ case OPTi9XX_HW_82C933:
/*
* The BTC 1817DW has QS1000 wavetable which is connected
* to the serial digital input of the OPTI931.
@@ -400,7 +401,8 @@ static int snd_opti9xx_configure(struct snd_opti9xx *chip,
* or digital input signal.
*/
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(26), 0x01, 0x01);
- case OPTi9XX_HW_82C930: /* FALL THROUGH */
+ /* fall through */
+ case OPTi9XX_HW_82C930:
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(6), 0x02, 0x03);
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(3), 0x00, 0xff);
snd_opti9xx_write_mask(chip, OPTi9XX_MC_REG(4), 0x10 |
diff --git a/sound/isa/sb/sb8_main.c b/sound/isa/sb/sb8_main.c
index 481797744b3c..8288fae90085 100644
--- a/sound/isa/sb/sb8_main.c
+++ b/sound/isa/sb/sb8_main.c
@@ -130,13 +130,13 @@ static int snd_sb8_playback_prepare(struct snd_pcm_substream *substream)
chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_201:
if (rate > 23000) {
chip->playback_format = SB_DSP_HI_OUTPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_20:
chip->playback_format = SB_DSP_LO_OUTPUT_AUTO;
break;
@@ -287,7 +287,7 @@ static int snd_sb8_capture_prepare(struct snd_pcm_substream *substream)
chip->capture_format = SB_DSP_HI_INPUT_AUTO;
break;
}
- /* fallthru */
+ /* fall through */
case SB_HW_20:
chip->capture_format = SB_DSP_LO_INPUT_AUTO;
break;
@@ -387,7 +387,7 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
case SB_MODE_PLAYBACK_16: /* ok.. playback is active */
if (chip->hardware != SB_HW_JAZZ16)
break;
- /* fallthru */
+ /* fall through */
case SB_MODE_PLAYBACK_8:
substream = chip->playback_substream;
if (chip->playback_format == SB_DSP_OUTPUT)
@@ -397,7 +397,7 @@ irqreturn_t snd_sb8dsp_interrupt(struct snd_sb *chip)
case SB_MODE_CAPTURE_16:
if (chip->hardware != SB_HW_JAZZ16)
break;
- /* fallthru */
+ /* fall through */
case SB_MODE_CAPTURE_8:
substream = chip->capture_substream;
if (chip->capture_format == SB_DSP_INPUT)
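
The comment churn in the two ISA drivers above serves GCC's -Wimplicit-fallthrough: a /* fall through */ comment placed directly before the next case label tells the compiler the missing break is deliberate, and the exact placement (after the statements, before the label) is what the warning's comment matcher expects. A minimal sketch:

    enum hw_rev { HW_NEW, HW_OLD };         /* illustrative */

    static unsigned int pick_format(enum hw_rev rev)
    {
            unsigned int fmt = 0;

            switch (rev) {
            case HW_NEW:
                    fmt |= 0x2;             /* extra capability bit */
                    /* fall through */
            case HW_OLD:
                    fmt |= 0x1;             /* baseline bit */
                    break;
            }
            return fmt;
    }
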
diff --git a/sound/mips/hal2.c b/sound/mips/hal2.c
index c8904e732aaa..a4ed54aeaf1d 100644
--- a/sound/mips/hal2.c
+++ b/sound/mips/hal2.c
@@ -500,7 +500,8 @@ static const struct snd_pcm_hardware hal2_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_BLOCK_TRANSFER),
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_BE,
.rates = SNDRV_PCM_RATE_8000_48000,
.rate_min = 8000,
@@ -563,6 +564,8 @@ static int hal2_playback_prepare(struct snd_pcm_substream *substream)
dac->sample_rate = hal2_compute_rate(dac, runtime->rate);
memset(&dac->pcm_indirect, 0, sizeof(dac->pcm_indirect));
dac->pcm_indirect.hw_buffer_size = H2_BUF_SIZE;
+ dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
+ dac->pcm_indirect.hw_io = dac->buffer_dma;
dac->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
dac->substream = substream;
hal2_setup_dac(hal2);
@@ -575,9 +578,6 @@ static int hal2_playback_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- hal2->dac.pcm_indirect.hw_io = hal2->dac.buffer_dma;
- hal2->dac.pcm_indirect.hw_data = 0;
- substream->ops->ack(substream);
hal2_start_dac(hal2);
break;
case SNDRV_PCM_TRIGGER_STOP:
@@ -615,7 +615,6 @@ static int hal2_playback_ack(struct snd_pcm_substream *substream)
struct snd_hal2 *hal2 = snd_pcm_substream_chip(substream);
struct hal2_codec *dac = &hal2->dac;
- dac->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
return snd_pcm_indirect_playback_transfer(substream,
&dac->pcm_indirect,
hal2_playback_transfer);
@@ -655,6 +654,7 @@ static int hal2_capture_prepare(struct snd_pcm_substream *substream)
memset(&adc->pcm_indirect, 0, sizeof(adc->pcm_indirect));
adc->pcm_indirect.hw_buffer_size = H2_BUF_SIZE;
adc->pcm_indirect.hw_queue_size = H2_BUF_SIZE / 2;
+ adc->pcm_indirect.hw_io = adc->buffer_dma;
adc->pcm_indirect.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream);
adc->substream = substream;
hal2_setup_adc(hal2);
@@ -667,9 +667,6 @@ static int hal2_capture_trigger(struct snd_pcm_substream *substream, int cmd)
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- hal2->adc.pcm_indirect.hw_io = hal2->adc.buffer_dma;
- hal2->adc.pcm_indirect.hw_data = 0;
- printk(KERN_DEBUG "buffer_dma %x\n", hal2->adc.buffer_dma);
hal2_start_adc(hal2);
break;
case SNDRV_PCM_TRIGGER_STOP:
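
The hal2 rework moves all the snd_pcm_indirect setup into .prepare(): the fields describe a fixed hardware ring, so re-initializing hw_io (and hw_data, already zeroed by the memset) in .trigger() was redundant, and hw_queue_size belongs with the rest instead of being rewritten on every .ack(). Advertising SNDRV_PCM_INFO_SYNC_APPLPTR makes the PCM core call .ack() whenever the application pointer moves, which is what drives snd_pcm_indirect_playback_transfer(). A hedged sketch of the prepare-side setup, with illustrative parameter names:

    #include <linux/string.h>
    #include <sound/pcm.h>
    #include <sound/pcm-indirect.h>

    static void ring_prepare(struct snd_pcm_substream *ss,
                             struct snd_pcm_indirect *rec,
                             size_t ring_bytes, dma_addr_t ring_dma)
    {
            memset(rec, 0, sizeof(*rec));           /* clears hw_data too */
            rec->hw_buffer_size = ring_bytes;       /* whole hardware ring */
            rec->hw_queue_size = ring_bytes / 2;    /* cap on queued data */
            rec->hw_io = ring_dma;                  /* hw position at ring base */
            rec->sw_buffer_size = snd_pcm_lib_buffer_bytes(ss);
    }
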
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 5ef4fe964366..7c91330af719 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -49,7 +49,7 @@ u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
/*?? any benefit in using managed dmam_alloc_coherent? */
p_mem_area->vaddr =
dma_alloc_coherent(&pdev->dev, size, &p_mem_area->dma_handle,
- GFP_DMA32 | GFP_KERNEL);
+ GFP_KERNEL);
if (p_mem_area->vaddr) {
HPI_DEBUG_LOG(DEBUG, "allocated %d bytes, dma 0x%x vma %p\n",
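
The GFP_DMA32 removal above follows from the DMA API contract: dma_alloc_coherent() already satisfies the device's coherent DMA mask, and passing zone flags to it is unsupported (newer kernels warn). A 32-bit addressing requirement belongs in the mask, roughly:

    #include <linux/dma-mapping.h>

    static void *alloc_dma_ring(struct device *dev, size_t size,
                                dma_addr_t *dma_handle)
    {
            /* Declare the 32-bit constraint once, on the device ... */
            if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
                    return NULL;            /* 32-bit DMA unavailable */
            /* ... then allocate with plain GFP_KERNEL. */
            return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
    }
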
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index a1e4944dcfe8..1a41f8c80243 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -903,15 +903,15 @@ static int snd_atiixp_playback_prepare(struct snd_pcm_substream *substream)
case 8:
data |= ATI_REG_OUT_DMA_SLOT_BIT(10) |
ATI_REG_OUT_DMA_SLOT_BIT(11);
- /* fallthru */
+ /* fall through */
case 6:
data |= ATI_REG_OUT_DMA_SLOT_BIT(7) |
ATI_REG_OUT_DMA_SLOT_BIT(8);
- /* fallthru */
+ /* fall through */
case 4:
data |= ATI_REG_OUT_DMA_SLOT_BIT(6) |
ATI_REG_OUT_DMA_SLOT_BIT(9);
- /* fallthru */
+ /* fall through */
default:
data |= ATI_REG_OUT_DMA_SLOT_BIT(3) |
ATI_REG_OUT_DMA_SLOT_BIT(4);
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 2e5b460a847c..96ece1a71cf1 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1115,6 +1115,7 @@ vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
+ /* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
@@ -1122,12 +1123,14 @@ vortex_adbdma_setbuffers(vortex_t * vortex, int adbdma,
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
+ /* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize - 1);
hwwrite(vortex->mmio,
VORTEX_ADBDMA_BUFBASE + (adbdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
+ /* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize - 1) << 0xc);
@@ -1390,17 +1393,20 @@ vortex_wtdma_setbuffers(vortex_t * vortex, int wtdma,
dma->cfg1 |= 0x88000000 | 0x44000000 | 0x30000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0xc,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 3));
+ /* fall through */
/* 3 pages */
case 3:
dma->cfg0 |= 0x12000000;
dma->cfg1 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x8,
snd_pcm_sgbuf_get_addr(dma->substream, psize * 2));
+ /* fall through */
/* 2 pages */
case 2:
dma->cfg0 |= 0x88000000 | 0x44000000 | 0x10000000 | (psize-1);
hwwrite(vortex->mmio, VORTEX_WTDMA_BUFBASE + (wtdma << 4) + 0x4,
snd_pcm_sgbuf_get_addr(dma->substream, psize));
+ /* fall through */
/* 1 page */
case 1:
dma->cfg0 |= 0x80000000 | 0x40000000 | ((psize-1) << 0xc);
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index 146e1a3498c7..750eec437a79 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1443,7 +1443,8 @@ static const struct snd_pcm_hardware snd_cs46xx_playback =
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
- /*SNDRV_PCM_INFO_RESUME*/),
+ /*SNDRV_PCM_INFO_RESUME*/ |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE),
@@ -1465,7 +1466,8 @@ static const struct snd_pcm_hardware snd_cs46xx_capture =
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
- /*SNDRV_PCM_INFO_RESUME*/),
+ /*SNDRV_PCM_INFO_RESUME*/ |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
.rate_min = 5500,
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 9f2b6097f486..30b3472d0b75 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1753,7 +1753,8 @@ static const struct snd_pcm_hardware snd_emu10k1_fx8010_playback =
{
.info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_RESUME |
- /* SNDRV_PCM_INFO_MMAP_VALID | */ SNDRV_PCM_INFO_PAUSE),
+ /* SNDRV_PCM_INFO_MMAP_VALID | */ SNDRV_PCM_INFO_PAUSE |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
.rates = SNDRV_PCM_RATE_48000,
.rate_min = 48000,
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index b9a6b66aeb0e..df0d636145f8 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -13,7 +13,7 @@
#include <linux/export.h>
#include <linux/sort.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index d1a6a9c1329a..f1457c6b3969 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -9,7 +9,7 @@
#ifndef __SOUND_HDA_BEEP_H
#define __SOUND_HDA_BEEP_H
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#define HDA_BEEP_MODE_OFF 0
#define HDA_BEEP_MODE_ON 1
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index d361bb77ca00..9174f1b3a987 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -11,7 +11,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
/*
@@ -81,6 +81,12 @@ static int hda_codec_driver_probe(struct device *dev)
hda_codec_patch_t patch;
int err;
+ if (codec->bus->core.ext_ops) {
+ if (WARN_ON(!codec->bus->core.ext_ops->hdev_attach))
+ return -EINVAL;
+ return codec->bus->core.ext_ops->hdev_attach(&codec->core);
+ }
+
if (WARN_ON(!codec->preset))
return -EINVAL;
@@ -134,6 +140,12 @@ static int hda_codec_driver_remove(struct device *dev)
{
struct hda_codec *codec = dev_to_hda_codec(dev);
+ if (codec->bus->core.ext_ops) {
+ if (WARN_ON(!codec->bus->core.ext_ops->hdev_detach))
+ return -EINVAL;
+ return codec->bus->core.ext_ops->hdev_detach(&codec->core);
+ }
+
if (codec->patch_ops.free)
codec->patch_ops.free(codec);
snd_hda_codec_cleanup_for_unbind(codec);
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 26d348b47867..0957813939e5 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -27,7 +27,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include <sound/asoundef.h>
#include <sound/tlv.h>
#include <sound/initval.h>
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a12e594d4e3b..fe2506672a72 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -130,8 +130,9 @@ static int azx_pcm_hw_params(struct snd_pcm_substream *substream,
azx_dev->core.bufsize = 0;
azx_dev->core.period_bytes = 0;
azx_dev->core.format_val = 0;
- ret = chip->ops->substream_alloc_pages(chip, substream,
- params_buffer_bytes(hw_params));
+ ret = snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+
unlock:
dsp_unlock(azx_dev);
return ret;
@@ -141,7 +142,6 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx_dev *azx_dev = get_azx_dev(substream);
- struct azx *chip = apcm->chip;
struct hda_pcm_stream *hinfo = to_hda_pcm_stream(substream);
int err;
@@ -152,7 +152,7 @@ static int azx_pcm_hw_free(struct snd_pcm_substream *substream)
snd_hda_codec_cleanup(apcm->codec, hinfo, substream);
- err = chip->ops->substream_free_pages(chip, substream);
+ err = snd_pcm_lib_free_pages(substream);
azx_stream(azx_dev)->prepared = 0;
dsp_unlock(azx_dev);
return err;
@@ -732,6 +732,7 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
int pcm_dev = cpcm->device;
unsigned int size;
int s, err;
+ int type = SNDRV_DMA_TYPE_DEV_SG;
list_for_each_entry(apcm, &chip->pcm_list, list) {
if (apcm->pcm->device == pcm_dev) {
@@ -770,7 +771,9 @@ int snd_hda_attach_pcm_stream(struct hda_bus *_bus, struct hda_codec *codec,
size = CONFIG_SND_HDA_PREALLOC_SIZE * 1024;
if (size > MAX_PREALLOC_SIZE)
size = MAX_PREALLOC_SIZE;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
+ if (chip->uc_buffer)
+ type = SNDRV_DMA_TYPE_DEV_UC_SG;
+ snd_pcm_lib_preallocate_pages_for_all(pcm, type,
chip->card->dev,
size, MAX_PREALLOC_SIZE);
return 0;
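
The uc_buffer flag used above is introduced by this series (see the hda_intel.c hunks below): controllers that cannot snoop CPU caches get their stream buffers from non-cached pages at allocation time, replacing hda_intel's old trick of flipping page attributes at runtime, and userspace mmap is made write-combined to match. The allocation side reduces to picking the DMA buffer type up front; a sketch, with struct azx as defined in hda_controller.h:

    #include <sound/pcm.h>

    static void prealloc_stream_bufs(struct snd_pcm *pcm, struct azx *chip,
                                     size_t size, size_t max)
    {
            /* SNDRV_DMA_TYPE_DEV_UC_SG yields non-cached SG pages. */
            int type = chip->uc_buffer ? SNDRV_DMA_TYPE_DEV_UC_SG
                                       : SNDRV_DMA_TYPE_DEV_SG;

            snd_pcm_lib_preallocate_pages_for_all(pcm, type,
                                                  chip->card->dev, size, max);
    }
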
@@ -1220,27 +1223,6 @@ void snd_hda_bus_reset(struct hda_bus *bus)
bus->in_reset = 0;
}
-static int get_jackpoll_interval(struct azx *chip)
-{
- int i;
- unsigned int j;
-
- if (!chip->jackpoll_ms)
- return 0;
-
- i = chip->jackpoll_ms[chip->dev_index];
- if (i == 0)
- return 0;
- if (i < 50 || i > 60000)
- j = 0;
- else
- j = msecs_to_jiffies(i);
- if (j == 0)
- dev_warn(chip->card->dev,
- "jackpoll_ms value out of range: %d\n", i);
- return j;
-}
-
/* HD-audio bus initialization */
int azx_bus_init(struct azx *chip, const char *model,
const struct hdac_io_ops *io_ops)
@@ -1323,7 +1305,7 @@ int azx_probe_codecs(struct azx *chip, unsigned int max_slots)
err = snd_hda_codec_new(&chip->bus, chip->card, c, &codec);
if (err < 0)
continue;
- codec->jackpoll_interval = get_jackpoll_interval(chip);
+ codec->jackpoll_interval = chip->jackpoll_interval;
codec->beep_mode = chip->beep_mode;
codecs++;
}
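
The jackpoll change converts the module-parameter value to jiffies once, at controller creation, rather than re-validating the raw per-card array on every poll; values outside 50..60000 ms now simply leave polling disabled (the old helper additionally warned). The conversion amounts to:

    #include <linux/jiffies.h>

    /* Sketch: bounds follow the removed get_jackpoll_interval() helper;
     * zero disables polling. */
    static unsigned long jackpoll_to_jiffies(int ms)
    {
            if (ms < 50 || ms > 60000)
                    return 0;
            return msecs_to_jiffies(ms);
    }
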
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index a68e75b00ea3..c95097bb5a0c 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -20,7 +20,7 @@
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include <sound/hda_register.h>
#define AZX_MAX_CODECS HDA_MAX_CODECS
@@ -76,7 +76,6 @@ struct azx_dev {
* when link position is not greater than FIFO size
*/
unsigned int insufficient:1;
- unsigned int wc_marked:1;
};
#define azx_stream(dev) (&(dev)->core)
@@ -88,11 +87,6 @@ struct azx;
struct hda_controller_ops {
/* Disable msi if supported, PCI only */
int (*disable_msi_reset_irq)(struct azx *);
- int (*substream_alloc_pages)(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size);
- int (*substream_free_pages)(struct azx *chip,
- struct snd_pcm_substream *substream);
void (*pcm_mmap_prepare)(struct snd_pcm_substream *substream,
struct vm_area_struct *area);
/* Check if current position is acceptable */
@@ -127,7 +121,7 @@ struct azx {
int capture_streams;
int capture_index_offset;
int num_streams;
- const int *jackpoll_ms; /* per-card jack poll interval */
+ int jackpoll_interval; /* jack poll interval in jiffies */
/* Register interaction. */
const struct hda_controller_ops *ops;
@@ -160,6 +154,7 @@ struct azx {
unsigned int msi:1;
unsigned int probing:1; /* codec probing phase */
unsigned int snoop:1;
+ unsigned int uc_buffer:1; /* non-cached pages for stream buffers */
unsigned int align_buffer_size:1;
unsigned int region_requested:1;
unsigned int disabled:1; /* disabled by vga_switcheroo */
@@ -175,11 +170,10 @@ struct azx {
#define azx_bus(chip) (&(chip)->bus.core)
#define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core)
-#ifdef CONFIG_X86
-#define azx_snoop(chip) ((chip)->snoop)
-#else
-#define azx_snoop(chip) true
-#endif
+static inline bool azx_snoop(struct azx *chip)
+{
+ return !IS_ENABLED(CONFIG_X86) || chip->snoop;
+}
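
Rewriting the azx_snoop() macro pair as one static inline built on IS_ENABLED() keeps both arms of the expression visible to the compiler on every architecture, so the field access is type-checked even where the whole thing constant-folds to true. The same transformation in isolation, with an illustrative struct:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct ctrl { unsigned int snoop:1; };  /* illustrative */

    static inline bool dev_snoop(const struct ctrl *c)
    {
            /* Folds to true on non-x86, but c->snoop still gets checked. */
            return !IS_ENABLED(CONFIG_X86) || c->snoop;
    }
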
/*
* macros for easy use
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index ba7fe9b6655c..806b12ed44a2 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -27,7 +27,7 @@
#include <sound/core.h>
#include <asm/unaligned.h>
#include <sound/hda_chmap.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
enum eld_versions {
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984ecdec3..276150f29cda 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -32,7 +32,7 @@
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/tlv.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index cc009a4a3d1d..268bba6ec985 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -23,7 +23,7 @@
#include <linux/compat.h>
#include <linux/nospec.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include <sound/hda_hwdep.h>
#include <sound/minors.h>
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index aa4c672dbaf7..d8eb2b5f51ae 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -63,7 +63,7 @@
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/firmware.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_controller.h"
#include "hda_intel.h"
@@ -399,61 +399,6 @@ static char *driver_short_names[] = {
[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
};
-#ifdef CONFIG_X86
-static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
-{
- int pages;
-
- if (azx_snoop(chip))
- return;
- if (!dmab || !dmab->area || !dmab->bytes)
- return;
-
-#ifdef CONFIG_SND_DMA_SGBUF
- if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
- struct snd_sg_buf *sgbuf = dmab->private_data;
- if (chip->driver_type == AZX_DRIVER_CMEDIA)
- return; /* deal with only CORB/RIRB buffers */
- if (on)
- set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
- else
- set_pages_array_wb(sgbuf->page_table, sgbuf->pages);
- return;
- }
-#endif
-
- pages = (dmab->bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (on)
- set_memory_wc((unsigned long)dmab->area, pages);
- else
- set_memory_wb((unsigned long)dmab->area, pages);
-}
-
-static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
- bool on)
-{
- __mark_pages_wc(chip, buf, on);
-}
-static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
- struct snd_pcm_substream *substream, bool on)
-{
- if (azx_dev->wc_marked != on) {
- __mark_pages_wc(chip, snd_pcm_get_dma_buf(substream), on);
- azx_dev->wc_marked = on;
- }
-}
-#else
-/* NOP for other archs */
-static inline void mark_pages_wc(struct azx *chip, struct snd_dma_buffer *buf,
- bool on)
-{
-}
-static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev,
- struct snd_pcm_substream *substream, bool on)
-{
-}
-#endif
-
static int azx_acquire_irq(struct azx *chip, int do_disconnect);
static void set_default_power_save(struct azx *chip);
@@ -1678,6 +1623,7 @@ static void azx_check_snoop_available(struct azx *chip)
dev_info(chip->card->dev, "Force to %s mode by module option\n",
snoop ? "snoop" : "non-snoop");
chip->snoop = snoop;
+ chip->uc_buffer = !snoop;
return;
}
@@ -1698,8 +1644,12 @@ static void azx_check_snoop_available(struct azx *chip)
snoop = false;
chip->snoop = snoop;
- if (!snoop)
+ if (!snoop) {
dev_info(chip->card->dev, "Force to non-snoop mode\n");
+ /* C-Media requires non-cached pages only for CORB/RIRB */
+ if (chip->driver_type != AZX_DRIVER_CMEDIA)
+ chip->uc_buffer = true;
+ }
}
static void azx_probe_work(struct work_struct *work)
@@ -1767,7 +1717,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
chip->driver_type = driver_caps & 0xff;
check_msi(chip);
chip->dev_index = dev;
- chip->jackpoll_ms = jackpoll_ms;
+ if (jackpoll_ms[dev] >= 50 && jackpoll_ms[dev] <= 60000)
+ chip->jackpoll_interval = msecs_to_jiffies(jackpoll_ms[dev]);
INIT_LIST_HEAD(&chip->pcm_list);
INIT_WORK(&hda->irq_pending_work, azx_irq_pending_work);
INIT_LIST_HEAD(&hda->list);
@@ -2090,55 +2041,24 @@ static int dma_alloc_pages(struct hdac_bus *bus,
struct snd_dma_buffer *buf)
{
struct azx *chip = bus_to_azx(bus);
- int err;
- err = snd_dma_alloc_pages(type,
- bus->dev,
- size, buf);
- if (err < 0)
- return err;
- mark_pages_wc(chip, buf, true);
- return 0;
+ if (!azx_snoop(chip) && type == SNDRV_DMA_TYPE_DEV)
+ type = SNDRV_DMA_TYPE_DEV_UC;
+ return snd_dma_alloc_pages(type, bus->dev, size, buf);
}
static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
{
- struct azx *chip = bus_to_azx(bus);
-
- mark_pages_wc(chip, buf, false);
snd_dma_free_pages(buf);
}
-static int substream_alloc_pages(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size)
-{
- struct azx_dev *azx_dev = get_azx_dev(substream);
- int ret;
-
- mark_runtime_wc(chip, azx_dev, substream, false);
- ret = snd_pcm_lib_malloc_pages(substream, size);
- if (ret < 0)
- return ret;
- mark_runtime_wc(chip, azx_dev, substream, true);
- return 0;
-}
-
-static int substream_free_pages(struct azx *chip,
- struct snd_pcm_substream *substream)
-{
- struct azx_dev *azx_dev = get_azx_dev(substream);
- mark_runtime_wc(chip, azx_dev, substream, false);
- return snd_pcm_lib_free_pages(substream);
-}
-
static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
struct vm_area_struct *area)
{
#ifdef CONFIG_X86
struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
struct azx *chip = apcm->chip;
- if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
+ if (chip->uc_buffer)
area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
#endif
}
@@ -2156,8 +2076,6 @@ static const struct hdac_io_ops pci_hda_io_ops = {
static const struct hda_controller_ops pci_hda_ops = {
.disable_msi_reset_irq = disable_msi_reset_irq,
- .substream_alloc_pages = substream_alloc_pages,
- .substream_free_pages = substream_free_pages,
.pcm_mmap_prepare = pcm_mmap_prepare,
.position_check = azx_position_check,
.link_power = azx_intel_link_power,
@@ -2257,8 +2175,12 @@ static struct snd_pci_quirk power_save_blacklist[] = {
/* https://bugzilla.redhat.com/show_bug.cgi?id=1581607 */
SND_PCI_QUIRK(0x1558, 0x3501, "Clevo W35xSS_370SS", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x1028, 0x0497, "Dell Precision T3600", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
/* Note the P55A-UD3 and Z87-D3HP share the subsys id for the HDA dev */
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte P55A-UD3 / Z87-D3HP", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
+ SND_PCI_QUIRK(0x8086, 0x2040, "Intel DZ77BH-55K", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=199607 */
SND_PCI_QUIRK(0x8086, 0x2057, "Intel NUC5i7RYB", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1520902 */
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index a33234e04d4f..c499727920e6 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -15,7 +15,7 @@
#include <sound/core.h>
#include <sound/control.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index c6b778b2580c..a65740419650 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <sound/core.h>
#include <linux/module.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
static int dump_coef = -1;
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 6ec79c58d48d..c154b19a0c45 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -14,7 +14,7 @@
#include <linux/string.h>
#include <linux/export.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include <sound/hda_hwdep.h>
#include <sound/minors.h>
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index 0621920f7617..dd7d4242d6d2 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -35,7 +35,7 @@
#include <sound/core.h>
#include <sound/initval.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_controller.h"
/* Defines for Nvidia Tegra HDA support */
@@ -99,19 +99,6 @@ static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
snd_dma_free_pages(buf);
}
-static int substream_alloc_pages(struct azx *chip,
- struct snd_pcm_substream *substream,
- size_t size)
-{
- return snd_pcm_lib_malloc_pages(substream, size);
-}
-
-static int substream_free_pages(struct azx *chip,
- struct snd_pcm_substream *substream)
-{
- return snd_pcm_lib_free_pages(substream);
-}
-
/*
* Register access ops. Tegra HDA register access is DWORD only.
*/
@@ -180,10 +167,7 @@ static const struct hdac_io_ops hda_tegra_io_ops = {
.dma_free_pages = dma_free_pages,
};
-static const struct hda_controller_ops hda_tegra_ops = {
- .substream_alloc_pages = substream_alloc_pages,
- .substream_free_pages = substream_free_pages,
-};
+static const struct hda_controller_ops hda_tegra_ops; /* nothing special */
static void hda_tegra_init(struct hda_tegra *hda)
{
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index fd476fb40e1b..ebfd0be885b3 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -24,7 +24,7 @@
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
diff --git a/sound/pci/hda/patch_ca0110.c b/sound/pci/hda/patch_ca0110.c
index c2d9ee9cfdc0..21d0f0610913 100644
--- a/sound/pci/hda/patch_ca0110.c
+++ b/sound/pci/hda/patch_ca0110.c
@@ -22,7 +22,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 0166a3d7cd55..0a24037184c3 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -31,8 +31,9 @@
#include <linux/types.h>
#include <linux/io.h>
#include <linux/pci.h>
+#include <asm/io.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
@@ -81,12 +82,12 @@
#define SCP_GET 1
#define EFX_FILE "ctefx.bin"
-#define SBZ_EFX_FILE "ctefx-sbz.bin"
+#define DESKTOP_EFX_FILE "ctefx-desktop.bin"
#define R3DI_EFX_FILE "ctefx-r3di.bin"
#ifdef CONFIG_SND_HDA_CODEC_CA0132_DSP
MODULE_FIRMWARE(EFX_FILE);
-MODULE_FIRMWARE(SBZ_EFX_FILE);
+MODULE_FIRMWARE(DESKTOP_EFX_FILE);
MODULE_FIRMWARE(R3DI_EFX_FILE);
#endif
@@ -152,7 +153,10 @@ enum {
XBASS_XOVER,
EQ_PRESET_ENUM,
SMART_VOLUME_ENUM,
- MIC_BOOST_ENUM
+ MIC_BOOST_ENUM,
+ AE5_HEADPHONE_GAIN_ENUM,
+ AE5_SOUND_FILTER_ENUM,
+ ZXR_HEADPHONE_GAIN
#define EFFECTS_COUNT (EFFECT_END_NID - EFFECT_START_NID)
};
@@ -666,6 +670,65 @@ static const struct ct_dsp_volume_ctl ca0132_alt_vol_ctls[] = {
}
};
+/* Values for ca0113_mmio_command_set for selecting output. */
+#define AE5_CA0113_OUT_SET_COMMANDS 6
+struct ae5_ca0113_output_set {
+ unsigned int group[AE5_CA0113_OUT_SET_COMMANDS];
+ unsigned int target[AE5_CA0113_OUT_SET_COMMANDS];
+ unsigned int vals[AE5_CA0113_OUT_SET_COMMANDS];
+};
+
+static const struct ae5_ca0113_output_set ae5_ca0113_output_presets[] = {
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x00, 0x00, 0x40, 0x00, 0x00, 0x3f }
+ },
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x3f, 0x3f, 0x00, 0x00, 0x00, 0x00 }
+ },
+ { .group = { 0x30, 0x30, 0x48, 0x48, 0x48, 0x30 },
+ .target = { 0x2e, 0x30, 0x0d, 0x17, 0x19, 0x32 },
+ .vals = { 0x00, 0x00, 0x40, 0x00, 0x00, 0x3f }
+ }
+};
+
+/* ae5 ca0113 command sequences to set headphone gain levels. */
+#define AE5_HEADPHONE_GAIN_PRESET_MAX_COMMANDS 4
+struct ae5_headphone_gain_set {
+ char *name;
+ unsigned int vals[AE5_HEADPHONE_GAIN_PRESET_MAX_COMMANDS];
+};
+
+static const struct ae5_headphone_gain_set ae5_headphone_gain_presets[] = {
+	{ .name = "Low (16-31)",
+ .vals = { 0xff, 0x2c, 0xf5, 0x32 }
+ },
+	{ .name = "Medium (32-149)",
+ .vals = { 0x38, 0xa8, 0x3e, 0x4c }
+ },
+	{ .name = "High (150-600)",
+ .vals = { 0xff, 0xff, 0xff, 0x7f }
+ }
+};
+
+struct ae5_filter_set {
+ char *name;
+ unsigned int val;
+};
+
+static const struct ae5_filter_set ae5_filter_presets[] = {
+ { .name = "Slow Roll Off",
+ .val = 0xa0
+ },
+ { .name = "Minimum Phase",
+ .val = 0xc0
+ },
+ { .name = "Fast Roll Off",
+ .val = 0x80
+ }
+};
+
enum hda_cmd_vendor_io {
/* for DspIO node */
VENDOR_DSPIO_SCP_WRITE_DATA_LOW = 0x000,
@@ -685,6 +748,9 @@ enum hda_cmd_vendor_io {
VENDOR_CHIPIO_DATA_LOW = 0x300,
VENDOR_CHIPIO_DATA_HIGH = 0x400,
+ VENDOR_CHIPIO_8051_WRITE_DIRECT = 0x500,
+ VENDOR_CHIPIO_8051_READ_DIRECT = 0xD00,
+
VENDOR_CHIPIO_GET_PARAMETER = 0xF00,
VENDOR_CHIPIO_STATUS = 0xF01,
VENDOR_CHIPIO_HIC_POST_READ = 0x702,
@@ -692,6 +758,9 @@ enum hda_cmd_vendor_io {
VENDOR_CHIPIO_8051_DATA_WRITE = 0x707,
VENDOR_CHIPIO_8051_DATA_READ = 0xF07,
+ VENDOR_CHIPIO_8051_PMEM_READ = 0xF08,
+ VENDOR_CHIPIO_8051_IRAM_WRITE = 0x709,
+ VENDOR_CHIPIO_8051_IRAM_READ = 0xF09,
VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE = 0x70A,
VENDOR_CHIPIO_CT_EXTENSIONS_GET = 0xF0A,
@@ -798,6 +867,12 @@ enum control_param_id {
* impedance is selected*/
CONTROL_PARAM_PORTD_160OHM_GAIN = 10,
+ /*
+	 * This control param name was found in the 8051 memory, and makes
+	 * sense given that the AE-5 uses it and has the ASI flag set.
+ */
+ CONTROL_PARAM_ASI = 23,
+
/* Stream Control */
/* Select stream with the given ID */
@@ -955,7 +1030,11 @@ struct ca0132_spec {
long eq_preset_val;
unsigned int tlv[4];
struct hda_vmaster_mute_hook vmaster_mute;
-
+ /* AE-5 Control values */
+ unsigned char ae5_headphone_gain_val;
+ unsigned char ae5_filter_val;
+ /* ZxR Control Values */
+ unsigned char zxr_gain_set;
struct hda_codec *codec;
struct delayed_work unsol_hp_work;
@@ -995,8 +1074,11 @@ enum {
QUIRK_ALIENWARE,
QUIRK_ALIENWARE_M17XR4,
QUIRK_SBZ,
+ QUIRK_ZXR,
+ QUIRK_ZXR_DBPRO,
QUIRK_R3DI,
QUIRK_R3D,
+ QUIRK_AE5,
};
static const struct hda_pintbl alienware_pincfgs[] = {
@@ -1028,6 +1110,21 @@ static const struct hda_pintbl sbz_pincfgs[] = {
{}
};
+/* Sound Blaster ZxR pin configs taken from Windows Driver */
+static const struct hda_pintbl zxr_pincfgs[] = {
+ { 0x0b, 0x01047110 }, /* Port G -- Lineout FRONT L/R */
+	{ 0x0c, 0x414510f0 }, /* SPDIF Out 1 - Disabled */
+ { 0x0d, 0x014510f0 }, /* Digital Out */
+	{ 0x0e, 0x41c520f0 }, /* SPDIF In - Disabled */
+ { 0x0f, 0x0122711f }, /* Port A -- BackPanel HP */
+ { 0x10, 0x01017111 }, /* Port D -- Center/LFE */
+ { 0x11, 0x01017114 }, /* Port B -- LineMicIn2 / Rear L/R */
+ { 0x12, 0x01a271f0 }, /* Port C -- LineIn1 */
+	{ 0x13, 0x908700f0 }, /* What U Hear In */
+ { 0x18, 0x50d000f0 }, /* N/A */
+ {}
+};
+
/* Recon3D pin configs taken from Windows Driver */
static const struct hda_pintbl r3d_pincfgs[] = {
{ 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
@@ -1043,6 +1140,21 @@ static const struct hda_pintbl r3d_pincfgs[] = {
{}
};
+/* Sound Blaster AE-5 pin configs taken from Windows Driver */
+static const struct hda_pintbl ae5_pincfgs[] = {
+ { 0x0b, 0x01017010 }, /* Port G -- Lineout FRONT L/R */
+ { 0x0c, 0x014510f0 }, /* SPDIF Out 1 */
+ { 0x0d, 0x014510f0 }, /* Digital Out */
+ { 0x0e, 0x01c510f0 }, /* SPDIF In */
+ { 0x0f, 0x01017114 }, /* Port A -- Rear L/R. */
+ { 0x10, 0x01017012 }, /* Port D -- Center/LFE or FP Hp */
+ { 0x11, 0x01a170ff }, /* Port B -- LineMicIn2 / Rear Headphone */
+ { 0x12, 0x01a170f0 }, /* Port C -- LineIn1 */
+	{ 0x13, 0x908700f0 }, /* What U Hear In */
+ { 0x18, 0x50d000f0 }, /* N/A */
+ {}
+};
+
/* Recon3D integrated pin configs taken from Windows Driver */
static const struct hda_pintbl r3di_pincfgs[] = {
{ 0x0b, 0x01014110 }, /* Port G -- Lineout FRONT L/R */
@@ -1069,6 +1181,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
SND_PCI_QUIRK(0x1102, 0x0013, "Recon3D", QUIRK_R3D),
+ SND_PCI_QUIRK(0x1102, 0x0051, "Sound Blaster AE-5", QUIRK_AE5),
{}
};
@@ -1454,6 +1567,20 @@ static void chipio_set_conn_rate(struct hda_codec *codec,
}
/*
+ * Writes to the 8051's internal address space directly instead of indirectly,
+ * giving access to the special function registers located at addresses
+ * 0x80-0xFF.
+ */
+static void chipio_8051_write_direct(struct hda_codec *codec,
+ unsigned int addr, unsigned int data)
+{
+ unsigned int verb;
+
+ verb = VENDOR_CHIPIO_8051_WRITE_DIRECT | data;
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, verb, addr);
+}
+
+/*
* Enable clocks.
*/
static void chipio_enable_clocks(struct hda_codec *codec)
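
Note the encoding used by chipio_8051_write_direct() above: a codec verb in this form carries only an 8-bit parameter, so the data byte is folded into the low bits of the verb id (VENDOR_CHIPIO_8051_WRITE_DIRECT | data) while the parameter field carries the 8051 address. Schematically:

    /* Sketch mirroring the function above; one codec write reaches the
     * 8051 SFR range 0x80-0xFF. */
    static void sfr_write(struct hda_codec *codec, unsigned int addr,
                          unsigned int data)
    {
            unsigned int verb = VENDOR_CHIPIO_8051_WRITE_DIRECT | (data & 0xff);

            snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, verb, addr);
    }
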
@@ -3088,7 +3215,9 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
}
/*
- * Setup GPIO for the other variants of Core3D.
+ * ca0113 related functions. The ca0113 acts as the HDA bus for the pci-e
+ * based cards, and has a second mmio region, region2, that's used for special
+ * commands.
*/
/*
@@ -3096,8 +3225,11 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
 * the mmio address 0x320 is used to set GPIO pins. The format of the data is
 * as follows: the first eight bits are just the number of the pin. So far,
 * I've only seen this number go to 7.
+ * AE-5 note: The AE-5 seems to use pins 2 and 3 to set the color value of
+ * the on-card LED: pin 2 appears to carry the data bit, while pin 3 is
+ * toggled on and then off to send that bit.
*/
-static void ca0132_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
+static void ca0113_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
bool enable)
{
struct ca0132_spec *spec = codec->spec;
@@ -3110,6 +3242,89 @@ static void ca0132_mmio_gpio_set(struct hda_codec *codec, unsigned int gpio_pin,
}
/*
+ * Special PCI region2 commands that are only used by the AE-5. They follow
+ * a set format and require reads at certain points to seemingly 'clear'
+ * the response data. My first tests skipped these reads, which caused
+ * the card to lock up until the memory was read. These commands
+ * seem to work with three distinct values that I've taken to calling group,
+ * target-id, and value.
+ */
+static void ca0113_mmio_command_set(struct hda_codec *codec, unsigned int group,
+ unsigned int target, unsigned int value)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int write_val;
+
+ writel(0x0000007e, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ writel(0x0000005a, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ writel(group, spec->mem_base + 0x804);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ write_val = (target & 0xff);
+ write_val |= (value << 8);
+
+ writel(write_val, spec->mem_base + 0x204);
+ /*
+	 * A delay is needed here, or else it runs too fast and works inconsistently.
+ */
+ msleep(20);
+
+ readl(spec->mem_base + 0x860);
+ readl(spec->mem_base + 0x854);
+ readl(spec->mem_base + 0x840);
+
+ writel(0x00800004, spec->mem_base + 0x20c);
+ writel(0x00000000, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+}
+
+/*
+ * This second type of command is used for setting the sound filter type.
+ */
+static void ca0113_mmio_command_set_type2(struct hda_codec *codec,
+ unsigned int group, unsigned int target, unsigned int value)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int write_val;
+
+ writel(0x0000007e, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ writel(0x0000005a, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+
+ writel(0x00800003, spec->mem_base + 0x20c);
+ writel(group, spec->mem_base + 0x804);
+
+ writel(0x00800005, spec->mem_base + 0x20c);
+ write_val = (target & 0xff);
+ write_val |= (value << 8);
+
+ writel(write_val, spec->mem_base + 0x204);
+ msleep(20);
+ readl(spec->mem_base + 0x860);
+ readl(spec->mem_base + 0x854);
+ readl(spec->mem_base + 0x840);
+
+ writel(0x00800004, spec->mem_base + 0x20c);
+ writel(0x00000000, spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+ readl(spec->mem_base + 0x210);
+}
+
+/*
+ * Setup GPIO for the other variants of Core3D.
+ */
+
+/*
* Sets up the GPIO pins so that they are discoverable. If this isn't done,
* the card shows as having no GPIO pins.
*/
@@ -3119,6 +3334,7 @@ static void ca0132_gpio_init(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
+ case QUIRK_AE5:
snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
snd_hda_codec_write(codec, 0x01, 0, 0x790, 0x23);
@@ -3928,6 +4144,138 @@ exit:
return err < 0 ? err : 0;
}
+static int ae5_headphone_gain_set(struct hda_codec *codec, long val);
+static int zxr_headphone_gain_set(struct hda_codec *codec, long val);
+static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val);
+
+static void ae5_mmio_select_out(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int i;
+
+ for (i = 0; i < AE5_CA0113_OUT_SET_COMMANDS; i++)
+ ca0113_mmio_command_set(codec,
+ ae5_ca0113_output_presets[spec->cur_out_type].group[i],
+ ae5_ca0113_output_presets[spec->cur_out_type].target[i],
+ ae5_ca0113_output_presets[spec->cur_out_type].vals[i]);
+}
+
+/*
+ * These are the commands needed to setup output on each of the different card
+ * types.
+ */
+static void ca0132_alt_select_out_quirk_handler(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp;
+
+ switch (spec->cur_out_type) {
+ case SPEAKER_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x18);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, true);
+ ca0113_mmio_gpio_set(codec, 3, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ zxr_headphone_gain_set(codec, 0);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ r3di_gpio_out_set(codec, R3DI_LINE_OUT);
+ break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec, 2);
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa4);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ case HEADPHONE_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, true);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, false);
+ chipio_set_control_param(codec, 0x0d, 0x12);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+ ca0113_mmio_gpio_set(codec, 5, true);
+ zxr_headphone_gain_set(codec, spec->zxr_gain_set);
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ r3di_gpio_out_set(codec, R3DI_HEADPHONE_OUT);
+ break;
+ case QUIRK_R3D:
+ chipio_set_control_param(codec, 0x0d, 0x21);
+ ca0113_mmio_gpio_set(codec, 0x1, false);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec,
+ spec->ae5_headphone_gain_val);
+ tmp = FLOAT_ONE;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa1);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ case SURROUND_OUT:
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ ca0113_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x18);
+ break;
+ case QUIRK_ZXR:
+ ca0113_mmio_gpio_set(codec, 2, true);
+ ca0113_mmio_gpio_set(codec, 3, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ zxr_headphone_gain_set(codec, 0);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_R3DI:
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ r3di_gpio_out_set(codec, R3DI_LINE_OUT);
+ break;
+ case QUIRK_R3D:
+ ca0113_mmio_gpio_set(codec, 1, true);
+ chipio_set_control_param(codec, 0x0d, 0x24);
+ break;
+ case QUIRK_AE5:
+ ae5_mmio_select_out(codec);
+ ae5_headphone_gain_set(codec, 2);
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ chipio_set_control_param(codec, 0x0d, 0xa4);
+ chipio_write(codec, 0x18b03c, 0x00000012);
+ break;
+ }
+ break;
+ }
+}
+
/*
 * This function behaves similarly to the ca0132_select_out function above,
* except with a few differences. It adds the ability to select the current
@@ -3978,26 +4326,11 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
if (err < 0)
goto exit;
+ ca0132_alt_select_out_quirk_handler(codec);
+
switch (spec->cur_out_type) {
case SPEAKER_OUT:
codec_dbg(codec, "%s speaker\n", __func__);
- /*speaker out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x18);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x24);
- r3di_gpio_out_set(codec, R3DI_LINE_OUT);
- break;
- case QUIRK_R3D:
- chipio_set_control_param(codec, 0x0D, 0x24);
- ca0132_mmio_gpio_set(codec, 1, true);
- break;
- }
/* disable headphone node */
pin_ctl = snd_hda_codec_read(codec, spec->out_pins[1], 0,
@@ -4021,23 +4354,6 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
break;
case HEADPHONE_OUT:
codec_dbg(codec, "%s hp\n", __func__);
- /* Headphone out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, true);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, false);
- chipio_set_control_param(codec, 0x0D, 0x12);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x21);
- r3di_gpio_out_set(codec, R3DI_HEADPHONE_OUT);
- break;
- case QUIRK_R3D:
- chipio_set_control_param(codec, 0x0D, 0x21);
- ca0132_mmio_gpio_set(codec, 0x1, false);
- break;
- }
snd_hda_codec_write(codec, spec->out_pins[0], 0,
AC_VERB_SET_EAPD_BTLENABLE, 0x00);
@@ -4067,23 +4383,7 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
break;
case SURROUND_OUT:
codec_dbg(codec, "%s surround\n", __func__);
- /* Surround out config*/
- switch (spec->quirk) {
- case QUIRK_SBZ:
- ca0132_mmio_gpio_set(codec, 7, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x18);
- break;
- case QUIRK_R3DI:
- chipio_set_control_param(codec, 0x0D, 0x24);
- r3di_gpio_out_set(codec, R3DI_LINE_OUT);
- break;
- case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 1, true);
- chipio_set_control_param(codec, 0x0D, 0x24);
- break;
- }
+
/* enable line out node */
pin_ctl = snd_hda_codec_read(codec, spec->out_pins[0], 0,
AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -4108,14 +4408,21 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
snd_hda_set_pin_ctl(codec, spec->out_pins[3],
pin_ctl | PIN_OUT);
- if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
- dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_ONE);
- else
- dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_EIGHT);
+ dspio_set_uint_param(codec, 0x80, 0x04, FLOAT_EIGHT);
break;
}
+ /*
+ * Surround always sets its SCP command req 0x04 to FLOAT_EIGHT. With
+ * that set, though, X_BASS cannot be enabled. So, if OutFX is enabled,
+ * we need to make sure X_BASS is off, otherwise everything sounds
+ * muffled. Running ca0132_effects_set with X_BASS as the effect should
+ * sort this out.
+ */
+ if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
+ ca0132_effects_set(codec, X_BASS,
+ spec->effects_switch[X_BASS - EFFECT_START_NID]);
- /* run through the output dsp commands for line-out */
+ /* run through the output dsp commands for the selected output. */
for (i = 0; i < alt_out_presets[spec->cur_out_type].commands; i++) {
err = dspio_set_uint_param(codec,
alt_out_presets[spec->cur_out_type].mids[i],
@@ -4152,7 +4459,6 @@ static void ca0132_unsol_hp_delayed(struct work_struct *work)
static void ca0132_set_dmic(struct hda_codec *codec, int enable);
static int ca0132_mic_boost_set(struct hda_codec *codec, long val);
-static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val);
static void resume_mic1(struct hda_codec *codec, unsigned int oldval);
static int stop_mic1(struct hda_codec *codec);
static int ca0132_cvoice_switch_set(struct hda_codec *codec);
@@ -4341,13 +4647,20 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ tmp = FLOAT_THREE;
+ break;
+ case QUIRK_ZXR:
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
tmp = FLOAT_ONE;
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
+ tmp = FLOAT_THREE;
+ break;
default:
tmp = FLOAT_ONE;
break;
@@ -4362,10 +4675,19 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
-
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x0000000C);
+ break;
+ case QUIRK_ZXR:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x000000CC);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x0000004C);
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
@@ -4374,11 +4696,14 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_REAR_MIC);
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x00);
+ break;
}
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
@@ -4389,11 +4714,13 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ case QUIRK_AE5:
chipio_write(codec, 0x18B098, 0x00000000);
chipio_write(codec, 0x18B09C, 0x00000000);
+ break;
}
-
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
break;
@@ -4401,14 +4728,18 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
- ca0132_mmio_gpio_set(codec, 0, true);
- ca0132_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
tmp = FLOAT_THREE;
break;
case QUIRK_R3DI:
r3di_gpio_mic_set(codec, R3DI_FRONT_MIC);
tmp = FLOAT_ONE;
break;
+ case QUIRK_AE5:
+ ca0113_mmio_command_set(codec, 0x48, 0x28, 0x3f);
+ tmp = FLOAT_THREE;
+ break;
default:
tmp = FLOAT_ONE;
break;
@@ -4424,9 +4755,15 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- if (spec->quirk == QUIRK_SBZ) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
chipio_write(codec, 0x18B098, 0x0000000C);
chipio_write(codec, 0x18B09C, 0x000000CC);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18B098, 0x0000000C);
+ chipio_write(codec, 0x18B09C, 0x0000004C);
+ break;
}
ca0132_alt_mic_boost_set(codec, spec->mic_boost_enum_val);
break;
@@ -4435,7 +4772,6 @@ static int ca0132_alt_select_in(struct hda_codec *codec)
snd_hda_power_down_pm(codec);
return 0;
-
}
/*
@@ -4507,6 +4843,8 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
/* if PE is off, turn off out effects. */
if (!spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
val = 0;
+ if (spec->cur_out_type == SURROUND_OUT && nid == X_BASS)
+ val = 0;
}
/* for in effect, qualify with CrystalVoice */
@@ -4520,7 +4858,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
val = 0;
/* If Voice Focus on SBZ, set to two channel. */
- if ((nid == VOICE_FOCUS) && (spec->quirk == QUIRK_SBZ)
+ if ((nid == VOICE_FOCUS) && (spec->use_pci_mmio)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4539,7 +4877,7 @@ static int ca0132_effects_set(struct hda_codec *codec, hda_nid_t nid, long val)
* For SBZ noise reduction, there's an extra command
* to module ID 0x47. No clue why.
*/
- if ((nid == NOISE_REDUCTION) && (spec->quirk == QUIRK_SBZ)
+ if ((nid == NOISE_REDUCTION) && (spec->use_pci_mmio)
&& (spec->cur_mic_type != REAR_LINE_IN)) {
if (spec->effects_switch[CRYSTAL_VOICE -
EFFECT_START_NID]) {
@@ -4678,6 +5016,27 @@ static int ca0132_alt_mic_boost_set(struct hda_codec *codec, long val)
return ret;
}
+static int ae5_headphone_gain_set(struct hda_codec *codec, long val)
+{
+ unsigned int i;
+
+ for (i = 0; i < 4; i++)
+ ca0113_mmio_command_set(codec, 0x48, 0x11 + i,
+ ae5_headphone_gain_presets[val].vals[i]);
+ return 0;
+}
+
+/*
+ * GPIO pin 1 toggles a relay that, apparently, switches the headphone
+ * amplifier into a mode able to drive a 600 ohm load.
+ */
+static int zxr_headphone_gain_set(struct hda_codec *codec, long val)
+{
+ ca0113_mmio_gpio_set(codec, 1, val);
+
+ return 0;
+}
+
static int ca0132_vnode_switch_set(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -4942,6 +5301,112 @@ static int ca0132_alt_mic_boost_put(struct snd_kcontrol *kcontrol,
return 1;
}
+/*
+ * Sound BlasterX AE-5 Headphone Gain Controls.
+ */
+#define AE5_HEADPHONE_GAIN_MAX 3
+static int ae5_headphone_gain_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ char *sfx = " Ohms)";
+ char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = AE5_HEADPHONE_GAIN_MAX;
+ if (uinfo->value.enumerated.item >= AE5_HEADPHONE_GAIN_MAX)
+ uinfo->value.enumerated.item = AE5_HEADPHONE_GAIN_MAX - 1;
+ sprintf(namestr, "%s %s",
+ ae5_headphone_gain_presets[uinfo->value.enumerated.item].name,
+ sfx);
+ strcpy(uinfo->value.enumerated.name, namestr);
+ return 0;
+}
+
+static int ae5_headphone_gain_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+
+ ucontrol->value.enumerated.item[0] = spec->ae5_headphone_gain_val;
+ return 0;
+}
+
+static int ae5_headphone_gain_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+ int sel = ucontrol->value.enumerated.item[0];
+ unsigned int items = AE5_HEADPHONE_GAIN_MAX;
+
+ if (sel >= items)
+ return 0;
+
+ codec_dbg(codec, "ae5_headphone_gain: boost=%d\n",
+ sel);
+
+ spec->ae5_headphone_gain_val = sel;
+
+ if (spec->out_enum_val == HEADPHONE_OUT)
+ ae5_headphone_gain_set(codec, spec->ae5_headphone_gain_val);
+
+ return 1;
+}
+
+/*
+ * Sound BlasterX AE-5 sound filter enumerated control.
+ */
+#define AE5_SOUND_FILTER_MAX 3
+
+static int ae5_sound_filter_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ char namestr[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
+ uinfo->count = 1;
+ uinfo->value.enumerated.items = AE5_SOUND_FILTER_MAX;
+ if (uinfo->value.enumerated.item >= AE5_SOUND_FILTER_MAX)
+ uinfo->value.enumerated.item = AE5_SOUND_FILTER_MAX - 1;
+ sprintf(namestr, "%s",
+ ae5_filter_presets[uinfo->value.enumerated.item].name);
+ strcpy(uinfo->value.enumerated.name, namestr);
+ return 0;
+}
+
+static int ae5_sound_filter_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+
+ ucontrol->value.enumerated.item[0] = spec->ae5_filter_val;
+ return 0;
+}
+
+static int ae5_sound_filter_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct ca0132_spec *spec = codec->spec;
+ int sel = ucontrol->value.enumerated.item[0];
+ unsigned int items = AE5_SOUND_FILTER_MAX;
+
+ if (sel >= items)
+ return 0;
+
+ codec_dbg(codec, "ae5_sound_filter: %s\n",
+ ae5_filter_presets[sel].name);
+
+ spec->ae5_filter_val = sel;
+
+ ca0113_mmio_command_set_type2(codec, 0x48, 0x07,
+ ae5_filter_presets[sel].val);
+
+ return 1;
+}
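
As an aside, ALSA provides snd_ctl_enum_info() for filling in enumerated
control info; a hedged sketch of how ae5_sound_filter_info() could be
expressed with it, assuming a flat array of preset names (the helper name and
the array here are hypothetical):

	static const char * const ae5_filter_names[] = {
		"Slow Roll Off", "Minimum Phase", "Fast Roll Off",
	};

	static int ae5_sound_filter_info_alt(struct snd_kcontrol *kcontrol,
					     struct snd_ctl_elem_info *uinfo)
	{
		/* 1 channel, AE5_SOUND_FILTER_MAX (3) items */
		return snd_ctl_enum_info(uinfo, 1, ARRAY_SIZE(ae5_filter_names),
					 ae5_filter_names);
	}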
/*
* Input Select Control for alternative ca0132 codecs. This exists because
@@ -5330,6 +5795,16 @@ static int ca0132_switch_put(struct snd_kcontrol *kcontrol,
goto exit;
}
+ if (nid == ZXR_HEADPHONE_GAIN) {
+ spec->zxr_gain_set = *valp;
+ if (spec->cur_out_type == HEADPHONE_OUT)
+ changed = zxr_headphone_gain_set(codec, *valp);
+ else
+ changed = 0;
+
+ goto exit;
+ }
+
exit:
snd_hda_power_down(codec);
return changed;
@@ -5705,6 +6180,50 @@ static int ca0132_alt_add_mic_boost_enum(struct hda_codec *codec)
}
/*
+ * Add a headphone gain enumerated control for the AE-5. This switches between
+ * three modes: low, medium, and high. When a non-headphone output is selected,
+ * it is automatically set to high, matching the Windows driver's behavior.
+ */
+static int ae5_add_headphone_gain_enum(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO("AE-5: Headphone Gain",
+ AE5_HEADPHONE_GAIN_ENUM, 1, 0, HDA_OUTPUT);
+ knew.info = ae5_headphone_gain_info;
+ knew.get = ae5_headphone_gain_get;
+ knew.put = ae5_headphone_gain_put;
+ return snd_hda_ctl_add(codec, AE5_HEADPHONE_GAIN_ENUM,
+ snd_ctl_new1(&knew, codec));
+}
+
+/*
+ * Add a sound filter enumerated control for the AE-5. This adds three
+ * settings: Slow Roll Off, Minimum Phase, and Fast Roll Off. From what I've
+ * read, it changes the DAC's interpolation filter.
+ */
+static int ae5_add_sound_filter_enum(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ HDA_CODEC_MUTE_MONO("AE-5: Sound Filter",
+ AE5_SOUND_FILTER_ENUM, 1, 0, HDA_OUTPUT);
+ knew.info = ae5_sound_filter_info;
+ knew.get = ae5_sound_filter_get;
+ knew.put = ae5_sound_filter_put;
+ return snd_hda_ctl_add(codec, AE5_SOUND_FILTER_ENUM,
+ snd_ctl_new1(&knew, codec));
+}
+
+static int zxr_add_headphone_gain_switch(struct hda_codec *codec)
+{
+ struct snd_kcontrol_new knew =
+ CA0132_CODEC_MUTE_MONO("ZxR: 600 Ohm Gain",
+ ZXR_HEADPHONE_GAIN, 1, HDA_OUTPUT);
+
+ return snd_hda_ctl_add(codec, ZXR_HEADPHONE_GAIN,
+ snd_ctl_new1(&knew, codec));
+}
+
+/*
* Need to create slave controls for the alternate codecs that have surround
* capabilities.
*/
@@ -5847,7 +6366,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
NULL, ca0132_alt_slave_pfxs,
"Playback Switch",
true, &spec->vmaster_mute.sw_kctl);
-
+ if (err < 0)
+ return err;
}
/* Add in and out effects controls.
@@ -5855,8 +6375,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
*/
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT;
for (i = 0; i < num_fx; i++) {
- /* SBZ and R3D break if Echo Cancellation is used. */
- if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D) {
+ /* Desktop cards break if Echo Cancellation is used. */
+ if (spec->use_pci_mmio) {
if (i == (ECHO_CANCELLATION - IN_EFFECT_START_NID +
OUT_EFFECTS_COUNT))
continue;
@@ -5874,8 +6394,14 @@ static int ca0132_build_controls(struct hda_codec *codec)
* prefix, and change PlayEnhancement and CrystalVoice to match.
*/
if (spec->use_alt_controls) {
- ca0132_alt_add_svm_enum(codec);
- add_ca0132_alt_eq_presets(codec);
+ err = ca0132_alt_add_svm_enum(codec);
+ if (err < 0)
+ return err;
+
+ err = add_ca0132_alt_eq_presets(codec);
+ if (err < 0)
+ return err;
+
err = add_fx_switch(codec, PLAY_ENHANCEMENT,
"Enable OutFX", 0);
if (err < 0)
@@ -5912,7 +6438,9 @@ static int ca0132_build_controls(struct hda_codec *codec)
if (err < 0)
return err;
}
- add_voicefx(codec);
+ err = add_voicefx(codec);
+ if (err < 0)
+ return err;
/*
* If the codec uses alt_functions, you need the enumerated controls
@@ -5920,9 +6448,36 @@ static int ca0132_build_controls(struct hda_codec *codec)
* setting control.
*/
if (spec->use_alt_functions) {
- ca0132_alt_add_output_enum(codec);
- ca0132_alt_add_input_enum(codec);
- ca0132_alt_add_mic_boost_enum(codec);
+ err = ca0132_alt_add_output_enum(codec);
+ if (err < 0)
+ return err;
+ err = ca0132_alt_add_mic_boost_enum(codec);
+ if (err < 0)
+ return err;
+ /*
+ * The ZxR only has a microphone input; there is no front panel
+ * header on the card, and aux-in is handled by the DBPro board.
+ */
+ if (spec->quirk != QUIRK_ZXR) {
+ err = ca0132_alt_add_input_enum(codec);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ if (spec->quirk == QUIRK_AE5) {
+ err = ae5_add_headphone_gain_enum(codec);
+ if (err < 0)
+ return err;
+ err = ae5_add_sound_filter_enum(codec);
+ if (err < 0)
+ return err;
+ }
+
+ if (spec->quirk == QUIRK_ZXR) {
+ err = zxr_add_headphone_gain_switch(codec);
+ if (err < 0)
+ return err;
}
#ifdef ENABLE_TUNING_CONTROLS
add_tuning_ctls(codec);
@@ -5955,6 +6510,27 @@ static int ca0132_build_controls(struct hda_codec *codec)
return 0;
}
+static int dbpro_build_controls(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ int err = 0;
+
+ if (spec->dig_out) {
+ err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out,
+ spec->dig_out);
+ if (err < 0)
+ return err;
+ }
+
+ if (spec->dig_in) {
+ err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
/*
* PCM
*/
@@ -6058,6 +6634,40 @@ static int ca0132_build_pcms(struct hda_codec *codec)
return 0;
}
+static int dbpro_build_pcms(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ struct hda_pcm *info;
+
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Alt Analog");
+ if (!info)
+ return -ENOMEM;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = 1;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
+
+
+ if (!spec->dig_out && !spec->dig_in)
+ return 0;
+
+ info = snd_hda_codec_pcm_new(codec, "CA0132 Digital");
+ if (!info)
+ return -ENOMEM;
+ info->pcm_type = HDA_PCM_TYPE_SPDIF;
+ if (spec->dig_out) {
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK] =
+ ca0132_pcm_digital_playback;
+ info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out;
+ }
+ if (spec->dig_in) {
+ info->stream[SNDRV_PCM_STREAM_CAPTURE] =
+ ca0132_pcm_digital_capture;
+ info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in;
+ }
+
+ return 0;
+}
+
static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac)
{
if (pin) {
@@ -6238,69 +6848,48 @@ static void ca0132_refresh_widget_caps(struct hda_codec *codec)
}
/*
- * Recon3D r3d_setup_defaults sub functions.
+ * Creates a dummy stream to bind the output to. This seems to have to be done
+ * after changing the main output's source and destination streams.
*/
-
-static void r3d_dsp_scp_startup(struct hda_codec *codec)
+static void ca0132_alt_create_dummy_stream(struct hda_codec *codec)
{
- unsigned int tmp;
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
-
- tmp = 0x00000001;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
-
- tmp = 0x00000004;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000005;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
-}
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int stream_format;
-static void r3d_dsp_initial_mic_setup(struct hda_codec *codec)
-{
- unsigned int tmp;
+ stream_format = snd_hdac_calc_stream_format(48000, 2,
+ SNDRV_PCM_FORMAT_S32_LE, 32, 0);
- /* Mic 1 Setup */
- chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
- chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- /* This ConnPointID is unique to Recon3Di. Haven't seen it elsewhere */
- chipio_set_conn_rate(codec, 0x0F, SR_96_000);
- tmp = FLOAT_ONE;
- dspio_set_uint_param(codec, 0x80, 0x00, tmp);
+ snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
+ 0, stream_format);
- /* Mic 2 Setup, even though it isn't connected on SBZ */
- chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);
- chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);
- chipio_set_conn_rate(codec, 0x0F, SR_96_000);
- tmp = FLOAT_ZERO;
- dspio_set_uint_param(codec, 0x80, 0x01, tmp);
+ snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
}
/*
- * Initialize Sound Blaster Z analog microphones.
+ * Initialize the mic for non-Chromebook ca0132 implementations.
*/
-static void sbz_init_analog_mics(struct hda_codec *codec)
+static void ca0132_alt_init_analog_mics(struct hda_codec *codec)
{
+ struct ca0132_spec *spec = codec->spec;
unsigned int tmp;
/* Mic 1 Setup */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN1, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT1, SR_96_000);
- tmp = FLOAT_THREE;
+ if (spec->quirk == QUIRK_R3DI) {
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
+ tmp = FLOAT_ONE;
+ } else
+ tmp = FLOAT_THREE;
dspio_set_uint_param(codec, 0x80, 0x00, tmp);
- /* Mic 2 Setup, even though it isn't connected on SBZ */
+ /* Mic 2 setup (not present on desktop cards) */
chipio_set_conn_rate(codec, MEM_CONNID_MICIN2, SR_96_000);
chipio_set_conn_rate(codec, MEM_CONNID_MICOUT2, SR_96_000);
+ if (spec->quirk == QUIRK_R3DI)
+ chipio_set_conn_rate(codec, 0x0F, SR_96_000);
tmp = FLOAT_ZERO;
dspio_set_uint_param(codec, 0x80, 0x01, tmp);
-
}
/*
@@ -6333,7 +6922,6 @@ static void sbz_connect_streams(struct hda_codec *codec)
codec_dbg(codec, "Connect Streams exited, mutex released.\n");
mutex_unlock(&spec->chipio_mutex);
-
}
/*
@@ -6360,19 +6948,29 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
chipio_set_stream_channels(codec, 0x0C, 6);
chipio_set_stream_control(codec, 0x0C, 1);
/* No clue what these control */
- chipio_write_no_mutex(codec, 0x190030, 0x0001e0c0);
- chipio_write_no_mutex(codec, 0x190034, 0x0001e1c1);
- chipio_write_no_mutex(codec, 0x190038, 0x0001e4c2);
- chipio_write_no_mutex(codec, 0x19003c, 0x0001e5c3);
- chipio_write_no_mutex(codec, 0x190040, 0x0001e2c4);
- chipio_write_no_mutex(codec, 0x190044, 0x0001e3c5);
- chipio_write_no_mutex(codec, 0x190048, 0x0001e8c6);
- chipio_write_no_mutex(codec, 0x19004c, 0x0001e9c7);
- chipio_write_no_mutex(codec, 0x190050, 0x0001ecc8);
- chipio_write_no_mutex(codec, 0x190054, 0x0001edc9);
- chipio_write_no_mutex(codec, 0x190058, 0x0001eaca);
- chipio_write_no_mutex(codec, 0x19005c, 0x0001ebcb);
-
+ if (spec->quirk == QUIRK_SBZ) {
+ chipio_write_no_mutex(codec, 0x190030, 0x0001e0c0);
+ chipio_write_no_mutex(codec, 0x190034, 0x0001e1c1);
+ chipio_write_no_mutex(codec, 0x190038, 0x0001e4c2);
+ chipio_write_no_mutex(codec, 0x19003c, 0x0001e5c3);
+ chipio_write_no_mutex(codec, 0x190040, 0x0001e2c4);
+ chipio_write_no_mutex(codec, 0x190044, 0x0001e3c5);
+ chipio_write_no_mutex(codec, 0x190048, 0x0001e8c6);
+ chipio_write_no_mutex(codec, 0x19004c, 0x0001e9c7);
+ chipio_write_no_mutex(codec, 0x190050, 0x0001ecc8);
+ chipio_write_no_mutex(codec, 0x190054, 0x0001edc9);
+ chipio_write_no_mutex(codec, 0x190058, 0x0001eaca);
+ chipio_write_no_mutex(codec, 0x19005c, 0x0001ebcb);
+ } else if (spec->quirk == QUIRK_ZXR) {
+ chipio_write_no_mutex(codec, 0x190038, 0x000140c2);
+ chipio_write_no_mutex(codec, 0x19003c, 0x000141c3);
+ chipio_write_no_mutex(codec, 0x190040, 0x000150c4);
+ chipio_write_no_mutex(codec, 0x190044, 0x000151c5);
+ chipio_write_no_mutex(codec, 0x190050, 0x000142c8);
+ chipio_write_no_mutex(codec, 0x190054, 0x000143c9);
+ chipio_write_no_mutex(codec, 0x190058, 0x000152ca);
+ chipio_write_no_mutex(codec, 0x19005c, 0x000153cb);
+ }
chipio_write_no_mutex(codec, 0x19042c, 0x00000001);
codec_dbg(codec, "Startup Data exited, mutex released.\n");
@@ -6380,35 +6978,56 @@ static void sbz_chipio_startup_data(struct hda_codec *codec)
}
/*
- * Sound Blaster Z uses these after DSP is loaded. Weird SCP commands
- * without a 0x20 source like normal.
+ * Custom DSP SCP commands where the src value is 0x00 instead of 0x20. This is
+ * done after the DSP is loaded.
*/
-static void sbz_dsp_scp_startup(struct hda_codec *codec)
+static void ca0132_alt_dsp_scp_startup(struct hda_codec *codec)
{
- unsigned int tmp;
-
- tmp = 0x00000003;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
-
- tmp = 0x00000001;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
-
- tmp = 0x00000004;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000005;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
-
- tmp = 0x00000000;
- dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp, i;
+ /*
+ * These have to be run twice, or else the mic works inconsistently.
+ * It's not clear why, but multiple tests have confirmed it.
+ */
+ for (i = 0; i < 2; i++) {
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ case QUIRK_AE5:
+ tmp = 0x00000003;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
+ tmp = 0x00000001;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
+ tmp = 0x00000004;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000005;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ break;
+ case QUIRK_R3D:
+ case QUIRK_R3DI:
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0A, tmp);
+ tmp = 0x00000001;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0B, tmp);
+ tmp = 0x00000004;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000005;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ tmp = 0x00000000;
+ dspio_set_uint_param_no_source(codec, 0x80, 0x0C, tmp);
+ break;
+ }
+ msleep(100);
+ }
}
-static void sbz_dsp_initial_mic_setup(struct hda_codec *codec)
+static void ca0132_alt_dsp_initial_mic_setup(struct hda_codec *codec)
{
+ struct ca0132_spec *spec = codec->spec;
unsigned int tmp;
chipio_set_stream_control(codec, 0x03, 0);
@@ -6423,8 +7042,161 @@ static void sbz_dsp_initial_mic_setup(struct hda_codec *codec)
chipio_set_stream_control(codec, 0x03, 1);
chipio_set_stream_control(codec, 0x04, 1);
- chipio_write(codec, 0x18b098, 0x0000000c);
- chipio_write(codec, 0x18b09C, 0x0000000c);
+ switch (spec->quirk) {
+ case QUIRK_SBZ:
+ chipio_write(codec, 0x18b098, 0x0000000c);
+ chipio_write(codec, 0x18b09C, 0x0000000c);
+ break;
+ case QUIRK_AE5:
+ chipio_write(codec, 0x18b098, 0x0000000c);
+ chipio_write(codec, 0x18b09c, 0x0000004c);
+ break;
+ }
+}
+
+static void ae5_post_dsp_register_set(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ chipio_8051_write_direct(codec, 0x93, 0x10);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x44);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc2);
+
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2b, 0x3f);
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x3f);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+}
+
+static void ae5_post_dsp_param_setup(struct hda_codec *codec)
+{
+ /*
+ * Param3 in the 8051's memory is represented by the ASCII string 'mch',
+ * which seems to stand for 'multichannel'. This is also mentioned in the
+ * AE-5's registry values in Windows.
+ */
+ chipio_set_control_param(codec, 3, 0);
+ /*
+ * I believe ASI is 'audio serial interface' and that it's used to
+ * change colors on the external LED strip connected to the AE-5.
+ */
+ chipio_set_control_flag(codec, CONTROL_FLAG_ASI_96KHZ, 1);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x724, 0x83);
+ chipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x92);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_HIGH, 0xfa);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_DATA_WRITE, 0x22);
+}
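
The ADDRESS_LOW / ADDRESS_HIGH / DATA_WRITE verb triple that closes this
function looks like an indirect write into the 8051's address space (0x22 to
address 0xfa92). A hypothetical helper wrapping that pattern, as a sketch
only:

	static void chipio_8051_mem_write(struct hda_codec *codec, u16 addr,
					  u8 data)
	{
		snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
				    VENDOR_CHIPIO_8051_ADDRESS_LOW, addr & 0xff);
		snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
				    VENDOR_CHIPIO_8051_ADDRESS_HIGH,
				    (addr >> 8) & 0xff);
		snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
				    VENDOR_CHIPIO_8051_DATA_WRITE, data);
	}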
+
+static void ae5_post_dsp_pll_setup(struct hda_codec *codec)
+{
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x41);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc8);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x45);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xcc);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x40);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xcb);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x43);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc7);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x51);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0x8d);
+}
+
+static void ae5_post_dsp_stream_setup(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ mutex_lock(&spec->chipio_mutex);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, 0x725, 0x81);
+
+ chipio_set_conn_rate_no_mutex(codec, 0x70, SR_96_000);
+
+ chipio_set_stream_channels(codec, 0x0C, 6);
+ chipio_set_stream_control(codec, 0x0C, 1);
+
+ chipio_set_stream_source_dest(codec, 0x5, 0x43, 0x0);
+
+ chipio_set_stream_source_dest(codec, 0x18, 0x9, 0xd0);
+ chipio_set_conn_rate_no_mutex(codec, 0xd0, SR_96_000);
+ chipio_set_stream_channels(codec, 0x18, 6);
+ chipio_set_stream_control(codec, 0x18, 1);
+
+ chipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 4);
+
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x43);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc7);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x01, 0x80);
+
+ mutex_unlock(&spec->chipio_mutex);
+}
+
+static void ae5_post_dsp_startup_data(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ mutex_lock(&spec->chipio_mutex);
+
+ chipio_write_no_mutex(codec, 0x189000, 0x0001f101);
+ chipio_write_no_mutex(codec, 0x189004, 0x0001f101);
+ chipio_write_no_mutex(codec, 0x189024, 0x00014004);
+ chipio_write_no_mutex(codec, 0x189028, 0x0002000f);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);
+ chipio_set_control_param_no_mutex(codec, CONTROL_PARAM_ASI, 7);
+ ca0113_mmio_command_set(codec, 0x48, 0x0b, 0x12);
+ ca0113_mmio_command_set(codec, 0x48, 0x04, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x06, 0x48);
+ ca0113_mmio_command_set(codec, 0x48, 0x0a, 0x05);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 1, true);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x80);
+
+ chipio_write_no_mutex(codec, 0x18b03c, 0x00000012);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x0f, 0x00);
+ ca0113_mmio_command_set(codec, 0x48, 0x10, 0x00);
+
+ mutex_unlock(&spec->chipio_mutex);
}
/*
@@ -6485,9 +7257,8 @@ static void r3d_setup_defaults(struct hda_codec *codec)
if (spec->dsp_state != DSP_DOWNLOADED)
return;
- r3d_dsp_scp_startup(codec);
-
- r3d_dsp_initial_mic_setup(codec);
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
/*remove DSP headroom*/
tmp = FLOAT_ZERO;
@@ -6523,19 +7294,16 @@ static void r3d_setup_defaults(struct hda_codec *codec)
static void sbz_setup_defaults(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- unsigned int tmp, stream_format;
+ unsigned int tmp;
int num_fx;
int idx, i;
if (spec->dsp_state != DSP_DOWNLOADED)
return;
- sbz_dsp_scp_startup(codec);
-
- sbz_init_analog_mics(codec);
-
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
sbz_connect_streams(codec);
-
sbz_chipio_startup_data(codec);
chipio_set_stream_control(codec, 0x03, 1);
@@ -6561,8 +7329,7 @@ static void sbz_setup_defaults(struct hda_codec *codec)
/* Set speaker source? */
dspio_set_uint_param(codec, 0x32, 0x00, tmp);
- sbz_dsp_initial_mic_setup(codec);
-
+ ca0132_alt_dsp_initial_mic_setup(codec);
/* out, in effects + voicefx */
num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
@@ -6575,23 +7342,74 @@ static void sbz_setup_defaults(struct hda_codec *codec)
}
}
- /*
- * Have to make a stream to bind the sound output to, otherwise
- * you'll get dead audio. Before I did this, it would bind to an
- * audio input, and would never work
- */
- stream_format = snd_hdac_calc_stream_format(48000, 2,
- SNDRV_PCM_FORMAT_S32_LE, 32, 0);
+ ca0132_alt_create_dummy_stream(codec);
+}
- snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
- 0, stream_format);
+/*
+ * Setup default parameters for the Sound BlasterX AE-5 DSP.
+ */
+static void ae5_setup_defaults(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ unsigned int tmp;
+ int num_fx;
+ int idx, i;
- snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
+ if (spec->dsp_state != DSP_DOWNLOADED)
+ return;
- snd_hda_codec_setup_stream(codec, spec->dacs[0], spec->dsp_stream_id,
- 0, stream_format);
+ ca0132_alt_dsp_scp_startup(codec);
+ ca0132_alt_init_analog_mics(codec);
+ chipio_set_stream_control(codec, 0x03, 1);
+ chipio_set_stream_control(codec, 0x04, 1);
- snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
+ /* New, unknown SCP reqs */
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x29, tmp);
+ dspio_set_uint_param(codec, 0x96, 0x2a, tmp);
+ dspio_set_uint_param(codec, 0x80, 0x0d, tmp);
+ dspio_set_uint_param(codec, 0x80, 0x0e, tmp);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2e, 0x3f);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_command_set(codec, 0x30, 0x28, 0x00);
+
+ /* Internal loopback off */
+ tmp = FLOAT_ONE;
+ dspio_set_uint_param(codec, 0x37, 0x08, tmp);
+ dspio_set_uint_param(codec, 0x37, 0x10, tmp);
+
+ /*remove DSP headroom*/
+ tmp = FLOAT_ZERO;
+ dspio_set_uint_param(codec, 0x96, 0x3C, tmp);
+
+ /* set WUH source */
+ tmp = FLOAT_TWO;
+ dspio_set_uint_param(codec, 0x31, 0x00, tmp);
+ chipio_set_conn_rate(codec, MEM_CONNID_WUH, SR_48_000);
+
+ /* Set speaker source? */
+ dspio_set_uint_param(codec, 0x32, 0x00, tmp);
+
+ ca0132_alt_dsp_initial_mic_setup(codec);
+ ae5_post_dsp_register_set(codec);
+ ae5_post_dsp_param_setup(codec);
+ ae5_post_dsp_pll_setup(codec);
+ ae5_post_dsp_stream_setup(codec);
+ ae5_post_dsp_startup_data(codec);
+
+ /* out, in effects + voicefx */
+ num_fx = OUT_EFFECTS_COUNT + IN_EFFECTS_COUNT + 1;
+ for (idx = 0; idx < num_fx; idx++) {
+ for (i = 0; i <= ca0132_effects[idx].params; i++) {
+ dspio_set_uint_param(codec,
+ ca0132_effects[idx].mid,
+ ca0132_effects[idx].reqs[i],
+ ca0132_effects[idx].def_vals[i]);
+ }
+ }
+
+ ca0132_alt_create_dummy_stream(codec);
}
/*
@@ -6673,12 +7491,14 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
*/
switch (spec->quirk) {
case QUIRK_SBZ:
- if (request_firmware(&fw_entry, SBZ_EFX_FILE,
+ case QUIRK_R3D:
+ case QUIRK_AE5:
+ if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
codec->card->dev) != 0) {
- codec_dbg(codec, "SBZ alt firmware not detected. ");
+ codec_dbg(codec, "Desktop firmware not found.");
spec->alt_firmware_present = false;
} else {
- codec_dbg(codec, "Sound Blaster Z firmware selected.");
+ codec_dbg(codec, "Desktop firmware selected.");
spec->alt_firmware_present = true;
}
break;
@@ -6921,6 +7741,14 @@ static void ca0132_init_chip(struct hda_codec *codec)
spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID] = 1;
spec->effects_switch[CRYSTAL_VOICE - EFFECT_START_NID] = 0;
+ /*
+ * The ZxR doesn't have a front panel header, and its line-in is on
+ * the daughter board. So, there is no input enum control, and we need
+ * to make sure that spec->in_enum_val is set properly.
+ */
+ if (spec->quirk == QUIRK_ZXR)
+ spec->in_enum_val = REAR_MIC;
+
#ifdef ENABLE_TUNING_CONTROLS
ca0132_init_tuning_defaults(codec);
#endif
@@ -6948,11 +7776,11 @@ static void sbz_region2_exit(struct hda_codec *codec)
for (i = 0; i < 8; i++)
writeb(0xb3, spec->mem_base + 0x304);
- ca0132_mmio_gpio_set(codec, 0, false);
- ca0132_mmio_gpio_set(codec, 1, false);
- ca0132_mmio_gpio_set(codec, 4, true);
- ca0132_mmio_gpio_set(codec, 5, false);
- ca0132_mmio_gpio_set(codec, 7, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 1, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 7, false);
}
static void sbz_set_pin_ctl_default(struct hda_codec *codec)
@@ -6995,6 +7823,16 @@ static void sbz_gpio_shutdown_commands(struct hda_codec *codec, int dir,
AC_VERB_SET_GPIO_DATA, data);
}
+static void zxr_dbpro_power_state_shutdown(struct hda_codec *codec)
+{
+ hda_nid_t pins[7] = {0x05, 0x0c, 0x09, 0x0e, 0x08, 0x11, 0x01};
+ unsigned int i;
+
+ for (i = 0; i < 7; i++)
+ snd_hda_codec_write(codec, pins[i], 0,
+ AC_VERB_SET_POWER_STATE, 0x03);
+}
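
The 0x03 written with AC_VERB_SET_POWER_STATE is the D3 (lowest-power) state;
for reference, the HDA power-state values, shown as a sketch (the kernel
defines these as AC_PWRST_D0 through AC_PWRST_D3):

	enum {
		HDA_PWR_D0 = 0x00,	/* fully on */
		HDA_PWR_D1 = 0x01,
		HDA_PWR_D2 = 0x02,
		HDA_PWR_D3 = 0x03,	/* off; used above for shutdown */
	};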
+
static void sbz_exit_chip(struct hda_codec *codec)
{
chipio_set_stream_control(codec, 0x03, 0);
@@ -7037,6 +7875,61 @@ static void r3d_exit_chip(struct hda_codec *codec)
snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x5b);
}
+static void ae5_exit_chip(struct hda_codec *codec)
+{
+ chipio_set_stream_control(codec, 0x03, 0);
+ chipio_set_stream_control(codec, 0x04, 0);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+ ca0113_mmio_command_set(codec, 0x30, 0x30, 0x00);
+ ca0113_mmio_command_set(codec, 0x30, 0x2b, 0x00);
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x00);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 1, false);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
+
+ chipio_set_control_param(codec, CONTROL_PARAM_ASI, 0);
+
+ chipio_set_stream_control(codec, 0x18, 0);
+ chipio_set_stream_control(codec, 0x0c, 0);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x724, 0x83);
+}
+
+static void zxr_exit_chip(struct hda_codec *codec)
+{
+ chipio_set_stream_control(codec, 0x03, 0);
+ chipio_set_stream_control(codec, 0x04, 0);
+ chipio_set_stream_control(codec, 0x14, 0);
+ chipio_set_stream_control(codec, 0x0C, 0);
+
+ chipio_set_conn_rate(codec, 0x41, SR_192_000);
+ chipio_set_conn_rate(codec, 0x91, SR_192_000);
+
+ chipio_write(codec, 0x18a020, 0x00000083);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x793, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x794, 0x53);
+
+ ca0132_clear_unsolicited(codec);
+ sbz_set_pin_ctl_default(codec);
+ snd_hda_codec_write(codec, 0x0B, 0, AC_VERB_SET_EAPD_BTLENABLE, 0x00);
+
+ ca0113_mmio_gpio_set(codec, 5, false);
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+ ca0113_mmio_gpio_set(codec, 0, false);
+ ca0113_mmio_gpio_set(codec, 4, true);
+ ca0113_mmio_gpio_set(codec, 0, true);
+ ca0113_mmio_gpio_set(codec, 5, true);
+ ca0113_mmio_gpio_set(codec, 2, false);
+ ca0113_mmio_gpio_set(codec, 3, false);
+}
+
static void ca0132_exit_chip(struct hda_codec *codec)
{
/* put any chip cleanup stuffs here. */
@@ -7140,11 +8033,6 @@ static void sbz_pre_dsp_setup(struct hda_codec *codec)
writel(0x00820680, spec->mem_base + 0x01C);
writel(0x00820680, spec->mem_base + 0x01C);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfc);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfd);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfe);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xff);
-
chipio_write(codec, 0x18b0a4, 0x000000c2);
snd_hda_codec_write(codec, 0x11, 0,
@@ -7153,12 +8041,6 @@ static void sbz_pre_dsp_setup(struct hda_codec *codec)
static void r3d_pre_dsp_setup(struct hda_codec *codec)
{
-
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfc);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfd);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xfe);
- snd_hda_codec_write(codec, 0x15, 0, 0xd00, 0xff);
-
chipio_write(codec, 0x18b0a4, 0x000000c2);
snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
@@ -7205,23 +8087,116 @@ static void ca0132_mmio_init(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- writel(0x00000000, spec->mem_base + 0x400);
- writel(0x00000000, spec->mem_base + 0x408);
- writel(0x00000000, spec->mem_base + 0x40C);
- writel(0x00880680, spec->mem_base + 0x01C);
- writel(0x00000083, spec->mem_base + 0xC0C);
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x400);
+ else
+ writel(0x00000000, spec->mem_base + 0x400);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x408);
+ else
+ writel(0x00000000, spec->mem_base + 0x408);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0x40c);
+ else
+ writel(0x00000000, spec->mem_base + 0x40C);
+
+ if (spec->quirk == QUIRK_ZXR)
+ writel(0x00880640, spec->mem_base + 0x01C);
+ else
+ writel(0x00880680, spec->mem_base + 0x01C);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000080, spec->mem_base + 0xC0C);
+ else
+ writel(0x00000083, spec->mem_base + 0xC0C);
+
writel(0x00000030, spec->mem_base + 0xC00);
writel(0x00000000, spec->mem_base + 0xC04);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000000, spec->mem_base + 0xC0C);
+ else
+ writel(0x00000003, spec->mem_base + 0xC0C);
+
writel(0x00000003, spec->mem_base + 0xC0C);
writel(0x00000003, spec->mem_base + 0xC0C);
writel(0x00000003, spec->mem_base + 0xC0C);
- writel(0x00000003, spec->mem_base + 0xC0C);
- writel(0x000000C1, spec->mem_base + 0xC08);
+
+ if (spec->quirk == QUIRK_AE5)
+ writel(0x00000001, spec->mem_base + 0xC08);
+ else
+ writel(0x000000C1, spec->mem_base + 0xC08);
+
writel(0x000000F1, spec->mem_base + 0xC08);
writel(0x00000001, spec->mem_base + 0xC08);
writel(0x000000C7, spec->mem_base + 0xC08);
writel(0x000000C1, spec->mem_base + 0xC08);
writel(0x00000080, spec->mem_base + 0xC04);
+
+ if (spec->quirk == QUIRK_AE5) {
+ writel(0x00000000, spec->mem_base + 0x42c);
+ writel(0x00000000, spec->mem_base + 0x46c);
+ writel(0x00000000, spec->mem_base + 0x4ac);
+ writel(0x00000000, spec->mem_base + 0x4ec);
+ writel(0x00000000, spec->mem_base + 0x43c);
+ writel(0x00000000, spec->mem_base + 0x47c);
+ writel(0x00000000, spec->mem_base + 0x4bc);
+ writel(0x00000000, spec->mem_base + 0x4fc);
+ writel(0x00000600, spec->mem_base + 0x100);
+ writel(0x00000014, spec->mem_base + 0x410);
+ writel(0x0000060f, spec->mem_base + 0x100);
+ writel(0x0000070f, spec->mem_base + 0x100);
+ writel(0x00000aff, spec->mem_base + 0x830);
+ writel(0x00000000, spec->mem_base + 0x86c);
+ writel(0x0000006b, spec->mem_base + 0x800);
+ writel(0x00000001, spec->mem_base + 0x86c);
+ writel(0x0000006b, spec->mem_base + 0x800);
+ writel(0x00000057, spec->mem_base + 0x804);
+ writel(0x00800000, spec->mem_base + 0x20c);
+ }
+}
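
The repeated QUIRK_AE5/default writel() pairs above could arguably be folded
into a single helper; a purely illustrative sketch of that refactor:

	static void quirk_writel(struct ca0132_spec *spec, unsigned int offset,
				 u32 ae5_val, u32 def_val)
	{
		writel(spec->quirk == QUIRK_AE5 ? ae5_val : def_val,
		       spec->mem_base + offset);
	}

	/* e.g. quirk_writel(spec, 0x400, 0x00000001, 0x00000000); */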
+
+/*
+ * This function writes to some SFRs, does some region2 writes, and then
+ * eventually resets the codec with the 0x7ff verb. It's not quite clear
+ * why it does what it does.
+ */
+static void ae5_register_set(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ chipio_8051_write_direct(codec, 0x93, 0x10);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x44);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0xc2);
+
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0f, spec->mem_base + 0x304);
+ writeb(0x0e, spec->mem_base + 0x100);
+ writeb(0x1f, spec->mem_base + 0x304);
+ writeb(0x0c, spec->mem_base + 0x100);
+ writeb(0x3f, spec->mem_base + 0x304);
+ writeb(0x08, spec->mem_base + 0x100);
+ writeb(0x7f, spec->mem_base + 0x304);
+ writeb(0x00, spec->mem_base + 0x100);
+ writeb(0xff, spec->mem_base + 0x304);
+
+ ca0113_mmio_command_set(codec, 0x30, 0x2d, 0x3f);
+
+ chipio_8051_write_direct(codec, 0x90, 0x00);
+ chipio_8051_write_direct(codec, 0x90, 0x10);
+
+ ca0113_mmio_command_set(codec, 0x48, 0x07, 0x83);
+
+ chipio_write(codec, 0x18b0a4, 0x000000c2);
+
+ snd_hda_codec_write(codec, 0x01, 0, 0x7ff, 0x00);
+ snd_hda_codec_write(codec, 0x01, 0, 0x7ff, 0x00);
}
/*
@@ -7257,6 +8232,21 @@ static void ca0132_alt_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->chip_init_verbs);
snd_hda_sequence_write(codec, spec->desktop_init_verbs);
break;
+ case QUIRK_AE5:
+ ca0132_gpio_init(codec);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_8051_ADDRESS_LOW, 0x49);
+ snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0,
+ VENDOR_CHIPIO_PLL_PMU_WRITE, 0x88);
+ chipio_write(codec, 0x18b030, 0x00000020);
+ snd_hda_sequence_write(codec, spec->chip_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
+ ca0113_mmio_command_set(codec, 0x30, 0x32, 0x3f);
+ break;
+ case QUIRK_ZXR:
+ snd_hda_sequence_write(codec, spec->chip_init_verbs);
+ snd_hda_sequence_write(codec, spec->desktop_init_verbs);
+ break;
}
}
@@ -7298,6 +8288,9 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_power_up_pm(codec);
+ if (spec->quirk == QUIRK_AE5)
+ ae5_register_set(codec);
+
ca0132_init_unsol(codec);
ca0132_init_params(codec);
ca0132_init_flags(codec);
@@ -7317,8 +8310,12 @@ static int ca0132_init(struct hda_codec *codec)
r3d_setup_defaults(codec);
break;
case QUIRK_SBZ:
+ case QUIRK_ZXR:
sbz_setup_defaults(codec);
break;
+ case QUIRK_AE5:
+ ae5_setup_defaults(codec);
+ break;
default:
ca0132_setup_defaults(codec);
ca0132_init_analog_mic2(codec);
@@ -7372,6 +8369,21 @@ static int ca0132_init(struct hda_codec *codec)
return 0;
}
+static int dbpro_init(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
+ unsigned int i;
+
+ init_output(codec, cfg->dig_out_pins[0], spec->dig_out);
+ init_input(codec, cfg->dig_in_pin, spec->dig_in);
+
+ for (i = 0; i < spec->num_inputs; i++)
+ init_input(codec, spec->input_pins[i], spec->adcs[i]);
+
+ return 0;
+}
+
static void ca0132_free(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
@@ -7382,9 +8394,15 @@ static void ca0132_free(struct hda_codec *codec)
case QUIRK_SBZ:
sbz_exit_chip(codec);
break;
+ case QUIRK_ZXR:
+ zxr_exit_chip(codec);
+ break;
case QUIRK_R3D:
r3d_exit_chip(codec);
break;
+ case QUIRK_AE5:
+ ae5_exit_chip(codec);
+ break;
case QUIRK_R3DI:
r3di_gpio_shutdown(codec);
break;
@@ -7400,6 +8418,16 @@ static void ca0132_free(struct hda_codec *codec)
kfree(codec->spec);
}
+static void dbpro_free(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ zxr_dbpro_power_state_shutdown(codec);
+
+ kfree(spec->spec_init_verbs);
+ kfree(codec->spec);
+}
+
static void ca0132_reboot_notify(struct hda_codec *codec)
{
codec->patch_ops.free(codec);
@@ -7414,6 +8442,13 @@ static const struct hda_codec_ops ca0132_patch_ops = {
.reboot_notify = ca0132_reboot_notify,
};
+static const struct hda_codec_ops dbpro_patch_ops = {
+ .build_controls = dbpro_build_controls,
+ .build_pcms = dbpro_build_pcms,
+ .init = dbpro_init,
+ .free = dbpro_free,
+};
+
static void ca0132_config(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
@@ -7432,9 +8467,33 @@ static void ca0132_config(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_ALIENWARE:
- codec_dbg(codec, "ca0132_config: QUIRK_ALIENWARE applied.\n");
+ codec_dbg(codec, "%s: QUIRK_ALIENWARE applied.\n", __func__);
snd_hda_apply_pincfgs(codec, alienware_pincfgs);
+ break;
+ case QUIRK_SBZ:
+ codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, sbz_pincfgs);
+ break;
+ case QUIRK_ZXR:
+ codec_dbg(codec, "%s: QUIRK_ZXR applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, zxr_pincfgs);
+ break;
+ case QUIRK_R3D:
+ codec_dbg(codec, "%s: QUIRK_R3D applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3d_pincfgs);
+ break;
+ case QUIRK_R3DI:
+ codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ break;
+ case QUIRK_AE5:
+ codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__);
+ snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ break;
+ }
+ switch (spec->quirk) {
+ case QUIRK_ALIENWARE:
spec->num_outputs = 2;
spec->out_pins[0] = 0x0b; /* speaker out */
spec->out_pins[1] = 0x0f;
@@ -7454,15 +8513,6 @@ static void ca0132_config(struct hda_codec *codec)
break;
case QUIRK_SBZ:
case QUIRK_R3D:
- if (spec->quirk == QUIRK_SBZ) {
- codec_dbg(codec, "%s: QUIRK_SBZ applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, sbz_pincfgs);
- }
- if (spec->quirk == QUIRK_R3D) {
- codec_dbg(codec, "%s: QUIRK_R3D applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, r3d_pincfgs);
- }
-
spec->num_outputs = 2;
spec->out_pins[0] = 0x0B; /* Line out */
spec->out_pins[1] = 0x0F; /* Rear headphone out */
@@ -7487,10 +8537,62 @@ static void ca0132_config(struct hda_codec *codec)
spec->multiout.dig_out_nid = spec->dig_out;
spec->dig_in = 0x09;
break;
- case QUIRK_R3DI:
- codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
- snd_hda_apply_pincfgs(codec, r3di_pincfgs);
+ case QUIRK_ZXR:
+ spec->num_outputs = 2;
+ spec->out_pins[0] = 0x0B; /* Line out */
+ spec->out_pins[1] = 0x0F; /* Rear headphone out */
+ spec->out_pins[2] = 0x10; /* Center/LFE */
+ spec->out_pins[3] = 0x11; /* Rear surround */
+ spec->shared_out_nid = 0x2;
+ spec->unsol_tag_hp = spec->out_pins[1];
+ spec->unsol_tag_front_hp = spec->out_pins[2];
+ spec->adcs[0] = 0x7; /* Rear Mic / Line-in */
+ spec->adcs[1] = 0x8; /* Not connected, no front mic */
+ spec->adcs[2] = 0xa; /* what u hear */
+
+ spec->num_inputs = 2;
+ spec->input_pins[0] = 0x12; /* Rear Mic / Line-in */
+ spec->input_pins[1] = 0x13; /* What U Hear */
+ spec->shared_mic_nid = 0x7;
+ spec->unsol_tag_amic1 = spec->input_pins[0];
+ break;
+ case QUIRK_ZXR_DBPRO:
+ spec->adcs[0] = 0x8; /* ZxR DBPro Aux In */
+
+ spec->num_inputs = 1;
+ spec->input_pins[0] = 0x11; /* RCA Line-in */
+
+ spec->dig_out = 0x05;
+ spec->multiout.dig_out_nid = spec->dig_out;
+
+ spec->dig_in = 0x09;
+ break;
+ case QUIRK_AE5:
+ spec->num_outputs = 2;
+ spec->out_pins[0] = 0x0B; /* Line out */
+ spec->out_pins[1] = 0x11; /* Rear headphone out */
+ spec->out_pins[2] = 0x10; /* Front Headphone / Center/LFE*/
+ spec->out_pins[3] = 0x0F; /* Rear surround */
+ spec->shared_out_nid = 0x2;
+ spec->unsol_tag_hp = spec->out_pins[1];
+ spec->unsol_tag_front_hp = spec->out_pins[2];
+
+ spec->adcs[0] = 0x7; /* Rear Mic / Line-in */
+ spec->adcs[1] = 0x8; /* Front Mic, but only if no DSP */
+ spec->adcs[2] = 0xa; /* what u hear */
+
+ spec->num_inputs = 2;
+ spec->input_pins[0] = 0x12; /* Rear Mic / Line-in */
+ spec->input_pins[1] = 0x13; /* What U Hear */
+ spec->shared_mic_nid = 0x7;
+ spec->unsol_tag_amic1 = spec->input_pins[0];
+
+ /* SPDIF I/O */
+ spec->dig_out = 0x05;
+ spec->multiout.dig_out_nid = spec->dig_out;
+ break;
+ case QUIRK_R3DI:
spec->num_outputs = 2;
spec->out_pins[0] = 0x0B; /* Line out */
spec->out_pins[1] = 0x0F; /* Rear headphone out */
@@ -7547,7 +8649,11 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
struct ca0132_spec *spec = codec->spec;
spec->chip_init_verbs = ca0132_init_verbs0;
- if (spec->quirk == QUIRK_SBZ || spec->quirk == QUIRK_R3D)
+ /*
+ * Since all desktop cards use pci_mmio, that flag can be used to decide
+ * whether or not to apply these verbs, instead of a separate bool.
+ */
+ if (spec->use_pci_mmio)
spec->desktop_init_verbs = ca0132_init_verbs1;
spec->spec_init_verbs = kcalloc(NUM_SPEC_VERBS,
sizeof(struct hda_verb),
@@ -7579,6 +8685,29 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
return 0;
}
+/*
+ * The Sound Blaster ZxR shares the same PCI subsystem ID as some regular
+ * Sound Blaster Z cards. However, they have different HDA codec subsystem
+ * IDs. So, we check for the ZxR's subsystem ID, as well as the DBPro
+ * daughter board's ID.
+ */
+static void sbz_detect_quirk(struct hda_codec *codec)
+{
+ struct ca0132_spec *spec = codec->spec;
+
+ switch (codec->core.subsystem_id) {
+ case 0x11020033:
+ spec->quirk = QUIRK_ZXR;
+ break;
+ case 0x1102003f:
+ spec->quirk = QUIRK_ZXR_DBPRO;
+ break;
+ default:
+ spec->quirk = QUIRK_SBZ;
+ break;
+ }
+}
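
For reference, the HDA codec subsystem ID packs the subsystem vendor in the
upper 16 bits and the subsystem device in the lower 16, so 0x11020033 is
vendor 0x1102 (Creative Labs), device 0x0033. A trivial sketch:

	static inline u16 subsys_vendor(u32 subsystem_id)
	{
		return subsystem_id >> 16;	/* 0x1102 == Creative Labs */
	}

	static inline u16 subsys_device(u32 subsystem_id)
	{
		return subsystem_id & 0xffff;	/* 0x0033 ZxR, 0x003f DBPro */
	}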
+
static int patch_ca0132(struct hda_codec *codec)
{
struct ca0132_spec *spec;
@@ -7593,10 +8722,6 @@ static int patch_ca0132(struct hda_codec *codec)
codec->spec = spec;
spec->codec = codec;
- codec->patch_ops = ca0132_patch_ops;
- codec->pcm_format_first = 1;
- codec->no_sticky_stream = 1;
-
/* Detect codec quirk */
quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
if (quirk)
@@ -7604,6 +8729,18 @@ static int patch_ca0132(struct hda_codec *codec)
else
spec->quirk = QUIRK_NONE;
+ if (spec->quirk == QUIRK_SBZ)
+ sbz_detect_quirk(codec);
+
+ if (spec->quirk == QUIRK_ZXR_DBPRO)
+ codec->patch_ops = dbpro_patch_ops;
+ else
+ codec->patch_ops = ca0132_patch_ops;
+
+ codec->pcm_format_first = 1;
+ codec->no_sticky_stream = 1;
+
+
spec->dsp_state = DSP_DOWNLOAD_INIT;
spec->num_mixers = 1;
@@ -7613,6 +8750,12 @@ static int patch_ca0132(struct hda_codec *codec)
spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Sound Blaster Z");
break;
+ case QUIRK_ZXR:
+ spec->mixers[0] = desktop_mixer;
+ snd_hda_codec_set_name(codec, "Sound Blaster ZxR");
+ break;
+ case QUIRK_ZXR_DBPRO:
+ break;
case QUIRK_R3D:
spec->mixers[0] = desktop_mixer;
snd_hda_codec_set_name(codec, "Recon3D");
@@ -7621,6 +8764,10 @@ static int patch_ca0132(struct hda_codec *codec)
spec->mixers[0] = r3di_mixer;
snd_hda_codec_set_name(codec, "Recon3Di");
break;
+ case QUIRK_AE5:
+ spec->mixers[0] = desktop_mixer;
+ snd_hda_codec_set_name(codec, "Sound BlasterX AE-5");
+ break;
default:
spec->mixers[0] = ca0132_mixer;
break;
@@ -7630,6 +8777,8 @@ static int patch_ca0132(struct hda_codec *codec)
switch (spec->quirk) {
case QUIRK_SBZ:
case QUIRK_R3D:
+ case QUIRK_AE5:
+ case QUIRK_ZXR:
spec->use_alt_controls = true;
spec->use_alt_functions = true;
spec->use_pci_mmio = true;
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a7f91be45194..64fa5a82bb9f 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/tlv.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index 1b2195dd2b26..52642ba3e2c0 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index cfd4e4f97f8f..950e02e71766 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -27,7 +27,7 @@
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
@@ -943,6 +943,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
+ SND_PCI_QUIRK(0x17aa, 0x3905, "Lenovo G50-30", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index cb587dce67a9..67099cbb6be2 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -41,7 +41,7 @@
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
#include <sound/hda_chmap.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_jack.h"
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3ac7ba9b342d..fa61674a5605 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -32,7 +32,7 @@
#include <linux/input.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
@@ -6843,6 +6843,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
{0x14, 0x90170110},
+ {0x19, 0x02a11030},
+ {0x1a, 0x02a11040},
+ {0x1b, 0x01011020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ {0x14, 0x90170110},
{0x19, 0x02a11020},
{0x1a, 0x02a11030},
{0x21, 0x0221101f}),
@@ -7738,6 +7744,8 @@ enum {
ALC662_FIXUP_ASUS_Nx50,
ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
ALC668_FIXUP_ASUS_Nx51,
+ ALC668_FIXUP_MIC_COEF,
+ ALC668_FIXUP_ASUS_G751,
ALC891_FIXUP_HEADSET_MODE,
ALC891_FIXUP_DELL_MIC_NO_PRESENCE,
ALC662_FIXUP_ACER_VERITON,
@@ -8007,6 +8015,23 @@ static const struct hda_fixup alc662_fixups[] = {
.chained = true,
.chain_id = ALC668_FIXUP_ASUS_Nx51_HEADSET_MODE,
},
+ [ALC668_FIXUP_MIC_COEF] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0xc3 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x4000 },
+ {}
+ },
+ },
+ [ALC668_FIXUP_ASUS_G751] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x16, 0x0421101f }, /* HP */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC668_FIXUP_MIC_COEF
+ },
[ALC891_FIXUP_HEADSET_MODE] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_headset_mode,
@@ -8080,6 +8105,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
+ SND_PCI_QUIRK(0x1043, 0x12ff, "ASUS G751", ALC668_FIXUP_ASUS_G751),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
@@ -8184,6 +8210,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
{.id = ALC668_FIXUP_DELL_XPS13, .name = "dell-xps13"},
{.id = ALC662_FIXUP_ASUS_Nx50, .name = "asus-nx50"},
{.id = ALC668_FIXUP_ASUS_Nx51, .name = "asus-nx51"},
+ {.id = ALC668_FIXUP_ASUS_G751, .name = "asus-g751"},
{.id = ALC891_FIXUP_HEADSET_MODE, .name = "alc891-headset"},
{.id = ALC891_FIXUP_DELL_MIC_NO_PRESENCE, .name = "alc891-headset-multi"},
{.id = ALC662_FIXUP_ACER_VERITON, .name = "acer-veriton"},
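
The new ALC668_FIXUP_ASUS_G751 entry above leans on fixup chaining: applying the pin fixup also triggers ALC668_FIXUP_MIC_COEF through .chained/.chain_id. Roughly how such a chain is walked, as a hedged sketch (the real logic lives in the HDA core, and HDA_FIXUP_FUNC handling is elided):

        static void example_apply_fixup_chain(struct hda_codec *codec,
                                              const struct hda_fixup *fixlist,
                                              int id)
        {
                while (id >= 0) {
                        const struct hda_fixup *fix = &fixlist[id];

                        switch (fix->type) {
                        case HDA_FIXUP_PINS:
                                snd_hda_apply_pincfgs(codec, fix->v.pins);
                                break;
                        case HDA_FIXUP_VERBS:
                                snd_hda_add_verbs(codec, fix->v.verbs);
                                break;
                        default:
                                break;  /* HDA_FIXUP_FUNC etc. elided */
                        }
                        id = fix->chained ? fix->chain_id : -1;
                }
        }
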
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
index f63acb1b965c..c49d25bcd7f2 100644
--- a/sound/pci/hda/patch_si3054.c
+++ b/sound/pci/hda/patch_si3054.c
@@ -27,7 +27,7 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <sound/core.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
/* si3054 verbs */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 046705b4691a..1b6ecfb01759 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -32,7 +32,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_beep.h"
@@ -77,6 +77,7 @@ enum {
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
+ STAC_ELO_VUPOINT_15MX,
STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
STAC_92HD73XX_ASUS_MOBO,
@@ -1879,6 +1880,18 @@ static void stac92hd73xx_fixup_no_jd(struct hda_codec *codec,
codec->no_jack_detect = 1;
}
+
+static void stac92hd73xx_disable_automute(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+
+ spec->gen.suppress_auto_mute = 1;
+}
+
static const struct hda_fixup stac92hd73xx_fixups[] = {
[STAC_92HD73XX_REF] = {
.type = HDA_FIXUP_FUNC,
@@ -1904,6 +1917,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd73xx_fixup_alienware_m17x,
},
+ [STAC_ELO_VUPOINT_15MX] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd73xx_disable_automute,
+ },
[STAC_92HD73XX_INTEL] = {
.type = HDA_FIXUP_PINS,
.v.pins = intel_dg45id_pin_configs,
@@ -1942,6 +1959,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
{ .id = STAC_DELL_EQ, .name = "dell-eq" },
{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+ { .id = STAC_ELO_VUPOINT_15MX, .name = "elo-vupoint-15mx" },
{ .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
{}
};
@@ -1991,6 +2009,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(0x1059, 0x1011,
+ "ELO VuPoint 15MX", STAC_ELO_VUPOINT_15MX),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927,
"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 6b9617aee0e6..9f6f13e25145 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -52,7 +52,7 @@
#include <linux/module.h>
#include <sound/core.h>
#include <sound/asoundef.h>
-#include "hda_codec.h"
+#include <sound/hda_codec.h>
#include "hda_local.h"
#include "hda_auto_parser.h"
#include "hda_jack.h"
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 5ee468d1aefe..ffddcdfe0c66 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -38,11 +38,6 @@
#include <sound/ac97_codec.h>
#include <sound/info.h>
#include <sound/initval.h>
-/* for 440MX workaround */
-#include <asm/pgtable.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455");
@@ -374,7 +369,6 @@ struct ichdev {
unsigned int ali_slot; /* ALI DMA slot */
struct ac97_pcm *pcm;
int pcm_open_flag;
- unsigned int page_attr_changed: 1;
unsigned int suspended: 1;
};
@@ -724,25 +718,6 @@ static void snd_intel8x0_setup_periods(struct intel8x0 *chip, struct ichdev *ich
iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
}
-#ifdef __i386__
-/*
- * Intel 82443MX running a 100MHz processor system bus has a hardware bug,
- * which aborts PCI busmaster for audio transfer. A workaround is to set
- * the pages as non-cached. For details, see the errata in
- * http://download.intel.com/design/chipsets/specupdt/24505108.pdf
- */
-static void fill_nocache(void *buf, int size, int nocache)
-{
- size = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- if (nocache)
- set_pages_uc(virt_to_page(buf), size);
- else
- set_pages_wb(virt_to_page(buf), size);
-}
-#else
-#define fill_nocache(buf, size, nocache) do { ; } while (0)
-#endif
-
/*
* Interrupt handler
*/
@@ -850,7 +825,7 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
val = ICH_IOCE | ICH_STARTBM;
@@ -858,7 +833,7 @@ static int snd_intel8x0_pcm_trigger(struct snd_pcm_substream *substream, int cmd
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
val = 0;
break;
@@ -892,7 +867,7 @@ static int snd_intel8x0_ali_trigger(struct snd_pcm_substream *substream, int cmd
switch (cmd) {
case SNDRV_PCM_TRIGGER_RESUME:
ichdev->suspended = 0;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -909,7 +884,7 @@ static int snd_intel8x0_ali_trigger(struct snd_pcm_substream *substream, int cmd
break;
case SNDRV_PCM_TRIGGER_SUSPEND:
ichdev->suspended = 1;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
/* pause */
@@ -938,23 +913,12 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
{
struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
- struct snd_pcm_runtime *runtime = substream->runtime;
int dbl = params_rate(hw_params) > 48000;
int err;
- if (chip->fix_nocache && ichdev->page_attr_changed) {
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 0); /* clear */
- ichdev->page_attr_changed = 0;
- }
err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
if (err < 0)
return err;
- if (chip->fix_nocache) {
- if (runtime->dma_area && ! ichdev->page_attr_changed) {
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 1);
- ichdev->page_attr_changed = 1;
- }
- }
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
@@ -974,17 +938,12 @@ static int snd_intel8x0_hw_params(struct snd_pcm_substream *substream,
static int snd_intel8x0_hw_free(struct snd_pcm_substream *substream)
{
- struct intel8x0 *chip = snd_pcm_substream_chip(substream);
struct ichdev *ichdev = get_ichdev(substream);
if (ichdev->pcm_open_flag) {
snd_ac97_pcm_close(ichdev->pcm);
ichdev->pcm_open_flag = 0;
}
- if (chip->fix_nocache && ichdev->page_attr_changed) {
- fill_nocache(substream->runtime->dma_area, substream->runtime->dma_bytes, 0);
- ichdev->page_attr_changed = 0;
- }
return snd_pcm_lib_free_pages(substream);
}
@@ -1510,6 +1469,9 @@ struct ich_pcm_table {
int ac97_idx;
};
+#define intel8x0_dma_type(chip) \
+ ((chip)->fix_nocache ? SNDRV_DMA_TYPE_DEV_UC : SNDRV_DMA_TYPE_DEV)
+
static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
struct ich_pcm_table *rec)
{
@@ -1540,7 +1502,7 @@ static int snd_intel8x0_pcm1(struct intel8x0 *chip, int device,
strcpy(pcm->name, chip->card->shortname);
chip->pcm[device] = pcm;
- snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+ snd_pcm_lib_preallocate_pages_for_all(pcm, intel8x0_dma_type(chip),
snd_dma_pci_data(chip->pci),
rec->prealloc_size, rec->prealloc_max_size);
@@ -2629,11 +2591,8 @@ static int snd_intel8x0_free(struct intel8x0 *chip)
__hw_end:
if (chip->irq >= 0)
free_irq(chip->irq, chip);
- if (chip->bdbars.area) {
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 0);
+ if (chip->bdbars.area)
snd_dma_free_pages(&chip->bdbars);
- }
if (chip->addr)
pci_iounmap(chip->pci, chip->addr);
if (chip->bmaddr)
@@ -2657,17 +2616,6 @@ static int intel8x0_suspend(struct device *dev)
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
for (i = 0; i < chip->pcm_devs; i++)
snd_pcm_suspend_all(chip->pcm[i]);
- /* clear nocache */
- if (chip->fix_nocache) {
- for (i = 0; i < chip->bdbars_count; i++) {
- struct ichdev *ichdev = &chip->ichd[i];
- if (ichdev->substream && ichdev->page_attr_changed) {
- struct snd_pcm_runtime *runtime = ichdev->substream->runtime;
- if (runtime->dma_area)
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 0);
- }
- }
- }
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_suspend(chip->ac97[i]);
if (chip->device_type == DEVICE_INTEL_ICH4)
@@ -2708,25 +2656,9 @@ static int intel8x0_resume(struct device *dev)
ICH_PCM_SPDIF_1011);
}
- /* refill nocache */
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 1);
-
for (i = 0; i < chip->ncodecs; i++)
snd_ac97_resume(chip->ac97[i]);
- /* refill nocache */
- if (chip->fix_nocache) {
- for (i = 0; i < chip->bdbars_count; i++) {
- struct ichdev *ichdev = &chip->ichd[i];
- if (ichdev->substream && ichdev->page_attr_changed) {
- struct snd_pcm_runtime *runtime = ichdev->substream->runtime;
- if (runtime->dma_area)
- fill_nocache(runtime->dma_area, runtime->dma_bytes, 1);
- }
- }
- }
-
/* resume status */
for (i = 0; i < chip->bdbars_count; i++) {
struct ichdev *ichdev = &chip->ichd[i];
@@ -3057,6 +2989,12 @@ static int snd_intel8x0_create(struct snd_card *card,
chip->inside_vm = snd_intel8x0_inside_vm(pci);
+ /*
+ * Intel 82443MX running a 100MHz processor system bus has a hardware
+ * bug, which aborts PCI busmaster for audio transfer. A workaround
+ * is to set the pages as non-cached. For details, see the errata in
+ * http://download.intel.com/design/chipsets/specupdt/24505108.pdf
+ */
if (pci->vendor == PCI_VENDOR_ID_INTEL &&
pci->device == PCI_DEVICE_ID_INTEL_440MX)
chip->fix_nocache = 1; /* enable workaround */
@@ -3128,7 +3066,7 @@ static int snd_intel8x0_create(struct snd_card *card,
/* allocate buffer descriptor lists */
/* the start of each lists must be aligned to 8 bytes */
- if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
+ if (snd_dma_alloc_pages(intel8x0_dma_type(chip), snd_dma_pci_data(pci),
chip->bdbars_count * sizeof(u32) * ICH_MAX_FRAGS * 2,
&chip->bdbars) < 0) {
snd_intel8x0_free(chip);
@@ -3137,9 +3075,6 @@ static int snd_intel8x0_create(struct snd_card *card,
}
/* tables must be aligned to 8 bytes here, but the kernel pages
are much bigger, so we don't care (on i386) */
- /* workaround for 440MX */
- if (chip->fix_nocache)
- fill_nocache(chip->bdbars.area, chip->bdbars.bytes, 1);
int_sta_masks = 0;
for (i = 0; i < chip->bdbars_count; i++) {
ichdev = &chip->ichd[i];
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index 943a726b1c1b..c84629190cba 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1171,16 +1171,6 @@ static int snd_intel8x0m_create(struct snd_card *card,
}
port_inited:
- if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
- KBUILD_MODNAME, chip)) {
- dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
- snd_intel8x0m_free(chip);
- return -EBUSY;
- }
- chip->irq = pci->irq;
- pci_set_master(pci);
- synchronize_irq(chip->irq);
-
/* initialize offsets */
chip->bdbars_count = 2;
tbl = intel_regs;
@@ -1224,11 +1214,21 @@ static int snd_intel8x0m_create(struct snd_card *card,
chip->int_sta_reg = ICH_REG_GLOB_STA;
chip->int_sta_mask = int_sta_masks;
+ pci_set_master(pci);
+
if ((err = snd_intel8x0m_chip_init(chip, 1)) < 0) {
snd_intel8x0m_free(chip);
return err;
}
+ if (request_irq(pci->irq, snd_intel8x0m_interrupt, IRQF_SHARED,
+ KBUILD_MODNAME, chip)) {
+ dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
+ snd_intel8x0m_free(chip);
+ return -EBUSY;
+ }
+ chip->irq = pci->irq;
+
if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
snd_intel8x0m_free(chip);
return err;
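
The intel8x0m reordering above follows a common probe rule: bring the hardware to a known state first, then install the shared interrupt handler, so the handler can never observe a half-initialized chip. A hedged skeleton of that ordering, where mychip, mychip_hw_init() and mychip_interrupt() are assumed names:

        static int example_probe_order(struct pci_dev *pci, struct mychip *chip)
        {
                int err;

                pci_set_master(pci);

                err = mychip_hw_init(chip);     /* assumed init helper */
                if (err < 0)
                        return err;

                /* only now can the shared handler safely run */
                err = request_irq(pci->irq, mychip_interrupt, IRQF_SHARED,
                                  KBUILD_MODNAME, chip);
                if (err)
                        return err;
                chip->irq = pci->irq;
                return 0;
        }
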
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index f0906ba416d4..3ac8c71d567c 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -319,7 +319,8 @@ static const struct snd_pcm_hardware snd_rme32_spdif_info = {
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE),
.rates = (SNDRV_PCM_RATE_32000 |
@@ -346,7 +347,8 @@ static const struct snd_pcm_hardware snd_rme32_adat_info =
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats= SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
@@ -370,7 +372,8 @@ static const struct snd_pcm_hardware snd_rme32_spdif_fd_info = {
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S32_LE),
.rates = (SNDRV_PCM_RATE_32000 |
@@ -397,7 +400,8 @@ static const struct snd_pcm_hardware snd_rme32_adat_fd_info =
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_SYNC_START),
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_SYNC_APPLPTR),
.formats= SNDRV_PCM_FMTBIT_S16_LE,
.rates = (SNDRV_PCM_RATE_44100 |
SNDRV_PCM_RATE_48000),
@@ -1104,16 +1108,6 @@ snd_rme32_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
snd_pcm_trigger_done(s, substream);
}
- /* prefill playback buffer */
- if (cmd == SNDRV_PCM_TRIGGER_START && rme32->fullduplex_mode) {
- snd_pcm_group_for_each_entry(s, substream) {
- if (s == rme32->playback_substream) {
- s->ops->ack(s);
- break;
- }
- }
- }
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (rme32->running && ! RME32_ISWORKING(rme32))
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 11b5b5e0e058..679ad0415e3b 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6534,7 +6534,7 @@ static int snd_hdspm_create_alsa_devices(struct snd_card *card,
dev_dbg(card->dev, "Update mixer controls...\n");
hdspm_update_simple_mixer_controls(hdspm);
- dev_dbg(card->dev, "Initializeing complete ???\n");
+ dev_dbg(card->dev, "Initializing complete?\n");
err = snd_card_register(card);
if (err < 0) {
diff --git a/sound/soc/amd/acp-da7219-max98357a.c b/sound/soc/amd/acp-da7219-max98357a.c
index 8e3275a96a82..3f813ea5210a 100644
--- a/sound/soc/amd/acp-da7219-max98357a.c
+++ b/sound/soc/amd/acp-da7219-max98357a.c
@@ -42,7 +42,7 @@
#include "../codecs/da7219.h"
#include "../codecs/da7219-aad.h"
-#define CZ_PLAT_CLK 25000000
+#define CZ_PLAT_CLK 48000000
#define DUAL_CHANNEL 2
static struct snd_soc_jack cz_jack;
@@ -75,7 +75,7 @@ static int cz_da7219_init(struct snd_soc_pcm_runtime *rtd)
da7219_dai_clk = clk_get(component->dev, "da7219-dai-clks");
ret = snd_soc_card_jack_new(card, "Headset Jack",
- SND_JACK_HEADPHONE | SND_JACK_MICROPHONE |
+ SND_JACK_HEADSET | SND_JACK_LINEOUT |
SND_JACK_BTN_0 | SND_JACK_BTN_1 |
SND_JACK_BTN_2 | SND_JACK_BTN_3,
&cz_jack, NULL, 0);
@@ -133,7 +133,7 @@ static const struct snd_pcm_hw_constraint_list constraints_channels = {
.mask = 0,
};
-static int cz_da7219_startup(struct snd_pcm_substream *substream)
+static int cz_da7219_play_startup(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
@@ -150,7 +150,28 @@ static int cz_da7219_startup(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
&constraints_rates);
- machine->i2s_instance = I2S_SP_INSTANCE;
+ machine->play_i2s_instance = I2S_SP_INSTANCE;
+ return da7219_clk_enable(substream);
+}
+
+static int cz_da7219_cap_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_card *card = rtd->card;
+ struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
+
+ /*
+ * On this platform, only stereo is supported for this PCM device
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_SP_INSTANCE;
machine->capture_channel = CAP_CHANNEL1;
return da7219_clk_enable(substream);
}
@@ -162,11 +183,22 @@ static void cz_da7219_shutdown(struct snd_pcm_substream *substream)
static int cz_max_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_BT_INSTANCE;
+ /*
+ * On this platform, only stereo is supported for this PCM device
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->play_i2s_instance = I2S_BT_INSTANCE;
return da7219_clk_enable(substream);
}
@@ -177,21 +209,43 @@ static void cz_max_shutdown(struct snd_pcm_substream *substream)
static int cz_dmic0_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_BT_INSTANCE;
+ /*
+ * On this platform, only stereo is supported for this PCM device
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_BT_INSTANCE;
return da7219_clk_enable(substream);
}
static int cz_dmic1_startup(struct snd_pcm_substream *substream)
{
+ struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_card *card = rtd->card;
struct acp_platform_info *machine = snd_soc_card_get_drvdata(card);
- machine->i2s_instance = I2S_SP_INSTANCE;
+ /*
+ * On this platform, only stereo is supported for this PCM device
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_rates);
+
+ machine->cap_i2s_instance = I2S_SP_INSTANCE;
machine->capture_channel = CAP_CHANNEL0;
return da7219_clk_enable(substream);
}
@@ -201,8 +255,13 @@ static void cz_dmic_shutdown(struct snd_pcm_substream *substream)
da7219_clk_disable();
}
+static const struct snd_soc_ops cz_da7219_play_ops = {
+ .startup = cz_da7219_play_startup,
+ .shutdown = cz_da7219_shutdown,
+};
+
static const struct snd_soc_ops cz_da7219_cap_ops = {
- .startup = cz_da7219_startup,
+ .startup = cz_da7219_cap_startup,
.shutdown = cz_da7219_shutdown,
};
@@ -233,7 +292,7 @@ static struct snd_soc_dai_link cz_dai_7219_98357[] = {
| SND_SOC_DAIFMT_CBM_CFM,
.init = cz_da7219_init,
.dpcm_playback = 1,
- .ops = &cz_da7219_cap_ops,
+ .ops = &cz_da7219_play_ops,
},
{
.name = "amd-da7219-cap",
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 77b265bd0505..cdebab2f8ce5 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -867,8 +867,12 @@ static int acp_dma_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
if (pinfo) {
- rtd->i2s_instance = pinfo->i2s_instance;
- rtd->capture_channel = pinfo->capture_channel;
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ rtd->i2s_instance = pinfo->play_i2s_instance;
+ } else {
+ rtd->i2s_instance = pinfo->cap_i2s_instance;
+ rtd->capture_channel = pinfo->capture_channel;
+ }
}
if (adata->asic_type == CHIP_STONEY) {
val = acp_reg_read(adata->acp_mmio,
@@ -1036,16 +1040,22 @@ static snd_pcm_uframes_t acp_dma_pointer(struct snd_pcm_substream *substream)
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
period_bytes = frames_to_bytes(runtime, runtime->period_size);
- dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
- if (dscr == rtd->dma_dscr_idx_1)
- pos = period_bytes;
- else
- pos = 0;
bytescount = acp_get_byte_count(rtd);
- if (bytescount > rtd->bytescount)
+ if (bytescount >= rtd->bytescount)
bytescount -= rtd->bytescount;
- delay = do_div(bytescount, period_bytes);
- runtime->delay = bytes_to_frames(runtime, delay);
+ if (bytescount < period_bytes) {
+ pos = 0;
+ } else {
+ dscr = acp_reg_read(rtd->acp_mmio, rtd->dma_curr_dscr);
+ if (dscr == rtd->dma_dscr_idx_1)
+ pos = period_bytes;
+ else
+ pos = 0;
+ }
+ if (bytescount > 0) {
+ delay = do_div(bytescount, period_bytes);
+ runtime->delay = bytes_to_frames(runtime, delay);
+ }
} else {
buffersize = frames_to_bytes(runtime, runtime->buffer_size);
bytescount = acp_get_byte_count(rtd);
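
The pointer math above relies on do_div() semantics: it divides its 64-bit operand in place and returns the remainder, so the quotient (whole periods) stays in bytescount while the remainder becomes the in-period delay. A worked sketch:

        static u32 example_delay_bytes(u64 bytescount, unsigned int period_bytes)
        {
                /*
                 * do_div() (from <asm/div64.h>) divides the 64-bit value in
                 * place and returns the remainder: 26624 / 4096 leaves the
                 * quotient 6 in bytescount and returns 2048, the byte offset
                 * into the current period (converted via bytes_to_frames()).
                 */
                return do_div(bytescount, period_bytes);
        }
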
diff --git a/sound/soc/amd/acp.h b/sound/soc/amd/acp.h
index be3963e8f4fa..dbbb1a85638d 100644
--- a/sound/soc/amd/acp.h
+++ b/sound/soc/amd/acp.h
@@ -158,7 +158,8 @@ struct audio_drv_data {
* and dma driver
*/
struct acp_platform_info {
- u16 i2s_instance;
+ u16 play_i2s_instance;
+ u16 cap_i2s_instance;
u16 capture_channel;
};
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 64b784e96f84..64f86f0b87e5 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -97,4 +97,16 @@ config SND_ATMEL_SOC_I2S
help
Say Y or M if you want to add support for Atmel ASoc driver for boards
using I2S.
+
+config SND_SOC_MIKROE_PROTO
+ tristate "Support for Mikroe-PROTO board"
+ depends on OF
+ depends on SND_SOC_I2C_AND_SPI
+ select SND_SOC_WM8731
+ help
+ Say Y or M if you want to add support for MikroElektronika PROTO Audio
+ Board. This board contains the WM8731 codec, which can be configured
+ using I2C over SDA (MPU Data Input) and SCL (MPU Clock Input) pins.
+ Both playback and capture are supported.
+
endif
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index cd87cb4bcff5..9f41bfa0fea3 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -17,6 +17,7 @@ snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
snd-atmel-soc-classd-objs := atmel-classd.o
snd-atmel-soc-pdmic-objs := atmel-pdmic.o
snd-atmel-soc-tse850-pcm5142-objs := tse850-pcm5142.o
+snd-soc-mikroe-proto-objs := mikroe-proto.o
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
@@ -24,3 +25,4 @@ obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
obj-$(CONFIG_SND_ATMEL_SOC_CLASSD) += snd-atmel-soc-classd.o
obj-$(CONFIG_SND_ATMEL_SOC_PDMIC) += snd-atmel-soc-pdmic.o
obj-$(CONFIG_SND_ATMEL_SOC_TSE850_PCM5142) += snd-atmel-soc-tse850-pcm5142.o
+obj-$(CONFIG_SND_SOC_MIKROE_PROTO) += snd-soc-mikroe-proto.o
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index d3b69682d9c2..6291ec7f9dd6 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -1005,11 +1005,11 @@ static int asoc_ssc_init(struct device *dev)
struct ssc_device *ssc = dev_get_drvdata(dev);
int ret;
- ret = snd_soc_register_component(dev, &atmel_ssc_component,
+ ret = devm_snd_soc_register_component(dev, &atmel_ssc_component,
&atmel_ssc_dai, 1);
if (ret) {
dev_err(dev, "Could not register DAI: %d\n", ret);
- goto err;
+ return ret;
}
if (ssc->pdata->use_dma)
@@ -1019,15 +1019,10 @@ static int asoc_ssc_init(struct device *dev)
if (ret) {
dev_err(dev, "Could not register PCM: %d\n", ret);
- goto err_unregister_dai;
+ return ret;
}
return 0;
-
-err_unregister_dai:
- snd_soc_unregister_component(dev);
-err:
- return ret;
}
static void asoc_ssc_exit(struct device *dev)
@@ -1038,8 +1033,6 @@ static void asoc_ssc_exit(struct device *dev)
atmel_pcm_dma_platform_unregister(dev);
else
atmel_pcm_pdc_platform_unregister(dev);
-
- snd_soc_unregister_component(dev);
}
/**
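
Switching to devm_snd_soc_register_component() above ties the component's lifetime to the underlying device, which is why the err_unregister_dai unwind label and the explicit snd_soc_unregister_component() call in the exit path can be dropped. A hedged sketch of the resulting probe shape; the component, DAI and helper names are illustrative:

        static int example_asoc_probe(struct device *dev)
        {
                int ret;

                ret = devm_snd_soc_register_component(dev, &example_component,
                                                      &example_dai, 1);
                if (ret)
                        return ret;     /* nothing to unwind by hand */

                /* example_platform_register() stands in for the PCM setup */
                return example_platform_register(dev);
        }
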
diff --git a/sound/soc/atmel/mikroe-proto.c b/sound/soc/atmel/mikroe-proto.c
new file mode 100644
index 000000000000..d47aaa5bf75a
--- /dev/null
+++ b/sound/soc/atmel/mikroe-proto.c
@@ -0,0 +1,165 @@
+/*
+ * ASoC driver for PROTO AudioCODEC (with a WM8731)
+ *
+ * Author: Florian Meier, <koalo@koalo.de>
+ * Copyright 2013
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+
+#include "../codecs/wm8731.h"
+
+#define XTAL_RATE 12288000 /* This is fixed on this board */
+
+static int snd_proto_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_card *card = rtd->card;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+
+ /* Set proto sysclk */
+ int ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+ XTAL_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(card->dev, "Failed to set WM8731 SYSCLK: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget snd_proto_widget[] = {
+ SND_SOC_DAPM_MIC("Microphone Jack", NULL),
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+};
+
+static const struct snd_soc_dapm_route snd_proto_route[] = {
+ /* speaker connected to LHPOUT/RHPOUT */
+ {"Headphone Jack", NULL, "LHPOUT"},
+ {"Headphone Jack", NULL, "RHPOUT"},
+
+ /* mic is connected to Mic Jack, with WM8731 Mic Bias */
+ {"MICIN", NULL, "Mic Bias"},
+ {"Mic Bias", NULL, "Microphone Jack"},
+};
+
+/* audio machine driver */
+static struct snd_soc_card snd_proto = {
+ .name = "snd_mikroe_proto",
+ .owner = THIS_MODULE,
+ .dapm_widgets = snd_proto_widget,
+ .num_dapm_widgets = ARRAY_SIZE(snd_proto_widget),
+ .dapm_routes = snd_proto_route,
+ .num_dapm_routes = ARRAY_SIZE(snd_proto_route),
+};
+
+static int snd_proto_probe(struct platform_device *pdev)
+{
+ struct snd_soc_dai_link *dai;
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
+ struct device_node *bitclkmaster = NULL;
+ struct device_node *framemaster = NULL;
+ unsigned int dai_fmt;
+ int ret = 0;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No device node supplied\n");
+ return -EINVAL;
+ }
+
+ snd_proto.dev = &pdev->dev;
+ ret = snd_soc_of_parse_card_name(&snd_proto, "model");
+ if (ret)
+ return ret;
+
+ dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai)
+ return -ENOMEM;
+
+ snd_proto.dai_link = dai;
+ snd_proto.num_links = 1;
+
+ dai->name = "WM8731";
+ dai->stream_name = "WM8731 HiFi";
+ dai->codec_dai_name = "wm8731-hifi";
+ dai->init = &snd_proto_init;
+
+ codec_np = of_parse_phandle(np, "audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "audio-codec node missing\n");
+ return -EINVAL;
+ }
+ dai->codec_of_node = codec_np;
+
+ cpu_np = of_parse_phandle(np, "i2s-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "i2s-controller missing\n");
+ return -EINVAL;
+ }
+ dai->cpu_of_node = cpu_np;
+ dai->platform_of_node = cpu_np;
+
+ dai_fmt = snd_soc_of_parse_daifmt(np, NULL,
+ &bitclkmaster, &framemaster);
+ if (bitclkmaster != framemaster) {
+ dev_err(&pdev->dev, "Must be the same bitclock and frame master\n");
+ return -EINVAL;
+ }
+ if (bitclkmaster) {
+ dai_fmt &= ~SND_SOC_DAIFMT_MASTER_MASK;
+ if (codec_np == bitclkmaster)
+ dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
+ else
+ dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+ }
+ of_node_put(bitclkmaster);
+ of_node_put(framemaster);
+ dai->dai_fmt = dai_fmt;
+
+ of_node_put(codec_np);
+ of_node_put(cpu_np);
+
+ ret = snd_soc_register_card(&snd_proto);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "snd_soc_register_card() failed: %d\n", ret);
+
+ return ret;
+}
+
+static int snd_proto_remove(struct platform_device *pdev)
+{
+ return snd_soc_unregister_card(&snd_proto);
+}
+
+static const struct of_device_id snd_proto_of_match[] = {
+ { .compatible = "mikroe,mikroe-proto", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, snd_proto_of_match);
+
+static struct platform_driver snd_proto_driver = {
+ .driver = {
+ .name = "snd-mikroe-proto",
+ .of_match_table = snd_proto_of_match,
+ },
+ .probe = snd_proto_probe,
+ .remove = snd_proto_remove,
+};
+
+module_platform_driver(snd_proto_driver);
+
+MODULE_AUTHOR("Florian Meier");
+MODULE_DESCRIPTION("ASoC Driver for PROTO board (WM8731)");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/atmel/tse850-pcm5142.c b/sound/soc/atmel/tse850-pcm5142.c
index 3a1393283156..214adcad5419 100644
--- a/sound/soc/atmel/tse850-pcm5142.c
+++ b/sound/soc/atmel/tse850-pcm5142.c
@@ -1,44 +1,38 @@
-/*
- * TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
- *
- * Copyright (C) 2016 Axentia Technologies AB
- *
- * Author: Peter Rosin <peda@axentia.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * loop1 relays
- * IN1 +---o +------------+ o---+ OUT1
- * \ /
- * + +
- * | / |
- * +--o +--. |
- * | add | |
- * | V |
- * | .---. |
- * DAC +----------->|Sum|---+
- * | '---' |
- * | |
- * + +
- *
- * IN2 +---o--+------------+--o---+ OUT2
- * loop2 relays
- *
- * The 'loop1' gpio pin controlls two relays, which are either in loop
- * position, meaning that input and output are directly connected, or
- * they are in mixer position, meaning that the signal is passed through
- * the 'Sum' mixer. Similarly for 'loop2'.
- *
- * In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
- * mixer (if 'add' is active) and feeding the mixer output to OUT1. The
- * 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
- * IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
- * of the (filtered) output from the PCM5142 codec.
- */
+// SPDX-License-Identifier: GPL-2.0
+//
+// TSE-850 audio - ASoC driver for the Axentia TSE-850 with a PCM5142 codec
+//
+// Copyright (C) 2016 Axentia Technologies AB
+//
+// Author: Peter Rosin <peda@axentia.se>
+//
+// loop1 relays
+// IN1 +---o +------------+ o---+ OUT1
+// \ /
+// + +
+// | / |
+// +--o +--. |
+// | add | |
+// | V |
+// | .---. |
+// DAC +----------->|Sum|---+
+// | '---' |
+// | |
+// + +
+//
+// IN2 +---o--+------------+--o---+ OUT2
+// loop2 relays
+//
+// The 'loop1' gpio pin controls two relays, which are either in loop
+// position, meaning that input and output are directly connected, or
+// they are in mixer position, meaning that the signal is passed through
+// the 'Sum' mixer. Similarly for 'loop2'.
+//
+// In the above, the 'loop1' relays are inactive, thus feeding IN1 to the
+// mixer (if 'add' is active) and feeding the mixer output to OUT1. The
+// 'loop2' relays are active, short-cutting the TSE-850 from channel 2.
+// IN1, IN2, OUT1 and OUT2 are TSE-850 connectors and DAC is the PCB name
+// of the (filtered) output from the PCM5142 codec.
#include <linux/clk.h>
#include <linux/gpio.h>
@@ -452,4 +446,4 @@ module_platform_driver(tse850_driver);
/* Module information */
MODULE_AUTHOR("Peter Rosin <peda@axentia.se>");
MODULE_DESCRIPTION("ALSA SoC driver for TSE-850 with PCM5142 codec");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/bcm/cygnus-ssp.c b/sound/soc/bcm/cygnus-ssp.c
index b733f1446353..b7c358b48d8d 100644
--- a/sound/soc/bcm/cygnus-ssp.c
+++ b/sound/soc/bcm/cygnus-ssp.c
@@ -1334,7 +1334,7 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
cygaud->active_ports = 0;
dev_dbg(dev, "Registering %d DAIs\n", active_port_count);
- err = snd_soc_register_component(dev, &cygnus_ssp_component,
+ err = devm_snd_soc_register_component(dev, &cygnus_ssp_component,
cygnus_ssp_dai, active_port_count);
if (err) {
dev_err(dev, "snd_soc_register_dai failed\n");
@@ -1345,32 +1345,27 @@ static int cygnus_ssp_probe(struct platform_device *pdev)
if (cygaud->irq_num <= 0) {
dev_err(dev, "platform_get_irq failed\n");
err = cygaud->irq_num;
- goto err_irq;
+ return err;
}
err = audio_clk_init(pdev, cygaud);
if (err) {
dev_err(dev, "audio clock initialization failed\n");
- goto err_irq;
+ return err;
}
err = cygnus_soc_platform_register(dev, cygaud);
if (err) {
dev_err(dev, "platform reg error %d\n", err);
- goto err_irq;
+ return err;
}
return 0;
-
-err_irq:
- snd_soc_unregister_component(dev);
- return err;
}
static int cygnus_ssp_remove(struct platform_device *pdev)
{
cygnus_soc_platform_unregister(&pdev->dev);
- snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index efb095dbcd71..9cc4f1848c9b 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -82,6 +82,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_ES7241
select SND_SOC_GTM601
select SND_SOC_HDAC_HDMI
+ select SND_SOC_HDAC_HDA
select SND_SOC_ICS43432
select SND_SOC_INNO_RK3036
select SND_SOC_ISABELLE if I2C
@@ -109,6 +110,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MT6351 if MTK_PMIC_WRAP
select SND_SOC_NAU8540 if I2C
select SND_SOC_NAU8810 if I2C
+ select SND_SOC_NAU8822 if I2C
select SND_SOC_NAU8824 if I2C
select SND_SOC_NAU8825 if I2C
select SND_SOC_HDMI_CODEC
@@ -119,6 +121,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_PCM186X_I2C if I2C
select SND_SOC_PCM186X_SPI if SPI_MASTER
select SND_SOC_PCM3008
+ select SND_SOC_PCM3060_I2C if I2C
+ select SND_SOC_PCM3060_SPI if SPI_MASTER
select SND_SOC_PCM3168A_I2C if I2C
select SND_SOC_PCM3168A_SPI if SPI_MASTER
select SND_SOC_PCM5102A
@@ -575,7 +579,11 @@ config SND_SOC_DA9055
tristate
config SND_SOC_DMIC
- tristate
+ tristate "Generic Digital Microphone CODEC"
+ depends on GPIOLIB
+ help
+ Enable support for the Generic Digital Microphone CODEC.
+ Select this if your sound card has DMICs.
config SND_SOC_HDMI_CODEC
tristate
@@ -615,6 +623,10 @@ config SND_SOC_HDAC_HDMI
select SND_PCM_ELD
select HDMI
+config SND_SOC_HDAC_HDA
+ tristate
+ select SND_HDA
+
config SND_SOC_ICS43432
tristate
@@ -629,7 +641,8 @@ config SND_SOC_LM49453
tristate
config SND_SOC_MAX98088
- tristate
+ tristate "Maxim MAX98088/9 Low-Power, Stereo Audio Codec"
+ depends on I2C
config SND_SOC_MAX98090
tristate
@@ -732,6 +745,21 @@ config SND_SOC_PCM186X_SPI
config SND_SOC_PCM3008
tristate
+config SND_SOC_PCM3060
+ tristate
+
+config SND_SOC_PCM3060_I2C
+ tristate "Texas Instruments PCM3060 CODEC - I2C"
+ depends on I2C
+ select SND_SOC_PCM3060
+ select REGMAP_I2C
+
+config SND_SOC_PCM3060_SPI
+ tristate "Texas Instruments PCM3060 CODEC - SPI"
+ depends on SPI_MASTER
+ select SND_SOC_PCM3060
+ select REGMAP_SPI
+
config SND_SOC_PCM3168A
tristate
@@ -1299,6 +1327,10 @@ config SND_SOC_NAU8810
tristate "Nuvoton Technology Corporation NAU88C10 CODEC"
depends on I2C
+config SND_SOC_NAU8822
+ tristate "Nuvoton Technology Corporation NAU88C22 CODEC"
+ depends on I2C
+
config SND_SOC_NAU8824
tristate "Nuvoton Technology Corporation NAU88L24 CODEC"
depends on I2C
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 7ae7c85e8219..8ffab8c8dbfa 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -78,6 +78,7 @@ snd-soc-es8328-i2c-objs := es8328-i2c.o
snd-soc-es8328-spi-objs := es8328-spi.o
snd-soc-gtm601-objs := gtm601.o
snd-soc-hdac-hdmi-objs := hdac_hdmi.o
+snd-soc-hdac-hda-objs := hdac_hda.o
snd-soc-ics43432-objs := ics43432.o
snd-soc-inno-rk3036-objs := inno_rk3036.o
snd-soc-isabelle-objs := isabelle.o
@@ -106,6 +107,7 @@ snd-soc-msm8916-digital-objs := msm8916-wcd-digital.o
snd-soc-mt6351-objs := mt6351.o
snd-soc-nau8540-objs := nau8540.o
snd-soc-nau8810-objs := nau8810.o
+snd-soc-nau8822-objs := nau8822.o
snd-soc-nau8824-objs := nau8824.o
snd-soc-nau8825-objs := nau8825.o
snd-soc-hdmi-codec-objs := hdmi-codec.o
@@ -119,6 +121,9 @@ snd-soc-pcm186x-objs := pcm186x.o
snd-soc-pcm186x-i2c-objs := pcm186x-i2c.o
snd-soc-pcm186x-spi-objs := pcm186x-spi.o
snd-soc-pcm3008-objs := pcm3008.o
+snd-soc-pcm3060-objs := pcm3060.o
+snd-soc-pcm3060-i2c-objs := pcm3060-i2c.o
+snd-soc-pcm3060-spi-objs := pcm3060-spi.o
snd-soc-pcm3168a-objs := pcm3168a.o
snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
snd-soc-pcm3168a-spi-objs := pcm3168a-spi.o
@@ -338,6 +343,7 @@ obj-$(CONFIG_SND_SOC_ES8328_I2C)+= snd-soc-es8328-i2c.o
obj-$(CONFIG_SND_SOC_ES8328_SPI)+= snd-soc-es8328-spi.o
obj-$(CONFIG_SND_SOC_GTM601) += snd-soc-gtm601.o
obj-$(CONFIG_SND_SOC_HDAC_HDMI) += snd-soc-hdac-hdmi.o
+obj-$(CONFIG_SND_SOC_HDAC_HDA) += snd-soc-hdac-hda.o
obj-$(CONFIG_SND_SOC_ICS43432) += snd-soc-ics43432.o
obj-$(CONFIG_SND_SOC_INNO_RK3036) += snd-soc-inno-rk3036.o
obj-$(CONFIG_SND_SOC_ISABELLE) += snd-soc-isabelle.o
@@ -366,6 +372,7 @@ obj-$(CONFIG_SND_SOC_MSM8916_WCD_DIGITAL) +=snd-soc-msm8916-digital.o
obj-$(CONFIG_SND_SOC_MT6351) += snd-soc-mt6351.o
obj-$(CONFIG_SND_SOC_NAU8540) += snd-soc-nau8540.o
obj-$(CONFIG_SND_SOC_NAU8810) += snd-soc-nau8810.o
+obj-$(CONFIG_SND_SOC_NAU8822) += snd-soc-nau8822.o
obj-$(CONFIG_SND_SOC_NAU8824) += snd-soc-nau8824.o
obj-$(CONFIG_SND_SOC_NAU8825) += snd-soc-nau8825.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
@@ -379,6 +386,9 @@ obj-$(CONFIG_SND_SOC_PCM186X) += snd-soc-pcm186x.o
obj-$(CONFIG_SND_SOC_PCM186X_I2C) += snd-soc-pcm186x-i2c.o
obj-$(CONFIG_SND_SOC_PCM186X_SPI) += snd-soc-pcm186x-spi.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
+obj-$(CONFIG_SND_SOC_PCM3060) += snd-soc-pcm3060.o
+obj-$(CONFIG_SND_SOC_PCM3060_I2C) += snd-soc-pcm3060-i2c.o
+obj-$(CONFIG_SND_SOC_PCM3060_SPI) += snd-soc-pcm3060-spi.o
obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
obj-$(CONFIG_SND_SOC_PCM3168A_I2C) += snd-soc-pcm3168a-i2c.o
obj-$(CONFIG_SND_SOC_PCM3168A_SPI) += snd-soc-pcm3168a-spi.o
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index be136e981653..bef3e9e74c26 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -518,7 +518,8 @@ static int adau1761_setup_digmic_jackdetect(struct snd_soc_component *component)
ARRAY_SIZE(adau1761_jack_detect_controls));
if (ret)
return ret;
- case ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE: /* fallthrough */
+ /* fall through */
+ case ADAU1761_DIGMIC_JACKDET_PIN_MODE_NONE:
ret = snd_soc_dapm_add_routes(dapm, adau1761_no_dmic_routes,
ARRAY_SIZE(adau1761_no_dmic_routes));
if (ret)
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 57169b8ff14e..3959e6ad113d 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -21,11 +21,18 @@
#include <linux/i2c.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
+#include <asm/unaligned.h>
#include "sigmadsp.h"
#include "adau17x1.h"
#include "adau-utils.h"
+#define ADAU17X1_SAFELOAD_TARGET_ADDRESS 0x0006
+#define ADAU17X1_SAFELOAD_TRIGGER 0x0007
+#define ADAU17X1_SAFELOAD_DATA 0x0001
+#define ADAU17X1_SAFELOAD_DATA_SIZE 20
+#define ADAU17X1_WORD_SIZE 4
+
static const char * const adau17x1_capture_mixer_boost_text[] = {
"Normal operation", "Boost Level 1", "Boost Level 2", "Boost Level 3",
};
@@ -60,6 +67,9 @@ static const struct snd_kcontrol_new adau17x1_controls[] = {
SOC_ENUM("Mic Bias Mode", adau17x1_mic_bias_mode_enum),
};
+static int adau17x1_setup_firmware(struct snd_soc_component *component,
+ unsigned int rate);
+
static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -313,7 +323,7 @@ static const struct snd_soc_dapm_route adau17x1_no_dsp_dapm_routes[] = {
{ "Capture", NULL, "Right Decimator" },
};
-bool adau17x1_has_dsp(struct adau *adau)
+static bool adau17x1_has_dsp(struct adau *adau)
{
switch (adau->type) {
case ADAU1761:
@@ -324,7 +334,17 @@ bool adau17x1_has_dsp(struct adau *adau)
return false;
}
}
-EXPORT_SYMBOL_GPL(adau17x1_has_dsp);
+
+static bool adau17x1_has_safeload(struct adau *adau)
+{
+ switch (adau->type) {
+ case ADAU1761:
+ case ADAU1781:
+ return true;
+ default:
+ return false;
+ }
+}
static int adau17x1_set_dai_pll(struct snd_soc_dai *dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
@@ -836,7 +856,7 @@ bool adau17x1_volatile_register(struct device *dev, unsigned int reg)
}
EXPORT_SYMBOL_GPL(adau17x1_volatile_register);
-int adau17x1_setup_firmware(struct snd_soc_component *component,
+static int adau17x1_setup_firmware(struct snd_soc_component *component,
unsigned int rate)
{
int ret;
@@ -880,7 +900,6 @@ err:
return ret;
}
-EXPORT_SYMBOL_GPL(adau17x1_setup_firmware);
int adau17x1_add_widgets(struct snd_soc_component *component)
{
@@ -957,6 +976,56 @@ int adau17x1_resume(struct snd_soc_component *component)
}
EXPORT_SYMBOL_GPL(adau17x1_resume);
+static int adau17x1_safeload(struct sigmadsp *sigmadsp, unsigned int addr,
+ const uint8_t bytes[], size_t len)
+{
+ uint8_t buf[ADAU17X1_WORD_SIZE];
+ uint8_t data[ADAU17X1_SAFELOAD_DATA_SIZE];
+ unsigned int addr_offset;
+ unsigned int nbr_words;
+ int ret;
+
+ /* Write the data to the safeload registers. If len is not a
+ * multiple of 4 bytes, zero-pad up to a full word.
+ */
+ nbr_words = len / ADAU17X1_WORD_SIZE;
+ if ((len - nbr_words * ADAU17X1_WORD_SIZE) == 0) {
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_DATA, bytes, len);
+ } else {
+ nbr_words++;
+ memset(data, 0, ADAU17X1_SAFELOAD_DATA_SIZE);
+ memcpy(data, bytes, len);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_DATA, data,
+ nbr_words * ADAU17X1_WORD_SIZE);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* Write target address, target address is offset by 1 */
+ addr_offset = addr - 1;
+ put_unaligned_be32(addr_offset, buf);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_TARGET_ADDRESS, buf, ADAU17X1_WORD_SIZE);
+ if (ret < 0)
+ return ret;
+
+ /* write nbr of words to trigger address */
+ put_unaligned_be32(nbr_words, buf);
+ ret = regmap_raw_write(sigmadsp->control_data,
+ ADAU17X1_SAFELOAD_TRIGGER, buf, ADAU17X1_WORD_SIZE);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct sigmadsp_ops adau17x1_sigmadsp_ops = {
+ .safeload = adau17x1_safeload,
+};
+
int adau17x1_probe(struct device *dev, struct regmap *regmap,
enum adau17x1_type type, void (*switch_mode)(struct device *dev),
const char *firmware_name)
@@ -1002,8 +1071,13 @@ int adau17x1_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, adau);
if (firmware_name) {
- adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap, NULL,
- firmware_name);
+ if (adau17x1_has_safeload(adau)) {
+ adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap,
+ &adau17x1_sigmadsp_ops, firmware_name);
+ } else {
+ adau->sigmadsp = devm_sigmadsp_init_regmap(dev, regmap,
+ NULL, firmware_name);
+ }
if (IS_ERR(adau->sigmadsp)) {
dev_warn(dev, "Could not find firmware file: %ld\n",
PTR_ERR(adau->sigmadsp));
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h
index e6fe87beec07..98a3b6f5bc96 100644
--- a/sound/soc/codecs/adau17x1.h
+++ b/sound/soc/codecs/adau17x1.h
@@ -68,10 +68,6 @@ int adau17x1_resume(struct snd_soc_component *component);
extern const struct snd_soc_dai_ops adau17x1_dai_ops;
-int adau17x1_setup_firmware(struct snd_soc_component *component,
- unsigned int rate);
-bool adau17x1_has_dsp(struct adau *adau);
-
#define ADAU17X1_CLOCK_CONTROL 0x4000
#define ADAU17X1_PLL_CONTROL 0x4002
#define ADAU17X1_REC_POWER_MGMT 0x4009
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 407554175282..ab27d2b94d02 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -154,11 +154,11 @@ static const struct snd_kcontrol_new cs4265_snd_controls[] = {
SOC_SINGLE("E to F Buffer Disable Switch", CS4265_SPDIF_CTL1,
6, 1, 0),
SOC_ENUM("C Data Access", cam_mode_enum),
+ SOC_SINGLE("SPDIF Switch", CS4265_SPDIF_CTL2, 5, 1, 1),
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
- 0, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2, 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
@@ -221,10 +221,11 @@ static const struct snd_soc_dapm_route cs4265_audio_map[] = {
{"LINEOUTR", NULL, "DAC"},
{"SPDIFOUT", NULL, "SPDIF"},
+ {"Pre-amp MIC", NULL, "MICL"},
+ {"Pre-amp MIC", NULL, "MICR"},
+ {"ADC Mux", "MIC", "Pre-amp MIC"},
{"ADC Mux", "LINEIN", "LINEINL"},
{"ADC Mux", "LINEIN", "LINEINR"},
- {"ADC Mux", "MIC", "MICL"},
- {"ADC Mux", "MIC", "MICR"},
{"ADC", NULL, "ADC Mux"},
{"DOUT", NULL, "ADC"},
{"DAI1 Capture", NULL, "DOUT"},
@@ -496,7 +497,8 @@ static int cs4265_set_bias_level(struct snd_soc_component *component,
SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
#define CS4265_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | \
- SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE)
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_U32_LE)
static const struct snd_soc_dai_ops cs4265_ops = {
.hw_params = cs4265_pcm_hw_params,
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 5080d7a3c279..fd2bd74024c1 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -21,6 +21,7 @@
* - master mode *NOT* supported
*/
+#include <linux/clk.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -41,6 +42,7 @@ enum master_slave_mode {
struct cs42l51_private {
unsigned int mclk;
+ struct clk *mclk_handle;
unsigned int audio_mode; /* The mode (I2S or left-justified) */
enum master_slave_mode func;
};
@@ -237,6 +239,10 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = {
&cs42l51_adcr_mux_controls),
};
+static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = {
+ SND_SOC_DAPM_CLOCK_SUPPLY("MCLK")
+};
+
static const struct snd_soc_dapm_route cs42l51_routes[] = {
{"HPL", NULL, "Left DAC"},
{"HPR", NULL, "Right DAC"},
@@ -487,6 +493,14 @@ static struct snd_soc_dai_driver cs42l51_dai = {
static int cs42l51_component_probe(struct snd_soc_component *component)
{
int ret, reg;
+ struct snd_soc_dapm_context *dapm;
+ struct cs42l51_private *cs42l51;
+
+ cs42l51 = snd_soc_component_get_drvdata(component);
+ dapm = snd_soc_component_get_dapm(component);
+
+ if (cs42l51->mclk_handle)
+ snd_soc_dapm_new_controls(dapm, cs42l51_dapm_mclk_widgets, 1);
/*
* DAC configuration
@@ -540,6 +554,13 @@ int cs42l51_probe(struct device *dev, struct regmap *regmap)
dev_set_drvdata(dev, cs42l51);
+ cs42l51->mclk_handle = devm_clk_get(dev, "MCLK");
+ if (IS_ERR(cs42l51->mclk_handle)) {
+ if (PTR_ERR(cs42l51->mclk_handle) != -ENOENT)
+ return PTR_ERR(cs42l51->mclk_handle);
+ cs42l51->mclk_handle = NULL;
+ }
+
/* Verify that we have a CS42L51 */
ret = regmap_read(regmap, CS42L51_CHIP_REV_ID, &val);
if (ret < 0) {
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index 8c4926df9286..71322e0410ee 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -148,6 +148,7 @@ static const struct of_device_id dmic_dev_match[] = {
{.compatible = "dmic-codec"},
{}
};
+MODULE_DEVICE_TABLE(of, dmic_dev_match);
static struct platform_driver dmic_driver = {
.driver = {
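
The one-line dmic fix above matters for autoloading: MODULE_DEVICE_TABLE(of, ...) records the compatible strings in the module's alias table (roughly "of:N*T*Cdmic-codec"), so kmod can load the module when a matching device tree node turns up. A hedged minimal shape:

        static const struct of_device_id example_match[] = {
                { .compatible = "dmic-codec" },
                { /* sentinel */ }
        };
        MODULE_DEVICE_TABLE(of, example_match);
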
diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
index 4b5827dc23aa..04a3aa770722 100644
--- a/sound/soc/codecs/es8328.c
+++ b/sound/soc/codecs/es8328.c
@@ -566,14 +566,14 @@ static int es8328_set_sysclk(struct snd_soc_dai *codec_dai,
break;
case 22579200:
mclkdiv2 = 1;
- /* fallthru */
+ /* fall through */
case 11289600:
es8328->sysclk_constraints = &constraints_11289;
es8328->mclk_ratios = ratios_11289;
break;
case 24576000:
mclkdiv2 = 1;
- /* fallthru */
+ /* fall through */
case 12288000:
es8328->sysclk_constraints = &constraints_12288;
es8328->mclk_ratios = ratios_12288;
diff --git a/sound/soc/codecs/hdac_hda.c b/sound/soc/codecs/hdac_hda.c
new file mode 100644
index 000000000000..2aaa83028e55
--- /dev/null
+++ b/sound/soc/codecs/hdac_hda.c
@@ -0,0 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * hdac_hda.c - ASoC extensions to reuse the legacy HDA codec drivers
+ * with ASoC platform drivers. These APIs are called by the legacy HDA
+ * codec drivers using hdac_ext_bus_ops ops.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/hdaudio_ext.h>
+#include <sound/hda_codec.h>
+#include <sound/hda_register.h>
+#include "hdac_hda.h"
+
+#define HDAC_ANALOG_DAI_ID 0
+#define HDAC_DIGITAL_DAI_ID 1
+#define HDAC_ALT_ANALOG_DAI_ID 2
+
+#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S8 | \
+ SNDRV_PCM_FMTBIT_U8 | \
+ SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_U16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_U24_LE | \
+ SNDRV_PCM_FMTBIT_S32_LE | \
+ SNDRV_PCM_FMTBIT_U32_LE | \
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
+
+static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai);
+static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width);
+static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
+ struct snd_soc_dai *dai);
+
+static struct snd_soc_dai_ops hdac_hda_dai_ops = {
+ .startup = hdac_hda_dai_open,
+ .shutdown = hdac_hda_dai_close,
+ .prepare = hdac_hda_dai_prepare,
+ .hw_free = hdac_hda_dai_hw_free,
+ .set_tdm_slot = hdac_hda_dai_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver hdac_hda_dais[] = {
+{
+ .id = HDAC_ANALOG_DAI_ID,
+ .name = "Analog Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Analog Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Analog Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_DIGITAL_DAI_ID,
+ .name = "Digital Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Digital Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Digital Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+},
+{
+ .id = HDAC_ALT_ANALOG_DAI_ID,
+ .name = "Alt Analog Codec DAI",
+ .ops = &hdac_hda_dai_ops,
+ .playback = {
+ .stream_name = "Alt Analog Codec Playback",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+ .capture = {
+ .stream_name = "Alt Analog Codec Capture",
+ .channels_min = 1,
+ .channels_max = 16,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = STUB_FORMATS,
+ .sig_bits = 24,
+ },
+}
+
+};
+
+static int hdac_hda_dai_set_tdm_slot(struct snd_soc_dai *dai,
+ unsigned int tx_mask, unsigned int rx_mask,
+ int slots, int slot_width)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hdac_hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = &hda_pvt->pcm[dai->id];
+ if (tx_mask)
+ pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_PLAYBACK] = tx_mask;
+ else
+ pcm[dai->id].stream_tag[SNDRV_PCM_STREAM_CAPTURE] = rx_mask;
+
+ return 0;
+}
+
+static int hdac_hda_dai_hw_free(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ hda_stream = &pcm->stream[substream->stream];
+ snd_hda_codec_cleanup(&hda_pvt->codec, hda_stream, substream);
+
+ return 0;
+}
+
+static int hdac_hda_dai_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct hdac_device *hdev;
+ struct hda_pcm_stream *hda_stream;
+ unsigned int format_val;
+ struct hda_pcm *pcm;
+ unsigned int stream;
+ int ret = 0;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ hdev = &hda_pvt->codec.core;
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ format_val = snd_hdac_calc_stream_format(runtime->rate,
+ runtime->channels,
+ runtime->format,
+ hda_stream->maxbps,
+ 0);
+ if (!format_val) {
+ dev_err(&hdev->dev,
+ "invalid format_val, rate=%d, ch=%d, format=%d\n",
+ runtime->rate, runtime->channels, runtime->format);
+ return -EINVAL;
+ }
+
+ stream = hda_pvt->pcm[dai->id].stream_tag[substream->stream];
+
+ ret = snd_hda_codec_prepare(&hda_pvt->codec, hda_stream,
+ stream, format_val, substream);
+ if (ret < 0)
+ dev_err(&hdev->dev, "codec prepare failed %d\n", ret);
+
+ return ret;
+}
+
+static int hdac_hda_dai_open(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+ int ret;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return -EINVAL;
+
+ snd_hda_codec_pcm_get(pcm);
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ ret = hda_stream->ops.open(hda_stream, &hda_pvt->codec, substream);
+ if (ret < 0)
+ snd_hda_codec_pcm_put(pcm);
+
+ return ret;
+}
+
+static void hdac_hda_dai_close(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct hdac_hda_priv *hda_pvt;
+ struct hda_pcm_stream *hda_stream;
+ struct hda_pcm *pcm;
+
+ hda_pvt = snd_soc_component_get_drvdata(component);
+ pcm = snd_soc_find_pcm_from_dai(hda_pvt, dai);
+ if (!pcm)
+ return;
+
+ hda_stream = &pcm->stream[substream->stream];
+
+ hda_stream->ops.close(hda_stream, &hda_pvt->codec, substream);
+
+ snd_hda_codec_pcm_put(pcm);
+}
+
+static struct hda_pcm *snd_soc_find_pcm_from_dai(struct hdac_hda_priv *hda_pvt,
+ struct snd_soc_dai *dai)
+{
+ struct hda_codec *hcodec = &hda_pvt->codec;
+ struct hda_pcm *cpcm;
+ const char *pcm_name;
+
+ switch (dai->id) {
+ case HDAC_ANALOG_DAI_ID:
+ pcm_name = "Analog";
+ break;
+ case HDAC_DIGITAL_DAI_ID:
+ pcm_name = "Digital";
+ break;
+ case HDAC_ALT_ANALOG_DAI_ID:
+ pcm_name = "Alt Analog";
+ break;
+ default:
+ dev_err(&hcodec->core.dev, "invalid dai id %d\n", dai->id);
+ return NULL;
+ }
+
+ list_for_each_entry(cpcm, &hcodec->pcm_list_head, list) {
+ if (strpbrk(cpcm->name, pcm_name))
+ return cpcm;
+ }
+
+ dev_err(&hcodec->core.dev, "didn't find PCM for DAI %s\n", dai->name);
+ return NULL;
+}
+
+static int hdac_hda_codec_probe(struct snd_soc_component *component)
+{
+ struct hdac_hda_priv *hda_pvt =
+ snd_soc_component_get_drvdata(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
+ struct hdac_device *hdev = &hda_pvt->codec.core;
+ struct hda_codec *hcodec = &hda_pvt->codec;
+ struct hdac_ext_link *hlink;
+ hda_codec_patch_t patch;
+ int ret;
+
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return -EIO;
+ }
+
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+
+ ret = snd_hda_codec_device_new(hcodec->bus, component->card->snd_card,
+ hdev->addr, hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to create hda codec %d\n", ret);
+ goto error_no_pm;
+ }
+
+ /*
+ * snd_hda_codec_device_new decrements the usage count, so take a
+ * runtime PM reference here or the device will be powered off
+ */
+ pm_runtime_get_noresume(&hdev->dev);
+
+ hcodec->bus->card = dapm->card->snd_card;
+
+ ret = snd_hda_codec_set_name(hcodec, hcodec->preset->name);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "name failed %s\n", hcodec->preset->name);
+ goto error;
+ }
+
+ ret = snd_hdac_regmap_init(&hcodec->core);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "regmap init failed\n");
+ goto error;
+ }
+
+ patch = (hda_codec_patch_t)hcodec->preset->driver_data;
+ if (patch) {
+ ret = patch(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "patch failed %d\n", ret);
+ goto error;
+ }
+ } else {
+ dev_dbg(&hdev->dev, "no patch file found\n");
+ }
+
+ ret = snd_hda_codec_parse_pcms(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to map pcms to dai %d\n", ret);
+ goto error;
+ }
+
+ ret = snd_hda_codec_build_controls(hcodec);
+ if (ret < 0) {
+ dev_err(&hdev->dev, "unable to create controls %d\n", ret);
+ goto error;
+ }
+
+ hcodec->core.lazy_cache = true;
+
+ /*
+ * hdac_device core already sets the state to active and calls
+ * get_noresume, so enable runtime PM and put the device to suspend.
+ * pm_runtime_enable is also called during codec registration
+ */
+ pm_runtime_put(&hdev->dev);
+ pm_runtime_suspend(&hdev->dev);
+
+ return 0;
+
+error:
+ pm_runtime_put(&hdev->dev);
+error_no_pm:
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+ return ret;
+}
+
+static void hdac_hda_codec_remove(struct snd_soc_component *component)
+{
+ struct hdac_hda_priv *hda_pvt =
+ snd_soc_component_get_drvdata(component);
+ struct hdac_device *hdev = &hda_pvt->codec.core;
+ struct hdac_ext_link *hlink = NULL;
+
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return;
+ }
+
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+ pm_runtime_disable(&hdev->dev);
+}
+
+static const struct snd_soc_dapm_route hdac_hda_dapm_routes[] = {
+ {"AIF1TX", NULL, "Codec Input Pin1"},
+ {"AIF2TX", NULL, "Codec Input Pin2"},
+ {"AIF3TX", NULL, "Codec Input Pin3"},
+
+ {"Codec Output Pin1", NULL, "AIF1RX"},
+ {"Codec Output Pin2", NULL, "AIF2RX"},
+ {"Codec Output Pin3", NULL, "AIF3RX"},
+};
+
+static const struct snd_soc_dapm_widget hdac_hda_dapm_widgets[] = {
+ /* Audio Interface */
+ SND_SOC_DAPM_AIF_IN("AIF1RX", "Analog Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF2RX", "Digital Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("AIF3RX", "Alt Analog Codec Playback", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF1TX", "Analog Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF2TX", "Digital Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("AIF3TX", "Alt Analog Codec Capture", 0,
+ SND_SOC_NOPM, 0, 0),
+
+ /* Input Pins */
+ SND_SOC_DAPM_INPUT("Codec Input Pin1"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin2"),
+ SND_SOC_DAPM_INPUT("Codec Input Pin3"),
+
+ /* Output Pins */
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin1"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin2"),
+ SND_SOC_DAPM_OUTPUT("Codec Output Pin3"),
+};
+
+static const struct snd_soc_component_driver hdac_hda_codec = {
+ .probe = hdac_hda_codec_probe,
+ .remove = hdac_hda_codec_remove,
+ .idle_bias_on = false,
+ .dapm_widgets = hdac_hda_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdac_hda_dapm_widgets),
+ .dapm_routes = hdac_hda_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(hdac_hda_dapm_routes),
+};
+
+static int hdac_hda_dev_probe(struct hdac_device *hdev)
+{
+ struct hdac_ext_link *hlink;
+ struct hdac_hda_priv *hda_pvt;
+ int ret;
+
+ /* hold the ref while we probe */
+ hlink = snd_hdac_ext_bus_get_link(hdev->bus, dev_name(&hdev->dev));
+ if (!hlink) {
+ dev_err(&hdev->dev, "hdac link not found\n");
+ return -EIO;
+ }
+ snd_hdac_ext_bus_link_get(hdev->bus, hlink);
+
+ hda_pvt = hdac_to_hda_priv(hdev);
+ if (!hda_pvt)
+ return -ENOMEM;
+
+ /* ASoC specific initialization */
+ ret = devm_snd_soc_register_component(&hdev->dev,
+ &hdac_hda_codec, hdac_hda_dais,
+ ARRAY_SIZE(hdac_hda_dais));
+ if (ret < 0) {
+ dev_err(&hdev->dev, "failed to register HDA codec %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&hdev->dev, hda_pvt);
+ snd_hdac_ext_bus_link_put(hdev->bus, hlink);
+
+ return ret;
+}
+
+static int hdac_hda_dev_remove(struct hdac_device *hdev)
+{
+ return 0;
+}
+
+static struct hdac_ext_bus_ops hdac_ops = {
+ .hdev_attach = hdac_hda_dev_probe,
+ .hdev_detach = hdac_hda_dev_remove,
+};
+
+struct hdac_ext_bus_ops *snd_soc_hdac_hda_get_ops(void)
+{
+ return &hdac_ops;
+}
+EXPORT_SYMBOL_GPL(snd_soc_hdac_hda_get_ops);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ASoC Extensions for legacy HDA Drivers");
+MODULE_AUTHOR("Rakesh Ughreja<rakesh.a.ughreja@intel.com>");
diff --git a/sound/soc/codecs/hdac_hda.h b/sound/soc/codecs/hdac_hda.h
new file mode 100644
index 000000000000..e444ef593360
--- /dev/null
+++ b/sound/soc/codecs/hdac_hda.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2015-18 Intel Corporation.
+ */
+
+#ifndef __HDAC_HDA_H__
+#define __HDAC_HDA_H__
+
+struct hdac_hda_pcm {
+ int stream_tag[2];
+};
+
+struct hdac_hda_priv {
+ struct hda_codec codec;
+ struct hdac_hda_pcm pcm[2];
+};
+
+#define hdac_to_hda_priv(_hdac) \
+ container_of(_hdac, struct hdac_hda_priv, codec.core)
+#define hdac_to_hda_codec(_hdac) container_of(_hdac, struct hda_codec, core)
+
+struct hdac_ext_bus_ops *snd_soc_hdac_hda_get_ops(void);
+
+#endif /* __HDAC_HDA_H__ */
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 7b8533abf637..4e9854889a95 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1410,6 +1410,12 @@ static int hdac_hdmi_create_dais(struct hdac_device *hdev,
if (ret)
return ret;
+ /* Filter out the 44.1, 88.2 and 176.4 kHz rates */
+ rates &= ~(SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_88200 |
+ SNDRV_PCM_RATE_176400);
+ if (!rates)
+ return -EINVAL;
+
sprintf(dai_name, "intel-hdmi-hifi%d", i+1);
hdmi_dais[i].name = devm_kstrdup(&hdev->dev,
dai_name, GFP_KERNEL);
@@ -1598,7 +1604,7 @@ static struct snd_pcm *hdac_hdmi_get_pcm_from_id(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->pcm && (rtd->pcm->device == device))
return rtd->pcm;
}
@@ -1961,9 +1967,6 @@ static int hdac_hdmi_get_spk_alloc(struct hdac_device *hdev, int pcm_idx)
port = list_first_entry(&pcm->port_list, struct hdac_hdmi_port, head);
- if (!port)
- return 0;
-
if (!port || !port->eld.eld_valid)
return 0;
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index fb515aaa54fc..ca172a4b6849 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -16,6 +16,7 @@
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
+#include <linux/clk.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -42,6 +43,7 @@ struct max98088_priv {
struct regmap *regmap;
enum max98088_type devtype;
struct max98088_pdata *pdata;
+ struct clk *mclk;
unsigned int sysclk;
struct max98088_cdata dai[2];
int eq_textcnt;
@@ -1103,6 +1105,11 @@ static int max98088_dai_set_sysclk(struct snd_soc_dai *dai,
if (freq == max98088->sysclk)
return 0;
+ if (!IS_ERR(max98088->mclk)) {
+ freq = clk_round_rate(max98088->mclk, freq);
+ clk_set_rate(max98088->mclk, freq);
+ }
+
/* Setup clocks for slave mode, and using the PLL
* PSCLK = 0x01 (when master clk is 10MHz to 20MHz)
* 0x02 (when master clk is 20MHz to 30MHz)..
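
For context, a minimal sketch of the clk API pattern added above: clk_round_rate() asks the clock framework which rate it can actually deliver without touching the hardware, and clk_set_rate() then programs it. The rates below are illustrative assumptions, not values from the patch.

    long rounded = clk_round_rate(max98088->mclk, 13000000);
    /* e.g. 12288000 if that is the closest rate the parent can produce */
    if (rounded > 0)
        clk_set_rate(max98088->mclk, rounded);
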
@@ -1310,6 +1317,20 @@ static int max98088_set_bias_level(struct snd_soc_component *component,
break;
case SND_SOC_BIAS_PREPARE:
+ /*
+ * SND_SOC_BIAS_PREPARE is called while preparing for a
+ * transition to ON or away from ON. If current bias_level
+ * is SND_SOC_BIAS_ON, then it is preparing for a transition
+ * away from ON. Disable the clock in that case, otherwise
+ * enable it.
+ */
+ if (!IS_ERR(max98088->mclk)) {
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_ON)
+ clk_disable_unprepare(max98088->mclk);
+ else
+ clk_prepare_enable(max98088->mclk);
+ }
break;
case SND_SOC_BIAS_STANDBY:
@@ -1725,6 +1746,11 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
if (IS_ERR(max98088->regmap))
return PTR_ERR(max98088->regmap);
+ max98088->mclk = devm_clk_get(&i2c->dev, "mclk");
+ if (IS_ERR(max98088->mclk))
+ if (PTR_ERR(max98088->mclk) == -EPROBE_DEFER)
+ return PTR_ERR(max98088->mclk);
+
max98088->devtype = id->driver_data;
i2c_set_clientdata(i2c, max98088);
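
The probe snippet above hand-rolls the "optional clock" idiom: only -EPROBE_DEFER aborts the probe, any other error means the board simply has no "mclk" and the driver carries on. A sketch of the pattern (devm_clk_get_optional(), where the target tree has it, expresses the same thing):

    struct clk *mclk = devm_clk_get(&i2c->dev, "mclk");
    if (IS_ERR(mclk)) {
        if (PTR_ERR(mclk) == -EPROBE_DEFER)
            return -EPROBE_DEFER; /* provider not ready, retry probe later */
        /* any other error: treat the clock as absent and continue */
    }
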
@@ -1742,9 +1768,19 @@ static const struct i2c_device_id max98088_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
+#if defined(CONFIG_OF)
+static const struct of_device_id max98088_of_match[] = {
+ { .compatible = "maxim,max98088" },
+ { .compatible = "maxim,max98089" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max98088_of_match);
+#endif
+
static struct i2c_driver max98088_i2c_driver = {
.driver = {
.name = "max98088",
+ .of_match_table = of_match_ptr(max98088_of_match),
},
.probe = max98088_i2c_probe,
.id_table = max98088_i2c_id,
diff --git a/sound/soc/codecs/max98373.c b/sound/soc/codecs/max98373.c
index 1093f766d0d2..a09d01318f79 100644
--- a/sound/soc/codecs/max98373.c
+++ b/sound/soc/codecs/max98373.c
@@ -2,6 +2,7 @@
// Copyright (c) 2017, Maxim Integrated
#include <linux/acpi.h>
+#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -454,7 +455,7 @@ SND_SOC_DAPM_SIGGEN("IMON"),
SND_SOC_DAPM_SIGGEN("FBMON"),
};
-static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, 0, -50, 0);
+static DECLARE_TLV_DB_SCALE(max98373_digital_tlv, -6350, 50, 1);
static const DECLARE_TLV_DB_RANGE(max98373_spk_tlv,
0, 8, TLV_DB_SCALE_ITEM(0, 50, 0),
9, 10, TLV_DB_SCALE_ITEM(500, 100, 0),
@@ -470,19 +471,19 @@ static const DECLARE_TLV_DB_RANGE(max98373_dht_spkgain_min_tlv,
0, 9, TLV_DB_SCALE_ITEM(800, 100, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_dht_rotation_point_tlv,
- 0, 1, TLV_DB_SCALE_ITEM(-50, -50, 0),
- 2, 7, TLV_DB_SCALE_ITEM(-200, -100, 0),
- 8, 9, TLV_DB_SCALE_ITEM(-1000, -200, 0),
- 10, 11, TLV_DB_SCALE_ITEM(-1500, -300, 0),
- 12, 13, TLV_DB_SCALE_ITEM(-2000, -200, 0),
- 14, 15, TLV_DB_SCALE_ITEM(-2500, -500, 0),
+ 0, 1, TLV_DB_SCALE_ITEM(-3000, 500, 0),
+ 2, 4, TLV_DB_SCALE_ITEM(-2200, 200, 0),
+ 5, 6, TLV_DB_SCALE_ITEM(-1500, 300, 0),
+ 7, 9, TLV_DB_SCALE_ITEM(-1000, 200, 0),
+ 10, 13, TLV_DB_SCALE_ITEM(-500, 100, 0),
+ 14, 15, TLV_DB_SCALE_ITEM(-100, 50, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_limiter_thresh_tlv,
- 0, 15, TLV_DB_SCALE_ITEM(0, -100, 0),
+ 0, 15, TLV_DB_SCALE_ITEM(-1500, 100, 0),
);
static const DECLARE_TLV_DB_RANGE(max98373_bde_gain_tlv,
- 0, 60, TLV_DB_SCALE_ITEM(0, -25, 0),
+ 0, 60, TLV_DB_SCALE_ITEM(-1500, 25, 0),
);
static bool max98373_readable_register(struct device *dev, unsigned int reg)
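
The TLV rewrites above all apply the same rule: DECLARE_TLV_DB_SCALE(name, min, step, mute) maps control value v to min + v * step (in 0.01 dB), and the invert flag now passed to the SOC_SINGLE_TLV() controls makes the reported value max - raw_register, so the tables can use the conventional ascending form. A sketch of the resulting mapping for the digital volume, assuming the usual ASoC invert semantics:

    /* DECLARE_TLV_DB_SCALE(max98373_digital_tlv, -6350, 50, 1), max = 0x7F */
    static int digital_vol_centidb(unsigned int ctl_val)
    {
        return -6350 + (int)ctl_val * 50; /* ctl 0 -> -63.5 dB, 0x7F -> 0 dB */
    }
    /* with invert set, the register value written is 0x7F - ctl_val */
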
@@ -604,7 +605,7 @@ SOC_SINGLE("Dither Switch", MAX98373_R203F_AMP_DSP_CFG,
SOC_SINGLE("DC Blocker Switch", MAX98373_R203F_AMP_DSP_CFG,
MAX98373_AMP_DSP_CFG_DCBLK_SHIFT, 1, 0),
SOC_SINGLE_TLV("Digital Volume", MAX98373_R203D_AMP_DIG_VOL_CTRL,
- 0, 0x7F, 0, max98373_digital_tlv),
+ 0, 0x7F, 1, max98373_digital_tlv),
SOC_SINGLE_TLV("Speaker Volume", MAX98373_R203E_AMP_PATH_GAIN,
MAX98373_SPK_DIGI_GAIN_SHIFT, 10, 0, max98373_spk_tlv),
SOC_SINGLE_TLV("FS Max Volume", MAX98373_R203E_AMP_PATH_GAIN,
@@ -616,7 +617,7 @@ SOC_SINGLE("DHT Switch", MAX98373_R20D4_DHT_EN,
SOC_SINGLE_TLV("DHT Min Volume", MAX98373_R20D1_DHT_CFG,
MAX98373_DHT_SPK_GAIN_MIN_SHIFT, 9, 0, max98373_dht_spkgain_min_tlv),
SOC_SINGLE_TLV("DHT Rot Pnt Volume", MAX98373_R20D1_DHT_CFG,
- MAX98373_DHT_ROT_PNT_SHIFT, 15, 0, max98373_dht_rotation_point_tlv),
+ MAX98373_DHT_ROT_PNT_SHIFT, 15, 1, max98373_dht_rotation_point_tlv),
SOC_SINGLE_TLV("DHT Attack Step Volume", MAX98373_R20D2_DHT_ATTACK_CFG,
MAX98373_DHT_ATTACK_STEP_SHIFT, 4, 0, max98373_dht_step_size_tlv),
SOC_SINGLE_TLV("DHT Release Step Volume", MAX98373_R20D3_DHT_RELEASE_CFG,
@@ -653,29 +654,29 @@ SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
SOC_SINGLE("BDE Attack Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 4, 0xF, 0),
SOC_SINGLE("BDE Release Rate", MAX98373_R2091_BDE_GAIN_ATK_REL_RATE, 0, 0xF, 0),
SOC_SINGLE_TLV("BDE LVL1 Clip Thresh Volume", MAX98373_R20A9_BDE_L1_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL2 Clip Thresh Volume", MAX98373_R20AC_BDE_L2_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL3 Clip Thresh Volume", MAX98373_R20AF_BDE_L3_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL4 Clip Thresh Volume", MAX98373_R20B2_BDE_L4_CFG_2,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL1 Clip Reduction Volume", MAX98373_R20AA_BDE_L1_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL2 Clip Reduction Volume", MAX98373_R20AD_BDE_L2_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL3 Clip Reduction Volume", MAX98373_R20B0_BDE_L3_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL4 Clip Reduction Volume", MAX98373_R20B3_BDE_L4_CFG_3,
- 0, 0x3C, 0, max98373_bde_gain_tlv),
+ 0, 0x3C, 1, max98373_bde_gain_tlv),
SOC_SINGLE_TLV("BDE LVL1 Limiter Thresh Volume", MAX98373_R20A8_BDE_L1_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL2 Limiter Thresh Volume", MAX98373_R20AB_BDE_L2_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL3 Limiter Thresh Volume", MAX98373_R20AE_BDE_L3_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
SOC_SINGLE_TLV("BDE LVL4 Limiter Thresh Volume", MAX98373_R20B1_BDE_L4_CFG_1,
- 0, 0xF, 0, max98373_limiter_thresh_tlv),
+ 0, 0xF, 1, max98373_limiter_thresh_tlv),
/* Limiter */
SOC_SINGLE("Limiter Switch", MAX98373_R20E2_LIMITER_EN,
MAX98373_LIMITER_EN_SHIFT, 1, 0),
diff --git a/sound/soc/codecs/nau8822.c b/sound/soc/codecs/nau8822.c
new file mode 100644
index 000000000000..622ce947f134
--- /dev/null
+++ b/sound/soc/codecs/nau8822.c
@@ -0,0 +1,1136 @@
+/*
+ * nau8822.c -- NAU8822 ALSA Soc Audio Codec driver
+ *
+ * Copyright 2017 Nuvoton Technology Corp.
+ *
+ * Author: David Lin <ctlin0@nuvoton.com>
+ * Co-author: John Hsu <kchsu0@nuvoton.com>
+ * Co-author: Seven Li <wtli@nuvoton.com>
+ *
+ * Based on WM8974.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+#include <asm/div64.h>
+#include "nau8822.h"
+
+#define NAU_PLL_FREQ_MAX 100000000
+#define NAU_PLL_FREQ_MIN 90000000
+#define NAU_PLL_REF_MAX 33000000
+#define NAU_PLL_REF_MIN 8000000
+#define NAU_PLL_OPTOP_MIN 6
+
+static const int nau8822_mclk_scaler[] = { 10, 15, 20, 30, 40, 60, 80, 120 };
+
+static const struct reg_default nau8822_reg_defaults[] = {
+ { NAU8822_REG_POWER_MANAGEMENT_1, 0x0000 },
+ { NAU8822_REG_POWER_MANAGEMENT_2, 0x0000 },
+ { NAU8822_REG_POWER_MANAGEMENT_3, 0x0000 },
+ { NAU8822_REG_AUDIO_INTERFACE, 0x0050 },
+ { NAU8822_REG_COMPANDING_CONTROL, 0x0000 },
+ { NAU8822_REG_CLOCKING, 0x0140 },
+ { NAU8822_REG_ADDITIONAL_CONTROL, 0x0000 },
+ { NAU8822_REG_GPIO_CONTROL, 0x0000 },
+ { NAU8822_REG_JACK_DETECT_CONTROL_1, 0x0000 },
+ { NAU8822_REG_DAC_CONTROL, 0x0000 },
+ { NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_JACK_DETECT_CONTROL_2, 0x0000 },
+ { NAU8822_REG_ADC_CONTROL, 0x0100 },
+ { NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME, 0x00ff },
+ { NAU8822_REG_EQ1, 0x012c },
+ { NAU8822_REG_EQ2, 0x002c },
+ { NAU8822_REG_EQ3, 0x002c },
+ { NAU8822_REG_EQ4, 0x002c },
+ { NAU8822_REG_EQ5, 0x002c },
+ { NAU8822_REG_DAC_LIMITER_1, 0x0032 },
+ { NAU8822_REG_DAC_LIMITER_2, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_1, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_2, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_3, 0x0000 },
+ { NAU8822_REG_NOTCH_FILTER_4, 0x0000 },
+ { NAU8822_REG_ALC_CONTROL_1, 0x0038 },
+ { NAU8822_REG_ALC_CONTROL_2, 0x000b },
+ { NAU8822_REG_ALC_CONTROL_3, 0x0032 },
+ { NAU8822_REG_NOISE_GATE, 0x0010 },
+ { NAU8822_REG_PLL_N, 0x0008 },
+ { NAU8822_REG_PLL_K1, 0x000c },
+ { NAU8822_REG_PLL_K2, 0x0093 },
+ { NAU8822_REG_PLL_K3, 0x00e9 },
+ { NAU8822_REG_3D_CONTROL, 0x0000 },
+ { NAU8822_REG_RIGHT_SPEAKER_CONTROL, 0x0000 },
+ { NAU8822_REG_INPUT_CONTROL, 0x0033 },
+ { NAU8822_REG_LEFT_INP_PGA_CONTROL, 0x0010 },
+ { NAU8822_REG_RIGHT_INP_PGA_CONTROL, 0x0010 },
+ { NAU8822_REG_LEFT_ADC_BOOST_CONTROL, 0x0100 },
+ { NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 0x0100 },
+ { NAU8822_REG_OUTPUT_CONTROL, 0x0002 },
+ { NAU8822_REG_LEFT_MIXER_CONTROL, 0x0001 },
+ { NAU8822_REG_RIGHT_MIXER_CONTROL, 0x0001 },
+ { NAU8822_REG_LHP_VOLUME, 0x0039 },
+ { NAU8822_REG_RHP_VOLUME, 0x0039 },
+ { NAU8822_REG_LSPKOUT_VOLUME, 0x0039 },
+ { NAU8822_REG_RSPKOUT_VOLUME, 0x0039 },
+ { NAU8822_REG_AUX2_MIXER, 0x0001 },
+ { NAU8822_REG_AUX1_MIXER, 0x0001 },
+ { NAU8822_REG_POWER_MANAGEMENT_4, 0x0000 },
+ { NAU8822_REG_LEFT_TIME_SLOT, 0x0000 },
+ { NAU8822_REG_MISC, 0x0020 },
+ { NAU8822_REG_RIGHT_TIME_SLOT, 0x0000 },
+ { NAU8822_REG_DEVICE_REVISION, 0x007f },
+ { NAU8822_REG_DEVICE_ID, 0x001a },
+ { NAU8822_REG_DAC_DITHER, 0x0114 },
+ { NAU8822_REG_ALC_ENHANCE_1, 0x0000 },
+ { NAU8822_REG_ALC_ENHANCE_2, 0x0000 },
+ { NAU8822_REG_192KHZ_SAMPLING, 0x0008 },
+ { NAU8822_REG_MISC_CONTROL, 0x0000 },
+ { NAU8822_REG_INPUT_TIEOFF, 0x0000 },
+ { NAU8822_REG_POWER_REDUCTION, 0x0000 },
+ { NAU8822_REG_AGC_PEAK2PEAK, 0x0000 },
+ { NAU8822_REG_AGC_PEAK_DETECT, 0x0000 },
+ { NAU8822_REG_AUTOMUTE_CONTROL, 0x0000 },
+ { NAU8822_REG_OUTPUT_TIEOFF, 0x0000 },
+};
+
+static bool nau8822_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET ... NAU8822_REG_JACK_DETECT_CONTROL_1:
+ case NAU8822_REG_DAC_CONTROL ... NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_EQ1 ... NAU8822_REG_EQ5:
+ case NAU8822_REG_DAC_LIMITER_1 ... NAU8822_REG_DAC_LIMITER_2:
+ case NAU8822_REG_NOTCH_FILTER_1 ... NAU8822_REG_NOTCH_FILTER_4:
+ case NAU8822_REG_ALC_CONTROL_1 ... NAU8822_REG_PLL_K3:
+ case NAU8822_REG_3D_CONTROL:
+ case NAU8822_REG_RIGHT_SPEAKER_CONTROL:
+ case NAU8822_REG_INPUT_CONTROL ... NAU8822_REG_LEFT_ADC_BOOST_CONTROL:
+ case NAU8822_REG_RIGHT_ADC_BOOST_CONTROL ... NAU8822_REG_AUX1_MIXER:
+ case NAU8822_REG_POWER_MANAGEMENT_4 ... NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_DAC_DITHER:
+ case NAU8822_REG_ALC_ENHANCE_1 ... NAU8822_REG_MISC_CONTROL:
+ case NAU8822_REG_INPUT_TIEOFF ... NAU8822_REG_OUTPUT_TIEOFF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8822_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET ... NAU8822_REG_JACK_DETECT_CONTROL_1:
+ case NAU8822_REG_DAC_CONTROL ... NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME:
+ case NAU8822_REG_EQ1 ... NAU8822_REG_EQ5:
+ case NAU8822_REG_DAC_LIMITER_1 ... NAU8822_REG_DAC_LIMITER_2:
+ case NAU8822_REG_NOTCH_FILTER_1 ... NAU8822_REG_NOTCH_FILTER_4:
+ case NAU8822_REG_ALC_CONTROL_1 ... NAU8822_REG_PLL_K3:
+ case NAU8822_REG_3D_CONTROL:
+ case NAU8822_REG_RIGHT_SPEAKER_CONTROL:
+ case NAU8822_REG_INPUT_CONTROL ... NAU8822_REG_LEFT_ADC_BOOST_CONTROL:
+ case NAU8822_REG_RIGHT_ADC_BOOST_CONTROL ... NAU8822_REG_AUX1_MIXER:
+ case NAU8822_REG_POWER_MANAGEMENT_4 ... NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_DAC_DITHER:
+ case NAU8822_REG_ALC_ENHANCE_1 ... NAU8822_REG_MISC_CONTROL:
+ case NAU8822_REG_INPUT_TIEOFF ... NAU8822_REG_OUTPUT_TIEOFF:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool nau8822_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case NAU8822_REG_RESET:
+ case NAU8822_REG_DEVICE_REVISION:
+ case NAU8822_REG_DEVICE_ID:
+ case NAU8822_REG_AGC_PEAK2PEAK:
+ case NAU8822_REG_AGC_PEAK_DETECT:
+ case NAU8822_REG_AUTOMUTE_CONTROL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* The EQ parameters get function reads back the 5-band equalizer control.
+ * A regmap raw read can't work here because regmap doesn't provide a
+ * value format for a 9-bit value width. Therefore, the driver reads the
+ * data from the cache and formats the values according to the endianness
+ * of the bytes-type control element.
+ */
+static int nau8822_eq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ int i, reg;
+ u16 reg_val, *val;
+
+ val = (u16 *)ucontrol->value.bytes.data;
+ reg = NAU8822_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ reg_val = snd_soc_component_read32(component, reg + i);
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+ reg_val = cpu_to_be16(reg_val);
+ memcpy(val + i, &reg_val, sizeof(reg_val));
+ }
+
+ return 0;
+}
+
+/* The EQ parameters put function configures the 5-band equalizer control.
+ * The configuration includes the center frequency, equalizer gain,
+ * cut-off frequency, bandwidth control, and equalizer path.
+ * A regmap raw write can't work here because regmap doesn't provide a
+ * register/value format for 7-bit register addresses with 9-bit values.
+ * Therefore, the driver decodes the values according to the endianness of
+ * the bytes-type control element and writes them to the codec.
+ */
+static int nau8822_eq_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component =
+ snd_soc_kcontrol_component(kcontrol);
+ struct soc_bytes_ext *params = (void *)kcontrol->private_value;
+ void *data;
+ u16 *val, value;
+ int i, reg, ret;
+
+ data = kmemdup(ucontrol->value.bytes.data,
+ params->max, GFP_KERNEL | GFP_DMA);
+ if (!data)
+ return -ENOMEM;
+
+ val = (u16 *)data;
+ reg = NAU8822_REG_EQ1;
+ for (i = 0; i < params->max / sizeof(u16); i++) {
+ /* conversion of 16-bit integers between native CPU format
+ * and big endian format
+ */
+ value = be16_to_cpu(*(val + i));
+ ret = snd_soc_component_write(component, reg + i, value);
+ if (ret) {
+ dev_err(component->dev,
+ "EQ configuration fail, register: %x ret: %d\n",
+ reg + i, ret);
+ kfree(data);
+ return ret;
+ }
+ }
+ kfree(data);
+
+ return 0;
+}
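
A minimal round-trip of one 9-bit EQ value through the bytes control, matching the conversions in the get/put handlers above (the value is the NAU8822_REG_EQ1 reset default):

    u16 native = 0x012c;               /* from the regmap cache */
    __be16 wire = cpu_to_be16(native); /* bytes 0x01 0x2c in the blob */
    u16 restored = be16_to_cpu(wire);  /* 0x012c again on the put side */
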
+
+static const char * const nau8822_companding[] = {
+ "Off", "NC", "u-law", "A-law"};
+
+static const struct soc_enum nau8822_companding_adc_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_COMPANDING_CONTROL, NAU8822_ADCCM_SFT,
+ ARRAY_SIZE(nau8822_companding), nau8822_companding);
+
+static const struct soc_enum nau8822_companding_dac_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_COMPANDING_CONTROL, NAU8822_DACCM_SFT,
+ ARRAY_SIZE(nau8822_companding), nau8822_companding);
+
+static const char * const nau8822_eqmode[] = {"Capture", "Playback"};
+
+static const struct soc_enum nau8822_eqmode_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_EQ1, NAU8822_EQM_SFT,
+ ARRAY_SIZE(nau8822_eqmode), nau8822_eqmode);
+
+static const char * const nau8822_alc1[] = {"Off", "Right", "Left", "Both"};
+static const char * const nau8822_alc3[] = {"Normal", "Limiter"};
+
+static const struct soc_enum nau8822_alc_enable_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_ALC_CONTROL_1, NAU8822_ALCEN_SFT,
+ ARRAY_SIZE(nau8822_alc1), nau8822_alc1);
+
+static const struct soc_enum nau8822_alc_mode_enum =
+ SOC_ENUM_SINGLE(NAU8822_REG_ALC_CONTROL_3, NAU8822_ALCM_SFT,
+ ARRAY_SIZE(nau8822_alc3), nau8822_alc3);
+
+static const DECLARE_TLV_DB_SCALE(digital_tlv, -12750, 50, 1);
+static const DECLARE_TLV_DB_SCALE(inpga_tlv, -1200, 75, 0);
+static const DECLARE_TLV_DB_SCALE(spk_tlv, -5700, 100, 0);
+static const DECLARE_TLV_DB_SCALE(pga_boost_tlv, 0, 2000, 0);
+static const DECLARE_TLV_DB_SCALE(boost_tlv, -1500, 300, 1);
+static const DECLARE_TLV_DB_SCALE(limiter_tlv, 0, 100, 0);
+
+static const struct snd_kcontrol_new nau8822_snd_controls[] = {
+ SOC_ENUM("ADC Companding", nau8822_companding_adc_enum),
+ SOC_ENUM("DAC Companding", nau8822_companding_dac_enum),
+
+ SOC_ENUM("EQ Function", nau8822_eqmode_enum),
+ SND_SOC_BYTES_EXT("EQ Parameters", 10,
+ nau8822_eq_get, nau8822_eq_put),
+
+ SOC_DOUBLE("DAC Inversion Switch",
+ NAU8822_REG_DAC_CONTROL, 0, 1, 1, 0),
+ SOC_DOUBLE_R_TLV("PCM Volume",
+ NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME, 0, 255, 0, digital_tlv),
+
+ SOC_SINGLE("High Pass Filter Switch",
+ NAU8822_REG_ADC_CONTROL, 8, 1, 0),
+ SOC_SINGLE("High Pass Cut Off",
+ NAU8822_REG_ADC_CONTROL, 4, 7, 0),
+
+ SOC_DOUBLE("ADC Inversion Switch",
+ NAU8822_REG_ADC_CONTROL, 0, 1, 1, 0),
+ SOC_DOUBLE_R_TLV("ADC Volume",
+ NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME, 0, 255, 0, digital_tlv),
+
+ SOC_SINGLE("DAC Limiter Switch",
+ NAU8822_REG_DAC_LIMITER_1, 8, 1, 0),
+ SOC_SINGLE("DAC Limiter Decay",
+ NAU8822_REG_DAC_LIMITER_1, 4, 15, 0),
+ SOC_SINGLE("DAC Limiter Attack",
+ NAU8822_REG_DAC_LIMITER_1, 0, 15, 0),
+ SOC_SINGLE("DAC Limiter Threshold",
+ NAU8822_REG_DAC_LIMITER_2, 4, 7, 0),
+ SOC_SINGLE_TLV("DAC Limiter Volume",
+ NAU8822_REG_DAC_LIMITER_2, 0, 12, 0, limiter_tlv),
+
+ SOC_ENUM("ALC Mode", nau8822_alc_mode_enum),
+ SOC_ENUM("ALC Enable Switch", nau8822_alc_enable_enum),
+ SOC_SINGLE("ALC Min Gain",
+ NAU8822_REG_ALC_CONTROL_1, 0, 7, 0),
+ SOC_SINGLE("ALC Max Gain",
+ NAU8822_REG_ALC_CONTROL_1, 3, 7, 0),
+ SOC_SINGLE("ALC Hold",
+ NAU8822_REG_ALC_CONTROL_2, 4, 10, 0),
+ SOC_SINGLE("ALC Target",
+ NAU8822_REG_ALC_CONTROL_2, 0, 15, 0),
+ SOC_SINGLE("ALC Decay",
+ NAU8822_REG_ALC_CONTROL_3, 4, 10, 0),
+ SOC_SINGLE("ALC Attack",
+ NAU8822_REG_ALC_CONTROL_3, 0, 10, 0),
+ SOC_SINGLE("ALC Noise Gate Switch",
+ NAU8822_REG_NOISE_GATE, 3, 1, 0),
+ SOC_SINGLE("ALC Noise Gate Threshold",
+ NAU8822_REG_NOISE_GATE, 0, 7, 0),
+
+ SOC_DOUBLE_R("PGA ZC Switch",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL,
+ 7, 1, 0),
+ SOC_DOUBLE_R_TLV("PGA Volume",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL, 0, 63, 0, inpga_tlv),
+
+ SOC_DOUBLE_R("Headphone ZC Switch",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 7, 1, 0),
+ SOC_DOUBLE_R("Headphone Playback Switch",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 6, 1, 1),
+ SOC_DOUBLE_R_TLV("Headphone Volume",
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME, 0, 63, 0, spk_tlv),
+
+ SOC_DOUBLE_R("Speaker ZC Switch",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 7, 1, 0),
+ SOC_DOUBLE_R("Speaker Playback Switch",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 6, 1, 1),
+ SOC_DOUBLE_R_TLV("Speaker Volume",
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME, 0, 63, 0, spk_tlv),
+
+ SOC_DOUBLE_R("AUXOUT Playback Switch",
+ NAU8822_REG_AUX2_MIXER,
+ NAU8822_REG_AUX1_MIXER, 6, 1, 1),
+
+ SOC_DOUBLE_R_TLV("PGA Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 8, 1, 0, pga_boost_tlv),
+ SOC_DOUBLE_R_TLV("L2/R2 Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 4, 7, 0, boost_tlv),
+ SOC_DOUBLE_R_TLV("Aux Boost Volume",
+ NAU8822_REG_LEFT_ADC_BOOST_CONTROL,
+ NAU8822_REG_RIGHT_ADC_BOOST_CONTROL, 0, 7, 0, boost_tlv),
+
+ SOC_SINGLE("DAC 128x Oversampling Switch",
+ NAU8822_REG_DAC_CONTROL, 5, 1, 0),
+ SOC_SINGLE("ADC 128x Oversampling Switch",
+ NAU8822_REG_ADC_CONTROL, 5, 1, 0),
+};
+
+/* LMAIN and RMAIN Mixer */
+static const struct snd_kcontrol_new nau8822_left_out_mixer[] = {
+ SOC_DAPM_SINGLE("LINMIX Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("LAUX Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch",
+ NAU8822_REG_LEFT_MIXER_CONTROL, 0, 1, 0),
+ SOC_DAPM_SINGLE("RDAC Switch",
+ NAU8822_REG_OUTPUT_CONTROL, 5, 1, 0),
+};
+
+static const struct snd_kcontrol_new nau8822_right_out_mixer[] = {
+ SOC_DAPM_SINGLE("RINMIX Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("RAUX Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("RDAC Switch",
+ NAU8822_REG_RIGHT_MIXER_CONTROL, 0, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch",
+ NAU8822_REG_OUTPUT_CONTROL, 6, 1, 0),
+};
+
+/* AUX1 and AUX2 Mixer */
+static const struct snd_kcontrol_new nau8822_auxout1_mixer[] = {
+ SOC_DAPM_SINGLE("RDAC Switch", NAU8822_REG_AUX1_MIXER, 0, 1, 0),
+ SOC_DAPM_SINGLE("RMIX Switch", NAU8822_REG_AUX1_MIXER, 1, 1, 0),
+ SOC_DAPM_SINGLE("RINMIX Switch", NAU8822_REG_AUX1_MIXER, 2, 1, 0),
+ SOC_DAPM_SINGLE("LDAC Switch", NAU8822_REG_AUX1_MIXER, 3, 1, 0),
+ SOC_DAPM_SINGLE("LMIX Switch", NAU8822_REG_AUX1_MIXER, 4, 1, 0),
+};
+
+static const struct snd_kcontrol_new nau8822_auxout2_mixer[] = {
+ SOC_DAPM_SINGLE("LDAC Switch", NAU8822_REG_AUX2_MIXER, 0, 1, 0),
+ SOC_DAPM_SINGLE("LMIX Switch", NAU8822_REG_AUX2_MIXER, 1, 1, 0),
+ SOC_DAPM_SINGLE("LINMIX Switch", NAU8822_REG_AUX2_MIXER, 2, 1, 0),
+ SOC_DAPM_SINGLE("AUX1MIX Output Switch",
+ NAU8822_REG_AUX2_MIXER, 3, 1, 0),
+};
+
+/* Input PGA */
+static const struct snd_kcontrol_new nau8822_left_input_mixer[] = {
+ SOC_DAPM_SINGLE("L2 Switch", NAU8822_REG_INPUT_CONTROL, 2, 1, 0),
+ SOC_DAPM_SINGLE("MicN Switch", NAU8822_REG_INPUT_CONTROL, 1, 1, 0),
+ SOC_DAPM_SINGLE("MicP Switch", NAU8822_REG_INPUT_CONTROL, 0, 1, 0),
+};
+static const struct snd_kcontrol_new nau8822_right_input_mixer[] = {
+ SOC_DAPM_SINGLE("R2 Switch", NAU8822_REG_INPUT_CONTROL, 6, 1, 0),
+ SOC_DAPM_SINGLE("MicN Switch", NAU8822_REG_INPUT_CONTROL, 5, 1, 0),
+ SOC_DAPM_SINGLE("MicP Switch", NAU8822_REG_INPUT_CONTROL, 4, 1, 0),
+};
+
+/* Loopback Switch */
+static const struct snd_kcontrol_new nau8822_loopback =
+ SOC_DAPM_SINGLE("Switch", NAU8822_REG_COMPANDING_CONTROL,
+ NAU8822_ADDAP_SFT, 1, 0);
+
+static int check_mclk_select_pll(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(source->dapm);
+ unsigned int value;
+
+ value = snd_soc_component_read32(component, NAU8822_REG_CLOCKING);
+
+ return (value & NAU8822_CLKM_MASK);
+}
+
+static const struct snd_soc_dapm_widget nau8822_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("Left DAC", "Left HiFi Playback",
+ NAU8822_REG_POWER_MANAGEMENT_3, 0, 0),
+ SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback",
+ NAU8822_REG_POWER_MANAGEMENT_3, 1, 0),
+ SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture",
+ NAU8822_REG_POWER_MANAGEMENT_2, 0, 0),
+ SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture",
+ NAU8822_REG_POWER_MANAGEMENT_2, 1, 0),
+
+ SOC_MIXER_ARRAY("Left Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_3, 2, 0, nau8822_left_out_mixer),
+ SOC_MIXER_ARRAY("Right Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_3, 3, 0, nau8822_right_out_mixer),
+ SOC_MIXER_ARRAY("AUX1 Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_1, 7, 0, nau8822_auxout1_mixer),
+ SOC_MIXER_ARRAY("AUX2 Output Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_1, 6, 0, nau8822_auxout2_mixer),
+
+ SOC_MIXER_ARRAY("Left Input Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2,
+ 2, 0, nau8822_left_input_mixer),
+ SOC_MIXER_ARRAY("Right Input Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2,
+ 3, 0, nau8822_right_input_mixer),
+
+ SND_SOC_DAPM_PGA("Left Boost Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2, 4, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Boost Mixer",
+ NAU8822_REG_POWER_MANAGEMENT_2, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Capture PGA",
+ NAU8822_REG_LEFT_INP_PGA_CONTROL, 6, 1, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Capture PGA",
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL, 6, 1, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Headphone Out",
+ NAU8822_REG_POWER_MANAGEMENT_2, 7, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Headphone Out",
+ NAU8822_REG_POWER_MANAGEMENT_2, 8, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("Left Speaker Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 6, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("Right Speaker Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("AUX1 Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 8, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("AUX2 Out",
+ NAU8822_REG_POWER_MANAGEMENT_3, 7, 0, NULL, 0),
+
+ SND_SOC_DAPM_SUPPLY("Mic Bias",
+ NAU8822_REG_POWER_MANAGEMENT_1, 4, 0, NULL, 0),
+ SND_SOC_DAPM_SUPPLY("PLL",
+ NAU8822_REG_POWER_MANAGEMENT_1, 5, 0, NULL, 0),
+
+ SND_SOC_DAPM_SWITCH("Digital Loopback", SND_SOC_NOPM, 0, 0,
+ &nau8822_loopback),
+
+ SND_SOC_DAPM_INPUT("LMICN"),
+ SND_SOC_DAPM_INPUT("LMICP"),
+ SND_SOC_DAPM_INPUT("RMICN"),
+ SND_SOC_DAPM_INPUT("RMICP"),
+ SND_SOC_DAPM_INPUT("LAUX"),
+ SND_SOC_DAPM_INPUT("RAUX"),
+ SND_SOC_DAPM_INPUT("L2"),
+ SND_SOC_DAPM_INPUT("R2"),
+ SND_SOC_DAPM_OUTPUT("LHP"),
+ SND_SOC_DAPM_OUTPUT("RHP"),
+ SND_SOC_DAPM_OUTPUT("LSPK"),
+ SND_SOC_DAPM_OUTPUT("RSPK"),
+ SND_SOC_DAPM_OUTPUT("AUXOUT1"),
+ SND_SOC_DAPM_OUTPUT("AUXOUT2"),
+};
+
+static const struct snd_soc_dapm_route nau8822_dapm_routes[] = {
+ {"Right DAC", NULL, "PLL", check_mclk_select_pll},
+ {"Left DAC", NULL, "PLL", check_mclk_select_pll},
+
+ /* LMAIN and RMAIN Mixer */
+ {"Right Output Mixer", "LDAC Switch", "Left DAC"},
+ {"Right Output Mixer", "RDAC Switch", "Right DAC"},
+ {"Right Output Mixer", "RAUX Switch", "RAUX"},
+ {"Right Output Mixer", "RINMIX Switch", "Right Boost Mixer"},
+
+ {"Left Output Mixer", "LDAC Switch", "Left DAC"},
+ {"Left Output Mixer", "RDAC Switch", "Right DAC"},
+ {"Left Output Mixer", "LAUX Switch", "LAUX"},
+ {"Left Output Mixer", "LINMIX Switch", "Left Boost Mixer"},
+
+ /* AUX1 and AUX2 Mixer */
+ {"AUX1 Output Mixer", "RDAC Switch", "Right DAC"},
+ {"AUX1 Output Mixer", "RMIX Switch", "Right Output Mixer"},
+ {"AUX1 Output Mixer", "RINMIX Switch", "Right Boost Mixer"},
+ {"AUX1 Output Mixer", "LDAC Switch", "Left DAC"},
+ {"AUX1 Output Mixer", "LMIX Switch", "Left Output Mixer"},
+
+ {"AUX2 Output Mixer", "LDAC Switch", "Left DAC"},
+ {"AUX2 Output Mixer", "LMIX Switch", "Left Output Mixer"},
+ {"AUX2 Output Mixer", "LINMIX Switch", "Left Boost Mixer"},
+ {"AUX2 Output Mixer", "AUX1MIX Output Switch", "AUX1 Output Mixer"},
+
+ /* Outputs */
+ {"Right Headphone Out", NULL, "Right Output Mixer"},
+ {"RHP", NULL, "Right Headphone Out"},
+
+ {"Left Headphone Out", NULL, "Left Output Mixer"},
+ {"LHP", NULL, "Left Headphone Out"},
+
+ {"Right Speaker Out", NULL, "Right Output Mixer"},
+ {"RSPK", NULL, "Right Speaker Out"},
+
+ {"Left Speaker Out", NULL, "Left Output Mixer"},
+ {"LSPK", NULL, "Left Speaker Out"},
+
+ {"AUX1 Out", NULL, "AUX1 Output Mixer"},
+ {"AUX2 Out", NULL, "AUX2 Output Mixer"},
+ {"AUXOUT1", NULL, "AUX1 Out"},
+ {"AUXOUT2", NULL, "AUX2 Out"},
+
+ /* Boost Mixer */
+ {"Right ADC", NULL, "PLL", check_mclk_select_pll},
+ {"Left ADC", NULL, "PLL", check_mclk_select_pll},
+
+ {"Right ADC", NULL, "Right Boost Mixer"},
+
+ {"Right Boost Mixer", NULL, "RAUX"},
+ {"Right Boost Mixer", NULL, "Right Capture PGA"},
+ {"Right Boost Mixer", NULL, "R2"},
+
+ {"Left ADC", NULL, "Left Boost Mixer"},
+
+ {"Left Boost Mixer", NULL, "LAUX"},
+ {"Left Boost Mixer", NULL, "Left Capture PGA"},
+ {"Left Boost Mixer", NULL, "L2"},
+
+ /* Input PGA */
+ {"Right Capture PGA", NULL, "Right Input Mixer"},
+ {"Left Capture PGA", NULL, "Left Input Mixer"},
+
+ /* Enable Microphone Power */
+ {"Right Capture PGA", NULL, "Mic Bias"},
+ {"Left Capture PGA", NULL, "Mic Bias"},
+
+ {"Right Input Mixer", "R2 Switch", "R2"},
+ {"Right Input Mixer", "MicN Switch", "RMICN"},
+ {"Right Input Mixer", "MicP Switch", "RMICP"},
+
+ {"Left Input Mixer", "L2 Switch", "L2"},
+ {"Left Input Mixer", "MicN Switch", "LMICN"},
+ {"Left Input Mixer", "MicP Switch", "LMICP"},
+
+ /* Digital Loopback */
+ {"Digital Loopback", "Switch", "Left ADC"},
+ {"Digital Loopback", "Switch", "Right ADC"},
+ {"Left DAC", NULL, "Digital Loopback"},
+ {"Right DAC", NULL, "Digital Loopback"},
+};
+
+static int nau8822_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ nau8822->div_id = clk_id;
+ nau8822->sysclk = freq;
+ dev_dbg(component->dev, "master sysclk %dHz, source %s\n", freq,
+ clk_id == NAU8822_CLK_PLL ? "PLL" : "MCLK");
+
+ return 0;
+}
+
+static int nau8822_calc_pll(unsigned int pll_in, unsigned int fs,
+ struct nau8822_pll *pll_param)
+{
+ u64 f2, f2_max, pll_ratio;
+ int i, scal_sel;
+
+ if (pll_in > NAU_PLL_REF_MAX || pll_in < NAU_PLL_REF_MIN)
+ return -EINVAL;
+ f2_max = 0;
+ scal_sel = ARRAY_SIZE(nau8822_mclk_scaler);
+
+ for (i = 0; i < scal_sel; i++) {
+ f2 = 256 * fs * 4 * nau8822_mclk_scaler[i] / 10;
+ if (f2 > NAU_PLL_FREQ_MIN && f2 < NAU_PLL_FREQ_MAX &&
+ f2_max < f2) {
+ f2_max = f2;
+ scal_sel = i;
+ }
+ }
+
+ if (ARRAY_SIZE(nau8822_mclk_scaler) == scal_sel)
+ return -EINVAL;
+ pll_param->mclk_scaler = scal_sel;
+ f2 = f2_max;
+
+ /* Calculate the PLL 4-bit integer input and the PLL 24-bit fractional
+ * input; the ratio is computed with 28 fractional bits and then
+ * truncated to 24.
+ */
+ pll_ratio = div_u64(f2 << 28, pll_in);
+ pll_param->pre_factor = 0;
+ if (((pll_ratio >> 28) & 0xF) < NAU_PLL_OPTOP_MIN) {
+ pll_ratio <<= 1;
+ pll_param->pre_factor = 1;
+ }
+ pll_param->pll_int = (pll_ratio >> 28) & 0xF;
+ pll_param->pll_frac = ((pll_ratio & 0xFFFFFFF) >> 4);
+
+ return 0;
+}
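
A worked example of the calculation, assuming a 12 MHz reference and fs = 48 kHz - which happens to reproduce the chip's PLL register reset defaults:

    u64 f2 = 256ULL * 48000 * 4 * 20 / 10;   /* 98304000; scaler 2.0 wins */
    u64 ratio = div_u64(f2 << 28, 12000000); /* 8.192 * 2^28 */
    int pll_int = (ratio >> 28) & 0xF;       /* 8 -> PLL_N default 0x008 */
    int pll_frac = (ratio & 0xFFFFFFF) >> 4; /* 0x3126E9 -> K1/K2/K3 =
                                                0x00C/0x093/0x0E9 */
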
+
+static int nau8822_config_clkdiv(struct snd_soc_dai *dai, int div, int rate)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ struct nau8822_pll *pll = &nau8822->pll;
+ int i, sclk, imclk;
+
+ switch (nau8822->div_id) {
+ case NAU8822_CLK_MCLK:
+ /* Configure the master clock prescaler so that the divided
+ * system clock approximates the internal master clock (IMCLK)
+ * while staying greater than or equal to IMCLK.
+ */
+ div = 0;
+ imclk = rate * 256;
+ for (i = 1; i < ARRAY_SIZE(nau8822_mclk_scaler); i++) {
+ sclk = (nau8822->sysclk * 10) / nau8822_mclk_scaler[i];
+ if (sclk < imclk)
+ break;
+ div = i;
+ }
+ dev_dbg(component->dev, "master clock prescaler %x for fs %d\n",
+ div, rate);
+
+ /* master clock from MCLK and disable PLL */
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ (div << NAU8822_MCLKSEL_SFT));
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK,
+ NAU8822_CLKM_MCLK);
+ break;
+
+ case NAU8822_CLK_PLL:
+ /* master clock from PLL and enable PLL */
+ if (pll->mclk_scaler != div) {
+ dev_err(component->dev,
+ "master clock prescaler not meet PLL parameters\n");
+ return -EINVAL;
+ }
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ (div << NAU8822_MCLKSEL_SFT));
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK,
+ NAU8822_CLKM_PLL);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
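
A worked pass through the MCLK branch, assuming sysclk = 24.576 MHz and fs = 48 kHz:

    /* imclk = 48000 * 256 = 12.288 MHz
     * i=1: 24.576 MHz / 1.5 = 16.384 MHz >= imclk -> div = 1
     * i=2: 24.576 MHz / 2.0 = 12.288 MHz >= imclk -> div = 2
     * i=3: 24.576 MHz / 3.0 =  8.192 MHz <  imclk -> stop
     * so prescaler index 2 (divide-by-2) is programmed.
     */
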
+
+static int nau8822_set_pll(struct snd_soc_dai *dai, int pll_id, int source,
+ unsigned int freq_in, unsigned int freq_out)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ struct nau8822_pll *pll_param = &nau8822->pll;
+ int ret, fs;
+
+ fs = freq_out / 256;
+
+ ret = nau8822_calc_pll(freq_in, fs, pll_param);
+ if (ret < 0) {
+ dev_err(component->dev, "Unsupported input clock %d\n",
+ freq_in);
+ return ret;
+ }
+
+ dev_info(component->dev,
+ "pll_int=%x pll_frac=%x mclk_scaler=%x pre_factor=%x\n",
+ pll_param->pll_int, pll_param->pll_frac,
+ pll_param->mclk_scaler, pll_param->pre_factor);
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_PLL_N, NAU8822_PLLMCLK_DIV2 | NAU8822_PLLN_MASK,
+ (pll_param->pre_factor ? NAU8822_PLLMCLK_DIV2 : 0) |
+ pll_param->pll_int);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K1, (pll_param->pll_frac >> NAU8822_PLLK1_SFT) &
+ NAU8822_PLLK1_MASK);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K2, (pll_param->pll_frac >> NAU8822_PLLK2_SFT) &
+ NAU8822_PLLK2_MASK);
+ snd_soc_component_write(component,
+ NAU8822_REG_PLL_K3, pll_param->pll_frac & NAU8822_PLLK3_MASK);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_MCLKSEL_MASK,
+ pll_param->mclk_scaler << NAU8822_MCLKSEL_SFT);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKM_MASK, NAU8822_CLKM_PLL);
+
+ return 0;
+}
+
+static int nau8822_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *component = dai->component;
+ u16 ctrl1_val = 0, ctrl2_val = 0;
+
+ dev_dbg(component->dev, "%s\n", __func__);
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ ctrl2_val |= 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ ctrl2_val &= ~1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ ctrl1_val |= 0x10;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ ctrl1_val |= 0x8;
+ break;
+ case SND_SOC_DAIFMT_DSP_A:
+ ctrl1_val |= 0x18;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ ctrl1_val |= 0x180;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ ctrl1_val |= 0x100;
+ break;
+ case SND_SOC_DAIFMT_NB_IF:
+ ctrl1_val |= 0x80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_AUDIO_INTERFACE,
+ NAU8822_AIFMT_MASK | NAU8822_LRP_MASK | NAU8822_BCLKP_MASK,
+ ctrl1_val);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_CLOCKING, NAU8822_CLKIOEN_MASK, ctrl2_val);
+
+ return 0;
+}
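
A typical machine-driver call that lands in this handler (illustrative: codec as clock slave with standard I2S framing and normal clock polarity):

    int err = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
                                  SND_SOC_DAIFMT_NB_NF |
                                  SND_SOC_DAIFMT_CBS_CFS);
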
+
+static int nau8822_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+ int val_len = 0, val_rate = 0;
+
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ break;
+ case SNDRV_PCM_FORMAT_S20_3LE:
+ val_len |= NAU8822_WLEN_20;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ val_len |= NAU8822_WLEN_24;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ val_len |= NAU8822_WLEN_32;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (params_rate(params)) {
+ case 8000:
+ val_rate |= NAU8822_SMPLR_8K;
+ break;
+ case 11025:
+ val_rate |= NAU8822_SMPLR_12K;
+ break;
+ case 16000:
+ val_rate |= NAU8822_SMPLR_16K;
+ break;
+ case 22050:
+ val_rate |= NAU8822_SMPLR_24K;
+ break;
+ case 32000:
+ val_rate |= NAU8822_SMPLR_32K;
+ break;
+ case 44100:
+ case 48000:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_AUDIO_INTERFACE, NAU8822_WLEN_MASK, val_len);
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_ADDITIONAL_CONTROL, NAU8822_SMPLR_MASK, val_rate);
+
+ /* If the master clock comes from MCLK, pass the runtime sample rate
+ * so the driver can derive the master clock prescaler configuration.
+ */
+ if (nau8822->div_id == NAU8822_CLK_MCLK)
+ nau8822_config_clkdiv(dai, 0, params_rate(params));
+
+ return 0;
+}
+
+static int nau8822_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_component *component = dai->component;
+
+ dev_dbg(component->dev, "%s: %d\n", __func__, mute);
+
+ if (mute)
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_DAC_CONTROL, 0x40, 0x40);
+ else
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_DAC_CONTROL, 0x40, 0);
+
+ return 0;
+}
+
+static int nau8822_set_bias_level(struct snd_soc_component *component,
+ enum snd_soc_bias_level level)
+{
+ switch (level) {
+ case SND_SOC_BIAS_ON:
+ case SND_SOC_BIAS_PREPARE:
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_80K);
+ break;
+
+ case SND_SOC_BIAS_STANDBY:
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_IOBUF_EN | NAU8822_ABIAS_EN,
+ NAU8822_IOBUF_EN | NAU8822_ABIAS_EN);
+
+ if (snd_soc_component_get_bias_level(component) ==
+ SND_SOC_BIAS_OFF) {
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_3K);
+ mdelay(100);
+ }
+ snd_soc_component_update_bits(component,
+ NAU8822_REG_POWER_MANAGEMENT_1,
+ NAU8822_REFIMP_MASK, NAU8822_REFIMP_300K);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_1, 0);
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_2, 0);
+ snd_soc_component_write(component,
+ NAU8822_REG_POWER_MANAGEMENT_3, 0);
+ break;
+ }
+
+ dev_dbg(component->dev, "%s: %d\n", __func__, level);
+
+ return 0;
+}
+
+#define NAU8822_RATES (SNDRV_PCM_RATE_8000_48000)
+
+#define NAU8822_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static const struct snd_soc_dai_ops nau8822_dai_ops = {
+ .hw_params = nau8822_hw_params,
+ .digital_mute = nau8822_mute,
+ .set_fmt = nau8822_set_dai_fmt,
+ .set_sysclk = nau8822_set_dai_sysclk,
+ .set_pll = nau8822_set_pll,
+};
+
+static struct snd_soc_dai_driver nau8822_dai = {
+ .name = "nau8822-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = NAU8822_RATES,
+ .formats = NAU8822_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = NAU8822_RATES,
+ .formats = NAU8822_FORMATS,
+ },
+ .ops = &nau8822_dai_ops,
+ .symmetric_rates = 1,
+};
+
+static int nau8822_suspend(struct snd_soc_component *component)
+{
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_OFF);
+
+ regcache_mark_dirty(nau8822->regmap);
+
+ return 0;
+}
+
+static int nau8822_resume(struct snd_soc_component *component)
+{
+ struct nau8822 *nau8822 = snd_soc_component_get_drvdata(component);
+
+ regcache_sync(nau8822->regmap);
+
+ snd_soc_component_force_bias_level(component, SND_SOC_BIAS_STANDBY);
+
+ return 0;
+}
+
+/*
+ * These registers contain an "update" bit - bit 8. It means, for example,
+ * that a new DAC digital volume can be written for both channels, but the
+ * volume is only applied - simultaneously for both channels - once the
+ * update bit is set.
+ */
+static const int update_reg[] = {
+ NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME,
+ NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME,
+ NAU8822_REG_LEFT_INP_PGA_CONTROL,
+ NAU8822_REG_RIGHT_INP_PGA_CONTROL,
+ NAU8822_REG_LHP_VOLUME,
+ NAU8822_REG_RHP_VOLUME,
+ NAU8822_REG_LSPKOUT_VOLUME,
+ NAU8822_REG_RSPKOUT_VOLUME,
+};
+
+static int nau8822_probe(struct snd_soc_component *component)
+{
+ int i;
+
+ /*
+ * Set the update bit in all registers that have one. This way every
+ * write to those registers also sets the update bit, so new values
+ * take effect immediately.
+ */
+ for (i = 0; i < ARRAY_SIZE(update_reg); i++)
+ snd_soc_component_update_bits(component,
+ update_reg[i], 0x100, 0x100);
+
+ return 0;
+}
+
+static const struct snd_soc_component_driver soc_component_dev_nau8822 = {
+ .probe = nau8822_probe,
+ .suspend = nau8822_suspend,
+ .resume = nau8822_resume,
+ .set_bias_level = nau8822_set_bias_level,
+ .controls = nau8822_snd_controls,
+ .num_controls = ARRAY_SIZE(nau8822_snd_controls),
+ .dapm_widgets = nau8822_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(nau8822_dapm_widgets),
+ .dapm_routes = nau8822_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(nau8822_dapm_routes),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
+static const struct regmap_config nau8822_regmap_config = {
+ .reg_bits = 7,
+ .val_bits = 9,
+
+ .max_register = NAU8822_REG_MAX_REGISTER,
+ .volatile_reg = nau8822_volatile,
+
+ .readable_reg = nau8822_readable_reg,
+ .writeable_reg = nau8822_writeable_reg,
+
+ .cache_type = REGCACHE_RBTREE,
+ .reg_defaults = nau8822_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(nau8822_reg_defaults),
+};
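
With reg_bits = 7 and val_bits = 9, regmap packs each access into two bytes on the I2C bus: the register address occupies the top seven bits of the first byte and the value's ninth bit its LSB. A sketch of the framing (not driver code):

    /* writing val = 0x1FF to reg = 0x0B transmits: */
    u8 byte0 = (0x0B << 1) | (0x1FF >> 8); /* 0x17 */
    u8 byte1 = 0x1FF & 0xFF;               /* 0xFF */
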
+
+static int nau8822_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &i2c->dev;
+ struct nau8822 *nau8822 = dev_get_platdata(dev);
+ int ret;
+
+ if (!nau8822) {
+ nau8822 = devm_kzalloc(dev, sizeof(*nau8822), GFP_KERNEL);
+ if (nau8822 == NULL)
+ return -ENOMEM;
+ }
+ i2c_set_clientdata(i2c, nau8822);
+
+ nau8822->regmap = devm_regmap_init_i2c(i2c, &nau8822_regmap_config);
+ if (IS_ERR(nau8822->regmap)) {
+ ret = PTR_ERR(nau8822->regmap);
+ dev_err(&i2c->dev, "Failed to allocate regmap: %d\n", ret);
+ return ret;
+ }
+ nau8822->dev = dev;
+
+ /* Reset the codec */
+ ret = regmap_write(nau8822->regmap, NAU8822_REG_RESET, 0x00);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to issue reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_snd_soc_register_component(dev, &soc_component_dev_nau8822,
+ &nau8822_dai, 1);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Failed to register CODEC: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct i2c_device_id nau8822_i2c_id[] = {
+ { "nau8822", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nau8822_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id nau8822_of_match[] = {
+ { .compatible = "nuvoton,nau8822", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nau8822_of_match);
+#endif
+
+static struct i2c_driver nau8822_i2c_driver = {
+ .driver = {
+ .name = "nau8822",
+ .of_match_table = of_match_ptr(nau8822_of_match),
+ },
+ .probe = nau8822_i2c_probe,
+ .id_table = nau8822_i2c_id,
+};
+module_i2c_driver(nau8822_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC NAU8822 codec driver");
+MODULE_AUTHOR("David Lin <ctlin0@nuvoton.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/nau8822.h b/sound/soc/codecs/nau8822.h
new file mode 100644
index 000000000000..aa79c969cd44
--- /dev/null
+++ b/sound/soc/codecs/nau8822.h
@@ -0,0 +1,204 @@
+/*
+ * nau8822.h -- NAU8822 ALSA SoC Audio Codec driver
+ *
+ * Author: David Lin <ctlin0@nuvoton.com>
+ * Co-author: John Hsu <kchsu0@nuvoton.com>
+ * Co-author: Seven Li <wtli@nuvoton.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __NAU8822_H__
+#define __NAU8822_H__
+
+#define NAU8822_REG_RESET 0x00
+#define NAU8822_REG_POWER_MANAGEMENT_1 0x01
+#define NAU8822_REG_POWER_MANAGEMENT_2 0x02
+#define NAU8822_REG_POWER_MANAGEMENT_3 0x03
+#define NAU8822_REG_AUDIO_INTERFACE 0x04
+#define NAU8822_REG_COMPANDING_CONTROL 0x05
+#define NAU8822_REG_CLOCKING 0x06
+#define NAU8822_REG_ADDITIONAL_CONTROL 0x07
+#define NAU8822_REG_GPIO_CONTROL 0x08
+#define NAU8822_REG_JACK_DETECT_CONTROL_1 0x09
+#define NAU8822_REG_DAC_CONTROL 0x0A
+#define NAU8822_REG_LEFT_DAC_DIGITAL_VOLUME 0x0B
+#define NAU8822_REG_RIGHT_DAC_DIGITAL_VOLUME 0x0C
+#define NAU8822_REG_JACK_DETECT_CONTROL_2 0x0D
+#define NAU8822_REG_ADC_CONTROL 0x0E
+#define NAU8822_REG_LEFT_ADC_DIGITAL_VOLUME 0x0F
+#define NAU8822_REG_RIGHT_ADC_DIGITAL_VOLUME 0x10
+#define NAU8822_REG_EQ1 0x12
+#define NAU8822_REG_EQ2 0x13
+#define NAU8822_REG_EQ3 0x14
+#define NAU8822_REG_EQ4 0x15
+#define NAU8822_REG_EQ5 0x16
+#define NAU8822_REG_DAC_LIMITER_1 0x18
+#define NAU8822_REG_DAC_LIMITER_2 0x19
+#define NAU8822_REG_NOTCH_FILTER_1 0x1B
+#define NAU8822_REG_NOTCH_FILTER_2 0x1C
+#define NAU8822_REG_NOTCH_FILTER_3 0x1D
+#define NAU8822_REG_NOTCH_FILTER_4 0x1E
+#define NAU8822_REG_ALC_CONTROL_1 0x20
+#define NAU8822_REG_ALC_CONTROL_2 0x21
+#define NAU8822_REG_ALC_CONTROL_3 0x22
+#define NAU8822_REG_NOISE_GATE 0x23
+#define NAU8822_REG_PLL_N 0x24
+#define NAU8822_REG_PLL_K1 0x25
+#define NAU8822_REG_PLL_K2 0x26
+#define NAU8822_REG_PLL_K3 0x27
+#define NAU8822_REG_3D_CONTROL 0x29
+#define NAU8822_REG_RIGHT_SPEAKER_CONTROL 0x2B
+#define NAU8822_REG_INPUT_CONTROL 0x2C
+#define NAU8822_REG_LEFT_INP_PGA_CONTROL 0x2D
+#define NAU8822_REG_RIGHT_INP_PGA_CONTROL 0x2E
+#define NAU8822_REG_LEFT_ADC_BOOST_CONTROL 0x2F
+#define NAU8822_REG_RIGHT_ADC_BOOST_CONTROL 0x30
+#define NAU8822_REG_OUTPUT_CONTROL 0x31
+#define NAU8822_REG_LEFT_MIXER_CONTROL 0x32
+#define NAU8822_REG_RIGHT_MIXER_CONTROL 0x33
+#define NAU8822_REG_LHP_VOLUME 0x34
+#define NAU8822_REG_RHP_VOLUME 0x35
+#define NAU8822_REG_LSPKOUT_VOLUME 0x36
+#define NAU8822_REG_RSPKOUT_VOLUME 0x37
+#define NAU8822_REG_AUX2_MIXER 0x38
+#define NAU8822_REG_AUX1_MIXER 0x39
+#define NAU8822_REG_POWER_MANAGEMENT_4 0x3A
+#define NAU8822_REG_LEFT_TIME_SLOT 0x3B
+#define NAU8822_REG_MISC 0x3C
+#define NAU8822_REG_RIGHT_TIME_SLOT 0x3D
+#define NAU8822_REG_DEVICE_REVISION 0x3E
+#define NAU8822_REG_DEVICE_ID 0x3F
+#define NAU8822_REG_DAC_DITHER 0x41
+#define NAU8822_REG_ALC_ENHANCE_1 0x46
+#define NAU8822_REG_ALC_ENHANCE_2 0x47
+#define NAU8822_REG_192KHZ_SAMPLING 0x48
+#define NAU8822_REG_MISC_CONTROL 0x49
+#define NAU8822_REG_INPUT_TIEOFF 0x4A
+#define NAU8822_REG_POWER_REDUCTION 0x4B
+#define NAU8822_REG_AGC_PEAK2PEAK 0x4C
+#define NAU8822_REG_AGC_PEAK_DETECT 0x4D
+#define NAU8822_REG_AUTOMUTE_CONTROL 0x4E
+#define NAU8822_REG_OUTPUT_TIEOFF 0x4F
+#define NAU8822_REG_MAX_REGISTER NAU8822_REG_OUTPUT_TIEOFF
+
+/* NAU8822_REG_POWER_MANAGEMENT_1 (0x1) */
+#define NAU8822_REFIMP_MASK 0x3
+#define NAU8822_REFIMP_80K 0x1
+#define NAU8822_REFIMP_300K 0x2
+#define NAU8822_REFIMP_3K 0x3
+#define NAU8822_IOBUF_EN (0x1 << 2)
+#define NAU8822_ABIAS_EN (0x1 << 3)
+
+/* NAU8822_REG_AUDIO_INTERFACE (0x4) */
+#define NAU8822_AIFMT_MASK (0x3 << 3)
+#define NAU8822_WLEN_MASK (0x3 << 5)
+#define NAU8822_WLEN_20 (0x1 << 5)
+#define NAU8822_WLEN_24 (0x2 << 5)
+#define NAU8822_WLEN_32 (0x3 << 5)
+#define NAU8822_LRP_MASK (0x1 << 7)
+#define NAU8822_BCLKP_MASK (0x1 << 8)
+
+/* NAU8822_REG_COMPANDING_CONTROL (0x5) */
+#define NAU8822_ADDAP_SFT 0
+#define NAU8822_ADCCM_SFT 1
+#define NAU8822_DACCM_SFT 3
+
+/* NAU8822_REG_CLOCKING (0x6) */
+#define NAU8822_CLKIOEN_MASK 0x1
+#define NAU8822_MCLKSEL_SFT 5
+#define NAU8822_MCLKSEL_MASK (0x7 << 5)
+#define NAU8822_BCLKSEL_SFT 2
+#define NAU8822_BCLKSEL_MASK (0x7 << 2)
+#define NAU8822_CLKM_MASK (0x1 << 8)
+#define NAU8822_CLKM_MCLK (0x0 << 8)
+#define NAU8822_CLKM_PLL (0x1 << 8)
+
+/* NAU8822_REG_ADDITIONAL_CONTROL (0x07) */
+#define NAU8822_SMPLR_SFT 1
+#define NAU8822_SMPLR_MASK (0x7 << 1)
+#define NAU8822_SMPLR_48K (0x0 << 1)
+#define NAU8822_SMPLR_32K (0x1 << 1)
+#define NAU8822_SMPLR_24K (0x2 << 1)
+#define NAU8822_SMPLR_16K (0x3 << 1)
+#define NAU8822_SMPLR_12K (0x4 << 1)
+#define NAU8822_SMPLR_8K (0x5 << 1)
+
+/* NAU8822_REG_EQ1 (0x12) */
+#define NAU8822_EQ1GC_SFT 0
+#define NAU8822_EQ1CF_SFT 5
+#define NAU8822_EQM_SFT 8
+
+/* NAU8822_REG_EQ2 (0x13) */
+#define NAU8822_EQ2GC_SFT 0
+#define NAU8822_EQ2CF_SFT 5
+#define NAU8822_EQ2BW_SFT 8
+
+/* NAU8822_REG_EQ3 (0x14) */
+#define NAU8822_EQ3GC_SFT 0
+#define NAU8822_EQ3CF_SFT 5
+#define NAU8822_EQ3BW_SFT 8
+
+/* NAU8822_REG_EQ4 (0x15) */
+#define NAU8822_EQ4GC_SFT 0
+#define NAU8822_EQ4CF_SFT 5
+#define NAU8822_EQ4BW_SFT 8
+
+/* NAU8822_REG_EQ5 (0x16) */
+#define NAU8822_EQ5GC_SFT 0
+#define NAU8822_EQ5CF_SFT 5
+
+/* NAU8822_REG_ALC_CONTROL_1 (0x20) */
+#define NAU8822_ALCMINGAIN_SFT 0
+#define NAU8822_ALCMXGAIN_SFT 3
+#define NAU8822_ALCEN_SFT 7
+
+/* NAU8822_REG_ALC_CONTROL_2 (0x21) */
+#define NAU8822_ALCSL_SFT 0
+#define NAU8822_ALCHT_SFT 4
+
+/* NAU8822_REG_ALC_CONTROL_3 (0x22) */
+#define NAU8822_ALCATK_SFT 0
+#define NAU8822_ALCDCY_SFT 4
+#define NAU8822_ALCM_SFT 8
+
+/* NAU8822_REG_PLL_N (0x24) */
+#define NAU8822_PLLMCLK_DIV2 (0x1 << 4)
+#define NAU8822_PLLN_MASK 0xF
+
+/* NAU8822_REG_PLL_K1 (0x25) */
+#define NAU8822_PLLK1_SFT 18
+#define NAU8822_PLLK1_MASK 0x3F
+
+/* NAU8822_REG_PLL_K2 (0x26) */
+#define NAU8822_PLLK2_SFT 9
+#define NAU8822_PLLK2_MASK 0x1FF
+
+/* NAU8822_REG_PLL_K3 (0x27) */
+#define NAU8822_PLLK3_MASK 0x1FF
+
+/* System Clock Source */
+enum {
+ NAU8822_CLK_MCLK,
+ NAU8822_CLK_PLL,
+};
+
+struct nau8822_pll {
+ int pre_factor;
+ int mclk_scaler;
+ int pll_frac;
+ int pll_int;
+};
+
+/* Codec Private Data */
+struct nau8822 {
+ struct device *dev;
+ struct regmap *regmap;
+ int mclk_idx;
+ struct nau8822_pll pll;
+ int sysclk;
+ int div_id;
+};
+
+#endif /* __NAU8822_H__ */
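A note on struct nau8822_pll above: the K1/K2/K3 masks (6 + 9 + 9 bits) suggest the usual Nuvoton scheme of a 24-bit fractional multiplier. Assuming that convention (the function name and formula below are illustrative, not from this patch), the programmed output rate works out roughly as:

    /* Sketch, assuming f_out = mclk / pre_factor
     *                        * (pll_int + pll_frac / 2^24) / mclk_scaler
     */
    static unsigned long nau8822_pll_rate(unsigned long mclk, int pre_factor,
                                          int mclk_scaler, int pll_int,
                                          unsigned int pll_frac)
    {
            unsigned long long base = mclk / pre_factor;

            return (base * pll_int + ((base * pll_frac) >> 24)) / mclk_scaler;
    }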
diff --git a/sound/soc/codecs/pcm186x.c b/sound/soc/codecs/pcm186x.c
index 690c26e7389e..809b7e9f03ca 100644
--- a/sound/soc/codecs/pcm186x.c
+++ b/sound/soc/codecs/pcm186x.c
@@ -401,7 +401,8 @@ static int pcm186x_set_fmt(struct snd_soc_dai *dai, unsigned int format)
break;
case SND_SOC_DAIFMT_DSP_A:
priv->tdm_offset += 1;
- /* Fall through... DSP_A uses the same basic config as DSP_B
+ /* fall through */
+ /* DSP_A uses the same basic config as DSP_B
* except we need to shift the TDM output by one BCK cycle
*/
case SND_SOC_DAIFMT_DSP_B:
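The hunk above splits the old combined comment so that the bare fall-through marker sits on its own line, where GCC's -Wimplicit-fallthrough machinery can recognize it. A minimal stand-alone illustration of the annotation:

    int accumulate(int mode, int val)
    {
            switch (mode) {
            case 0:
                    val += 1;       /* adjust, then share the common path */
                    /* fall through */
            case 1:
                    return val * 2;
            default:
                    return -1;
            }
    }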
diff --git a/sound/soc/codecs/pcm3060-i2c.c b/sound/soc/codecs/pcm3060-i2c.c
new file mode 100644
index 000000000000..cdc8314882bc
--- /dev/null
+++ b/sound/soc/codecs/pcm3060-i2c.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 I2C driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <sound/soc.h>
+
+#include "pcm3060.h"
+
+static int pcm3060_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct pcm3060_priv *priv;
+
+ priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, priv);
+
+ priv->regmap = devm_regmap_init_i2c(i2c, &pcm3060_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return pcm3060_probe(&i2c->dev);
+}
+
+static const struct i2c_device_id pcm3060_i2c_id[] = {
+ { .name = "pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, pcm3060_i2c_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm3060_of_match[] = {
+ { .compatible = "ti,pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pcm3060_of_match);
+#endif /* CONFIG_OF */
+
+static struct i2c_driver pcm3060_i2c_driver = {
+ .driver = {
+ .name = "pcm3060",
+#ifdef CONFIG_OF
+ .of_match_table = pcm3060_of_match,
+#endif /* CONFIG_OF */
+ },
+ .id_table = pcm3060_i2c_id,
+ .probe = pcm3060_i2c_probe,
+};
+
+module_i2c_driver(pcm3060_i2c_driver);
+
+MODULE_DESCRIPTION("PCM3060 I2C driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060-spi.c b/sound/soc/codecs/pcm3060-spi.c
new file mode 100644
index 000000000000..f6f19fa80932
--- /dev/null
+++ b/sound/soc/codecs/pcm3060-spi.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 SPI driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <sound/soc.h>
+
+#include "pcm3060.h"
+
+static int pcm3060_spi_probe(struct spi_device *spi)
+{
+ struct pcm3060_priv *priv;
+
+ priv = devm_kzalloc(&spi->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, priv);
+
+ priv->regmap = devm_regmap_init_spi(spi, &pcm3060_regmap);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ return pcm3060_probe(&spi->dev);
+}
+
+static const struct spi_device_id pcm3060_spi_id[] = {
+ { .name = "pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, pcm3060_spi_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm3060_of_match[] = {
+ { .compatible = "ti,pcm3060" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pcm3060_of_match);
+#endif /* CONFIG_OF */
+
+static struct spi_driver pcm3060_spi_driver = {
+ .driver = {
+ .name = "pcm3060",
+#ifdef CONFIG_OF
+ .of_match_table = pcm3060_of_match,
+#endif /* CONFIG_OF */
+ },
+ .id_table = pcm3060_spi_id,
+ .probe = pcm3060_spi_probe,
+};
+
+module_spi_driver(pcm3060_spi_driver);
+
+MODULE_DESCRIPTION("PCM3060 SPI driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/pcm3060.c b/sound/soc/codecs/pcm3060.c
new file mode 100644
index 000000000000..494d9d662be8
--- /dev/null
+++ b/sound/soc/codecs/pcm3060.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// PCM3060 codec driver
+//
+// Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+
+#include <linux/module.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#include "pcm3060.h"
+
+/* dai */
+
+static int pcm3060_set_sysclk(struct snd_soc_dai *dai, int clk_id,
+ unsigned int freq, int dir)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+
+ if (dir != SND_SOC_CLOCK_IN) {
+ dev_err(comp->dev, "unsupported sysclock dir: %d\n", dir);
+ return -EINVAL;
+ }
+
+ priv->dai[dai->id].sclk_freq = freq;
+
+ return 0;
+}
+
+static int pcm3060_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+ unsigned int reg;
+ unsigned int val;
+
+ if ((fmt & SND_SOC_DAIFMT_INV_MASK) != SND_SOC_DAIFMT_NB_NF) {
+ dev_err(comp->dev, "unsupported DAI polarity: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ priv->dai[dai->id].is_master = true;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ priv->dai[dai->id].is_master = false;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported DAI master mode: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ val = PCM3060_REG_FMT_I2S;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ val = PCM3060_REG_FMT_RJ;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = PCM3060_REG_FMT_LJ;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported DAI format: 0x%x\n", fmt);
+ return -EINVAL;
+ }
+
+ if (dai->id == PCM3060_DAI_ID_DAC)
+ reg = PCM3060_REG67;
+ else
+ reg = PCM3060_REG72;
+
+ regmap_update_bits(priv->regmap, reg, PCM3060_REG_MASK_FMT, val);
+
+ return 0;
+}
+
+static int pcm3060_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *comp = dai->component;
+ struct pcm3060_priv *priv = snd_soc_component_get_drvdata(comp);
+ unsigned int rate;
+ unsigned int ratio;
+ unsigned int reg;
+ unsigned int val;
+
+ if (!priv->dai[dai->id].is_master) {
+ val = PCM3060_REG_MS_S;
+ goto val_ready;
+ }
+
+ rate = params_rate(params);
+ if (!rate) {
+ dev_err(comp->dev, "rate is not configured\n");
+ return -EINVAL;
+ }
+
+ ratio = priv->dai[dai->id].sclk_freq / rate;
+
+ switch (ratio) {
+ case 768:
+ val = PCM3060_REG_MS_M768;
+ break;
+ case 512:
+ val = PCM3060_REG_MS_M512;
+ break;
+ case 384:
+ val = PCM3060_REG_MS_M384;
+ break;
+ case 256:
+ val = PCM3060_REG_MS_M256;
+ break;
+ case 192:
+ val = PCM3060_REG_MS_M192;
+ break;
+ case 128:
+ val = PCM3060_REG_MS_M128;
+ break;
+ default:
+ dev_err(comp->dev, "unsupported ratio: %d\n", ratio);
+ return -EINVAL;
+ }
+
+val_ready:
+ if (dai->id == PCM3060_DAI_ID_DAC)
+ reg = PCM3060_REG67;
+ else
+ reg = PCM3060_REG72;
+
+ regmap_update_bits(priv->regmap, reg, PCM3060_REG_MASK_MS, val);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops pcm3060_dai_ops = {
+ .set_sysclk = pcm3060_set_sysclk,
+ .set_fmt = pcm3060_set_fmt,
+ .hw_params = pcm3060_hw_params,
+};
+
+#define PCM3060_DAI_RATES_ADC (SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+
+#define PCM3060_DAI_RATES_DAC (PCM3060_DAI_RATES_ADC | \
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
+
+static struct snd_soc_dai_driver pcm3060_dai[] = {
+ {
+ .name = "pcm3060-dac",
+ .id = PCM3060_DAI_ID_DAC,
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = PCM3060_DAI_RATES_DAC,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &pcm3060_dai_ops,
+ },
+ {
+ .name = "pcm3060-adc",
+ .id = PCM3060_DAI_ID_ADC,
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = PCM3060_DAI_RATES_ADC,
+ .formats = SNDRV_PCM_FMTBIT_S24_LE,
+ },
+ .ops = &pcm3060_dai_ops,
+ },
+};
+
+/* dapm */
+
+static DECLARE_TLV_DB_SCALE(pcm3060_dapm_tlv, -10050, 50, 1);
+
+static const struct snd_kcontrol_new pcm3060_dapm_controls[] = {
+ SOC_DOUBLE_R_RANGE_TLV("Master Playback Volume",
+ PCM3060_REG65, PCM3060_REG66, 0,
+ PCM3060_REG_AT2_MIN, PCM3060_REG_AT2_MAX,
+ 0, pcm3060_dapm_tlv),
+ SOC_DOUBLE("Master Playback Switch", PCM3060_REG68,
+ PCM3060_REG_SHIFT_MUT21, PCM3060_REG_SHIFT_MUT22, 1, 1),
+
+ SOC_DOUBLE_R_RANGE_TLV("Master Capture Volume",
+ PCM3060_REG70, PCM3060_REG71, 0,
+ PCM3060_REG_AT1_MIN, PCM3060_REG_AT1_MAX,
+ 0, pcm3060_dapm_tlv),
+ SOC_DOUBLE("Master Capture Switch", PCM3060_REG73,
+ PCM3060_REG_SHIFT_MUT11, PCM3060_REG_SHIFT_MUT12, 1, 1),
+};
+
+static const struct snd_soc_dapm_widget pcm3060_dapm_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("OUTL+"),
+ SND_SOC_DAPM_OUTPUT("OUTR+"),
+ SND_SOC_DAPM_OUTPUT("OUTL-"),
+ SND_SOC_DAPM_OUTPUT("OUTR-"),
+
+ SND_SOC_DAPM_INPUT("INL"),
+ SND_SOC_DAPM_INPUT("INR"),
+};
+
+static const struct snd_soc_dapm_route pcm3060_dapm_map[] = {
+ { "OUTL+", NULL, "Playback" },
+ { "OUTR+", NULL, "Playback" },
+ { "OUTL-", NULL, "Playback" },
+ { "OUTR-", NULL, "Playback" },
+
+ { "Capture", NULL, "INL" },
+ { "Capture", NULL, "INR" },
+};
+
+/* soc component */
+
+static const struct snd_soc_component_driver pcm3060_soc_comp_driver = {
+ .controls = pcm3060_dapm_controls,
+ .num_controls = ARRAY_SIZE(pcm3060_dapm_controls),
+ .dapm_widgets = pcm3060_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm3060_dapm_widgets),
+ .dapm_routes = pcm3060_dapm_map,
+ .num_dapm_routes = ARRAY_SIZE(pcm3060_dapm_map),
+};
+
+/* regmap */
+
+static bool pcm3060_reg_writeable(struct device *dev, unsigned int reg)
+{
+ return (reg >= PCM3060_REG64);
+}
+
+static bool pcm3060_reg_readable(struct device *dev, unsigned int reg)
+{
+ return (reg >= PCM3060_REG64);
+}
+
+static bool pcm3060_reg_volatile(struct device *dev, unsigned int reg)
+{
+ /* PCM3060_REG64 is volatile */
+ return (reg == PCM3060_REG64);
+}
+
+static const struct reg_default pcm3060_reg_defaults[] = {
+ { PCM3060_REG64, 0xF0 },
+ { PCM3060_REG65, 0xFF },
+ { PCM3060_REG66, 0xFF },
+ { PCM3060_REG67, 0x00 },
+ { PCM3060_REG68, 0x00 },
+ { PCM3060_REG69, 0x00 },
+ { PCM3060_REG70, 0xD7 },
+ { PCM3060_REG71, 0xD7 },
+ { PCM3060_REG72, 0x00 },
+ { PCM3060_REG73, 0x00 },
+};
+
+const struct regmap_config pcm3060_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .writeable_reg = pcm3060_reg_writeable,
+ .readable_reg = pcm3060_reg_readable,
+ .volatile_reg = pcm3060_reg_volatile,
+ .max_register = PCM3060_REG73,
+ .reg_defaults = pcm3060_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(pcm3060_reg_defaults),
+ .cache_type = REGCACHE_RBTREE,
+};
+EXPORT_SYMBOL(pcm3060_regmap);
+
+/* device */
+
+int pcm3060_probe(struct device *dev)
+{
+ int rc;
+
+ rc = devm_snd_soc_register_component(dev, &pcm3060_soc_comp_driver,
+ pcm3060_dai,
+ ARRAY_SIZE(pcm3060_dai));
+ if (rc) {
+ dev_err(dev, "failed to register component, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcm3060_probe);
+
+MODULE_DESCRIPTION("PCM3060 codec driver");
+MODULE_AUTHOR("Kirill Marinushkin <kmarinushkin@birdec.tech>");
+MODULE_LICENSE("GPL v2");
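A worked example for the ratio switch in pcm3060_hw_params(): in master mode the system-clock-to-sample-rate ratio selects the MS field, e.g. SCKI = 24.576 MHz at 48 kHz gives 24576000 / 48000 = 512 and thus PCM3060_REG_MS_M512. The helper name below is hypothetical; the mapping is the one from the code above:

    #include "pcm3060.h"

    static unsigned int pcm3060_ms_bits(unsigned int scki_hz, unsigned int rate)
    {
            switch (scki_hz / rate) {
            case 768: return PCM3060_REG_MS_M768;
            case 512: return PCM3060_REG_MS_M512;   /* 24.576 MHz / 48 kHz */
            case 384: return PCM3060_REG_MS_M384;
            case 256: return PCM3060_REG_MS_M256;   /* 12.288 MHz / 48 kHz */
            case 192: return PCM3060_REG_MS_M192;
            case 128: return PCM3060_REG_MS_M128;
            default:  return PCM3060_REG_MS_S;      /* driver rejects with -EINVAL */
            }
    }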
diff --git a/sound/soc/codecs/pcm3060.h b/sound/soc/codecs/pcm3060.h
new file mode 100644
index 000000000000..fd89a68aa8a7
--- /dev/null
+++ b/sound/soc/codecs/pcm3060.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * PCM3060 codec driver
+ *
+ * Copyright (C) 2018 Kirill Marinushkin <kmarinushkin@birdec.tech>
+ */
+
+#ifndef _SND_SOC_PCM3060_H
+#define _SND_SOC_PCM3060_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+extern const struct regmap_config pcm3060_regmap;
+
+#define PCM3060_DAI_ID_DAC 0
+#define PCM3060_DAI_ID_ADC 1
+#define PCM3060_DAI_IDS_NUM 2
+
+struct pcm3060_priv_dai {
+ bool is_master;
+ unsigned int sclk_freq;
+};
+
+struct pcm3060_priv {
+ struct regmap *regmap;
+ struct pcm3060_priv_dai dai[PCM3060_DAI_IDS_NUM];
+};
+
+int pcm3060_probe(struct device *dev);
+int pcm3060_remove(struct device *dev);
+
+/* registers */
+
+#define PCM3060_REG64 0x40
+#define PCM3060_REG_MRST 0x80
+#define PCM3060_REG_SRST 0x40
+#define PCM3060_REG_ADPSV 0x20
+#define PCM3060_REG_DAPSV 0x10
+#define PCM3060_REG_SE 0x01
+
+#define PCM3060_REG65 0x41
+#define PCM3060_REG66 0x42
+#define PCM3060_REG_AT2_MIN 0x36
+#define PCM3060_REG_AT2_MAX 0xFF
+
+#define PCM3060_REG67 0x43
+#define PCM3060_REG72 0x48
+#define PCM3060_REG_CSEL 0x80
+#define PCM3060_REG_MASK_MS 0x70
+#define PCM3060_REG_MS_S 0x00
+#define PCM3060_REG_MS_M768 (0x01 << 4)
+#define PCM3060_REG_MS_M512 (0x02 << 4)
+#define PCM3060_REG_MS_M384 (0x03 << 4)
+#define PCM3060_REG_MS_M256 (0x04 << 4)
+#define PCM3060_REG_MS_M192 (0x05 << 4)
+#define PCM3060_REG_MS_M128 (0x06 << 4)
+#define PCM3060_REG_MASK_FMT 0x03
+#define PCM3060_REG_FMT_I2S 0x00
+#define PCM3060_REG_FMT_LJ 0x01
+#define PCM3060_REG_FMT_RJ 0x02
+
+#define PCM3060_REG68 0x44
+#define PCM3060_REG_OVER 0x40
+#define PCM3060_REG_DREV2 0x04
+#define PCM3060_REG_SHIFT_MUT21 0x00
+#define PCM3060_REG_SHIFT_MUT22 0x01
+
+#define PCM3060_REG69 0x45
+#define PCM3060_REG_FLT 0x80
+#define PCM3060_REG_MASK_DMF 0x60
+#define PCM3060_REG_DMC 0x10
+#define PCM3060_REG_ZREV 0x02
+#define PCM3060_REG_AZRO 0x01
+
+#define PCM3060_REG70 0x46
+#define PCM3060_REG71 0x47
+#define PCM3060_REG_AT1_MIN 0x0E
+#define PCM3060_REG_AT1_MAX 0xFF
+
+#define PCM3060_REG73 0x49
+#define PCM3060_REG_ZCDD 0x10
+#define PCM3060_REG_BYP 0x08
+#define PCM3060_REG_DREV1 0x04
+#define PCM3060_REG_SHIFT_MUT11 0x00
+#define PCM3060_REG_SHIFT_MUT12 0x01
+
+#endif /* _SND_SOC_PCM3060_H */
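The attenuation limits above combine with the single TLV scale declared in pcm3060.c (-100.50 dB at the range minimum, 0.50 dB per step): the DAC field (AT2) then tops out at 0 dB and the ADC field (AT1) at +20 dB. A quick arithmetic check, in centi-dB:

    #include <assert.h>

    int main(void)
    {
            assert(-10050 + (0xFF - 0x36) * 50 == 0);       /* DAC max:  0 dB  */
            assert(-10050 + (0xFF - 0x0E) * 50 == 2000);    /* ADC max: +20 dB */
            return 0;
    }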
diff --git a/sound/soc/codecs/pcm3168a.c b/sound/soc/codecs/pcm3168a.c
index 3356c91f55b0..52cc950c9fd1 100644
--- a/sound/soc/codecs/pcm3168a.c
+++ b/sound/soc/codecs/pcm3168a.c
@@ -33,6 +33,8 @@
#define PCM3168A_FMT_RIGHT_J_16 0x3
#define PCM3168A_FMT_DSP_A 0x4
#define PCM3168A_FMT_DSP_B 0x5
+#define PCM3168A_FMT_I2S_TDM 0x6
+#define PCM3168A_FMT_LEFT_J_TDM 0x7
#define PCM3168A_FMT_DSP_MASK 0x4
#define PCM3168A_NUM_SUPPLIES 6
@@ -401,9 +403,11 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
bool tx, master_mode;
u32 val, mask, shift, reg;
unsigned int rate, fmt, ratio, max_ratio;
+ unsigned int chan;
int i, min_frame_size;
rate = params_rate(params);
+ chan = params_channels(params);
ratio = pcm3168a->sysclk / rate;
@@ -456,6 +460,21 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
+ /* for TDM */
+ if (chan > 2) {
+ switch (fmt) {
+ case PCM3168A_FMT_I2S:
+ fmt = PCM3168A_FMT_I2S_TDM;
+ break;
+ case PCM3168A_FMT_LEFT_J:
+ fmt = PCM3168A_FMT_LEFT_J_TDM;
+ break;
+ default:
+ dev_err(component->dev, "TDM is only supported in I2S or Left-J mode\n");
+ return -EINVAL;
+ }
+ }
+
if (master_mode)
val = ((i + 1) << shift);
else
@@ -476,7 +495,69 @@ static int pcm3168a_hw_params(struct snd_pcm_substream *substream,
return 0;
}
+static int pcm3168a_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_component *component = dai->component;
+ struct pcm3168a_priv *pcm3168a = snd_soc_component_get_drvdata(component);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int fmt;
+ unsigned int sample_min;
+ unsigned int channel_max;
+
+ if (tx)
+ fmt = pcm3168a->dac_fmt;
+ else
+ fmt = pcm3168a->adc_fmt;
+
+ /*
+ * Available Data Bits
+ *
+ * RIGHT_J : 24 / 16
+ * LEFT_J : 24
+ * I2S : 24
+ *
+ * TDM available
+ *
+ * I2S
+ * LEFT_J
+ */
+ switch (fmt) {
+ case PCM3168A_FMT_RIGHT_J:
+ sample_min = 16;
+ channel_max = 2;
+ break;
+ case PCM3168A_FMT_LEFT_J:
+ sample_min = 24;
+ if (tx)
+ channel_max = 8;
+ else
+ channel_max = 6;
+ break;
+ case PCM3168A_FMT_I2S:
+ sample_min = 24;
+ if (tx)
+ channel_max = 8;
+ else
+ channel_max = 6;
+ break;
+ default:
+ sample_min = 24;
+ channel_max = 2;
+ }
+
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
+ sample_min, 32);
+
+ snd_pcm_hw_constraint_minmax(substream->runtime,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ 2, channel_max);
+
+ return 0;
+}
static const struct snd_soc_dai_ops pcm3168a_dac_dai_ops = {
+ .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt_dac,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params,
@@ -484,6 +565,7 @@ static const struct snd_soc_dai_ops pcm3168a_dac_dai_ops = {
};
static const struct snd_soc_dai_ops pcm3168a_adc_dai_ops = {
+ .startup = pcm3168a_startup,
.set_fmt = pcm3168a_set_dai_fmt_adc,
.set_sysclk = pcm3168a_set_dai_sysclk,
.hw_params = pcm3168a_hw_params
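The constraints added in pcm3168a_startup() condense to a small format table (taken straight from the comment block in the hunk); tx is playback, rx is capture:

    static const struct {
            unsigned int fmt;
            unsigned int sample_min;        /* minimum sample bits   */
            unsigned int ch_max_tx;         /* max playback channels */
            unsigned int ch_max_rx;         /* max capture channels  */
    } pcm3168a_limits[] = {
            { PCM3168A_FMT_RIGHT_J, 16, 2, 2 },     /* no TDM      */
            { PCM3168A_FMT_LEFT_J,  24, 8, 6 },     /* TDM capable */
            { PCM3168A_FMT_I2S,     24, 8, 6 },     /* TDM capable */
    };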
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index d88e67341083..0ef966d56bac 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -755,6 +755,7 @@ static int rt274_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
break;
default:
dev_warn(component->dev, "invalid pll source, use BCLK\n");
+ /* fall through */
case RT274_PLL2_S_BCLK:
snd_soc_component_update_bits(component, RT274_PLL2_CTRL,
RT274_PLL2_SRC_MASK, RT274_PLL2_SRC_BCLK);
@@ -782,6 +783,7 @@ static int rt274_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
break;
default:
dev_warn(component->dev, "invalid freq_in, assume 4.8M\n");
+ /* fall through */
case 100:
snd_soc_component_write(component, 0x7a, 0xaab6);
snd_soc_component_write(component, 0x7b, 0x0301);
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
index 6478d10c4f4a..4d46f4567c3a 100644
--- a/sound/soc/codecs/rt5514-spi.c
+++ b/sound/soc/codecs/rt5514-spi.c
@@ -91,6 +91,14 @@ static void rt5514_spi_copy_work(struct work_struct *work)
runtime = rt5514_dsp->substream->runtime;
period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
+ if (!period_bytes) {
+ schedule_delayed_work(&rt5514_dsp->copy_work, 5);
+ goto done;
+ }
+
+ if (rt5514_dsp->buf_size % period_bytes)
+ rt5514_dsp->buf_size = (rt5514_dsp->buf_size / period_bytes) *
+ period_bytes;
if (rt5514_dsp->get_size >= rt5514_dsp->buf_size) {
rt5514_spi_burst_read(RT5514_BUFFER_VOICE_WP, (u8 *)&buf,
@@ -149,13 +157,11 @@ done:
static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
{
- size_t period_bytes;
u8 buf[8];
if (!rt5514_dsp->substream)
return;
- period_bytes = snd_pcm_lib_period_bytes(rt5514_dsp->substream);
rt5514_dsp->get_size = 0;
/**
@@ -183,10 +189,6 @@ static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp)
rt5514_dsp->buf_size = rt5514_dsp->buf_limit - rt5514_dsp->buf_base;
- if (rt5514_dsp->buf_size % period_bytes)
- rt5514_dsp->buf_size = (rt5514_dsp->buf_size / period_bytes) *
- period_bytes;
-
if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit &&
rt5514_dsp->buf_rp && rt5514_dsp->buf_size)
schedule_delayed_work(&rt5514_dsp->copy_work, 0);
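Net effect of the rt5514 change: the period-size-dependent truncation moves out of rt5514_schedule_copy(), where hw_params may not have run yet, and into the copy work, which re-arms itself five jiffies later while period_bytes is still zero. The truncation itself is a simple round-down (helper name hypothetical):

    /* Trim the DSP buffer to a whole number of PCM periods. */
    static size_t usable_bytes(size_t buf_size, size_t period_bytes)
    {
            if (!period_bytes)      /* stream not configured yet */
                    return 0;
            return buf_size - (buf_size % period_bytes);
    }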
diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
index 5bcedbc7eb4a..b7ba64350a07 100644
--- a/sound/soc/codecs/rt5651.c
+++ b/sound/soc/codecs/rt5651.c
@@ -10,7 +10,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c
index 70441661ea4a..7eb2cbd39d6e 100644
--- a/sound/soc/codecs/rt5663.c
+++ b/sound/soc/codecs/rt5663.c
@@ -72,6 +72,7 @@ struct rt5663_priv {
static const struct reg_sequence rt5663_patch_list[] = {
{ 0x002a, 0x8020 },
{ 0x0086, 0x0028 },
+ { 0x0100, 0xa020 },
{ 0x0117, 0x0f28 },
{ 0x02fb, 0x8089 },
};
@@ -580,7 +581,7 @@ static const struct reg_default rt5663_reg[] = {
{ 0x00fd, 0x0001 },
{ 0x00fe, 0x10ec },
{ 0x00ff, 0x6406 },
- { 0x0100, 0xa0a0 },
+ { 0x0100, 0xa020 },
{ 0x0108, 0x4444 },
{ 0x0109, 0x4444 },
{ 0x010a, 0xaaaa },
@@ -2337,6 +2338,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
0x8000);
snd_soc_component_update_bits(component, RT5663_DEPOP_1, 0x3000,
0x3000);
+ snd_soc_component_update_bits(component,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0x0080);
}
break;
@@ -2351,6 +2354,8 @@ static int rt5663_hp_event(struct snd_soc_dapm_widget *w,
RT5663_OVCD_HP_MASK, RT5663_OVCD_HP_EN);
snd_soc_component_update_bits(component,
RT5663_DACREF_LDO, 0x3e0e, 0);
+ snd_soc_component_update_bits(component,
+ RT5663_DIG_VOL_ZCD, 0x00c0, 0);
}
break;
diff --git a/sound/soc/codecs/rt5668.c b/sound/soc/codecs/rt5668.c
index 3f6046a66b56..230a21c93b6b 100644
--- a/sound/soc/codecs/rt5668.c
+++ b/sound/soc/codecs/rt5668.c
@@ -2588,17 +2588,10 @@ static int rt5668_i2c_probe(struct i2c_client *i2c,
}
- return snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5668,
+ return devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5668,
rt5668_dai, ARRAY_SIZE(rt5668_dai));
}
-static int rt5668_i2c_remove(struct i2c_client *i2c)
-{
- snd_soc_unregister_component(&i2c->dev);
-
- return 0;
-}
-
static void rt5668_i2c_shutdown(struct i2c_client *client)
{
struct rt5668_priv *rt5668 = i2c_get_clientdata(client);
@@ -2629,7 +2622,6 @@ static struct i2c_driver rt5668_i2c_driver = {
.acpi_match_table = ACPI_PTR(rt5668_acpi_match),
},
.probe = rt5668_i2c_probe,
- .remove = rt5668_i2c_remove,
.shutdown = rt5668_i2c_shutdown,
.id_table = rt5668_i2c_id,
};
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index f0f8debc2829..453328c988c0 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -2878,6 +2878,18 @@ static const struct dmi_system_id dmi_platform_intel_quirks[] = {
},
{
.callback = rt5670_quirk_cb,
+ .ident = "Lenovo Thinkpad Tablet 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ .driver_data = (unsigned long *)(RT5670_DMIC_EN |
+ RT5670_DMIC2_INR |
+ RT5670_DEV_GPIO |
+ RT5670_JD_MODE1),
+ },
+ {
+ .callback = rt5670_quirk_cb,
.ident = "Lenovo Thinkpad Tablet 10",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/sound/soc/codecs/rt5677-spi.c b/sound/soc/codecs/rt5677-spi.c
index bd51f3655ee3..84501c2020c7 100644
--- a/sound/soc/codecs/rt5677-spi.c
+++ b/sound/soc/codecs/rt5677-spi.c
@@ -18,7 +18,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/regulator/consumer.h>
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c
index 8a3052874c29..34cfaf8f6f34 100644
--- a/sound/soc/codecs/rt5682.c
+++ b/sound/soc/codecs/rt5682.c
@@ -67,7 +67,8 @@ struct rt5682_priv {
};
static const struct reg_sequence patch_list[] = {
- {0x01c1, 0x1000},
+ {RT5682_HP_IMP_SENS_CTRL_19, 0x1000},
+ {RT5682_DAC_ADC_DIG_VOL1, 0xa020},
};
static const struct reg_default rt5682_reg[] = {
@@ -749,7 +750,6 @@ static bool rt5682_readable_register(struct device *dev, unsigned int reg)
}
}
-static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -2250, 150, 0);
static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -6525, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -1725, 75, 0);
static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
@@ -1108,10 +1108,6 @@ static void rt5682_jack_detect_handler(struct work_struct *work)
}
static const struct snd_kcontrol_new rt5682_snd_controls[] = {
- /* Headphone Output Volume */
- SOC_DOUBLE_R_TLV("Headphone Playback Volume", RT5682_HPL_GAIN,
- RT5682_HPR_GAIN, RT5682_G_HP_SFT, 15, 1, hp_vol_tlv),
-
/* DAC Digital Volume */
SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5682_DAC1_DIG_VOL,
RT5682_L_VOL_SFT + 1, RT5682_R_VOL_SFT + 1, 86, 0, dac_vol_tlv),
@@ -1437,6 +1433,28 @@ static const struct snd_kcontrol_new hpor_switch =
SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5682_HP_CTRL_1,
RT5682_R_MUTE_SFT, 1, 1);
+static int rt5682_charge_pump_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component =
+ snd_soc_dapm_to_component(w->dapm);
+
+ switch (event) {
+ case SND_SOC_DAPM_PRE_PMU:
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_HV);
+ break;
+ case SND_SOC_DAPM_POST_PMD:
+ snd_soc_component_update_bits(component,
+ RT5682_HP_CHARGE_PUMP_1, RT5682_PM_HP_MASK, RT5682_PM_HP_LV);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -1449,10 +1467,10 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_HP_LOGIC_CTRL_2, 0x0012);
snd_soc_component_write(component,
RT5682_HP_CTRL_2, 0x6000);
- snd_soc_component_update_bits(component, RT5682_STO_NG2_CTRL_1,
- RT5682_NG2_EN_MASK, RT5682_NG2_EN);
snd_soc_component_update_bits(component,
RT5682_DEPOP_1, 0x60, 0x60);
+ snd_soc_component_update_bits(component,
+ RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080);
break;
case SND_SOC_DAPM_POST_PMD:
@@ -1460,6 +1478,8 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w,
RT5682_DEPOP_1, 0x60, 0x0);
snd_soc_component_write(component,
RT5682_HP_CTRL_2, 0x0000);
+ snd_soc_component_update_bits(component,
+ RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0000);
break;
default:
@@ -1723,7 +1743,8 @@ static const struct snd_soc_dapm_widget rt5682_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("HP Amp R", RT5682_PWR_ANLG_1,
RT5682_PWR_HA_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, RT5682_DEPOP_1,
- RT5682_PUMP_EN_SFT, 0, NULL, 0),
+ RT5682_PUMP_EN_SFT, 0, rt5682_charge_pump_event,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_SUPPLY_S("Capless", 2, RT5682_DEPOP_1,
RT5682_CAPLESS_EN_SFT, 0, NULL, 0),
@@ -1884,6 +1905,7 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = {
{"HP Amp", NULL, "Charge Pump"},
{"HP Amp", NULL, "CLKDET SYS"},
{"HP Amp", NULL, "CBJ Power"},
+ {"HP Amp", NULL, "Vref1"},
{"HP Amp", NULL, "Vref2"},
{"HPOL Playback", "Switch", "HP Amp"},
{"HPOR Playback", "Switch", "HP Amp"},
@@ -2452,30 +2474,23 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
mutex_lock(&rt5682->calibrate_mutex);
rt5682_reset(rt5682->regmap);
- regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2bf);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xa2af);
usleep_range(15000, 20000);
- regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2bf);
- regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0380);
- regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8001);
- regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
- regmap_write(rt5682->regmap, RT5682_STO1_DAC_MIXER, 0x2080);
- regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x4040);
- regmap_write(rt5682->regmap, RT5682_DEPOP_1, 0x0069);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0xf2af);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0300);
+ regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x8000);
+ regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0100);
+ regmap_write(rt5682->regmap, RT5682_HP_IMP_SENS_CTRL_19, 0x3800);
regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x3000);
- regmap_write(rt5682->regmap, RT5682_HP_CTRL_2, 0x6000);
- regmap_write(rt5682->regmap, RT5682_HP_CHARGE_PUMP_1, 0x0f26);
- regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7f05);
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x7005);
regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0x686c);
regmap_write(rt5682->regmap, RT5682_CAL_REC, 0x0d0d);
- regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_9, 0x000f);
- regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x8d01);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_2, 0x0321);
regmap_write(rt5682->regmap, RT5682_HP_LOGIC_CTRL_2, 0x0004);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0x7c00);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_3, 0x06a1);
regmap_write(rt5682->regmap, RT5682_A_DAC1_MUX, 0x0311);
- regmap_write(rt5682->regmap, RT5682_RESET_HPF_CTRL, 0x0000);
- regmap_write(rt5682->regmap, RT5682_ADC_STO1_HP_CTRL_1, 0x3320);
+ regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0x7c00);
regmap_write(rt5682->regmap, RT5682_HP_CALIB_CTRL_1, 0xfc00);
@@ -2491,8 +2506,12 @@ static void rt5682_calibrate(struct rt5682_priv *rt5682)
pr_err("HP Calibration Failure\n");
/* restore settings */
- regmap_write(rt5682->regmap, RT5682_STO1_ADC_MIXER, 0xc0c4);
+ regmap_write(rt5682->regmap, RT5682_PWR_ANLG_1, 0x02af);
+ regmap_write(rt5682->regmap, RT5682_MICBIAS_2, 0x0080);
+ regmap_write(rt5682->regmap, RT5682_GLB_CLK, 0x0000);
regmap_write(rt5682->regmap, RT5682_PWR_DIG_1, 0x0000);
+ regmap_write(rt5682->regmap, RT5682_CHOP_DAC, 0x2000);
+ regmap_write(rt5682->regmap, RT5682_CALIB_ADC_CTRL, 0x2005);
mutex_unlock(&rt5682->calibrate_mutex);
@@ -2566,7 +2585,7 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
rt5682_calibrate(rt5682);
- ret = regmap_register_patch(rt5682->regmap, patch_list,
+ ret = regmap_multi_reg_write(rt5682->regmap, patch_list,
ARRAY_SIZE(patch_list));
if (ret != 0)
dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
@@ -2620,6 +2639,10 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
RT5682_GP4_PIN_MASK | RT5682_GP5_PIN_MASK,
RT5682_GP4_PIN_ADCDAT1 | RT5682_GP5_PIN_DACDAT1);
regmap_write(rt5682->regmap, RT5682_TEST_MODE_CTRL_1, 0x0000);
+ regmap_update_bits(rt5682->regmap, RT5682_BIAS_CUR_CTRL_8,
+ RT5682_HPA_CP_BIAS_CTRL_MASK, RT5682_HPA_CP_BIAS_3UA);
+ regmap_update_bits(rt5682->regmap, RT5682_CHARGE_PUMP_1,
+ RT5682_CP_CLK_HP_MASK, RT5682_CP_CLK_HP_300KHZ);
INIT_DELAYED_WORK(&rt5682->jack_detect_work,
rt5682_jack_detect_handler);
@@ -2637,11 +2660,17 @@ static int rt5682_i2c_probe(struct i2c_client *i2c,
}
- return devm_snd_soc_register_component(&i2c->dev,
- &soc_component_dev_rt5682,
+ return snd_soc_register_component(&i2c->dev, &soc_component_dev_rt5682,
rt5682_dai, ARRAY_SIZE(rt5682_dai));
}
+static int rt5682_i2c_remove(struct i2c_client *i2c)
+{
+ snd_soc_unregister_component(&i2c->dev);
+
+ return 0;
+}
+
static void rt5682_i2c_shutdown(struct i2c_client *client)
{
struct rt5682_priv *rt5682 = i2c_get_clientdata(client);
@@ -2672,6 +2701,7 @@ static struct i2c_driver rt5682_i2c_driver = {
.acpi_match_table = ACPI_PTR(rt5682_acpi_match),
},
.probe = rt5682_i2c_probe,
+ .remove = rt5682_i2c_remove,
.shutdown = rt5682_i2c_shutdown,
.id_table = rt5682_i2c_id,
};
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 8068140ebe3f..d82a8301fd74 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -1214,6 +1214,20 @@
#define RT5682_JDH_NO_PLUG (0x1 << 4)
#define RT5682_JDH_PLUG (0x0 << 4)
+/* Bias current control 8 (0x0111) */
+#define RT5682_HPA_CP_BIAS_CTRL_MASK (0x3 << 2)
+#define RT5682_HPA_CP_BIAS_2UA (0x0 << 2)
+#define RT5682_HPA_CP_BIAS_3UA (0x1 << 2)
+#define RT5682_HPA_CP_BIAS_4UA (0x2 << 2)
+#define RT5682_HPA_CP_BIAS_6UA (0x3 << 2)
+
+/* Charge Pump Internal Register1 (0x0125) */
+#define RT5682_CP_CLK_HP_MASK (0x3 << 4)
+#define RT5682_CP_CLK_HP_100KHZ (0x0 << 4)
+#define RT5682_CP_CLK_HP_200KHZ (0x1 << 4)
+#define RT5682_CP_CLK_HP_300KHZ (0x2 << 4)
+#define RT5682_CP_CLK_HP_600KHZ (0x3 << 4)
+
/* Chopper and Clock control for DAC (0x013a)*/
#define RT5682_CKXEN_DAC1_MASK (0x1 << 13)
#define RT5682_CKXEN_DAC1_SFT 13
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 60764f6201b1..add18d6d77da 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1218,7 +1218,7 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
* Searching for a suitable index solving this formula:
* idx = 40 * log10(vag_val / lo_cagcntrl) + 15
*/
- vol_quot = (vag * 100) / lo_vag;
+ vol_quot = lo_vag ? (vag * 100) / lo_vag : 0;
lo_vol = 0;
for (i = 0; i < ARRAY_SIZE(vol_quot_table); i++) {
if (vol_quot >= vol_quot_table[i])
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index d5035f2f2b2b..f753d2db0a5a 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
@@ -142,6 +143,7 @@ static const char *sta32x_supply_names[] = {
/* codec private data */
struct sta32x_priv {
struct regmap *regmap;
+ struct clk *xti_clk;
struct regulator_bulk_data supplies[ARRAY_SIZE(sta32x_supply_names)];
struct snd_soc_component *component;
struct sta32x_platform_data *pdata;
@@ -879,6 +881,18 @@ static int sta32x_probe(struct snd_soc_component *component)
struct sta32x_priv *sta32x = snd_soc_component_get_drvdata(component);
struct sta32x_platform_data *pdata = sta32x->pdata;
int i, ret = 0, thermal = 0;
+
+ sta32x->component = component;
+
+ if (sta32x->xti_clk) {
+ ret = clk_prepare_enable(sta32x->xti_clk);
+ if (ret != 0) {
+ dev_err(component->dev,
+ "Failed to enable clock: %d\n", ret);
+ return ret;
+ }
+ }
+
ret = regulator_bulk_enable(ARRAY_SIZE(sta32x->supplies),
sta32x->supplies);
if (ret != 0) {
@@ -981,6 +995,9 @@ static void sta32x_remove(struct snd_soc_component *component)
sta32x_watchdog_stop(sta32x);
regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
+
+ if (sta32x->xti_clk)
+ clk_disable_unprepare(sta32x->xti_clk);
}
static const struct snd_soc_component_driver sta32x_component = {
@@ -1038,6 +1055,8 @@ static int sta32x_probe_dt(struct device *dev, struct sta32x_priv *sta32x)
of_property_read_u8(np, "st,ch3-output-mapping",
&pdata->ch3_output_mapping);
+ if (of_get_property(np, "st,fault-detect-recovery", NULL))
+ pdata->fault_detect_recovery = 1;
if (of_get_property(np, "st,thermal-warning-recovery", NULL))
pdata->thermal_warning_recovery = 1;
if (of_get_property(np, "st,thermal-warning-adjustment", NULL))
@@ -1095,6 +1114,17 @@ static int sta32x_i2c_probe(struct i2c_client *i2c,
}
#endif
+ /* Clock */
+ sta32x->xti_clk = devm_clk_get(dev, "xti");
+ if (IS_ERR(sta32x->xti_clk)) {
+ ret = PTR_ERR(sta32x->xti_clk);
+
+ if (ret == -EPROBE_DEFER)
+ return ret;
+
+ sta32x->xti_clk = NULL;
+ }
+
/* GPIOs */
sta32x->gpiod_nreset = devm_gpiod_get_optional(dev, "reset",
GPIOD_OUT_LOW);
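The sta32x clock lookup hand-rolls the "optional clock" idiom: propagate -EPROBE_DEFER, treat any other error as "no clock provided". For reference only, later kernels offer a helper that collapses this (not available at the time of this patch):

    sta32x->xti_clk = devm_clk_get_optional(dev, "xti");
    if (IS_ERR(sta32x->xti_clk))
            return PTR_ERR(sta32x->xti_clk);        /* covers -EPROBE_DEFER */
    /* NULL here just means no "xti" clock was described */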
diff --git a/sound/soc/codecs/tas5720.c b/sound/soc/codecs/tas5720.c
index ae3d032ac35a..6bd0e5d5347f 100644
--- a/sound/soc/codecs/tas5720.c
+++ b/sound/soc/codecs/tas5720.c
@@ -152,6 +152,7 @@ static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
int slots, int slot_width)
{
struct snd_soc_component *component = dai->component;
+ struct tas5720_data *tas5720 = snd_soc_component_get_drvdata(component);
unsigned int first_slot;
int ret;
@@ -185,6 +186,20 @@ static int tas5720_set_dai_tdm_slot(struct snd_soc_dai *dai,
if (ret < 0)
goto error_snd_soc_component_update_bits;
+ /* Configure TDM slot width. This is only applicable to TAS5722. */
+ switch (tas5720->devtype) {
+ case TAS5722:
+ ret = snd_soc_component_update_bits(component, TAS5722_DIGITAL_CTRL2_REG,
+ TAS5722_TDM_SLOT_16B,
+ slot_width == 16 ?
+ TAS5722_TDM_SLOT_16B : 0);
+ if (ret < 0)
+ goto error_snd_soc_component_update_bits;
+ break;
+ default:
+ break;
+ }
+
return 0;
error_snd_soc_component_update_bits:
@@ -485,15 +500,56 @@ static const DECLARE_TLV_DB_RANGE(dac_analog_tlv,
);
/*
- * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB steps. Note that
- * setting the gain below -100 dB (register value <0x7) is effectively a MUTE
- * as per device datasheet.
+ * DAC digital volumes. From -103.5 to 24 dB in 0.5 dB or 0.25 dB steps
+ * depending on the device. Note that setting the gain below -100 dB
+ * (register value <0x7) is effectively a MUTE as per device datasheet.
+ *
+ * Note that for the TAS5722 the digital volume controls are actually split
+ * over two registers, so we need custom getters/setters for access.
*/
-static DECLARE_TLV_DB_SCALE(dac_tlv, -10350, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas5720_dac_tlv, -10350, 50, 0);
+static DECLARE_TLV_DB_SCALE(tas5722_dac_tlv, -10350, 25, 0);
+
+static int tas5722_volume_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ unsigned int val;
+
+ snd_soc_component_read(component, TAS5720_VOLUME_CTRL_REG, &val);
+ ucontrol->value.integer.value[0] = val << 1;
+
+ snd_soc_component_read(component, TAS5722_DIGITAL_CTRL2_REG, &val);
+ ucontrol->value.integer.value[0] |= val & TAS5722_VOL_CONTROL_LSB;
+
+ return 0;
+}
+
+static int tas5722_volume_set(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol);
+ unsigned int sel = ucontrol->value.integer.value[0];
+
+ snd_soc_component_write(component, TAS5720_VOLUME_CTRL_REG, sel >> 1);
+ snd_soc_component_update_bits(component, TAS5722_DIGITAL_CTRL2_REG,
+ TAS5722_VOL_CONTROL_LSB, sel);
+
+ return 0;
+}
static const struct snd_kcontrol_new tas5720_snd_controls[] = {
SOC_SINGLE_TLV("Speaker Driver Playback Volume",
- TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, dac_tlv),
+ TAS5720_VOLUME_CTRL_REG, 0, 0xff, 0, tas5720_dac_tlv),
+ SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
+ TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
+};
+
+static const struct snd_kcontrol_new tas5722_snd_controls[] = {
+ SOC_SINGLE_EXT_TLV("Speaker Driver Playback Volume",
+ 0, 0, 511, 0,
+ tas5722_volume_get, tas5722_volume_set,
+ tas5722_dac_tlv),
SOC_SINGLE_TLV("Speaker Driver Analog Gain", TAS5720_ANALOG_CTRL_REG,
TAS5720_ANALOG_GAIN_SHIFT, 3, 0, dac_analog_tlv),
};
@@ -527,6 +583,23 @@ static const struct snd_soc_component_driver soc_component_dev_tas5720 = {
.non_legacy_dai_naming = 1,
};
+static const struct snd_soc_component_driver soc_component_dev_tas5722 = {
+ .probe = tas5720_codec_probe,
+ .remove = tas5720_codec_remove,
+ .suspend = tas5720_suspend,
+ .resume = tas5720_resume,
+ .controls = tas5722_snd_controls,
+ .num_controls = ARRAY_SIZE(tas5722_snd_controls),
+ .dapm_widgets = tas5720_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tas5720_dapm_widgets),
+ .dapm_routes = tas5720_audio_map,
+ .num_dapm_routes = ARRAY_SIZE(tas5720_audio_map),
+ .idle_bias_on = 1,
+ .use_pmdown_time = 1,
+ .endianness = 1,
+ .non_legacy_dai_naming = 1,
+};
+
/* PCM rates supported by the TAS5720 driver */
#define TAS5720_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
@@ -613,9 +686,23 @@ static int tas5720_probe(struct i2c_client *client,
dev_set_drvdata(dev, data);
- ret = devm_snd_soc_register_component(&client->dev,
- &soc_component_dev_tas5720,
- tas5720_dai, ARRAY_SIZE(tas5720_dai));
+ switch (id->driver_data) {
+ case TAS5720:
+ ret = devm_snd_soc_register_component(&client->dev,
+ &soc_component_dev_tas5720,
+ tas5720_dai,
+ ARRAY_SIZE(tas5720_dai));
+ break;
+ case TAS5722:
+ ret = devm_snd_soc_register_component(&client->dev,
+ &soc_component_dev_tas5722,
+ tas5720_dai,
+ ARRAY_SIZE(tas5720_dai));
+ break;
+ default:
+ dev_err(dev, "unexpected private driver data\n");
+ return -EINVAL;
+ }
if (ret < 0) {
dev_err(dev, "failed to register component: %d\n", ret);
return ret;
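The TAS5722 getter/setter pair above splits a 9-bit volume across two registers: bits [8:1] go to TAS5720_VOLUME_CTRL_REG and bit [0] to TAS5722_VOL_CONTROL_LSB in TAS5722_DIGITAL_CTRL2_REG. Worked example for the maximum control value:

    unsigned int sel = 511;         /* control maximum (0x1FF)       */
    unsigned int msb = sel >> 1;    /* 0xFF, written to VOLUME_CTRL  */
    unsigned int lsb = sel & 0x1;   /* 0x01, the DIGITAL_CTRL2 LSB   */
    /* the getter reverses it: sel = (msb << 1) | lsb */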
diff --git a/sound/soc/codecs/tas6424.c b/sound/soc/codecs/tas6424.c
index 0d6145549a98..36aebdb8f55c 100644
--- a/sound/soc/codecs/tas6424.c
+++ b/sound/soc/codecs/tas6424.c
@@ -41,6 +41,7 @@ struct tas6424_data {
struct regmap *regmap;
struct regulator_bulk_data supplies[TAS6424_NUM_SUPPLIES];
struct delayed_work fault_check_work;
+ unsigned int last_cfault;
unsigned int last_fault1;
unsigned int last_fault2;
unsigned int last_warn;
@@ -406,9 +407,54 @@ static void tas6424_fault_check_work(struct work_struct *work)
unsigned int reg;
int ret;
+ ret = regmap_read(tas6424->regmap, TAS6424_CHANNEL_FAULT, &reg);
+ if (ret < 0) {
+ dev_err(dev, "failed to read CHANNEL_FAULT register: %d\n", ret);
+ goto out;
+ }
+
+ if (!reg) {
+ tas6424->last_cfault = reg;
+ goto check_global_fault1_reg;
+ }
+
+ /*
+ * Only flag errors once for a given occurrence. This is needed as
+ * the TAS6424 will take time clearing the fault condition internally
+ * during which we don't want to bombard the system with the same
+ * error message over and over.
+ */
+ if ((reg & TAS6424_FAULT_OC_CH1) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH1))
+ dev_crit(dev, "experienced a channel 1 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH2) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH2))
+ dev_crit(dev, "experienced a channel 2 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH3) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH3))
+ dev_crit(dev, "experienced a channel 3 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_OC_CH4) && !(tas6424->last_cfault & TAS6424_FAULT_OC_CH4))
+ dev_crit(dev, "experienced a channel 4 overcurrent fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH1) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH1))
+ dev_crit(dev, "experienced a channel 1 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH2) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH2))
+ dev_crit(dev, "experienced a channel 2 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH3) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH3))
+ dev_crit(dev, "experienced a channel 3 DC fault\n");
+
+ if ((reg & TAS6424_FAULT_DC_CH4) && !(tas6424->last_cfault & TAS6424_FAULT_DC_CH4))
+ dev_crit(dev, "experienced a channel 4 DC fault\n");
+
+ /* Store current channel fault value so we can detect any changes next time */
+ tas6424->last_cfault = reg;
+
+check_global_fault1_reg:
ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT1, &reg);
if (ret < 0) {
- dev_err(dev, "failed to read FAULT1 register: %d\n", ret);
+ dev_err(dev, "failed to read GLOB_FAULT1 register: %d\n", ret);
goto out;
}
@@ -429,12 +475,6 @@ static void tas6424_fault_check_work(struct work_struct *work)
goto check_global_fault2_reg;
}
- /*
- * Only flag errors once for a given occurrence. This is needed as
- * the TAS6424 will take time clearing the fault condition internally
- * during which we don't want to bombard the system with the same
- * error message over and over.
- */
if ((reg & TAS6424_FAULT_PVDD_OV) && !(tas6424->last_fault1 & TAS6424_FAULT_PVDD_OV))
dev_crit(dev, "experienced a PVDD overvoltage fault\n");
@@ -453,7 +493,7 @@ static void tas6424_fault_check_work(struct work_struct *work)
check_global_fault2_reg:
ret = regmap_read(tas6424->regmap, TAS6424_GLOB_FAULT2, &reg);
if (ret < 0) {
- dev_err(dev, "failed to read FAULT2 register: %d\n", ret);
+ dev_err(dev, "failed to read GLOB_FAULT2 register: %d\n", ret);
goto out;
}
@@ -530,7 +570,7 @@ check_warn_reg:
/* Store current warn value so we can detect any changes next time */
tas6424->last_warn = reg;
- /* Clear any faults by toggling the CLEAR_FAULT control bit */
+ /* Clear any warnings by toggling the CLEAR_FAULT control bit */
ret = regmap_write_bits(tas6424->regmap, TAS6424_MISC_CTRL3,
TAS6424_CLEAR_FAULT, TAS6424_CLEAR_FAULT);
if (ret < 0)
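Every per-bit check in the fault work follows one edge-detect pattern: report only on a 0-to-1 transition, since the device clears faults slowly and re-reads would otherwise repeat the same message. The repeated predicate, written as a helper (name hypothetical):

    /* True only if `mask` is set now but was clear on the previous poll. */
    static inline bool new_fault(unsigned int now, unsigned int last,
                                 unsigned int mask)
    {
            return (now & mask) && !(last & mask);
    }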
diff --git a/sound/soc/codecs/tas6424.h b/sound/soc/codecs/tas6424.h
index b5958c45ed0e..c67a7835ca66 100644
--- a/sound/soc/codecs/tas6424.h
+++ b/sound/soc/codecs/tas6424.h
@@ -116,6 +116,16 @@
#define TAS6424_LDGBYPASS_MASK BIT(TAS6424_LDGBYPASS_SHIFT)
-/* TAS6424_GLOB_FAULT1_REG */
+/* TAS6424_CHANNEL_FAULT_REG */
+#define TAS6424_FAULT_OC_CH1 BIT(7)
+#define TAS6424_FAULT_OC_CH2 BIT(6)
+#define TAS6424_FAULT_OC_CH3 BIT(5)
+#define TAS6424_FAULT_OC_CH4 BIT(4)
+#define TAS6424_FAULT_DC_CH1 BIT(3)
+#define TAS6424_FAULT_DC_CH2 BIT(2)
+#define TAS6424_FAULT_DC_CH3 BIT(1)
+#define TAS6424_FAULT_DC_CH4 BIT(0)
+
+/* TAS6424_GLOB_FAULT1_REG */
#define TAS6424_FAULT_CLOCK BIT(4)
#define TAS6424_FAULT_PVDD_OV BIT(3)
#define TAS6424_FAULT_VBAT_OV BIT(2)
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index bf92d36b8f8a..608ad49ad978 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -167,6 +167,7 @@ struct aic31xx_priv {
u8 p_div;
int rate_div_line;
bool master_dapm_route_applied;
+ int irq;
};
struct aic31xx_rate_divs {
@@ -1391,6 +1392,69 @@ static const struct acpi_device_id aic31xx_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, aic31xx_acpi_match);
#endif
+static irqreturn_t aic31xx_irq(int irq, void *data)
+{
+ struct aic31xx_priv *aic31xx = data;
+ struct device *dev = aic31xx->dev;
+ unsigned int value;
+ bool handled = false;
+ int ret;
+
+ ret = regmap_read(aic31xx->regmap, AIC31XX_INTRDACFLAG, &value);
+ if (ret) {
+ dev_err(dev, "Failed to read interrupt mask: %d\n", ret);
+ goto exit;
+ }
+
+ if (value)
+ handled = true;
+ else
+ goto read_overflow;
+
+ if (value & AIC31XX_HPLSCDETECT)
+ dev_err(dev, "Short circuit on Left output is detected\n");
+ if (value & AIC31XX_HPRSCDETECT)
+ dev_err(dev, "Short circuit on Right output is detected\n");
+ if (value & ~(AIC31XX_HPLSCDETECT |
+ AIC31XX_HPRSCDETECT))
+ dev_err(dev, "Unknown DAC interrupt flags: 0x%08x\n", value);
+
+read_overflow:
+ ret = regmap_read(aic31xx->regmap, AIC31XX_OFFLAG, &value);
+ if (ret) {
+ dev_err(dev, "Failed to read overflow flag: %d\n", ret);
+ goto exit;
+ }
+
+ if (value)
+ handled = true;
+ else
+ goto exit;
+
+ if (value & AIC31XX_DAC_OF_LEFT)
+ dev_warn(dev, "Left-channel DAC overflow has occurred\n");
+ if (value & AIC31XX_DAC_OF_RIGHT)
+ dev_warn(dev, "Right-channel DAC overflow has occurred\n");
+ if (value & AIC31XX_DAC_OF_SHIFTER)
+ dev_warn(dev, "DAC barrel shifter overflow has occurred\n");
+ if (value & AIC31XX_ADC_OF)
+ dev_warn(dev, "ADC overflow has occurred\n");
+ if (value & AIC31XX_ADC_OF_SHIFTER)
+ dev_warn(dev, "ADC barrel shifter overflow has occurred\n");
+ if (value & ~(AIC31XX_DAC_OF_LEFT |
+ AIC31XX_DAC_OF_RIGHT |
+ AIC31XX_DAC_OF_SHIFTER |
+ AIC31XX_ADC_OF |
+ AIC31XX_ADC_OF_SHIFTER))
+ dev_warn(dev, "Unknown overflow interrupt flags: 0x%08x\n", value);
+
+exit:
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
static int aic31xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
@@ -1413,6 +1477,7 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
aic31xx->dev = &i2c->dev;
+ aic31xx->irq = i2c->irq;
aic31xx->codec_type = id->driver_data;
@@ -1456,6 +1521,26 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
return ret;
}
+ if (aic31xx->irq > 0) {
+ regmap_update_bits(aic31xx->regmap, AIC31XX_GPIO1,
+ AIC31XX_GPIO1_FUNC_MASK,
+ AIC31XX_GPIO1_INT1 <<
+ AIC31XX_GPIO1_FUNC_SHIFT);
+
+ regmap_write(aic31xx->regmap, AIC31XX_INT1CTRL,
+ AIC31XX_SC |
+ AIC31XX_ENGINE);
+
+ ret = devm_request_threaded_irq(aic31xx->dev, aic31xx->irq,
+ NULL, aic31xx_irq,
+ IRQF_ONESHOT, "aic31xx-irq",
+ aic31xx);
+ if (ret) {
+ dev_err(aic31xx->dev, "Unable to request IRQ\n");
+ return ret;
+ }
+ }
+
if (aic31xx->codec_type & DAC31XX_BIT)
return devm_snd_soc_register_component(&i2c->dev,
&soc_codec_driver_aic31xx,
diff --git a/sound/soc/codecs/tlv320aic31xx.h b/sound/soc/codecs/tlv320aic31xx.h
index 0b587585b38b..2636f2c6bc79 100644
--- a/sound/soc/codecs/tlv320aic31xx.h
+++ b/sound/soc/codecs/tlv320aic31xx.h
@@ -173,6 +173,13 @@ struct aic31xx_pdata {
#define AIC31XX_HPRDRVPWRSTATUS_MASK BIT(1)
#define AIC31XX_SPRDRVPWRSTATUS_MASK BIT(0)
+/* AIC31XX_OFFLAG */
+#define AIC31XX_DAC_OF_LEFT BIT(7)
+#define AIC31XX_DAC_OF_RIGHT BIT(6)
+#define AIC31XX_DAC_OF_SHIFTER BIT(5)
+#define AIC31XX_ADC_OF BIT(3)
+#define AIC31XX_ADC_OF_SHIFTER BIT(1)
+
/* AIC31XX_INTRDACFLAG */
#define AIC31XX_HPLSCDETECT BIT(7)
#define AIC31XX_HPRSCDETECT BIT(6)
@@ -191,6 +198,22 @@ struct aic31xx_pdata {
#define AIC31XX_SC BIT(3)
#define AIC31XX_ENGINE BIT(2)
+/* AIC31XX_GPIO1 */
+#define AIC31XX_GPIO1_FUNC_MASK GENMASK(5, 2)
+#define AIC31XX_GPIO1_FUNC_SHIFT 2
+#define AIC31XX_GPIO1_DISABLED 0x00
+#define AIC31XX_GPIO1_INPUT 0x01
+#define AIC31XX_GPIO1_GPI 0x02
+#define AIC31XX_GPIO1_GPO 0x03
+#define AIC31XX_GPIO1_CLKOUT 0x04
+#define AIC31XX_GPIO1_INT1 0x05
+#define AIC31XX_GPIO1_INT2 0x06
+#define AIC31XX_GPIO1_ADC_WCLK 0x07
+#define AIC31XX_GPIO1_SBCLK 0x08
+#define AIC31XX_GPIO1_SWCLK 0x09
+#define AIC31XX_GPIO1_ADC_MOD_CLK 0x0a
+#define AIC31XX_GPIO1_SDOUT 0x0b
+
/* AIC31XX_DACSETUP */
#define AIC31XX_SOFTSTEP_MASK GENMASK(1, 0)
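These GPIO1 values are 4-bit function codes placed into bits [5:2]; the probe hunk earlier routes INT1 out on the GPIO1 pin like so:

    /* function code 0x05 shifted into bits [5:2] -> register value 0x14 */
    regmap_update_bits(aic31xx->regmap, AIC31XX_GPIO1,
                       AIC31XX_GPIO1_FUNC_MASK,
                       AIC31XX_GPIO1_INT1 << AIC31XX_GPIO1_FUNC_SHIFT);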
diff --git a/sound/soc/codecs/tscs454.c b/sound/soc/codecs/tscs454.c
index ff85a0bf6170..93d84e5ae2d5 100644
--- a/sound/soc/codecs/tscs454.c
+++ b/sound/soc/codecs/tscs454.c
@@ -3459,7 +3459,7 @@ static int tscs454_i2c_probe(struct i2c_client *i2c,
/* Sync pg sel reg with cache */
regmap_write(tscs454->regmap, R_PAGESEL, 0x00);
- ret = snd_soc_register_component(&i2c->dev, &soc_component_dev_tscs454,
+ ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_tscs454,
tscs454_dais, ARRAY_SIZE(tscs454_dais));
if (ret) {
dev_err(&i2c->dev, "Failed to register component (%d)\n", ret);
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index c5ae07234a00..bba330e30162 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -88,19 +88,6 @@ static int wm2000_write(struct i2c_client *i2c, unsigned int reg,
return regmap_write(wm2000->regmap, reg, value);
}
-static unsigned int wm2000_read(struct i2c_client *i2c, unsigned int r)
-{
- struct wm2000_priv *wm2000 = i2c_get_clientdata(i2c);
- unsigned int val;
- int ret;
-
- ret = regmap_read(wm2000->regmap, r, &val);
- if (ret < 0)
- return -1;
-
- return val;
-}
-
static void wm2000_reset(struct wm2000_priv *wm2000)
{
struct i2c_client *i2c = wm2000->i2c;
@@ -115,14 +102,15 @@ static void wm2000_reset(struct wm2000_priv *wm2000)
static int wm2000_poll_bit(struct i2c_client *i2c,
unsigned int reg, u8 mask)
{
+ struct wm2000_priv *wm2000 = i2c_get_clientdata(i2c);
int timeout = 4000;
- int val;
+ unsigned int val;
- val = wm2000_read(i2c, reg);
+ regmap_read(wm2000->regmap, reg, &val);
while (!(val & mask) && --timeout) {
msleep(1);
- val = wm2000_read(i2c, reg);
+ regmap_read(wm2000->regmap, reg, &val);
}
if (timeout == 0)
@@ -135,6 +123,7 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
{
struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
unsigned long rate;
+ unsigned int val;
int ret;
if (WARN_ON(wm2000->anc_mode != ANC_OFF))
@@ -213,12 +202,17 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
WM2000_MODE_THERMAL_ENABLE);
}
- ret = wm2000_read(i2c, WM2000_REG_SPEECH_CLARITY);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_SPEECH_CLARITY, &val);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read Speech Clarity: %d\n", ret);
+ regulator_bulk_disable(WM2000_NUM_SUPPLIES, wm2000->supplies);
+ return ret;
+ }
if (wm2000->speech_clarity)
- ret |= WM2000_SPEECH_CLARITY;
+ val |= WM2000_SPEECH_CLARITY;
else
- ret &= ~WM2000_SPEECH_CLARITY;
- wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, ret);
+ val &= ~WM2000_SPEECH_CLARITY;
+ wm2000_write(i2c, WM2000_REG_SPEECH_CLARITY, val);
wm2000_write(i2c, WM2000_REG_SYS_START0, 0x33);
wm2000_write(i2c, WM2000_REG_SYS_START1, 0x02);
@@ -824,7 +818,7 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
const char *filename;
const struct firmware *fw = NULL;
int ret, i;
- int reg;
+ unsigned int reg;
u16 id;
wm2000 = devm_kzalloc(&i2c->dev, sizeof(*wm2000), GFP_KERNEL);
@@ -860,9 +854,17 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
}
/* Verify that this is a WM2000 */
- reg = wm2000_read(i2c, WM2000_REG_ID1);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_ID1, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read ID1: %d\n", ret);
+ return ret;
+ }
id = reg << 8;
- reg = wm2000_read(i2c, WM2000_REG_ID2);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_ID2, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read ID2: %d\n", ret);
+ return ret;
+ }
id |= reg & 0xff;
if (id != 0x2000) {
@@ -871,7 +873,11 @@ static int wm2000_i2c_probe(struct i2c_client *i2c,
goto err_supplies;
}
- reg = wm2000_read(i2c, WM2000_REG_REVISON);
+ ret = regmap_read(wm2000->regmap, WM2000_REG_REVISON, &reg);
+ if (ret != 0) {
+ dev_err(&i2c->dev, "Unable to read Revision: %d\n", ret);
+ return ret;
+ }
dev_info(&i2c->dev, "revision %c\n", reg + 'A');
wm2000->mclk = devm_clk_get(&i2c->dev, "MCLK");
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index 317db9a149a7..cf2cdbece122 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/regulator/consumer.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
@@ -50,7 +51,51 @@ static struct snd_soc_dai_driver wm8782_dai = {
},
};
+/* regulator power supply names */
+static const char *supply_names[] = {
+ "Vdda", /* analog supply, 2.7V - 3.6V */
+ "Vdd", /* digital supply, 2.7V - 5.5V */
+};
+
+struct wm8782_priv {
+ struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
+};
+
+static int wm8782_soc_probe(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ return regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+
+static void wm8782_soc_remove(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+
+#ifdef CONFIG_PM
+static int wm8782_soc_suspend(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ return 0;
+}
+
+static int wm8782_soc_resume(struct snd_soc_component *component)
+{
+ struct wm8782_priv *priv = snd_soc_component_get_drvdata(component);
+ return regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+}
+#else
+#define wm8782_soc_suspend NULL
+#define wm8782_soc_resume NULL
+#endif /* CONFIG_PM */
+
static const struct snd_soc_component_driver soc_component_dev_wm8782 = {
+ .probe = wm8782_soc_probe,
+ .remove = wm8782_soc_remove,
+ .suspend = wm8782_soc_suspend,
+ .resume = wm8782_soc_resume,
.dapm_widgets = wm8782_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
.dapm_routes = wm8782_dapm_routes,
@@ -63,6 +108,24 @@ static const struct snd_soc_component_driver soc_component_dev_wm8782 = {
static int wm8782_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct wm8782_priv *priv;
+ int ret, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ for (i = 0; i < ARRAY_SIZE(supply_names); i++)
+ priv->supplies[i].supply = supply_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ return ret;
+
return devm_snd_soc_register_component(&pdev->dev,
&soc_component_dev_wm8782, &wm8782_dai, 1);
}
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 1965635ec07c..2a3e5fbd04e4 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
diff --git a/sound/soc/codecs/wm8974.c b/sound/soc/codecs/wm8974.c
index 43edaf8cd276..593a11960888 100644
--- a/sound/soc/codecs/wm8974.c
+++ b/sound/soc/codecs/wm8974.c
@@ -11,7 +11,6 @@
*/
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index ade34c26ad2f..e873baa9e778 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -638,13 +638,14 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
{
struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
struct regmap *regmap;
- int ret;
if (wm9712->mfd_pdata) {
wm9712->ac97 = wm9712->mfd_pdata->ac97;
regmap = wm9712->mfd_pdata->regmap;
} else {
#ifdef CONFIG_SND_SOC_AC97_BUS
+ int ret;
+
wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
WM9712_VENDOR_ID_MASK);
if (IS_ERR(wm9712->ac97)) {
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f61656070225..a53dc174bbf0 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -311,12 +311,12 @@ struct wm_adsp_alg_xm_struct {
};
struct wm_adsp_buffer {
- __be32 X_buf_base; /* XM base addr of first X area */
- __be32 X_buf_size; /* Size of 1st X area in words */
- __be32 X_buf_base2; /* XM base addr of 2nd X area */
- __be32 X_buf_brk; /* Total X size in words */
- __be32 Y_buf_base; /* YM base addr of Y area */
- __be32 wrap; /* Total size X and Y in words */
+ __be32 buf1_base; /* Base addr of first buffer area */
+ __be32 buf1_size; /* Size of buf1 area in DSP words */
+ __be32 buf2_base; /* Base addr of 2nd buffer area */
+ __be32 buf1_buf2_size; /* Size of buf1+buf2 in DSP words */
+ __be32 buf3_base; /* Base addr of buf3 area */
+ __be32 buf_total_size; /* Size of buf1+buf2+buf3 in DSP words */
__be32 high_water_mark; /* Point at which IRQ is asserted */
__be32 irq_count; /* bits 1-31 count IRQ assertions */
__be32 irq_ack; /* acked IRQ count, bit 0 enables IRQ */
@@ -393,18 +393,18 @@ struct wm_adsp_buffer_region_def {
static const struct wm_adsp_buffer_region_def default_regions[] = {
{
.mem_type = WMFW_ADSP2_XM,
- .base_offset = HOST_BUFFER_FIELD(X_buf_base),
- .size_offset = HOST_BUFFER_FIELD(X_buf_size),
+ .base_offset = HOST_BUFFER_FIELD(buf1_base),
+ .size_offset = HOST_BUFFER_FIELD(buf1_size),
},
{
.mem_type = WMFW_ADSP2_XM,
- .base_offset = HOST_BUFFER_FIELD(X_buf_base2),
- .size_offset = HOST_BUFFER_FIELD(X_buf_brk),
+ .base_offset = HOST_BUFFER_FIELD(buf2_base),
+ .size_offset = HOST_BUFFER_FIELD(buf1_buf2_size),
},
{
.mem_type = WMFW_ADSP2_YM,
- .base_offset = HOST_BUFFER_FIELD(Y_buf_base),
- .size_offset = HOST_BUFFER_FIELD(wrap),
+ .base_offset = HOST_BUFFER_FIELD(buf3_base),
+ .size_offset = HOST_BUFFER_FIELD(buf_total_size),
},
};
@@ -3345,7 +3345,7 @@ static int wm_adsp_buffer_populate(struct wm_adsp_compr_buf *buf)
region->cumulative_size = offset;
adsp_dbg(buf->dsp,
- "region=%d type=%d base=%04x off=%04x size=%04x\n",
+ "region=%d type=%d base=%08x off=%08x size=%08x\n",
i, region->mem_type, region->base_addr,
region->offset, region->cumulative_size);
}
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index f70db8412c7c..267aee776b2d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1041,6 +1041,42 @@ static int davinci_mcasp_calc_clk_div(struct davinci_mcasp *mcasp,
return error_ppm;
}
+static inline u32 davinci_mcasp_tx_delay(struct davinci_mcasp *mcasp)
+{
+ if (!mcasp->txnumevt)
+ return 0;
+
+ return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_WFIFOSTS_OFFSET);
+}
+
+static inline u32 davinci_mcasp_rx_delay(struct davinci_mcasp *mcasp)
+{
+ if (!mcasp->rxnumevt)
+ return 0;
+
+ return mcasp_get_reg(mcasp, mcasp->fifo_base + MCASP_RFIFOSTS_OFFSET);
+}
+
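+/*
+ * Report the number of samples still held in the McASP FIFO so the
+ * delay seen by userspace accounts for them as well.
+ */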
+static snd_pcm_sframes_t davinci_mcasp_delay(
+ struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ u32 fifo_use;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ fifo_use = davinci_mcasp_tx_delay(mcasp);
+ else
+ fifo_use = davinci_mcasp_rx_delay(mcasp);
+
+ /*
+ * Divide the used locations with the channel count to get the
+ * FIFO usage in samples (don't care about partial samples in the
+ * buffer).
+ */
+ return fifo_use / substream->runtime->channels;
+}
+
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
@@ -1365,6 +1401,7 @@ static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
.startup = davinci_mcasp_startup,
.shutdown = davinci_mcasp_shutdown,
.trigger = davinci_mcasp_trigger,
+ .delay = davinci_mcasp_delay,
.hw_params = davinci_mcasp_hw_params,
.set_fmt = davinci_mcasp_set_dai_fmt,
.set_clkdiv = davinci_mcasp_set_clkdiv,
diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c
index 1033ac6631b0..01052a0808b0 100644
--- a/sound/soc/fsl/fsl_asrc_dma.c
+++ b/sound/soc/fsl/fsl_asrc_dma.c
@@ -151,7 +151,7 @@ static int fsl_asrc_dma_hw_params(struct snd_pcm_substream *substream,
int ret;
/* Fetch the Back-End dma_data from DPCM */
- list_for_each_entry(dpcm, &rtd->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(rtd, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *substream_be;
struct snd_soc_dai *dai = be->cpu_dai;
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index c1d1d06783e5..57b484768a58 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -807,7 +807,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
return -ENOMEM;
esai_priv->pdev = pdev;
- strncpy(esai_priv->name, np->name, sizeof(esai_priv->name) - 1);
+ snprintf(esai_priv->name, sizeof(esai_priv->name), "%pOFn", np);
/* Get the addresses and IRQ */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
index 7f0fa4b52223..9981668ab590 100644
--- a/sound/soc/fsl/fsl_utils.c
+++ b/sound/soc/fsl/fsl_utils.c
@@ -57,8 +57,8 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
of_node_put(dma_channel_np);
return ret;
}
- snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%s",
- (unsigned long long) res.start, dma_channel_np->name);
+ snprintf((char *)dai->platform_name, DAI_NAME_SIZE, "%llx.%pOFn",
+ (unsigned long long) res.start, dma_channel_np);
iprop = of_get_property(dma_channel_np, "cell-index", NULL);
if (!iprop) {
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index ec731223cab3..e339f36cea95 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -57,6 +57,7 @@ static int pcm030_fabric_probe(struct platform_device *op)
struct device_node *platform_np;
struct snd_soc_card *card = &pcm030_card;
struct pcm030_audio_data *pdata;
+ struct snd_soc_dai_link *dai_link;
int ret;
int i;
@@ -78,8 +79,8 @@ static int pcm030_fabric_probe(struct platform_device *op)
return -ENODEV;
}
- for (i = 0; i < card->num_links; i++)
- card->dai_link[i].platform_of_node = platform_np;
+ for_each_card_prelinks(card, i, dai_link)
+ dai_link->platform_of_node = platform_np;
ret = request_module("snd-soc-wm9712");
if (ret)
diff --git a/sound/soc/generic/audio-graph-card.c b/sound/soc/generic/audio-graph-card.c
index 2094d2c8919f..25c819e402e1 100644
--- a/sound/soc/generic/audio-graph-card.c
+++ b/sound/soc/generic/audio-graph-card.c
@@ -25,6 +25,8 @@ struct graph_card_data {
struct graph_dai_props {
struct asoc_simple_dai cpu_dai;
struct asoc_simple_dai codec_dai;
+ struct snd_soc_dai_link_component codecs; /* single codec */
+ struct snd_soc_dai_link_component platform;
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
@@ -180,7 +182,8 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
if (ret < 0)
goto dai_link_of_err;
- of_property_read_u32(rcpu_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(cpu_ep, "mclk-fs", &dai_props->mclk_fs);
+ of_property_read_u32(codec_ep, "mclk-fs", &dai_props->mclk_fs);
ret = asoc_simple_card_parse_graph_cpu(cpu_ep, dai_link);
if (ret < 0)
@@ -213,7 +216,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *cpu_port,
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
goto dai_link_of_err;
@@ -299,7 +302,7 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
struct graph_dai_props *dai_props;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -315,6 +318,18 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu/platform will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->pa_gpio = devm_gpiod_get_optional(dev, "pa", GPIOD_OUT_LOW);
if (IS_ERR(priv->pa_gpio)) {
ret = PTR_ERR(priv->pa_gpio);
diff --git a/sound/soc/generic/audio-graph-scu-card.c b/sound/soc/generic/audio-graph-scu-card.c
index 92882e392d6c..b83bb31021a9 100644
--- a/sound/soc/generic/audio-graph-scu-card.c
+++ b/sound/soc/generic/audio-graph-scu-card.c
@@ -25,7 +25,11 @@
struct graph_card_data {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props {
+ struct asoc_simple_dai dai;
+ struct snd_soc_dai_link_component codecs;
+ struct snd_soc_dai_link_component platform;
+ } *dai_props;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_card_data adata;
};
@@ -39,18 +43,18 @@ static int asoc_graph_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- return asoc_simple_card_clk_enable(dai_props);
+ return asoc_simple_card_clk_enable(&dai_props->dai);
}
static void asoc_graph_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, rtd->num);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(dai_props);
+ asoc_simple_card_clk_disable(&dai_props->dai);
}
static const struct snd_soc_ops asoc_graph_card_ops = {
@@ -63,7 +67,7 @@ static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
struct graph_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props *dai_props;
int num = rtd->num;
dai_link = graph_priv_to_link(priv, num);
@@ -72,7 +76,7 @@ static int asoc_graph_card_dai_init(struct snd_soc_pcm_runtime *rtd)
rtd->cpu_dai :
rtd->codec_dai;
- return asoc_simple_card_init_dai(dai, dai_props);
+ return asoc_simple_card_init_dai(dai, &dai_props->dai);
}
static int asoc_graph_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -92,15 +96,18 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
{
struct device *dev = graph_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = graph_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = graph_priv_to_props(priv, idx);
+ struct graph_dai_props *dai_props = graph_priv_to_props(priv, idx);
struct snd_soc_card *card = graph_priv_to_card(priv);
int ret;
if (is_fe) {
+ struct snd_soc_dai_link_component *codecs;
+
/* BE is dummy */
- dai_link->codec_of_node = NULL;
- dai_link->codec_dai_name = "snd-soc-dummy-dai";
- dai_link->codec_name = "snd-soc-dummy";
+ codecs = dai_link->codecs;
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
/* FE settings */
dai_link->dynamic = 1;
@@ -110,7 +117,7 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_cpu(dev, ep, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
@@ -137,23 +144,23 @@ static int asoc_graph_card_dai_link_of(struct device_node *ep,
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_codec(dev, ep, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"be.%s",
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
return ret;
snd_soc_of_parse_audio_prefix(card,
&priv->codec_conf,
- dai_link->codec_of_node,
+ dai_link->codecs->of_node,
"prefix");
}
- ret = asoc_simple_card_of_parse_tdm(ep, dai_props);
+ ret = asoc_simple_card_of_parse_tdm(ep, &dai_props->dai);
if (ret)
return ret;
@@ -331,10 +338,10 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
{
struct graph_card_data *priv;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct graph_dai_props *dai_props;
struct device *dev = &pdev->dev;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data and the DAI link array */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -350,6 +357,18 @@ static int asoc_graph_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu/platform will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index d3f3f0fec74c..f34cc6cddfa2 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -173,12 +173,24 @@ int asoc_simple_card_parse_clk(struct device *dev,
struct device_node *node,
struct device_node *dai_of_node,
struct asoc_simple_dai *simple_dai,
- const char *name)
+ const char *dai_name,
+ struct snd_soc_dai_link_component *dlc)
{
struct clk *clk;
u32 val;
/*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_of_node = dlc->of_node;
+ dai_name = dlc->dai_name;
+ }
+
+ /*
* Parse dai->sysclk come from "clocks = <&xxx>"
* (if system has common clock)
* or "system-clock-frequency = <xxx>"
@@ -200,7 +212,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
if (of_property_read_bool(node, "system-clock-direction-out"))
simple_dai->clk_direction = SND_SOC_CLOCK_OUT;
- dev_dbg(dev, "%s : sysclk = %d, direction %d\n", name,
+ dev_dbg(dev, "%s : sysclk = %d, direction %d\n", dai_name,
simple_dai->sysclk, simple_dai->clk_direction);
return 0;
@@ -208,6 +220,7 @@ int asoc_simple_card_parse_clk(struct device *dev,
EXPORT_SYMBOL_GPL(asoc_simple_card_parse_clk);
int asoc_simple_card_parse_dai(struct device_node *node,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **dai_of_node,
const char **dai_name,
const char *list_name,
@@ -221,6 +234,17 @@ int asoc_simple_card_parse_dai(struct device_node *node,
return 0;
/*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_name = &dlc->dai_name;
+ dai_of_node = &dlc->of_node;
+ }
+
+ /*
* Get node via "sound-dai = <&phandle port>"
* it will be used as xxx_of_node on soc_bind_dai_link()
*/
@@ -278,6 +302,7 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
}
int asoc_simple_card_parse_graph_dai(struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc,
struct device_node **dai_of_node,
const char **dai_name)
{
@@ -285,6 +310,17 @@ int asoc_simple_card_parse_graph_dai(struct device_node *ep,
struct of_phandle_args args;
int ret;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ if (dlc) {
+ dai_name = &dlc->dai_name;
+ dai_of_node = &dlc->of_node;
+ }
+
if (!ep)
return 0;
if (!dai_name)
@@ -340,10 +376,11 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_init_dai);
int asoc_simple_card_canonicalize_dailink(struct snd_soc_dai_link *dai_link)
{
/* Assumes platform == cpu */
- if (!dai_link->platform_of_node)
- dai_link->platform_of_node = dai_link->cpu_of_node;
+ if (!dai_link->platform->of_node)
+ dai_link->platform->of_node = dai_link->cpu_of_node;
return 0;
}
EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_dailink);
@@ -367,13 +404,11 @@ EXPORT_SYMBOL_GPL(asoc_simple_card_canonicalize_cpu);
int asoc_simple_card_clean_reference(struct snd_soc_card *card)
{
struct snd_soc_dai_link *dai_link;
- int num_links;
+ int i;
- for (num_links = 0, dai_link = card->dai_link;
- num_links < card->num_links;
- num_links++, dai_link++) {
+ for_each_card_prelinks(card, i, dai_link) {
of_node_put(dai_link->cpu_of_node);
- of_node_put(dai_link->codec_of_node);
+ of_node_put(dai_link->codecs->of_node);
}
return 0;
}
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 64bf3560c1d1..5a3f59aa4ba5 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -20,6 +20,8 @@ struct simple_card_data {
struct simple_dai_props {
struct asoc_simple_dai cpu_dai;
struct asoc_simple_dai codec_dai;
+ struct snd_soc_dai_link_component codecs; /* single codec */
+ struct snd_soc_dai_link_component platform;
unsigned int mclk_fs;
} *dai_props;
unsigned int mclk_fs;
@@ -234,7 +236,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *node,
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"%s-%s",
dai_link->cpu_dai_name,
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
goto dai_link_of_err;
@@ -363,7 +365,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct snd_soc_card *card;
- int num, ret;
+ int num, ret, i;
/* Get the number of DAI links */
if (np && of_get_child_by_name(np, PREFIX "dai-link"))
@@ -381,6 +383,18 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu/platform will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
@@ -403,6 +417,8 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
} else {
struct asoc_simple_card_info *cinfo;
+ struct snd_soc_dai_link_component *codecs;
+ struct snd_soc_dai_link_component *platform;
cinfo = dev->platform_data;
if (!cinfo) {
@@ -419,13 +435,17 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
return -EINVAL;
}
+ codecs = dai_link->codecs;
+ codecs->name = cinfo->codec;
+ codecs->dai_name = cinfo->codec_dai.name;
+
+ platform = dai_link->platform;
+ platform->name = cinfo->platform;
+
card->name = (cinfo->card) ? cinfo->card : cinfo->name;
dai_link->name = cinfo->name;
dai_link->stream_name = cinfo->name;
- dai_link->platform_name = cinfo->platform;
- dai_link->codec_name = cinfo->codec;
dai_link->cpu_dai_name = cinfo->cpu_dai.name;
- dai_link->codec_dai_name = cinfo->codec_dai.name;
dai_link->dai_fmt = cinfo->daifmt;
dai_link->init = asoc_simple_card_dai_init;
memcpy(&priv->dai_props->cpu_dai, &cinfo->cpu_dai,
diff --git a/sound/soc/generic/simple-scu-card.c b/sound/soc/generic/simple-scu-card.c
index 16a83bc51e0e..85b46f0eae0f 100644
--- a/sound/soc/generic/simple-scu-card.c
+++ b/sound/soc/generic/simple-scu-card.c
@@ -22,7 +22,11 @@
struct simple_card_data {
struct snd_soc_card snd_card;
struct snd_soc_codec_conf codec_conf;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props {
+ struct asoc_simple_dai dai;
+ struct snd_soc_dai_link_component codecs;
+ struct snd_soc_dai_link_component platform;
+ } *dai_props;
struct snd_soc_dai_link *dai_link;
struct asoc_simple_card_data adata;
};
@@ -40,20 +44,20 @@ static int asoc_simple_card_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
+ struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- return asoc_simple_card_clk_enable(dai_props);
+ return asoc_simple_card_clk_enable(&dai_props->dai);
}
static void asoc_simple_card_shutdown(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
- struct asoc_simple_dai *dai_props =
+ struct simple_dai_props *dai_props =
simple_priv_to_props(priv, rtd->num);
- asoc_simple_card_clk_disable(dai_props);
+ asoc_simple_card_clk_disable(&dai_props->dai);
}
static const struct snd_soc_ops asoc_simple_card_ops = {
@@ -66,7 +70,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
struct simple_card_data *priv = snd_soc_card_get_drvdata(rtd->card);
struct snd_soc_dai *dai;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props *dai_props;
int num = rtd->num;
dai_link = simple_priv_to_link(priv, num);
@@ -75,7 +79,7 @@ static int asoc_simple_card_dai_init(struct snd_soc_pcm_runtime *rtd)
rtd->cpu_dai :
rtd->codec_dai;
- return asoc_simple_card_init_dai(dai, dai_props);
+ return asoc_simple_card_init_dai(dai, &dai_props->dai);
}
static int asoc_simple_card_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
@@ -95,17 +99,19 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
{
struct device *dev = simple_priv_to_dev(priv);
struct snd_soc_dai_link *dai_link = simple_priv_to_link(priv, idx);
- struct asoc_simple_dai *dai_props = simple_priv_to_props(priv, idx);
+ struct simple_dai_props *dai_props = simple_priv_to_props(priv, idx);
struct snd_soc_card *card = simple_priv_to_card(priv);
int ret;
if (is_fe) {
int is_single_links = 0;
+ struct snd_soc_dai_link_component *codecs;
/* BE is dummy */
- dai_link->codec_of_node = NULL;
- dai_link->codec_dai_name = "snd-soc-dummy-dai";
- dai_link->codec_name = "snd-soc-dummy";
+ codecs = dai_link->codecs;
+ codecs->of_node = NULL;
+ codecs->dai_name = "snd-soc-dummy-dai";
+ codecs->name = "snd-soc-dummy";
/* FE settings */
dai_link->dynamic = 1;
@@ -116,7 +122,7 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret)
return ret;
- ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_cpu(dev, np, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
@@ -141,23 +147,23 @@ static int asoc_simple_card_dai_link_of(struct device_node *np,
if (ret < 0)
return ret;
- ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, dai_props);
+ ret = asoc_simple_card_parse_clk_codec(dev, np, dai_link, &dai_props->dai);
if (ret < 0)
return ret;
ret = asoc_simple_card_set_dailink_name(dev, dai_link,
"be.%s",
- dai_link->codec_dai_name);
+ dai_link->codecs->dai_name);
if (ret < 0)
return ret;
snd_soc_of_parse_audio_prefix(card,
&priv->codec_conf,
- dai_link->codec_of_node,
+ dai_link->codecs->of_node,
PREFIX "prefix");
}
- ret = asoc_simple_card_of_parse_tdm(np, dai_props);
+ ret = asoc_simple_card_of_parse_tdm(np, &dai_props->dai);
if (ret)
return ret;
@@ -230,11 +236,11 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
{
struct simple_card_data *priv;
struct snd_soc_dai_link *dai_link;
- struct asoc_simple_dai *dai_props;
+ struct simple_dai_props *dai_props;
struct snd_soc_card *card;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- int num, ret;
+ int num, ret, i;
/* Allocate the private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -248,6 +254,18 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
if (!dai_props || !dai_link)
return -ENOMEM;
+ /*
+ * Use snd_soc_dai_link_component instead of the legacy style.
+ * Codec only for now; cpu/platform will be supported in the future.
+ * See soc-core.c :: snd_soc_init_multicodec()
+ */
+ for (i = 0; i < num; i++) {
+ dai_link[i].codecs = &dai_props[i].codecs;
+ dai_link[i].num_codecs = 1;
+ dai_link[i].platform = &dai_props[i].platform;
+ }
+
priv->dai_props = dai_props;
priv->dai_link = dai_link;
diff --git a/sound/soc/hisilicon/hi6210-i2s.c b/sound/soc/hisilicon/hi6210-i2s.c
index 53344a3b7a60..a69e5b11b3da 100644
--- a/sound/soc/hisilicon/hi6210-i2s.c
+++ b/sound/soc/hisilicon/hi6210-i2s.c
@@ -269,13 +269,13 @@ static int hi6210_i2s_hw_params(struct snd_pcm_substream *substream,
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U16_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_FORMAT_S16_LE:
bits = HII2S_BITS_16;
break;
case SNDRV_PCM_FORMAT_U24_LE:
signed_data = HII2S_I2S_CFG__S2_CODEC_DATA_FORMAT;
- /* fallthru */
+ /* fall through */
case SNDRV_PCM_FORMAT_S24_LE:
bits = HII2S_BITS_24;
break;
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 6c36da560877..afc559866095 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -765,7 +765,7 @@ static int sst_soc_prepare(struct device *dev)
snd_soc_poweroff(drv->soc_card->dev);
/* set the SSPs to idle */
- list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
+ for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = rtd->cpu_dai;
if (dai->active) {
@@ -786,7 +786,7 @@ static void sst_soc_complete(struct device *dev)
return;
/* restart SSPs */
- list_for_each_entry(rtd, &drv->soc_card->rtd_list, list) {
+ for_each_card_rtds(drv->soc_card, rtd) {
struct snd_soc_dai *dai = rtd->cpu_dai;
if (dai->active) {
diff --git a/sound/soc/intel/boards/Kconfig b/sound/soc/intel/boards/Kconfig
index cccda87f4b34..73ca1350aa31 100644
--- a/sound/soc/intel/boards/Kconfig
+++ b/sound/soc/intel/boards/Kconfig
@@ -279,6 +279,28 @@ config SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH
This adds support for ASoC Onboard Codec I2S machine driver. This will
create an alsa sound card for DA7219 + MAX98357A I2S audio codec.
Say Y if you have such a device.
+
+config SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH
+ tristate "KBL with DA7219 and MAX98927 in I2S Mode"
+ depends on MFD_INTEL_LPSS && I2C && ACPI
+ select SND_SOC_DA7219
+ select SND_SOC_MAX98927
+ select SND_SOC_DMIC
+ select SND_SOC_HDAC_HDMI
+ help
+ This adds support for ASoC Onboard Codec I2S machine driver. This will
+ create an alsa sound card for DA7219 + MAX98927 I2S audio codec.
+ Say Y if you have such a device.
+ If unsure select "N".
+
+config SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH
+ tristate "SKL/KBL/BXT/APL with HDA Codecs"
+ select SND_SOC_HDAC_HDMI
+ select SND_SOC_HDAC_HDA
+ help
+ This adds support for ASoC machine driver for Intel platforms
+ SKL/KBL/BXT/APL with iDisp, HDA audio codecs.
+ Say Y or m if you have such a device. This is a recommended option.
If unsure select "N".
config SND_SOC_INTEL_GLK_RT5682_MAX98357A_MACH
diff --git a/sound/soc/intel/boards/Makefile b/sound/soc/intel/boards/Makefile
index 87ef8b4058e5..5381e27df9cc 100644
--- a/sound/soc/intel/boards/Makefile
+++ b/sound/soc/intel/boards/Makefile
@@ -17,9 +17,11 @@ snd-soc-sst-byt-cht-da7213-objs := bytcht_da7213.o
snd-soc-sst-byt-cht-es8316-objs := bytcht_es8316.o
snd-soc-sst-byt-cht-nocodec-objs := bytcht_nocodec.o
snd-soc-kbl_da7219_max98357a-objs := kbl_da7219_max98357a.o
+snd-soc-kbl_da7219_max98927-objs := kbl_da7219_max98927.o
snd-soc-kbl_rt5663_max98927-objs := kbl_rt5663_max98927.o
snd-soc-kbl_rt5663_rt5514_max98927-objs := kbl_rt5663_rt5514_max98927.o
snd-soc-skl_rt286-objs := skl_rt286.o
+snd-soc-skl_hda_dsp-objs := skl_hda_dsp_generic.o skl_hda_dsp_common.o
snd-skl_nau88l25_max98357a-objs := skl_nau88l25_max98357a.o
snd-soc-skl_nau88l25_ssm4567-objs := skl_nau88l25_ssm4567.o
@@ -41,8 +43,10 @@ obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_DA7213_MACH) += snd-soc-sst-byt-cht-da7213.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_ES8316_MACH) += snd-soc-sst-byt-cht-es8316.o
obj-$(CONFIG_SND_SOC_INTEL_BYT_CHT_NOCODEC_MACH) += snd-soc-sst-byt-cht-nocodec.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98357A_MACH) += snd-soc-kbl_da7219_max98357a.o
+obj-$(CONFIG_SND_SOC_INTEL_KBL_DA7219_MAX98927_MACH) += snd-soc-kbl_da7219_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_MAX98927_MACH) += snd-soc-kbl_rt5663_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_KBL_RT5663_RT5514_MAX98927_MACH) += snd-soc-kbl_rt5663_rt5514_max98927.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_RT286_MACH) += snd-soc-skl_rt286.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_MAX98357A_MACH) += snd-skl_nau88l25_max98357a.o
obj-$(CONFIG_SND_SOC_INTEL_SKL_NAU88L25_SSM4567_MACH) += snd-soc-skl_nau88l25_ssm4567.o
+obj-$(CONFIG_SND_SOC_INTEL_SKL_HDA_DSP_GENERIC_MACH) += snd-soc-skl_hda_dsp.o
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7b0ee67b4fc8..68e6543e6cb0 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -223,7 +223,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
static int broadwell_suspend(struct snd_soc_card *card){
struct snd_soc_component *component;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, "i2c-INT343A:00")) {
dev_dbg(component->dev, "disabling jack detect before going to suspend.\n");
@@ -237,7 +237,7 @@ static int broadwell_suspend(struct snd_soc_card *card){
static int broadwell_resume(struct snd_soc_card *card){
struct snd_soc_component *component;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, "i2c-INT343A:00")) {
dev_dbg(component->dev, "enabling jack detect for resume.\n");
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index b6dc524830b2..8587bd3d1cc1 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -1048,7 +1048,7 @@ static int byt_rt5640_suspend(struct snd_soc_card *card)
if (!BYT_RT5640_JDSRC(byt_rt5640_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5640_codec_name)) {
dev_dbg(component->dev, "disabling jack detect before suspend\n");
snd_soc_component_set_jack(component, NULL, NULL);
@@ -1067,7 +1067,7 @@ static int byt_rt5640_resume(struct snd_soc_card *card)
if (!BYT_RT5640_JDSRC(byt_rt5640_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5640_codec_name)) {
dev_dbg(component->dev, "re-enabling jack detect after resume\n");
snd_soc_component_set_jack(component, &priv->jack, NULL);
diff --git a/sound/soc/intel/boards/bytcr_rt5651.c b/sound/soc/intel/boards/bytcr_rt5651.c
index 6af02bf879ac..c44298130720 100644
--- a/sound/soc/intel/boards/bytcr_rt5651.c
+++ b/sound/soc/intel/boards/bytcr_rt5651.c
@@ -742,7 +742,7 @@ static int byt_rt5651_suspend(struct snd_soc_card *card)
if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5651_codec_name)) {
dev_dbg(component->dev, "disabling jack detect before suspend\n");
snd_soc_component_set_jack(component, NULL, NULL);
@@ -761,7 +761,7 @@ static int byt_rt5651_resume(struct snd_soc_card *card)
if (!BYT_RT5651_JDSRC(byt_rt5651_quirk))
return 0;
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strcmp(component->name, byt_rt5651_codec_name)) {
dev_dbg(component->dev, "re-enabling jack detect after resume\n");
snd_soc_component_set_jack(component, &priv->jack, NULL);
diff --git a/sound/soc/intel/boards/cht_bsw_rt5672.c b/sound/soc/intel/boards/cht_bsw_rt5672.c
index e5aa13058dd7..51f0d45d6f8f 100644
--- a/sound/soc/intel/boards/cht_bsw_rt5672.c
+++ b/sound/soc/intel/boards/cht_bsw_rt5672.c
@@ -16,6 +16,7 @@
* General Public License for more details.
*/
+#include <linux/input.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -212,6 +213,10 @@ static int cht_codec_init(struct snd_soc_pcm_runtime *runtime)
if (ret)
return ret;
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
+ snd_jack_set_key(ctx->headset.jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
+
rt5670_set_jack_detect(component, &ctx->headset);
if (ctx->mclk) {
/*
@@ -342,7 +347,7 @@ static int cht_suspend_pre(struct snd_soc_card *card)
struct snd_soc_component *component;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strncmp(component->name,
ctx->codec_name, sizeof(ctx->codec_name))) {
@@ -359,7 +364,7 @@ static int cht_resume_post(struct snd_soc_card *card)
struct snd_soc_component *component;
struct cht_mc_private *ctx = snd_soc_card_get_drvdata(card);
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (!strncmp(component->name,
ctx->codec_name, sizeof(ctx->codec_name))) {
diff --git a/sound/soc/intel/boards/kbl_da7219_max98927.c b/sound/soc/intel/boards/kbl_da7219_max98927.c
new file mode 100644
index 000000000000..3fa1c3ca6d37
--- /dev/null
+++ b/sound/soc/intel/boards/kbl_da7219_max98927.c
@@ -0,0 +1,983 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2018 Intel Corporation.
+
+/*
+ * Intel Kabylake I2S Machine Driver with MAX98927 & DA7219 Codecs
+ *
+ * Modified from:
+ * Intel Kabylake I2S Machine driver supporting MAX98927 and
+ * RT5663 codecs
+ */
+
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/da7219.h"
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "../../codecs/da7219-aad.h"
+
+#define KBL_DIALOG_CODEC_DAI "da7219-hifi"
+#define MAX98927_CODEC_DAI "max98927-aif1"
+#define MAXIM_DEV0_NAME "i2c-MX98927:00"
+#define MAXIM_DEV1_NAME "i2c-MX98927:01"
+#define DUAL_CHANNEL 2
+#define QUAD_CHANNEL 4
+#define NAME_SIZE 32
+
+static struct snd_soc_card *kabylake_audio_card;
+static struct snd_soc_jack kabylake_hdmi[3];
+
+struct kbl_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ int device;
+};
+
+struct kbl_codec_private {
+ struct snd_soc_jack kabylake_headset;
+ struct list_head hdmi_pcm_list;
+};
+
+enum {
+ KBL_DPCM_AUDIO_PB = 0,
+ KBL_DPCM_AUDIO_CP,
+ KBL_DPCM_AUDIO_ECHO_REF_CP,
+ KBL_DPCM_AUDIO_REF_CP,
+ KBL_DPCM_AUDIO_DMIC_CP,
+ KBL_DPCM_AUDIO_HDMI1_PB,
+ KBL_DPCM_AUDIO_HDMI2_PB,
+ KBL_DPCM_AUDIO_HDMI3_PB,
+ KBL_DPCM_AUDIO_HS_PB,
+};
+
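+/*
+ * DAPM "Platform Clock" supply widget: with a 24.576 MHz MCLK as
+ * sysclk, start the DA7219 SRM PLL (98.304 MHz output) before
+ * power-up and fall back to plain MCLK on power-down.
+ */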
+static int platform_clock_control(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *k, int event)
+{
+ struct snd_soc_dapm_context *dapm = w->dapm;
+ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dai *codec_dai;
+ int ret = 0;
+
+ codec_dai = snd_soc_card_get_codec_dai(card, KBL_DIALOG_CODEC_DAI);
+ if (!codec_dai) {
+ dev_err(card->dev, "Codec dai not found; Unable to set/unset codec pll\n");
+ return -EIO;
+ }
+
+ /* Configure sysclk for codec */
+ ret = snd_soc_dai_set_sysclk(codec_dai, DA7219_CLKSRC_MCLK, 24576000,
+ SND_SOC_CLOCK_IN);
+ if (ret) {
+ dev_err(card->dev, "can't set codec sysclk configuration\n");
+ return ret;
+ }
+
+ if (SND_SOC_DAPM_EVENT_OFF(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0,
+ DA7219_SYSCLK_MCLK, 0, 0);
+ if (ret)
+ dev_err(card->dev, "failed to stop PLL: %d\n", ret);
+ } else if (SND_SOC_DAPM_EVENT_ON(event)) {
+ ret = snd_soc_dai_set_pll(codec_dai, 0, DA7219_SYSCLK_PLL_SRM,
+ 0, DA7219_PLL_FREQ_OUT_98304);
+ if (ret)
+ dev_err(card->dev, "failed to start PLL: %d\n", ret);
+ }
+
+ return ret;
+}
+
+static const struct snd_kcontrol_new kabylake_controls[] = {
+ SOC_DAPM_PIN_SWITCH("Headphone Jack"),
+ SOC_DAPM_PIN_SWITCH("Headset Mic"),
+ SOC_DAPM_PIN_SWITCH("Left Spk"),
+ SOC_DAPM_PIN_SWITCH("Right Spk"),
+};
+
+static const struct snd_soc_dapm_widget kabylake_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Headset Mic", NULL),
+ SND_SOC_DAPM_SPK("Left Spk", NULL),
+ SND_SOC_DAPM_SPK("Right Spk", NULL),
+ SND_SOC_DAPM_MIC("SoC DMIC", NULL),
+ SND_SOC_DAPM_SPK("DP", NULL),
+ SND_SOC_DAPM_SPK("HDMI", NULL),
+ SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0,
+ platform_clock_control, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
+static const struct snd_soc_dapm_route kabylake_map[] = {
+ /* speaker */
+ { "Left Spk", NULL, "Left BE_OUT" },
+ { "Right Spk", NULL, "Right BE_OUT" },
+
+ /* other jacks */
+ { "DMic", NULL, "SoC DMIC" },
+
+ { "HDMI", NULL, "hif5 Output" },
+ { "DP", NULL, "hif6 Output" },
+
+ /* CODEC BE connections */
+ { "Left HiFi Playback", NULL, "ssp0 Tx" },
+ { "Right HiFi Playback", NULL, "ssp0 Tx" },
+ { "ssp0 Tx", NULL, "spk_out" },
+
+ /* IV feedback path */
+ { "codec0_fb_in", NULL, "ssp0 Rx"},
+ { "ssp0 Rx", NULL, "Left HiFi Capture" },
+ { "ssp0 Rx", NULL, "Right HiFi Capture" },
+
+ /* AEC capture path */
+ { "echo_ref_out", NULL, "ssp0 Rx" },
+
+ /* DMIC */
+ { "dmic01_hifi", NULL, "DMIC01 Rx" },
+ { "DMIC01 Rx", NULL, "DMIC AIF" },
+
+ { "hifi1", NULL, "iDisp1 Tx" },
+ { "iDisp1 Tx", NULL, "iDisp1_out" },
+ { "hifi2", NULL, "iDisp2 Tx" },
+ { "iDisp2 Tx", NULL, "iDisp2_out" },
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+};
+
+static const struct snd_soc_dapm_route kabylake_ssp1_map[] = {
+ { "Headphone Jack", NULL, "HPL" },
+ { "Headphone Jack", NULL, "HPR" },
+
+ /* other jacks */
+ { "MIC", NULL, "Headset Mic" },
+
+ /* CODEC BE connections */
+ { "Playback", NULL, "ssp1 Tx" },
+ { "ssp1 Tx", NULL, "codec1_out" },
+
+ { "hs_in", NULL, "ssp1 Rx" },
+ { "ssp1 Rx", NULL, "Capture" },
+
+ { "Headphone Jack", NULL, "Platform Clock" },
+ { "Headset Mic", NULL, "Platform Clock" },
+};
+
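+/*
+ * Both MAX98927 amps share SSP0 in TDM mode: tx_mask 0x30 places the
+ * first device on slots 4-5 and 0xC0 places the second on slots 6-7,
+ * out of eight 16-bit slots.
+ */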
+static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *runtime = substream->private_data;
+ int ret = 0, j;
+
+ for (j = 0; j < runtime->num_codecs; j++) {
+ struct snd_soc_dai *codec_dai = runtime->codec_dais[j];
+
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x30, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV0 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ if (!strcmp(codec_dai->component->name, MAXIM_DEV1_NAME)) {
+ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xC0, 3, 8, 16);
+ if (ret < 0) {
+ dev_err(runtime->dev, "DEV1 TDM slot err:%d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops kabylake_ssp0_ops = {
+ .hw_params = kabylake_ssp0_hw_params,
+};
+
+static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *rate = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_RATE);
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_soc_dpcm *dpcm = container_of(
+ params, struct snd_soc_dpcm, hw_params);
+ struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link;
+ struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link;
+
+ /*
+ * The ADSP will convert the FE rate to 48k, stereo, 24 bit
+ */
+ if (!strcmp(fe_dai_link->name, "Kbl Audio Port") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") ||
+ !strcmp(fe_dai_link->name, "Kbl Audio Capture Port")) {
+ rate->min = rate->max = 48000;
+ channels->min = channels->max = 2;
+ snd_mask_none(fmt);
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S24_LE);
+ }
+
+ /*
+ * The speaker amp on SSP0 supports S16_LE, not S24_LE,
+ * so adjust the format mask here.
+ */
+ if (!strcmp(be_dai_link->name, "SSP0-Codec"))
+ snd_mask_set(fmt, SNDRV_PCM_FORMAT_S16_LE);
+
+ return 0;
+}
+
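+/*
+ * Headset codec init: add the SSP1 routes, create the headset jack
+ * with four buttons and hand it to the DA7219 AAD driver.
+ */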
+static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_component *component = rtd->codec_dai->component;
+ struct snd_soc_jack *jack;
+ struct snd_soc_card *card = rtd->card;
+ int ret;
+
+ ret = snd_soc_dapm_add_routes(&card->dapm,
+ kabylake_ssp1_map,
+ ARRAY_SIZE(kabylake_ssp1_map));
+ if (ret)
+ return ret;
+
+ /*
+ * Headset buttons map to the Google Reference headset.
+ * These can be configured by userspace.
+ */
+ ret = snd_soc_card_jack_new(kabylake_audio_card, "Headset Jack",
+ SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 |
+ SND_JACK_BTN_2 | SND_JACK_BTN_3 | SND_JACK_LINEOUT,
+ &ctx->kabylake_headset, NULL, 0);
+ if (ret) {
+ dev_err(rtd->dev, "Headset Jack creation failed: %d\n", ret);
+ return ret;
+ }
+
+ jack = &ctx->kabylake_headset;
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP);
+ snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN);
+
+ da7219_aad_jack_det(component, &ctx->kabylake_headset);
+
+ ret = snd_soc_dapm_ignore_suspend(&rtd->card->dapm, "SoC DMIC");
+ if (ret)
+ dev_err(rtd->dev, "SoC DMIC - Ignore suspend failed %d\n", ret);
+
+ return ret;
+}
+
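+/* Record each HDMI codec DAI so late_probe can create its jack. */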
+static int kabylake_hdmi_init(struct snd_soc_pcm_runtime *rtd, int device)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(rtd->card);
+ struct snd_soc_dai *dai = rtd->codec_dai;
+ struct kbl_hdmi_pcm *pcm;
+
+ pcm = devm_kzalloc(rtd->card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ pcm->device = device;
+ pcm->codec_dai = dai;
+
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+static int kabylake_hdmi1_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI1_PB);
+}
+
+static int kabylake_hdmi2_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI2_PB);
+}
+
+static int kabylake_hdmi3_init(struct snd_soc_pcm_runtime *rtd)
+{
+ return kabylake_hdmi_init(rtd, KBL_DPCM_AUDIO_HDMI3_PB);
+}
+
+static int kabylake_da7219_fe_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dapm_context *dapm;
+ struct snd_soc_component *component = rtd->cpu_dai->component;
+
+ dapm = snd_soc_component_get_dapm(component);
+ snd_soc_dapm_ignore_suspend(dapm, "Reference Capture");
+
+ return 0;
+}
+
+static const unsigned int rates[] = {
+ 48000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_rates = {
+ .count = ARRAY_SIZE(rates),
+ .list = rates,
+ .mask = 0,
+};
+
+static const unsigned int channels[] = {
+ DUAL_CHANNEL,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_channels = {
+ .count = ARRAY_SIZE(channels),
+ .list = channels,
+ .mask = 0,
+};
+
+static unsigned int channels_quad[] = {
+ QUAD_CHANNEL,
+};
+
+static struct snd_pcm_hw_constraint_list constraints_channels_quad = {
+ .count = ARRAY_SIZE(channels_quad),
+ .list = channels_quad,
+ .mask = 0,
+};
+
+static int kbl_fe_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ /*
+ * On this platform the PCM devices support
+ * 48 kHz, stereo, 16-bit audio.
+ */
+
+ runtime->hw.channels_max = DUAL_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels);
+
+ runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
+ snd_pcm_hw_constraint_msbits(runtime, 0, 16, 16);
+
+ snd_pcm_hw_constraint_list(runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+
+ return 0;
+}
+
+static const struct snd_soc_ops kabylake_da7219_fe_ops = {
+ .startup = kbl_fe_startup,
+};
+
+static int kabylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_interval *channels = hw_param_interval(params,
+ SNDRV_PCM_HW_PARAM_CHANNELS);
+
+ /*
+ * Set the BE channel constraint from the FE channel count.
+ */
+
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
+
+ return 0;
+}
+
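+/* DMIC capture runs fixed at four channels on the 48 kHz rate list. */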
+static int kabylake_dmic_startup(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ runtime->hw.channels_min = runtime->hw.channels_max = QUAD_CHANNEL;
+ snd_pcm_hw_constraint_list(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_channels_quad);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE, &constraints_rates);
+}
+
+static struct snd_soc_ops kabylake_dmic_ops = {
+ .startup = kabylake_dmic_startup,
+};
+
+static const unsigned int rates_16000[] = {
+ 16000,
+};
+
+static const struct snd_pcm_hw_constraint_list constraints_16000 = {
+ .count = ARRAY_SIZE(rates_16000),
+ .list = rates_16000,
+};
+
+static const unsigned int ch_mono[] = {
+ 1,
+};
+static const struct snd_pcm_hw_constraint_list constraints_refcap = {
+ .count = ARRAY_SIZE(ch_mono),
+ .list = ch_mono,
+};
+
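+/* Reference capture (wake on voice) is constrained to mono at 16 kHz. */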
+static int kabylake_refcap_startup(struct snd_pcm_substream *substream)
+{
+ substream->runtime->hw.channels_max = 1;
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_CHANNELS,
+ &constraints_refcap);
+
+ return snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ &constraints_16000);
+}
+
+static struct snd_soc_ops skylake_refcap_ops = {
+ .startup = kabylake_refcap_startup,
+};
+
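+/* Prefix each MAX98927's controls and widgets with its speaker position. */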
+static struct snd_soc_codec_conf max98927_codec_conf[] = {
+ {
+ .dev_name = MAXIM_DEV0_NAME,
+ .name_prefix = "Right",
+ },
+ {
+ .dev_name = MAXIM_DEV1_NAME,
+ .name_prefix = "Left",
+ },
+};
+
+static struct snd_soc_dai_link_component ssp0_codec_components[] = {
+ { /* Right */
+ .name = MAXIM_DEV0_NAME,
+ .dai_name = MAX98927_CODEC_DAI,
+ },
+ { /* Left */
+ .name = MAXIM_DEV1_NAME,
+ .dai_name = MAX98927_CODEC_DAI,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &skylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HS_PB] = {
+ .name = "Kbl Audio Headset Playback",
+ .stream_name = "Headset Audio",
+ .cpu_dai_name = "System Pin2",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .ops = &kabylake_da7219_fe_ops,
+
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ /* SSP1 - Codec */
+ .name = "SSP1-Codec",
+ .id = 1,
+ .cpu_dai_name = "SSP1 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codec_name = "i2c-DLGS7219:00",
+ .codec_dai_name = KBL_DIALOG_CODEC_DAI,
+ .init = kabylake_da7219_codec_init,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ },
+ {
+ .name = "dmic01",
+ .id = 2,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 3,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 4,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 5,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
+/* kabylake digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link kabylake_max98927_dais[] = {
+ /* Front End DAI links */
+ [KBL_DPCM_AUDIO_PB] = {
+ .name = "Kbl Audio Port",
+ .stream_name = "Audio",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .init = kabylake_da7219_fe_init,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_CP] = {
+ .name = "Kbl Audio Capture Port",
+ .stream_name = "Audio Record",
+ .cpu_dai_name = "System Pin",
+ .platform_name = "0000:00:1f.3",
+ .dynamic = 1,
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .nonatomic = 1,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_capture = 1,
+ .ops = &kabylake_da7219_fe_ops,
+ },
+ [KBL_DPCM_AUDIO_ECHO_REF_CP] = {
+ .name = "Kbl Audio Echo Reference cap",
+ .stream_name = "Echoreference Capture",
+ .cpu_dai_name = "Echoref Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .capture_only = 1,
+ .nonatomic = 1,
+ },
+ [KBL_DPCM_AUDIO_REF_CP] = {
+ .name = "Kbl Audio Reference cap",
+ .stream_name = "Wake on Voice",
+ .cpu_dai_name = "Reference Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &skylake_refcap_ops,
+ },
+ [KBL_DPCM_AUDIO_DMIC_CP] = {
+ .name = "Kbl Audio DMIC cap",
+ .stream_name = "dmiccap",
+ .cpu_dai_name = "DMIC Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .init = NULL,
+ .dpcm_capture = 1,
+ .nonatomic = 1,
+ .dynamic = 1,
+ .ops = &kabylake_dmic_ops,
+ },
+ [KBL_DPCM_AUDIO_HDMI1_PB] = {
+ .name = "Kbl HDMI Port1",
+ .stream_name = "Hdmi1",
+ .cpu_dai_name = "HDMI1 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI2_PB] = {
+ .name = "Kbl HDMI Port2",
+ .stream_name = "Hdmi2",
+ .cpu_dai_name = "HDMI2 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = NULL,
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+ [KBL_DPCM_AUDIO_HDMI3_PB] = {
+ .name = "Kbl HDMI Port3",
+ .stream_name = "Hdmi3",
+ .cpu_dai_name = "HDMI3 Pin",
+ .codec_name = "snd-soc-dummy",
+ .codec_dai_name = "snd-soc-dummy-dai",
+ .platform_name = "0000:00:1f.3",
+ .trigger = {
+ SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
+ .dpcm_playback = 1,
+ .init = NULL,
+ .nonatomic = 1,
+ .dynamic = 1,
+ },
+
+ /* Back End DAI links */
+ {
+ /* SSP0 - Codec */
+ .name = "SSP0-Codec",
+ .id = 0,
+ .cpu_dai_name = "SSP0 Pin",
+ .platform_name = "0000:00:1f.3",
+ .no_pcm = 1,
+ .codecs = ssp0_codec_components,
+ .num_codecs = ARRAY_SIZE(ssp0_codec_components),
+ .dai_fmt = SND_SOC_DAIFMT_DSP_B |
+ SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .ignore_pmdown_time = 1,
+ .be_hw_params_fixup = kabylake_ssp_fixup,
+ .ops = &kabylake_ssp0_ops,
+ },
+ {
+ .name = "dmic01",
+ .id = 1,
+ .cpu_dai_name = "DMIC01 Pin",
+ .codec_name = "dmic-codec",
+ .codec_dai_name = "dmic-hifi",
+ .platform_name = "0000:00:1f.3",
+ .be_hw_params_fixup = kabylake_dmic_fixup,
+ .ignore_suspend = 1,
+ .dpcm_capture = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp1",
+ .id = 2,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .init = kabylake_hdmi1_init,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 3,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi2_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 4,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .platform_name = "0000:00:1f.3",
+ .init = kabylake_hdmi3_init,
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+};
+
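+/*
+ * Walk the HDMI PCMs collected at link init and create one
+ * "HDMI/DP, pcm=N Jack" per device, then init the hdac_hdmi ports.
+ */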
+static int kabylake_card_late_probe(struct snd_soc_card *card)
+{
+ struct kbl_codec_private *ctx = snd_soc_card_get_drvdata(card);
+ struct kbl_hdmi_pcm *pcm;
+ struct snd_soc_component *component = NULL;
+ int err, i = 0;
+ char jack_name[NAME_SIZE];
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &kabylake_hdmi[i],
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &kabylake_hdmi[i]);
+ if (err < 0)
+ return err;
+
+ i++;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
+
+/* kabylake audio machine driver for DA7219 + MAX98927 */
+static struct snd_soc_card kbl_audio_card_da7219_m98927 = {
+ .name = "kblda7219m98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_dais,
+ .num_links = ARRAY_SIZE(kabylake_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+/* kabylake audio machine driver for Maxim98927 */
+static struct snd_soc_card kbl_audio_card_max98927 = {
+ .name = "kblmax98927",
+ .owner = THIS_MODULE,
+ .dai_link = kabylake_max98927_dais,
+ .num_links = ARRAY_SIZE(kabylake_max98927_dais),
+ .controls = kabylake_controls,
+ .num_controls = ARRAY_SIZE(kabylake_controls),
+ .dapm_widgets = kabylake_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(kabylake_widgets),
+ .dapm_routes = kabylake_map,
+ .num_dapm_routes = ARRAY_SIZE(kabylake_map),
+ .codec_conf = max98927_codec_conf,
+ .num_configs = ARRAY_SIZE(max98927_codec_conf),
+ .fully_routed = true,
+ .late_probe = kabylake_card_late_probe,
+};
+
+static int kabylake_audio_probe(struct platform_device *pdev)
+{
+ struct kbl_codec_private *ctx;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ kabylake_audio_card =
+ (struct snd_soc_card *)pdev->id_entry->driver_data;
+
+ kabylake_audio_card->dev = &pdev->dev;
+ snd_soc_card_set_drvdata(kabylake_audio_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, kabylake_audio_card);
+}
+
+static const struct platform_device_id kbl_board_ids[] = {
+ {
+ .name = "kbl_da7219_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_da7219_m98927,
+ },
+ {
+ .name = "kbl_max98927",
+ .driver_data =
+ (kernel_ulong_t)&kbl_audio_card_max98927,
+ },
+ { }
+};
+
+static struct platform_driver kabylake_audio = {
+ .probe = kabylake_audio_probe,
+ .driver = {
+ .name = "kbl_da7219_max98927",
+ .pm = &snd_soc_pm_ops,
+ },
+ .id_table = kbl_board_ids,
+};
+
+module_platform_driver(kabylake_audio);
+
+/* Module information */
+MODULE_DESCRIPTION("Audio KabyLake Machine driver for MAX98927 & DA7219");
+MODULE_AUTHOR("Mac Chiang <mac.chiang@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:kbl_da7219_max98927");
+MODULE_ALIAS("platform:kbl_max98927");
diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c
index 21a6490746a6..99e1320c485f 100644
--- a/sound/soc/intel/boards/kbl_rt5663_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c
@@ -488,11 +488,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
-
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, MAXIM_DEV0_NAME)) {
/*
* Use channel 4 and 5 for the first amp
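This hunk, like the identical one in kbl_rt5663_rt5514_max98927.c just below
and the for_each_card_prelinks() conversions in the MediaTek and Meson hunks
further down, swaps an open-coded index loop for an iterator macro, so the
element is named once and the bounds cannot be fumbled. A reduced stand-in
for the macro (not the real ASoC types), compilable on its own:

#include <stdio.h>

struct dai { const char *name; };

struct runtime {
	int num_codecs;
	struct dai *codec_dais[4];
};

/* same shape as the kernel's for_each_rtd_codec_dai() */
#define for_each_rtd_codec_dai(rtd, i, dai)			\
	for ((i) = 0;						\
	     (i) < (rtd)->num_codecs &&				\
	     ((dai) = (rtd)->codec_dais[(i)]);			\
	     (i)++)

int main(void)
{
	struct dai left = { "MAX98927L" }, right = { "MAX98927R" };
	struct runtime rtd = { 2, { &left, &right } };
	struct dai *codec_dai;
	int j;

	for_each_rtd_codec_dai(&rtd, j, codec_dai)
		printf("codec %d: %s\n", j, codec_dai->name);
	return 0;
}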
diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
index a892b37eab7c..a737c915d46a 100644
--- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
+++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
@@ -353,11 +353,10 @@ static int kabylake_ssp0_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int ret = 0, j;
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
-
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (!strcmp(codec_dai->component->name, RT5514_DEV_NAME)) {
ret = snd_soc_dai_set_tdm_slot(codec_dai, 0xF, 0, 8, 16);
if (ret < 0) {
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.c b/sound/soc/intel/boards/skl_hda_dsp_common.c
new file mode 100644
index 000000000000..3fdbf239da74
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * Common functions used in different Intel machine drivers
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "skl_hda_dsp_common.h"
+
+#define NAME_SIZE 32
+
+int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct skl_hda_hdmi_pcm *pcm;
+ char dai_name[NAME_SIZE];
+
+ pcm = devm_kzalloc(card->dev, sizeof(*pcm), GFP_KERNEL);
+ if (!pcm)
+ return -ENOMEM;
+
+ snprintf(dai_name, sizeof(dai_name), "intel-hdmi-hifi%d",
+ ctx->dai_index);
+ pcm->codec_dai = snd_soc_card_get_codec_dai(card, dai_name);
+ if (!pcm->codec_dai)
+ return -EINVAL;
+
+ pcm->device = device;
+ list_add_tail(&pcm->head, &ctx->hdmi_pcm_list);
+
+ return 0;
+}
+
+/* skl_hda_digital audio interface glue - connects codec <--> CPU */
+struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS] = {
+ /* Back End DAI links */
+ {
+ .name = "iDisp1",
+ .id = 1,
+ .cpu_dai_name = "iDisp1 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi1",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp2",
+ .id = 2,
+ .cpu_dai_name = "iDisp2 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi2",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "iDisp3",
+ .id = 3,
+ .cpu_dai_name = "iDisp3 Pin",
+ .codec_name = "ehdaudio0D2",
+ .codec_dai_name = "intel-hdmi-hifi3",
+ .dpcm_playback = 1,
+ .no_pcm = 1,
+ },
+ {
+ .name = "Analog Playback and Capture",
+ .id = 4,
+ .cpu_dai_name = "Analog CPU DAI",
+ .codec_name = "ehdaudio0D0",
+ .codec_dai_name = "Analog Codec DAI",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = NULL,
+ .no_pcm = 1,
+ },
+ {
+ .name = "Digital Playback and Capture",
+ .id = 5,
+ .cpu_dai_name = "Digital CPU DAI",
+ .codec_name = "ehdaudio0D0",
+ .codec_dai_name = "Digital Codec DAI",
+ .platform_name = "0000:00:1f.3",
+ .dpcm_playback = 1,
+ .dpcm_capture = 1,
+ .init = NULL,
+ .no_pcm = 1,
+ },
+};
+
+int skl_hda_hdmi_jack_init(struct snd_soc_card *card)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ struct snd_soc_component *component = NULL;
+ struct skl_hda_hdmi_pcm *pcm;
+ char jack_name[NAME_SIZE];
+ int err;
+
+ list_for_each_entry(pcm, &ctx->hdmi_pcm_list, head) {
+ component = pcm->codec_dai->component;
+ snprintf(jack_name, sizeof(jack_name),
+ "HDMI/DP, pcm=%d Jack", pcm->device);
+ err = snd_soc_card_jack_new(card, jack_name,
+ SND_JACK_AVOUT, &pcm->hdmi_jack,
+ NULL, 0);
+
+ if (err)
+ return err;
+
+ err = hdac_hdmi_jack_init(pcm->codec_dai, pcm->device,
+ &pcm->hdmi_jack);
+ if (err < 0)
+ return err;
+ }
+
+ if (!component)
+ return -EINVAL;
+
+ return hdac_hdmi_jack_port_init(component, &card->dapm);
+}
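skl_hda_hdmi_add_pcm() queues one node per HDMI PCM on ctx->hdmi_pcm_list,
and skl_hda_hdmi_jack_init() walks that list again at late-probe time. A
minimal sketch of the intrusive-list bookkeeping involved, with a cut-down
stand-in for <linux/list.h> (GNU C typeof, as the kernel itself uses):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

#define list_for_each_entry(pos, head, member)				  \
	for ((pos) = container_of((head)->next, typeof(*(pos)), member); \
	     &(pos)->member != (head);					  \
	     (pos) = container_of((pos)->member.next, typeof(*(pos)), member))

struct hdmi_pcm { struct list_head head; int device; };

int main(void)
{
	struct list_head pcm_list;
	struct hdmi_pcm a = { .device = 3 }, b = { .device = 4 };
	struct hdmi_pcm *pcm;

	list_init(&pcm_list);
	list_add_tail(&a.head, &pcm_list);	/* skl_hda_hdmi_add_pcm() */
	list_add_tail(&b.head, &pcm_list);

	/* the late-probe walk that names each "HDMI/DP, pcm=%d Jack" */
	list_for_each_entry(pcm, &pcm_list, head)
		printf("HDMI/DP, pcm=%d Jack\n", pcm->device);
	return 0;
}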
diff --git a/sound/soc/intel/boards/skl_hda_dsp_common.h b/sound/soc/intel/boards/skl_hda_dsp_common.h
new file mode 100644
index 000000000000..87c50aff56cd
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_common.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2015-18 Intel Corporation.
+ */
+
+/*
+ * This file defines data structures used in machine drivers for Intel
+ * platforms with HDA codecs.
+ */
+
+#ifndef __SOUND_SOC_HDA_DSP_COMMON_H
+#define __SOUND_SOC_HDA_DSP_COMMON_H
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+
+#define HDA_DSP_MAX_BE_DAI_LINKS 5
+
+struct skl_hda_hdmi_pcm {
+ struct list_head head;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_jack hdmi_jack;
+ int device;
+};
+
+struct skl_hda_private {
+ struct list_head hdmi_pcm_list;
+ int pcm_count;
+ int dai_index;
+ const char *platform_name;
+};
+
+extern struct snd_soc_dai_link skl_hda_be_dai_links[HDA_DSP_MAX_BE_DAI_LINKS];
+int skl_hda_hdmi_jack_init(struct snd_soc_card *card);
+int skl_hda_hdmi_add_pcm(struct snd_soc_card *card, int device);
+
+#endif /* __SOUND_SOC_HDA_DSP_COMMON_H */
diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
new file mode 100644
index 000000000000..b415dd4c85f5
--- /dev/null
+++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2015-18 Intel Corporation.
+
+/*
+ * Machine driver for SKL+ platforms with a DSP, iDisp and HDA codecs
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/jack.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include "../../codecs/hdac_hdmi.h"
+#include "../skylake/skl.h"
+#include "skl_hda_dsp_common.h"
+
+static const struct snd_soc_dapm_widget skl_hda_widgets[] = {
+ SND_SOC_DAPM_HP("Analog Out", NULL),
+ SND_SOC_DAPM_MIC("Analog In", NULL),
+ SND_SOC_DAPM_HP("Alt Analog Out", NULL),
+ SND_SOC_DAPM_MIC("Alt Analog In", NULL),
+ SND_SOC_DAPM_SPK("Digital Out", NULL),
+ SND_SOC_DAPM_MIC("Digital In", NULL),
+};
+
+static const struct snd_soc_dapm_route skl_hda_map[] = {
+ { "hifi3", NULL, "iDisp3 Tx"},
+ { "iDisp3 Tx", NULL, "iDisp3_out"},
+ { "hifi2", NULL, "iDisp2 Tx"},
+ { "iDisp2 Tx", NULL, "iDisp2_out"},
+ { "hifi1", NULL, "iDisp1 Tx"},
+ { "iDisp1 Tx", NULL, "iDisp1_out"},
+
+ { "Analog Out", NULL, "Codec Output Pin1" },
+ { "Digital Out", NULL, "Codec Output Pin2" },
+ { "Alt Analog Out", NULL, "Codec Output Pin3" },
+
+ { "Codec Input Pin1", NULL, "Analog In" },
+ { "Codec Input Pin2", NULL, "Digital In" },
+ { "Codec Input Pin3", NULL, "Alt Analog In" },
+
+ /* CODEC BE connections */
+ { "Analog Codec Playback", NULL, "Analog CPU Playback" },
+ { "Analog CPU Playback", NULL, "codec0_out" },
+ { "Digital Codec Playback", NULL, "Digital CPU Playback" },
+ { "Digital CPU Playback", NULL, "codec1_out" },
+ { "Alt Analog Codec Playback", NULL, "Alt Analog CPU Playback" },
+ { "Alt Analog CPU Playback", NULL, "codec2_out" },
+
+ { "codec0_in", NULL, "Analog CPU Capture" },
+ { "Analog CPU Capture", NULL, "Analog Codec Capture" },
+ { "codec1_in", NULL, "Digital CPU Capture" },
+ { "Digital CPU Capture", NULL, "Digital Codec Capture" },
+ { "codec2_in", NULL, "Alt Analog CPU Capture" },
+ { "Alt Analog CPU Capture", NULL, "Alt Analog Codec Capture" },
+};
+
+static int skl_hda_card_late_probe(struct snd_soc_card *card)
+{
+ return skl_hda_hdmi_jack_init(card);
+}
+
+static int
+skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
+{
+ struct skl_hda_private *ctx = snd_soc_card_get_drvdata(card);
+ int ret = 0;
+
+ dev_dbg(card->dev, "%s: dai link name - %s\n", __func__, link->name);
+ link->platform_name = ctx->platform_name;
+ link->nonatomic = 1;
+
+ if (strstr(link->name, "HDMI")) {
+ ret = skl_hda_hdmi_add_pcm(card, ctx->pcm_count);
+
+ if (ret < 0)
+ return ret;
+
+ ctx->dai_index++;
+ }
+
+ ctx->pcm_count++;
+ return ret;
+}
+
+static struct snd_soc_card hda_soc_card = {
+ .name = "skl_hda_card",
+ .owner = THIS_MODULE,
+ .dai_link = skl_hda_be_dai_links,
+ .dapm_widgets = skl_hda_widgets,
+ .dapm_routes = skl_hda_map,
+ .add_dai_link = skl_hda_add_dai_link,
+ .fully_routed = true,
+ .late_probe = skl_hda_card_late_probe,
+};
+
+#define IDISP_DAI_COUNT 3
+/* there are two routes per iDisp output */
+#define IDISP_ROUTE_COUNT (IDISP_DAI_COUNT * 2)
+#define IDISP_CODEC_MASK 0x4
+
+static int skl_hda_fill_card_info(struct skl_machine_pdata *pdata)
+{
+ struct snd_soc_card *card = &hda_soc_card;
+ struct snd_soc_dai_link *dai_link;
+ u32 codec_count, codec_mask;
+ int i, num_links, num_route;
+
+ codec_mask = pdata->codec_mask;
+ codec_count = hweight_long(codec_mask);
+
+ if (codec_count == 1 && codec_mask & IDISP_CODEC_MASK) {
+ num_links = IDISP_DAI_COUNT;
+ num_route = IDISP_ROUTE_COUNT;
+ } else if (codec_count == 2 && codec_mask & IDISP_CODEC_MASK) {
+ num_links = ARRAY_SIZE(skl_hda_be_dai_links);
+ num_route = ARRAY_SIZE(skl_hda_map);
+ card->dapm_widgets = skl_hda_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(skl_hda_widgets);
+ } else {
+ return -EINVAL;
+ }
+
+ card->num_links = num_links;
+ card->num_dapm_routes = num_route;
+
+ for_each_card_prelinks(card, i, dai_link)
+ dai_link->platform_name = pdata->platform;
+
+ return 0;
+}
+
+static int skl_hda_audio_probe(struct platform_device *pdev)
+{
+ struct skl_machine_pdata *pdata;
+ struct skl_hda_private *ctx;
+ int ret;
+
+ dev_dbg(&pdev->dev, "%s: entry\n", __func__);
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctx->hdmi_pcm_list);
+
+ pdata = dev_get_drvdata(&pdev->dev);
+ if (!pdata)
+ return -EINVAL;
+
+ ret = skl_hda_fill_card_info(pdata);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unsupported HDAudio/iDisp configuration found\n");
+ return ret;
+ }
+
+ ctx->pcm_count = hda_soc_card.num_links;
+ ctx->dai_index = 1; /* hdmi codec dai name starts from index 1 */
+ ctx->platform_name = pdata->platform;
+
+ hda_soc_card.dev = &pdev->dev;
+ snd_soc_card_set_drvdata(&hda_soc_card, ctx);
+
+ return devm_snd_soc_register_card(&pdev->dev, &hda_soc_card);
+}
+
+static struct platform_driver skl_hda_audio = {
+ .probe = skl_hda_audio_probe,
+ .driver = {
+ .name = "skl_hda_dsp_generic",
+ .pm = &snd_soc_pm_ops,
+ },
+};
+
+module_platform_driver(skl_hda_audio);
+
+/* Module information */
+MODULE_DESCRIPTION("SKL/KBL/BXT/APL HDA Generic Machine driver");
+MODULE_AUTHOR("Rakesh Ughreja <rakesh.a.ughreja@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:skl_hda_dsp_generic");
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 915a34cdc8ac..c1f50a079d34 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -7,7 +7,8 @@ snd-soc-acpi-intel-match-objs := soc-acpi-intel-byt-match.o soc-acpi-intel-cht-m
soc-acpi-intel-hsw-bdw-match.o \
soc-acpi-intel-skl-match.o soc-acpi-intel-kbl-match.o \
soc-acpi-intel-bxt-match.o soc-acpi-intel-glk-match.o \
- soc-acpi-intel-cnl-match.o
+ soc-acpi-intel-cnl-match.o \
+ soc-acpi-intel-hda-match.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
diff --git a/sound/soc/intel/common/soc-acpi-intel-byt-match.c b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
index 4daa8a4f0c0c..097dc06377ba 100644
--- a/sound/soc/intel/common/soc-acpi-intel-byt-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-byt-match.c
@@ -34,6 +34,13 @@ static const struct dmi_system_id byt_table[] = {
.callback = byt_thinkpad10_quirk_cb,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 8"),
+ },
+ },
+ {
+ .callback = byt_thinkpad10_quirk_cb,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad 10"),
},
},
diff --git a/sound/soc/intel/common/soc-acpi-intel-hda-match.c b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
new file mode 100644
index 000000000000..533c1064f84b
--- /dev/null
+++ b/sound/soc/intel/common/soc-acpi-intel-hda-match.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Intel Corporation.
+
+/*
+ * soc-acpi-intel-hda-match.c - tables and support for HDA+ACPI enumeration.
+ *
+ */
+
+#include <sound/soc-acpi.h>
+#include <sound/soc-acpi-intel-match.h>
+#include "../skylake/skl.h"
+
+static struct skl_machine_pdata hda_pdata = {
+ .use_tplg_pcm = true,
+};
+
+struct snd_soc_acpi_mach snd_soc_acpi_intel_hda_machines[] = {
+ {
+ /* .id is not used in this file */
+ .drv_name = "skl_hda_dsp_generic",
+
+ /* .fw_filename is dynamically set in skylake driver */
+
+ /* .sof_fw_filename is dynamically set in sof/intel driver */
+
+ .sof_tplg_filename = "intel/sof-hda-generic.tplg",
+
+ /*
+ * .machine_quirk and .quirk_data are not used here but
+ * can be used if we need a more complicated machine driver
+ * combining HDA+other device (e.g. DMIC).
+ */
+ .pdata = &hda_pdata,
+ },
+ {},
+};
+EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_hda_machines);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
index 0ee173ca437d..a317b7790fce 100644
--- a/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
+++ b/sound/soc/intel/common/soc-acpi-intel-kbl-match.c
@@ -32,6 +32,11 @@ static struct snd_soc_acpi_codecs kbl_7219_98357_codecs = {
.codecs = {"MX98357A"}
};
+static struct snd_soc_acpi_codecs kbl_7219_98927_codecs = {
+ .num_codecs = 1,
+ .codecs = {"MX98927"}
+};
+
struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
{
.id = "INT343A",
@@ -83,6 +88,14 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[] = {
.quirk_data = &kbl_7219_98357_codecs,
.pdata = &skl_dmic_data,
},
+ {
+ .id = "DLGS7219",
+ .drv_name = "kbl_da7219_max98927",
+ .fw_filename = "intel/dsp_fw_kbl.bin",
+ .machine_quirk = snd_soc_acpi_codec_list,
+ .quirk_data = &kbl_7219_98927_codecs,
+ .pdata = &skl_dmic_data,
+ },
{},
};
EXPORT_SYMBOL_GPL(snd_soc_acpi_intel_kbl_machines);
diff --git a/sound/soc/intel/common/sst-firmware.c b/sound/soc/intel/common/sst-firmware.c
index 11041aedea31..1e067504b604 100644
--- a/sound/soc/intel/common/sst-firmware.c
+++ b/sound/soc/intel/common/sst-firmware.c
@@ -355,7 +355,7 @@ struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
/* allocate DMA buffer to store FW data */
sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
- &sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
+ &sst_fw->dmable_fw_paddr, GFP_KERNEL);
if (!sst_fw->dma_buf) {
dev_err(dsp->dev, "error: DMA alloc failed\n");
kfree(sst_fw);
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 823e39103edd..557f80c0bfe5 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -32,6 +32,7 @@
#define HDA_MONO 1
#define HDA_STEREO 2
#define HDA_QUAD 4
+#define HDA_MAX 8
static const struct snd_pcm_hardware azx_pcm_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
@@ -494,6 +495,7 @@ static int skl_pcm_trigger(struct snd_pcm_substream *substream, int cmd,
stream->lpib);
snd_hdac_ext_stream_set_lpib(stream, stream->lpib);
}
+ /* fall through */
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -569,7 +571,10 @@ static int skl_link_hw_params(struct snd_pcm_substream *substream,
stream_tag = hdac_stream(link_dev)->stream_tag;
/* set the stream tag in the codec dai dma params */
- snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ snd_soc_dai_set_tdm_slot(codec_dai, stream_tag, 0, 0, 0);
+ else
+ snd_soc_dai_set_tdm_slot(codec_dai, 0, stream_tag, 0, 0);
p_params.s_fmt = snd_pcm_format_width(params_format(params));
p_params.ch = params_channels(params);
@@ -995,21 +1000,63 @@ static struct snd_soc_dai_driver skl_platform_dai[] = {
},
},
{
- .name = "HD-Codec Pin",
+ .name = "Analog CPU DAI",
.ops = &skl_link_dai_ops,
.playback = {
- .stream_name = "HD-Codec Tx",
- .channels_min = HDA_STEREO,
- .channels_max = HDA_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .stream_name = "Analog CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
},
.capture = {
- .stream_name = "HD-Codec Rx",
- .channels_min = HDA_STEREO,
- .channels_max = HDA_STEREO,
- .rates = SNDRV_PCM_RATE_48000,
- .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .stream_name = "Analog CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+},
+{
+ .name = "Alt Analog CPU DAI",
+ .ops = &skl_link_dai_ops,
+ .playback = {
+ .stream_name = "Alt Analog CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Alt Analog CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+},
+{
+ .name = "Digital CPU DAI",
+ .ops = &skl_link_dai_ops,
+ .playback = {
+ .stream_name = "Digital CPU Playback",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
+ },
+ .capture = {
+ .stream_name = "Digital CPU Capture",
+ .channels_min = HDA_MONO,
+ .channels_max = HDA_MAX,
+ .rates = SNDRV_PCM_RATE_8000_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE,
},
},
};
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 2620d77729c5..cf8848b779dc 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -898,11 +898,10 @@ static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
bc = (struct skl_algo_data *)sb->dobj.private;
if (bc->set_params == SKL_PARAM_BIND) {
- params = kzalloc(bc->max, GFP_KERNEL);
+ params = kmemdup(bc->params, bc->max, GFP_KERNEL);
if (!params)
return -ENOMEM;
- memcpy(params, bc->params, bc->max);
skl_fill_sink_instance_id(ctx, params, bc->max,
mconfig);
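kmemdup() collapses the kzalloc() + memcpy() pair into one allocate-and-copy
call. A userspace stand-in built on malloc(), for illustration only -- the
kernel helper additionally takes gfp flags:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

int main(void)
{
	const unsigned char params[] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char *copy = memdup(params, sizeof(params));

	if (!copy)
		return 1;	/* kernel code would return -ENOMEM */
	printf("copy[0..3] = %02x %02x %02x %02x\n",
	       copy[0], copy[1], copy[2], copy[3]);
	free(copy);
	return 0;
}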
@@ -2461,6 +2460,7 @@ static int skl_tplg_get_token(struct device *dev,
case SKL_TKN_U8_CORE_ID:
mconfig->core_id = tkn_elem->value;
+ break;
case SKL_TKN_U8_MOD_TYPE:
mconfig->m_type = tkn_elem->value;
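The added break matters: without it, the SKL_TKN_U8_CORE_ID case falls
through and the same token value silently overwrites the module type as
well -- unlike the deliberate, commented fall-through added to skl-pcm.c
earlier in this series. A tiny sketch of the fixed control flow, with
hypothetical token names:

#include <stdio.h>

enum { TKN_CORE_ID, TKN_MOD_TYPE };

struct mconfig { int core_id; int m_type; };

static void set_token(struct mconfig *m, int tkn, int val)
{
	switch (tkn) {
	case TKN_CORE_ID:
		m->core_id = val;
		break;		/* the fix: stop here */
	case TKN_MOD_TYPE:
		m->m_type = val;
		break;
	}
}

int main(void)
{
	struct mconfig m = { .core_id = -1, .m_type = -1 };

	set_token(&m, TKN_CORE_ID, 2);
	/* with the break, m_type stays untouched: core_id=2 m_type=-1 */
	printf("core_id=%d m_type=%d\n", m.core_id, m.m_type);
	return 0;
}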
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 1d17be0f78a0..29225623b4b4 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -33,9 +33,11 @@
#include <sound/hda_register.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_codec.h>
#include "skl.h"
#include "skl-sst-dsp.h"
#include "skl-sst-ipc.h"
+#include "../../../soc/codecs/hdac_hda.h"
/*
* initialize the PCI registers
@@ -472,6 +474,25 @@ static struct skl_ssp_clk skl_ssp_clks[] = {
{.name = "ssp5_sclkfs"},
};
+static struct snd_soc_acpi_mach *skl_find_hda_machine(struct skl *skl,
+ struct snd_soc_acpi_mach *machines)
+{
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct snd_soc_acpi_mach *mach;
+
+ /* check if we have any codecs detected on bus */
+ if (bus->codec_mask == 0)
+ return NULL;
+
+ /* point to common table */
+ mach = snd_soc_acpi_intel_hda_machines;
+
+ /* all entries in the machine table use the same firmware */
+ mach->fw_filename = machines->fw_filename;
+
+ return mach;
+}
+
static int skl_find_machine(struct skl *skl, void *driver_data)
{
struct hdac_bus *bus = skl_to_bus(skl);
@@ -479,9 +500,13 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
struct skl_machine_pdata *pdata;
mach = snd_soc_acpi_find_machine(mach);
- if (mach == NULL) {
- dev_err(bus->dev, "No matching machine driver found\n");
- return -ENODEV;
+ if (!mach) {
+ dev_dbg(bus->dev, "No matching I2S machine driver found\n");
+ mach = skl_find_hda_machine(skl, driver_data);
+ if (!mach) {
+ dev_err(bus->dev, "No matching machine driver found\n");
+ return -ENODEV;
+ }
}
skl->mach = mach;
@@ -498,8 +523,9 @@ static int skl_find_machine(struct skl *skl, void *driver_data)
static int skl_machine_device_register(struct skl *skl)
{
- struct hdac_bus *bus = skl_to_bus(skl);
struct snd_soc_acpi_mach *mach = skl->mach;
+ struct hdac_bus *bus = skl_to_bus(skl);
+ struct skl_machine_pdata *pdata;
struct platform_device *pdev;
int ret;
@@ -516,8 +542,12 @@ static int skl_machine_device_register(struct skl *skl)
return -EIO;
}
- if (mach->pdata)
+ if (mach->pdata) {
+ pdata = (struct skl_machine_pdata *)mach->pdata;
+ pdata->platform = dev_name(bus->dev);
+ pdata->codec_mask = bus->codec_mask;
dev_set_drvdata(&pdev->dev, mach->pdata);
+ }
skl->i2s_dev = pdev;
@@ -628,6 +658,24 @@ static void skl_clock_device_unregister(struct skl *skl)
platform_device_unregister(skl->clk_dev);
}
+#define IDISP_INTEL_VENDOR_ID 0x80860000
+
+/*
+ * load the legacy codec driver
+ */
+static void load_codec_module(struct hda_codec *codec)
+{
+#ifdef MODULE
+ char modalias[MODULE_NAME_LEN];
+ const char *mod = NULL;
+
+ snd_hdac_codec_modalias(&codec->core, modalias, sizeof(modalias));
+ mod = modalias;
+ dev_dbg(&codec->core.dev, "loading %s codec module\n", mod);
+ request_module(mod);
+#endif
+}
+
/*
* Probe the given codec address
*/
@@ -637,7 +685,9 @@ static int probe_codec(struct hdac_bus *bus, int addr)
(AC_VERB_PARAMETERS << 8) | AC_PAR_VENDOR_ID;
unsigned int res = -1;
struct skl *skl = bus_to_skl(bus);
+ struct hdac_hda_priv *hda_codec;
struct hdac_device *hdev;
+ int err;
mutex_lock(&bus->cmd_mutex);
snd_hdac_bus_send_cmd(bus, cmd);
@@ -645,13 +695,26 @@ static int probe_codec(struct hdac_bus *bus, int addr)
mutex_unlock(&bus->cmd_mutex);
if (res == -1)
return -EIO;
- dev_dbg(bus->dev, "codec #%d probed OK\n", addr);
+ dev_dbg(bus->dev, "codec #%d probed OK: %x\n", addr, res);
- hdev = devm_kzalloc(&skl->pci->dev, sizeof(*hdev), GFP_KERNEL);
- if (!hdev)
+ hda_codec = devm_kzalloc(&skl->pci->dev, sizeof(*hda_codec),
+ GFP_KERNEL);
+ if (!hda_codec)
return -ENOMEM;
- return snd_hdac_ext_bus_device_init(bus, addr, hdev);
+ hda_codec->codec.bus = skl_to_hbus(skl);
+ hdev = &hda_codec->codec.core;
+
+ err = snd_hdac_ext_bus_device_init(bus, addr, hdev);
+ if (err < 0)
+ return err;
+
+ /* use legacy bus only for HDA codecs, idisp uses ext bus */
+ if ((res & 0xFFFF0000) != IDISP_INTEL_VENDOR_ID) {
+ hdev->type = HDA_DEV_LEGACY;
+ load_codec_module(&hda_codec->codec);
+ }
+ return 0;
}
/* Codec initialization */
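probe_codec() now picks the bus type from the vendor half of the
AC_PAR_VENDOR_ID response: the upper 16 bits carry the vendor ID, and 0x8086
(Intel) there marks the iDisp codec, which stays on the ext bus while every
other codec becomes a legacy HDA device. A sketch of the mask check, with
illustrative response values:

#include <stdio.h>

#define IDISP_INTEL_VENDOR_ID	0x80860000u

static int is_idisp(unsigned int vendor_id_resp)
{
	return (vendor_id_resp & 0xFFFF0000u) == IDISP_INTEL_VENDOR_ID;
}

int main(void)
{
	printf("0x80862809 -> iDisp? %d\n", is_idisp(0x80862809)); /* 1 */
	printf("0x10ec0298 -> iDisp? %d\n", is_idisp(0x10ec0298)); /* 0 */
	return 0;
}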
@@ -786,9 +849,10 @@ static int skl_create(struct pci_dev *pci,
const struct hdac_io_ops *io_ops,
struct skl **rskl)
{
+ struct hdac_ext_bus_ops *ext_ops = NULL;
struct skl *skl;
struct hdac_bus *bus;
-
+ struct hda_bus *hbus;
int err;
*rskl = NULL;
@@ -803,13 +867,23 @@ static int skl_create(struct pci_dev *pci,
return -ENOMEM;
}
+ hbus = skl_to_hbus(skl);
bus = skl_to_bus(skl);
- snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, NULL);
+
+#if IS_ENABLED(CONFIG_SND_SOC_HDAC_HDA)
+ ext_ops = snd_soc_hdac_hda_get_ops();
+#endif
+ snd_hdac_ext_bus_init(bus, &pci->dev, &bus_core_ops, io_ops, ext_ops);
bus->use_posbuf = 1;
skl->pci = pci;
INIT_WORK(&skl->probe_work, skl_probe_work);
bus->bdl_pos_adj = 0;
+ mutex_init(&hbus->prepare_mutex);
+ hbus->pci = pci;
+ hbus->mixer_assigned = -1;
+ hbus->modelname = "sklbus";
+
*rskl = skl;
return 0;
diff --git a/sound/soc/intel/skylake/skl.h b/sound/soc/intel/skylake/skl.h
index 78aa8bdcb619..8d48cd7c56c8 100644
--- a/sound/soc/intel/skylake/skl.h
+++ b/sound/soc/intel/skylake/skl.h
@@ -23,6 +23,7 @@
#include <sound/hda_register.h>
#include <sound/hdaudio_ext.h>
+#include <sound/hda_codec.h>
#include <sound/soc.h>
#include "skl-nhlt.h"
#include "skl-ssp-clk.h"
@@ -71,7 +72,7 @@ struct skl_fw_config {
};
struct skl {
- struct hdac_bus hbus;
+ struct hda_bus hbus;
struct pci_dev *pci;
unsigned int init_done:1; /* delayed init status */
@@ -105,8 +106,11 @@ struct skl {
struct snd_soc_acpi_mach *mach;
};
-#define skl_to_bus(s) (&(s)->hbus)
-#define bus_to_skl(bus) container_of(bus, struct skl, hbus)
+#define skl_to_bus(s) (&(s)->hbus.core)
+#define bus_to_skl(bus) container_of(bus, struct skl, hbus.core)
+
+#define skl_to_hbus(s) (&(s)->hbus)
+#define hbus_to_skl(h) container_of(h, struct skl, hbus)
/* to pass dai dma data */
struct skl_dma_params {
@@ -117,6 +121,8 @@ struct skl_dma_params {
struct skl_machine_pdata {
u32 dmic_num;
bool use_tplg_pcm; /* use dais and dai links from topology */
+ const char *platform;
+ u32 codec_mask;
};
struct skl_dsp_ops {
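With struct skl now embedding a struct hda_bus, skl_to_bus() reaches the
core hdac_bus through hbus.core and bus_to_skl() walks back out via
container_of(). A standalone sketch with reduced stand-in types; the
offsetof-based container_of() below is the standard definition:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdac_bus { int id; };
struct hda_bus  { struct hdac_bus core; };
struct skl      { struct hda_bus hbus; int data; };

#define skl_to_bus(s)	(&(s)->hbus.core)
#define bus_to_skl(bus)	container_of(bus, struct skl, hbus.core)

int main(void)
{
	struct skl skl = { .hbus = { .core = { 42 } }, .data = 7 };
	struct hdac_bus *bus = skl_to_bus(&skl);

	/* prints "bus id 42, back to skl data 7" */
	printf("bus id %d, back to skl data %d\n",
	       bus->id, bus_to_skl(bus)->data);
	return 0;
}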
diff --git a/sound/soc/mediatek/mt2701/mt2701-cs42448.c b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
index 666282b865a8..97f9f38ce6b3 100644
--- a/sound/soc/mediatek/mt2701/mt2701-cs42448.c
+++ b/sound/soc/mediatek/mt2701/mt2701-cs42448.c
@@ -299,6 +299,7 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
devm_kzalloc(&pdev->dev, sizeof(struct mt2701_cs42448_private),
GFP_KERNEL);
struct device *dev = &pdev->dev;
+ struct snd_soc_dai_link *dai_link;
if (!priv)
return -ENOMEM;
@@ -309,10 +310,10 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_cs42448_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt2701_cs42448_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
card->dev = dev;
@@ -324,10 +325,10 @@ static int mt2701_cs42448_machine_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_cs42448_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt2701_cs42448_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
codec_node_bt_mrg = of_parse_phandle(pdev->dev.of_node,
diff --git a/sound/soc/mediatek/mt2701/mt2701-wm8960.c b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
index 89f34efd9747..6bc1d3d58e64 100644
--- a/sound/soc/mediatek/mt2701/mt2701-wm8960.c
+++ b/sound/soc/mediatek/mt2701/mt2701-wm8960.c
@@ -97,6 +97,7 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt2701_wm8960_card;
struct device_node *platform_node, *codec_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -105,10 +106,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_wm8960_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt2701_wm8960_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
card->dev = &pdev->dev;
@@ -120,10 +121,10 @@ static int mt2701_wm8960_machine_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt2701_wm8960_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt2701_wm8960_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
@@ -150,7 +151,6 @@ static const struct of_device_id mt2701_wm8960_machine_dt_match[] = {
static struct platform_driver mt2701_wm8960_machine = {
.driver = {
.name = "mt2701-wm8960",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = mt2701_wm8960_machine_dt_match,
#endif
diff --git a/sound/soc/mediatek/mt6797/mt6797-mt6351.c b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
index b1558c57b9ca..cc41eb531653 100644
--- a/sound/soc/mediatek/mt6797/mt6797-mt6351.c
+++ b/sound/soc/mediatek/mt6797/mt6797-mt6351.c
@@ -158,6 +158,7 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt6797_mt6351_card;
struct device_node *platform_node, *codec_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
card->dev = &pdev->dev;
@@ -168,10 +169,10 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt6797_mt6351_dai_links[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt6797_mt6351_dai_links[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
codec_node = of_parse_phandle(pdev->dev.of_node,
@@ -181,10 +182,10 @@ static int mt6797_mt6351_dev_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt6797_mt6351_dai_links[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt6797_mt6351_dai_links[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
ret = devm_snd_soc_register_card(&pdev->dev, card);
@@ -205,7 +206,6 @@ static const struct of_device_id mt6797_mt6351_dt_match[] = {
static struct platform_driver mt6797_mt6351_driver = {
.driver = {
.name = "mt6797-mt6351",
- .owner = THIS_MODULE,
#ifdef CONFIG_OF
.of_match_table = mt6797_mt6351_dt_match,
#endif
diff --git a/sound/soc/mediatek/mt8173/mt8173-max98090.c b/sound/soc/mediatek/mt8173/mt8173-max98090.c
index 902d111016d6..4d6596d5cb07 100644
--- a/sound/soc/mediatek/mt8173/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173/mt8173-max98090.c
@@ -137,6 +137,7 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_max98090_card;
struct device_node *codec_node, *platform_node;
+ struct snd_soc_dai_link *dai_link;
int ret, i;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -145,10 +146,10 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_max98090_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
codec_node = of_parse_phandle(pdev->dev.of_node,
@@ -158,10 +159,10 @@ static int mt8173_max98090_dev_probe(struct platform_device *pdev)
"Property 'audio-codec' missing or invalid\n");
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_max98090_dais[i].codec_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->codec_name)
continue;
- mt8173_max98090_dais[i].codec_of_node = codec_node;
+ dai_link->codec_of_node = codec_node;
}
card->dev = &pdev->dev;
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
index 582174d98c6c..da5b58ce791b 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c
@@ -44,11 +44,10 @@ static int mt8173_rt5650_rt5514_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -179,6 +178,7 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_rt5514_card;
struct device_node *platform_node;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -188,10 +188,10 @@ static int mt8173_rt5650_rt5514_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5514_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_rt5514_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_rt5514_codecs[0].of_node =
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
index b3670c8a5b8d..d83cd039b413 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c
@@ -48,11 +48,10 @@ static int mt8173_rt5650_rt5676_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk 12.288M */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, MCLK_FOR_CODECS,
params_rate(params) * 512);
@@ -225,6 +224,7 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
{
struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
struct device_node *platform_node;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -234,10 +234,10 @@ static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_rt5676_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_rt5676_codecs[0].of_node =
diff --git a/sound/soc/mediatek/mt8173/mt8173-rt5650.c b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
index 7a89b4aad182..7edf250c8fb1 100644
--- a/sound/soc/mediatek/mt8173/mt8173-rt5650.c
+++ b/sound/soc/mediatek/mt8173/mt8173-rt5650.c
@@ -59,6 +59,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
unsigned int mclk_clock;
+ struct snd_soc_dai *codec_dai;
int i, ret;
switch (mt8173_rt5650_priv.pll_from) {
@@ -76,9 +77,7 @@ static int mt8173_rt5650_hw_params(struct snd_pcm_substream *substream,
break;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* pll from mclk */
ret = snd_soc_dai_set_pll(codec_dai, 0, 0, mclk_clock,
params_rate(params) * 512);
@@ -240,6 +239,7 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
struct device_node *platform_node;
struct device_node *np;
const char *codec_capture_dai;
+ struct snd_soc_dai_link *dai_link;
int i, ret;
platform_node = of_parse_phandle(pdev->dev.of_node,
@@ -249,10 +249,10 @@ static int mt8173_rt5650_dev_probe(struct platform_device *pdev)
return -EINVAL;
}
- for (i = 0; i < card->num_links; i++) {
- if (mt8173_rt5650_dais[i].platform_name)
+ for_each_card_prelinks(card, i, dai_link) {
+ if (dai_link->platform_name)
continue;
- mt8173_rt5650_dais[i].platform_of_node = platform_node;
+ dai_link->platform_of_node = platform_node;
}
mt8173_rt5650_codecs[0].of_node =
diff --git a/sound/soc/meson/Kconfig b/sound/soc/meson/Kconfig
index 8af8bc358a90..8b8426ed2363 100644
--- a/sound/soc/meson/Kconfig
+++ b/sound/soc/meson/Kconfig
@@ -4,6 +4,8 @@ menu "ASoC support for Amlogic platforms"
config SND_MESON_AXG_FIFO
tristate
select REGMAP_MMIO
+ imply COMMON_CLK_AXG_AUDIO
+ imply RESET_MESON_AUDIO_ARB
config SND_MESON_AXG_FRDDR
tristate "Amlogic AXG Playback FIFO support"
@@ -22,6 +24,7 @@ config SND_MESON_AXG_TODDR
config SND_MESON_AXG_TDM_FORMATTER
tristate
select REGMAP_MMIO
+ imply COMMON_CLK_AXG_AUDIO
config SND_MESON_AXG_TDM_INTERFACE
tristate
@@ -51,6 +54,7 @@ config SND_MESON_AXG_SOUND_CARD
imply SND_MESON_AXG_TDMIN
imply SND_MESON_AXG_TDMOUT
imply SND_MESON_AXG_SPDIFOUT
+ imply SND_MESON_AXG_PDM
help
Select Y or M to add support for the AXG SoC sound card
@@ -58,8 +62,17 @@ config SND_MESON_AXG_SPDIFOUT
tristate "Amlogic AXG SPDIF Output Support"
select SND_PCM_IEC958
imply SND_SOC_SPDIF
+ imply COMMON_CLK_AXG_AUDIO
help
Select Y or M to add support for SPDIF output serializer embedded
in the Amlogic AXG SoC family
+config SND_MESON_AXG_PDM
+ tristate "Amlogic AXG PDM Input Support"
+ imply SND_SOC_DMIC
+ imply COMMON_CLK_AXG_AUDIO
+ help
+ Select Y or M to add support for PDM input embedded
+ in the Amlogic AXG SoC family
+
endmenu
diff --git a/sound/soc/meson/Makefile b/sound/soc/meson/Makefile
index c5e003b093db..4cd25104029d 100644
--- a/sound/soc/meson/Makefile
+++ b/sound/soc/meson/Makefile
@@ -9,6 +9,7 @@ snd-soc-meson-axg-tdmin-objs := axg-tdmin.o
snd-soc-meson-axg-tdmout-objs := axg-tdmout.o
snd-soc-meson-axg-sound-card-objs := axg-card.o
snd-soc-meson-axg-spdifout-objs := axg-spdifout.o
+snd-soc-meson-axg-pdm-objs := axg-pdm.o
obj-$(CONFIG_SND_MESON_AXG_FIFO) += snd-soc-meson-axg-fifo.o
obj-$(CONFIG_SND_MESON_AXG_FRDDR) += snd-soc-meson-axg-frddr.o
@@ -19,3 +20,4 @@ obj-$(CONFIG_SND_MESON_AXG_TDMIN) += snd-soc-meson-axg-tdmin.o
obj-$(CONFIG_SND_MESON_AXG_TDMOUT) += snd-soc-meson-axg-tdmout.o
obj-$(CONFIG_SND_MESON_AXG_SOUND_CARD) += snd-soc-meson-axg-sound-card.o
obj-$(CONFIG_SND_MESON_AXG_SPDIFOUT) += snd-soc-meson-axg-spdifout.o
+obj-$(CONFIG_SND_MESON_AXG_PDM) += snd-soc-meson-axg-pdm.o
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
index 2914ba0d965b..aa54d2c612c9 100644
--- a/sound/soc/meson/axg-card.c
+++ b/sound/soc/meson/axg-card.c
@@ -97,14 +97,14 @@ static void axg_card_clean_references(struct axg_card *priv)
{
struct snd_soc_card *card = &priv->card;
struct snd_soc_dai_link *link;
+ struct snd_soc_dai_link_component *codec;
int i, j;
if (card->dai_link) {
- for (i = 0; i < card->num_links; i++) {
- link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, link) {
of_node_put(link->cpu_of_node);
- for (j = 0; j < link->num_codecs; j++)
- of_node_put(link->codecs[j].of_node);
+ for_each_link_codecs(link, j, codec)
+ of_node_put(codec->of_node);
}
}
@@ -167,8 +167,7 @@ static int axg_card_tdm_be_hw_params(struct snd_pcm_substream *substream,
if (be->mclk_fs) {
mclk = params_rate(params) * be->mclk_fs;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_sysclk(codec_dai, 0, mclk,
SND_SOC_CLOCK_IN);
if (ret && ret != -ENOTSUPP)
@@ -196,8 +195,7 @@ static int axg_card_tdm_dai_init(struct snd_soc_pcm_runtime *rtd)
struct snd_soc_dai *codec_dai;
int ret, i;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_tdm_slot(codec_dai,
be->codec_masks[i].tx,
be->codec_masks[i].rx,
@@ -478,7 +476,7 @@ static int axg_card_set_be_link(struct snd_soc_card *card,
ret = axg_card_set_link_name(card, link, "be");
if (ret)
- dev_err(card->dev, "error setting %s link name\n", np->name);
+ dev_err(card->dev, "error setting %pOFn link name\n", np);
return ret;
}
diff --git a/sound/soc/meson/axg-fifo.c b/sound/soc/meson/axg-fifo.c
index 30262550e37b..0e4f65e654c4 100644
--- a/sound/soc/meson/axg-fifo.c
+++ b/sound/soc/meson/axg-fifo.c
@@ -203,6 +203,8 @@ static int axg_fifo_pcm_open(struct snd_pcm_substream *ss)
ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
dev_name(dev), ss);
+ if (ret)
+ return ret;
/* Enable pclk to access registers and clock the fifo ip */
ret = clk_prepare_enable(fifo->pclk);
diff --git a/sound/soc/meson/axg-pdm.c b/sound/soc/meson/axg-pdm.c
new file mode 100644
index 000000000000..9d5684493ffc
--- /dev/null
+++ b/sound/soc/meson/axg-pdm.c
@@ -0,0 +1,654 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+//
+// Copyright (c) 2018 BayLibre, SAS.
+// Author: Jerome Brunet <jbrunet@baylibre.com>
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/pcm_params.h>
+
+#define PDM_CTRL 0x00
+#define PDM_CTRL_EN BIT(31)
+#define PDM_CTRL_OUT_MODE BIT(29)
+#define PDM_CTRL_BYPASS_MODE BIT(28)
+#define PDM_CTRL_RST_FIFO BIT(16)
+#define PDM_CTRL_CHAN_RSTN_MASK GENMASK(15, 8)
+#define PDM_CTRL_CHAN_RSTN(x) ((x) << 8)
+#define PDM_CTRL_CHAN_EN_MASK GENMASK(7, 0)
+#define PDM_CTRL_CHAN_EN(x) ((x) << 0)
+#define PDM_HCIC_CTRL1 0x04
+#define PDM_FILTER_EN BIT(31)
+#define PDM_HCIC_CTRL1_GAIN_SFT_MASK GENMASK(29, 24)
+#define PDM_HCIC_CTRL1_GAIN_SFT(x) ((x) << 24)
+#define PDM_HCIC_CTRL1_GAIN_MULT_MASK GENMASK(23, 16)
+#define PDM_HCIC_CTRL1_GAIN_MULT(x) ((x) << 16)
+#define PDM_HCIC_CTRL1_DSR_MASK GENMASK(8, 4)
+#define PDM_HCIC_CTRL1_DSR(x) ((x) << 4)
+#define PDM_HCIC_CTRL1_STAGE_NUM_MASK GENMASK(3, 0)
+#define PDM_HCIC_CTRL1_STAGE_NUM(x) ((x) << 0)
+#define PDM_HCIC_CTRL2 0x08
+#define PDM_F1_CTRL 0x0c
+#define PDM_LPF_ROUND_MODE_MASK GENMASK(17, 16)
+#define PDM_LPF_ROUND_MODE(x) ((x) << 16)
+#define PDM_LPF_DSR_MASK GENMASK(15, 12)
+#define PDM_LPF_DSR(x) ((x) << 12)
+#define PDM_LPF_STAGE_NUM_MASK GENMASK(8, 0)
+#define PDM_LPF_STAGE_NUM(x) ((x) << 0)
+#define PDM_LPF_MAX_STAGE 336
+#define PDM_LPF_NUM 3
+#define PDM_F2_CTRL 0x10
+#define PDM_F3_CTRL 0x14
+#define PDM_HPF_CTRL 0x18
+#define PDM_HPF_SFT_STEPS_MASK GENMASK(20, 16)
+#define PDM_HPF_SFT_STEPS(x) ((x) << 16)
+#define PDM_HPF_OUT_FACTOR_MASK GENMASK(15, 0)
+#define PDM_HPF_OUT_FACTOR(x) ((x) << 0)
+#define PDM_CHAN_CTRL 0x1c
+#define PDM_CHAN_CTRL_POINTER_WIDTH 8
+#define PDM_CHAN_CTRL_POINTER_MAX ((1 << PDM_CHAN_CTRL_POINTER_WIDTH) - 1)
+#define PDM_CHAN_CTRL_NUM 4
+#define PDM_CHAN_CTRL1 0x20
+#define PDM_COEFF_ADDR 0x24
+#define PDM_COEFF_DATA 0x28
+#define PDM_CLKG_CTRL 0x2c
+#define PDM_STS 0x30
+
+struct axg_pdm_lpf {
+ unsigned int ds;
+ unsigned int round_mode;
+ const unsigned int *tap;
+ unsigned int tap_num;
+};
+
+struct axg_pdm_hcic {
+ unsigned int shift;
+ unsigned int mult;
+ unsigned int steps;
+ unsigned int ds;
+};
+
+struct axg_pdm_hpf {
+ unsigned int out_factor;
+ unsigned int steps;
+};
+
+struct axg_pdm_filters {
+ struct axg_pdm_hcic hcic;
+ struct axg_pdm_hpf hpf;
+ struct axg_pdm_lpf lpf[PDM_LPF_NUM];
+};
+
+struct axg_pdm_cfg {
+ const struct axg_pdm_filters *filters;
+ unsigned int sys_rate;
+};
+
+struct axg_pdm {
+ const struct axg_pdm_cfg *cfg;
+ struct regmap *map;
+ struct clk *dclk;
+ struct clk *sysclk;
+ struct clk *pclk;
+};
+
+static void axg_pdm_enable(struct regmap *map)
+{
+ /* Reset AFIFO */
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, PDM_CTRL_RST_FIFO);
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_RST_FIFO, 0);
+
+ /* Enable PDM */
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, PDM_CTRL_EN);
+}
+
+static void axg_pdm_disable(struct regmap *map)
+{
+ regmap_update_bits(map, PDM_CTRL, PDM_CTRL_EN, 0);
+}
+
+static void axg_pdm_filters_enable(struct regmap *map, bool enable)
+{
+ unsigned int val = enable ? PDM_FILTER_EN : 0;
+
+ regmap_update_bits(map, PDM_HCIC_CTRL1, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F1_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F2_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_F3_CTRL, PDM_FILTER_EN, val);
+ regmap_update_bits(map, PDM_HPF_CTRL, PDM_FILTER_EN, val);
+}
+
+static int axg_pdm_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ axg_pdm_enable(priv->map);
+ return 0;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ axg_pdm_disable(priv->map);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned int axg_pdm_get_os(struct axg_pdm *priv)
+{
+ const struct axg_pdm_filters *filters = priv->cfg->filters;
+ unsigned int os = filters->hcic.ds;
+ int i;
+
+ /*
+ * The global oversampling factor is defined by the down sampling
+ * factor applied by each filter (HCIC and LPFs)
+ */
+
+ for (i = 0; i < PDM_LPF_NUM; i++)
+ os *= filters->lpf[i].ds;
+
+ return os;
+}
+
+static int axg_pdm_set_sysclk(struct axg_pdm *priv, unsigned int os,
+ unsigned int rate)
+{
+ unsigned int sys_rate = os * 2 * rate * PDM_CHAN_CTRL_POINTER_MAX;
+
+ /*
+ * Set the default system clock rate unless it is too fast for
+ * for the requested sample rate. In this case, the sample pointer
+ * counter could overflow so set a lower system clock rate
+ */
+ if (sys_rate < priv->cfg->sys_rate)
+ return clk_set_rate(priv->sysclk, sys_rate);
+
+ return clk_set_rate(priv->sysclk, priv->cfg->sys_rate);
+}
+
+static int axg_pdm_set_sample_pointer(struct axg_pdm *priv)
+{
+ unsigned int spmax, sp, val;
+ int i;
+
+ /* Max sample counter value per half period of dclk */
+ spmax = DIV_ROUND_UP_ULL((u64)clk_get_rate(priv->sysclk),
+ clk_get_rate(priv->dclk) * 2);
+
+ /* Make sure sysclk is not too fast - should not happen */
+ if (WARN_ON(spmax > PDM_CHAN_CTRL_POINTER_MAX))
+ return -EINVAL;
+
+ /* Capture the data when we are at 75% of the half period */
+ sp = spmax * 3 / 4;
+
+ for (i = 0, val = 0; i < PDM_CHAN_CTRL_NUM; i++)
+ val |= sp << (PDM_CHAN_CTRL_POINTER_WIDTH * i);
+
+ regmap_write(priv->map, PDM_CHAN_CTRL, val);
+ regmap_write(priv->map, PDM_CHAN_CTRL1, val);
+
+ return 0;
+}
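Working the three helpers above through the driver's default filter chain
(HCIC ds = 8 plus three LPFs with ds = 2, hence the "OS = 64" noted in the
filter comment below) at a 48 kHz capture rate: the pointer-counter cap
lands far above the 250 MHz default, so sysclk stays at 250 MHz, dclk runs
at 48000 * 64 = 3.072 MHz, and the sample pointer sits at 30 out of a
41-count half period. The same arithmetic in plain C:

#include <stdio.h>

#define POINTER_MAX	255UL		/* PDM_CHAN_CTRL_POINTER_MAX */
#define SYS_RATE	250000000UL	/* axg_pdm_config.sys_rate */

int main(void)
{
	unsigned long os = 8 * 2 * 2 * 2;	/* axg_pdm_get_os() */
	unsigned long rate = 48000;
	unsigned long dclk = rate * os;		/* mic clock */
	unsigned long cap = os * 2 * rate * POINTER_MAX;
	unsigned long sysclk = cap < SYS_RATE ? cap : SYS_RATE;
	/* max sample counter per half dclk period, rounded up */
	unsigned long spmax = (sysclk + 2 * dclk - 1) / (2 * dclk);
	unsigned long sp = spmax * 3 / 4;	/* capture at 75% */

	/* os=64 dclk=3072000 sysclk=250000000 spmax=41 sp=30 */
	printf("os=%lu dclk=%lu sysclk=%lu spmax=%lu sp=%lu\n",
	       os, dclk, sysclk, spmax, sp);
	return 0;
}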
+
+static void axg_pdm_set_channel_mask(struct axg_pdm *priv,
+ unsigned int channels)
+{
+ unsigned int mask = GENMASK(channels - 1, 0);
+
+ /* Put all channels in reset */
+ regmap_update_bits(priv->map, PDM_CTRL,
+ PDM_CTRL_CHAN_RSTN_MASK, 0);
+
+ /* Take the necessary channels out of reset and enable them */
+ regmap_update_bits(priv->map, PDM_CTRL,
+ PDM_CTRL_CHAN_RSTN_MASK |
+ PDM_CTRL_CHAN_EN_MASK,
+ PDM_CTRL_CHAN_RSTN(mask) |
+ PDM_CTRL_CHAN_EN(mask));
+}
+
+static int axg_pdm_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ unsigned int os = axg_pdm_get_os(priv);
+ unsigned int rate = params_rate(params);
+ unsigned int val;
+ int ret;
+
+ switch (params_width(params)) {
+ case 24:
+ val = PDM_CTRL_OUT_MODE;
+ break;
+ case 32:
+ val = 0;
+ break;
+ default:
+ dev_err(dai->dev, "unsupported sample width\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_OUT_MODE, val);
+
+ ret = axg_pdm_set_sysclk(priv, os, rate);
+ if (ret) {
+ dev_err(dai->dev, "failed to set system clock\n");
+ return ret;
+ }
+
+ ret = clk_set_rate(priv->dclk, rate * os);
+ if (ret) {
+ dev_err(dai->dev, "failed to set dclk\n");
+ return ret;
+ }
+
+ ret = axg_pdm_set_sample_pointer(priv);
+ if (ret) {
+ dev_err(dai->dev, "invalid clock setting\n");
+ return ret;
+ }
+
+ axg_pdm_set_channel_mask(priv, params_channels(params));
+
+ return 0;
+}
+
+static int axg_pdm_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->dclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling dclk failed\n");
+ return ret;
+ }
+
+ /* Enable the filters */
+ axg_pdm_filters_enable(priv->map, true);
+
+ return ret;
+}
+
+static void axg_pdm_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ axg_pdm_filters_enable(priv->map, false);
+ clk_disable_unprepare(priv->dclk);
+}
+
+static const struct snd_soc_dai_ops axg_pdm_dai_ops = {
+ .trigger = axg_pdm_trigger,
+ .hw_params = axg_pdm_hw_params,
+ .startup = axg_pdm_startup,
+ .shutdown = axg_pdm_shutdown,
+};
+
+static void axg_pdm_set_hcic_ctrl(struct axg_pdm *priv)
+{
+ const struct axg_pdm_hcic *hcic = &priv->cfg->filters->hcic;
+ unsigned int val;
+
+ val = PDM_HCIC_CTRL1_STAGE_NUM(hcic->steps);
+ val |= PDM_HCIC_CTRL1_DSR(hcic->ds);
+ val |= PDM_HCIC_CTRL1_GAIN_MULT(hcic->mult);
+ val |= PDM_HCIC_CTRL1_GAIN_SFT(hcic->shift);
+
+ regmap_update_bits(priv->map, PDM_HCIC_CTRL1,
+ PDM_HCIC_CTRL1_STAGE_NUM_MASK |
+ PDM_HCIC_CTRL1_DSR_MASK |
+ PDM_HCIC_CTRL1_GAIN_MULT_MASK |
+ PDM_HCIC_CTRL1_GAIN_SFT_MASK,
+ val);
+}
+
+static void axg_pdm_set_lpf_ctrl(struct axg_pdm *priv, unsigned int index)
+{
+ const struct axg_pdm_lpf *lpf = &priv->cfg->filters->lpf[index];
+ unsigned int offset = index * regmap_get_reg_stride(priv->map)
+ + PDM_F1_CTRL;
+ unsigned int val;
+
+ val = PDM_LPF_STAGE_NUM(lpf->tap_num);
+ val |= PDM_LPF_DSR(lpf->ds);
+ val |= PDM_LPF_ROUND_MODE(lpf->round_mode);
+
+ regmap_update_bits(priv->map, offset,
+ PDM_LPF_STAGE_NUM_MASK |
+ PDM_LPF_DSR_MASK |
+ PDM_LPF_ROUND_MODE_MASK,
+ val);
+}
+
+static void axg_pdm_set_hpf_ctrl(struct axg_pdm *priv)
+{
+ const struct axg_pdm_hpf *hpf = &priv->cfg->filters->hpf;
+ unsigned int val;
+
+ val = PDM_HPF_OUT_FACTOR(hpf->out_factor);
+ val |= PDM_HPF_SFT_STEPS(hpf->steps);
+
+ regmap_update_bits(priv->map, PDM_HPF_CTRL,
+ PDM_HPF_OUT_FACTOR_MASK |
+ PDM_HPF_SFT_STEPS_MASK,
+ val);
+}
+
+static int axg_pdm_set_lpf_filters(struct axg_pdm *priv)
+{
+ const struct axg_pdm_lpf *lpf = priv->cfg->filters->lpf;
+ unsigned int count = 0;
+ int i, j;
+
+ for (i = 0; i < PDM_LPF_NUM; i++)
+ count += lpf[i].tap_num;
+
+ /* Make sure the coeffs fit in the memory */
+ if (count >= PDM_LPF_MAX_STAGE)
+ return -EINVAL;
+
+ /* Set the initial APB bus register address */
+ regmap_write(priv->map, PDM_COEFF_ADDR, 0);
+
+ /* Set the tap filter values of all 3 filters */
+ for (i = 0; i < PDM_LPF_NUM; i++) {
+ axg_pdm_set_lpf_ctrl(priv, i);
+
+ for (j = 0; j < lpf[i].tap_num; j++)
+ regmap_write(priv->map, PDM_COEFF_DATA, lpf[i].tap[j]);
+ }
+
+ return 0;
+}
+
+static int axg_pdm_dai_probe(struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+ int ret;
+
+ ret = clk_prepare_enable(priv->pclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling pclk failed\n");
+ return ret;
+ }
+
+ /*
+ * sysclk must be set and enabled as well to access the PDM registers.
+ * Accessing the registers without it will give a bus error.
+ */
+ ret = clk_set_rate(priv->sysclk, priv->cfg->sys_rate);
+ if (ret) {
+ dev_err(dai->dev, "setting sysclk failed\n");
+ goto err_pclk;
+ }
+
+ ret = clk_prepare_enable(priv->sysclk);
+ if (ret) {
+ dev_err(dai->dev, "enabling sysclk failed\n");
+ goto err_pclk;
+ }
+
+ /* Make sure the device is initially disabled */
+ axg_pdm_disable(priv->map);
+
+ /* Make sure filter bypass is disabled */
+ regmap_update_bits(priv->map, PDM_CTRL, PDM_CTRL_BYPASS_MODE, 0);
+
+ /* Load filter settings */
+ axg_pdm_set_hcic_ctrl(priv);
+ axg_pdm_set_hpf_ctrl(priv);
+
+ ret = axg_pdm_set_lpf_filters(priv);
+ if (ret) {
+ dev_err(dai->dev, "invalid filter configuration\n");
+ goto err_sysclk;
+ }
+
+ return 0;
+
+err_sysclk:
+ clk_disable_unprepare(priv->sysclk);
+err_pclk:
+ clk_disable_unprepare(priv->pclk);
+ return ret;
+}
+
+static int axg_pdm_dai_remove(struct snd_soc_dai *dai)
+{
+ struct axg_pdm *priv = snd_soc_dai_get_drvdata(dai);
+
+ clk_disable_unprepare(priv->sysclk);
+ clk_disable_unprepare(priv->pclk);
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver axg_pdm_dai_drv = {
+ .name = "PDM",
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 48000,
+ .formats = (SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_S32_LE),
+ },
+ .ops = &axg_pdm_dai_ops,
+ .probe = axg_pdm_dai_probe,
+ .remove = axg_pdm_dai_remove,
+};
+
+static const struct snd_soc_component_driver axg_pdm_component_drv = {};
+
+static const struct regmap_config axg_pdm_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = PDM_STS,
+};
+
+static const unsigned int lpf1_default_tap[] = {
+ 0x000014, 0xffffb2, 0xfffed9, 0xfffdce, 0xfffd45,
+ 0xfffe32, 0x000147, 0x000645, 0x000b86, 0x000e21,
+ 0x000ae3, 0x000000, 0xffeece, 0xffdca8, 0xffd212,
+ 0xffd7d1, 0xfff2a7, 0x001f4c, 0x0050c2, 0x0072aa,
+ 0x006ff1, 0x003c32, 0xffdc4e, 0xff6a18, 0xff0fef,
+ 0xfefbaf, 0xff4c40, 0x000000, 0x00ebc8, 0x01c077,
+ 0x02209e, 0x01c1a4, 0x008e60, 0xfebe52, 0xfcd690,
+ 0xfb8fa5, 0xfba498, 0xfd9812, 0x0181ce, 0x06f5f3,
+ 0x0d112f, 0x12a958, 0x169686, 0x18000e, 0x169686,
+ 0x12a958, 0x0d112f, 0x06f5f3, 0x0181ce, 0xfd9812,
+ 0xfba498, 0xfb8fa5, 0xfcd690, 0xfebe52, 0x008e60,
+ 0x01c1a4, 0x02209e, 0x01c077, 0x00ebc8, 0x000000,
+ 0xff4c40, 0xfefbaf, 0xff0fef, 0xff6a18, 0xffdc4e,
+ 0x003c32, 0x006ff1, 0x0072aa, 0x0050c2, 0x001f4c,
+ 0xfff2a7, 0xffd7d1, 0xffd212, 0xffdca8, 0xffeece,
+ 0x000000, 0x000ae3, 0x000e21, 0x000b86, 0x000645,
+ 0x000147, 0xfffe32, 0xfffd45, 0xfffdce, 0xfffed9,
+ 0xffffb2, 0x000014,
+};
+
+static const unsigned int lpf2_default_tap[] = {
+ 0x00050a, 0xfff004, 0x0002c1, 0x003c12, 0xffa818,
+ 0xffc87d, 0x010aef, 0xff5223, 0xfebd93, 0x028f41,
+ 0xff5c0e, 0xfc63f8, 0x055f81, 0x000000, 0xf478a0,
+ 0x11c5e3, 0x2ea74d, 0x11c5e3, 0xf478a0, 0x000000,
+ 0x055f81, 0xfc63f8, 0xff5c0e, 0x028f41, 0xfebd93,
+ 0xff5223, 0x010aef, 0xffc87d, 0xffa818, 0x003c12,
+ 0x0002c1, 0xfff004, 0x00050a,
+};
+
+static const unsigned int lpf3_default_tap[] = {
+ 0x000000, 0x000081, 0x000000, 0xfffedb, 0x000000,
+ 0x00022d, 0x000000, 0xfffc46, 0x000000, 0x0005f7,
+ 0x000000, 0xfff6eb, 0x000000, 0x000d4e, 0x000000,
+ 0xffed1e, 0x000000, 0x001a1c, 0x000000, 0xffdcb0,
+ 0x000000, 0x002ede, 0x000000, 0xffc2d1, 0x000000,
+ 0x004ebe, 0x000000, 0xff9beb, 0x000000, 0x007dd7,
+ 0x000000, 0xff633a, 0x000000, 0x00c1d2, 0x000000,
+ 0xff11d5, 0x000000, 0x012368, 0x000000, 0xfe9c45,
+ 0x000000, 0x01b252, 0x000000, 0xfdebf6, 0x000000,
+ 0x0290b8, 0x000000, 0xfcca0d, 0x000000, 0x041d7c,
+ 0x000000, 0xfa8152, 0x000000, 0x07e9c6, 0x000000,
+ 0xf28fb5, 0x000000, 0x28b216, 0x3fffde, 0x28b216,
+ 0x000000, 0xf28fb5, 0x000000, 0x07e9c6, 0x000000,
+ 0xfa8152, 0x000000, 0x041d7c, 0x000000, 0xfcca0d,
+ 0x000000, 0x0290b8, 0x000000, 0xfdebf6, 0x000000,
+ 0x01b252, 0x000000, 0xfe9c45, 0x000000, 0x012368,
+ 0x000000, 0xff11d5, 0x000000, 0x00c1d2, 0x000000,
+ 0xff633a, 0x000000, 0x007dd7, 0x000000, 0xff9beb,
+ 0x000000, 0x004ebe, 0x000000, 0xffc2d1, 0x000000,
+ 0x002ede, 0x000000, 0xffdcb0, 0x000000, 0x001a1c,
+ 0x000000, 0xffed1e, 0x000000, 0x000d4e, 0x000000,
+ 0xfff6eb, 0x000000, 0x0005f7, 0x000000, 0xfffc46,
+ 0x000000, 0x00022d, 0x000000, 0xfffedb, 0x000000,
+ 0x000081, 0x000000,
+};
+
+/*
+ * These values are sane defaults for the axg platform:
+ * - OS = 64
+ * - Latency = 38700 (?)
+ *
+ * TODO: Many different HCIC, LPF and HPF configurations are possible.
+ * The configuration may depend on the dmic used by the platform, the
+ * expected tradeoff between latency and quality, etc. If/when other
+ * settings are required, we should add a fw interface to this driver to
+ * load new filter settings.
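+ *
+ * Worked check (illustrative): the HCIC below decimates by 8 and each
+ * of the three LPF stages by 2, so the overall oversampling ratio is
+ * 8 * 2 * 2 * 2 = 64, matching the OS = 64 figure above.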
+ */
+static const struct axg_pdm_filters axg_default_filters = {
+ .hcic = {
+ .shift = 0x15,
+ .mult = 0x80,
+ .steps = 7,
+ .ds = 8,
+ },
+ .hpf = {
+ .out_factor = 0x8000,
+ .steps = 13,
+ },
+ .lpf = {
+ [0] = {
+ .ds = 2,
+ .round_mode = 1,
+ .tap = lpf1_default_tap,
+ .tap_num = ARRAY_SIZE(lpf1_default_tap),
+ },
+ [1] = {
+ .ds = 2,
+ .round_mode = 0,
+ .tap = lpf2_default_tap,
+ .tap_num = ARRAY_SIZE(lpf2_default_tap),
+ },
+ [2] = {
+ .ds = 2,
+ .round_mode = 1,
+ .tap = lpf3_default_tap,
+ .tap_num = ARRAY_SIZE(lpf3_default_tap),
+ },
+ },
+};
+
+static const struct axg_pdm_cfg axg_pdm_config = {
+ .filters = &axg_default_filters,
+ .sys_rate = 250000000,
+};
+
+static const struct of_device_id axg_pdm_of_match[] = {
+ {
+ .compatible = "amlogic,axg-pdm",
+ .data = &axg_pdm_config,
+ }, {}
+};
+MODULE_DEVICE_TABLE(of, axg_pdm_of_match);
+
+static int axg_pdm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct axg_pdm *priv;
+ struct resource *res;
+ void __iomem *regs;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, priv);
+
+ priv->cfg = of_device_get_match_data(dev);
+ if (!priv->cfg) {
+ dev_err(dev, "failed to match device\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->map = devm_regmap_init_mmio(dev, regs, &axg_pdm_regmap_cfg);
+ if (IS_ERR(priv->map)) {
+ dev_err(dev, "failed to init regmap: %ld\n",
+ PTR_ERR(priv->map));
+ return PTR_ERR(priv->map);
+ }
+
+ priv->pclk = devm_clk_get(dev, "pclk");
+ if (IS_ERR(priv->pclk)) {
+ ret = PTR_ERR(priv->pclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get pclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->dclk = devm_clk_get(dev, "dclk");
+ if (IS_ERR(priv->dclk)) {
+ ret = PTR_ERR(priv->dclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dclk: %d\n", ret);
+ return ret;
+ }
+
+ priv->sysclk = devm_clk_get(dev, "sysclk");
+ if (IS_ERR(priv->sysclk)) {
+ ret = PTR_ERR(priv->sysclk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get dclk: %d\n", ret);
+ return ret;
+ }
+
+ return devm_snd_soc_register_component(dev, &axg_pdm_component_drv,
+ &axg_pdm_dai_drv, 1);
+}
+
+static struct platform_driver axg_pdm_pdrv = {
+ .probe = axg_pdm_probe,
+ .driver = {
+ .name = "axg-pdm",
+ .of_match_table = axg_pdm_of_match,
+ },
+};
+module_platform_driver(axg_pdm_pdrv);
+
+MODULE_DESCRIPTION("Amlogic AXG PDM Input driver");
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
index 7b8baf46d968..585ce030b79b 100644
--- a/sound/soc/meson/axg-tdm-interface.c
+++ b/sound/soc/meson/axg-tdm-interface.c
@@ -42,6 +42,7 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
struct axg_tdm_stream *rx = (struct axg_tdm_stream *)
dai->capture_dma_data;
unsigned int tx_slots, rx_slots;
+ unsigned int fmt = 0;
tx_slots = axg_tdm_slots_total(tx_mask);
rx_slots = axg_tdm_slots_total(rx_mask);
@@ -52,38 +53,45 @@ int axg_tdm_set_tdm_slots(struct snd_soc_dai *dai, u32 *tx_mask,
return -EINVAL;
}
- /*
- * Amend the dai driver channel number and let dpcm channel merge do
- * its job
- */
- if (tx) {
- tx->mask = tx_mask;
- dai->driver->playback.channels_max = tx_slots;
- }
-
- if (rx) {
- rx->mask = rx_mask;
- dai->driver->capture.channels_max = rx_slots;
- }
-
iface->slots = slots;
switch (slot_width) {
case 0:
- /* defaults width to 32 if not provided */
- iface->slot_width = 32;
- break;
- case 8:
- case 16:
- case 24:
+ slot_width = 32;
+ /* Fall-through */
case 32:
- iface->slot_width = slot_width;
+ fmt |= SNDRV_PCM_FMTBIT_S32_LE;
+ /* Fall-through */
+ case 24:
+ fmt |= SNDRV_PCM_FMTBIT_S24_LE;
+ fmt |= SNDRV_PCM_FMTBIT_S20_LE;
+ /* Fall-through */
+ case 16:
+ fmt |= SNDRV_PCM_FMTBIT_S16_LE;
+ /* Fall-through */
+ case 8:
+ fmt |= SNDRV_PCM_FMTBIT_S8;
break;
default:
dev_err(dai->dev, "unsupported slot width: %d\n", slot_width);
return -EINVAL;
}
+ iface->slot_width = slot_width;
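+
+ /*
+ * Illustrative note (not part of the original change): the
+ * fall-through cascade above accumulates every sample format narrow
+ * enough for the slot, e.g. a 16bit slot yields S16_LE | S8 while a
+ * 32bit slot yields S32_LE, S24_LE, S20_LE, S16_LE and S8.
+ */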
+
+ /* Amend the dai driver and let dpcm merge do its job */
+ if (tx) {
+ tx->mask = tx_mask;
+ dai->driver->playback.channels_max = tx_slots;
+ dai->driver->playback.formats = fmt;
+ }
+
+ if (rx) {
+ rx->mask = rx_mask;
+ dai->driver->capture.channels_max = rx_slots;
+ dai->driver->capture.formats = fmt;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(axg_tdm_set_tdm_slots);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 81b09d740ed9..6384bb6dacfd 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -356,7 +356,7 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
if (ret)
goto out;
- ret = snd_soc_register_component(&pdev->dev, &nuc900_ac97_component,
+ ret = devm_snd_soc_register_component(&pdev->dev, &nuc900_ac97_component,
&nuc900_ac97_dai, 1);
if (ret)
goto out;
@@ -373,8 +373,6 @@ out:
static int nuc900_ac97_drvremove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
-
nuc900_ac97_data = NULL;
snd_soc_set_ac97_ops(NULL);
diff --git a/sound/soc/omap/omap-hdmi-audio.c b/sound/soc/omap/omap-hdmi-audio.c
index 8a99a8837dc9..673a9eb153b2 100644
--- a/sound/soc/omap/omap-hdmi-audio.c
+++ b/sound/soc/omap/omap-hdmi-audio.c
@@ -348,7 +348,7 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
default:
return -EINVAL;
}
- ret = snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
+ ret = devm_snd_soc_register_component(ad->dssdev, &omap_hdmi_component,
dai_drv, 1);
if (ret)
return ret;
@@ -383,7 +383,6 @@ static int omap_hdmi_audio_probe(struct platform_device *pdev)
ret = snd_soc_register_card(card);
if (ret) {
dev_err(dev, "snd_soc_register_card failed (%d)\n", ret);
- snd_soc_unregister_component(ad->dssdev);
return ret;
}
@@ -400,7 +399,6 @@ static int omap_hdmi_audio_remove(struct platform_device *pdev)
struct hdmi_audio_data *ad = platform_get_drvdata(pdev);
snd_soc_unregister_card(ad->card);
- snd_soc_unregister_component(ad->dssdev);
return 0;
}
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 776e148b0aa2..943b44de1464 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -19,14 +19,13 @@ config SND_MMP_SOC
config SND_PXA2XX_AC97
tristate
- select SND_AC97_CODEC
config SND_PXA2XX_SOC_AC97
tristate
- select AC97_BUS
+ select AC97_BUS_NEW
select SND_PXA2XX_LIB
select SND_PXA2XX_LIB_AC97
- select SND_SOC_AC97_BUS
+ select SND_SOC_AC97_BUS_NEW
config SND_PXA2XX_SOC_I2S
select SND_PXA2XX_LIB
@@ -80,6 +79,7 @@ config SND_PXA2XX_SOC_TOSA
tristate "SoC AC97 Audio support for Tosa"
depends on SND_PXA2XX_SOC && MACH_TOSA
depends on MFD_TC6393XB
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -89,6 +89,7 @@ config SND_PXA2XX_SOC_TOSA
config SND_PXA2XX_SOC_E740
tristate "SoC AC97 Audio support for e740"
depends on SND_PXA2XX_SOC && MACH_E740
+ depends on !AC97_BUS
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -98,6 +99,7 @@ config SND_PXA2XX_SOC_E740
config SND_PXA2XX_SOC_E750
tristate "SoC AC97 Audio support for e750"
depends on SND_PXA2XX_SOC && MACH_E750
+ depends on !AC97_BUS
select SND_SOC_WM9705
select SND_PXA2XX_SOC_AC97
help
@@ -107,6 +109,7 @@ config SND_PXA2XX_SOC_E750
config SND_PXA2XX_SOC_E800
tristate "SoC AC97 Audio support for e800"
depends on SND_PXA2XX_SOC && MACH_E800
+ depends on !AC97_BUS
select SND_SOC_WM9712
select SND_PXA2XX_SOC_AC97
help
@@ -117,6 +120,7 @@ config SND_PXA2XX_SOC_EM_X270
tristate "SoC Audio support for CompuLab EM-x270, eXeda and CM-X300"
depends on SND_PXA2XX_SOC && (MACH_EM_X270 || MACH_EXEDA || \
MACH_CM_X300)
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -127,6 +131,7 @@ config SND_PXA2XX_SOC_PALM27X
bool "SoC Audio support for Palm T|X, T5, E2 and LifeDrive"
depends on SND_PXA2XX_SOC && (MACH_PALMLD || MACH_PALMTX || \
MACH_PALMT5 || MACH_PALMTE2)
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9712
help
@@ -156,6 +161,7 @@ config SND_SOC_TTC_DKB
config SND_SOC_ZYLONITE
tristate "SoC Audio support for Marvell Zylonite"
depends on SND_PXA2XX_SOC && MACH_ZYLONITE
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_PXA_SOC_SSP
select SND_SOC_WM9713
@@ -195,6 +201,7 @@ config SND_PXA2XX_SOC_MAGICIAN
config SND_PXA2XX_SOC_MIOA701
tristate "SoC Audio support for MIO A701"
depends on SND_PXA2XX_SOC && MACH_MIOA701
+ depends on !AC97_BUS
select SND_PXA2XX_SOC_AC97
select SND_SOC_WM9713
help
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 69033e1a84e6..adcf8ba9d287 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -103,6 +103,9 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
pxa_ssp_disable(ssp);
}
+ if (priv->extclk)
+ clk_prepare_enable(priv->extclk);
+
dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL);
if (!dma)
return -ENOMEM;
@@ -125,6 +128,9 @@ static void pxa_ssp_shutdown(struct snd_pcm_substream *substream,
clk_disable_unprepare(ssp->clk);
}
+ if (priv->extclk)
+ clk_disable_unprepare(priv->extclk);
+
kfree(snd_soc_dai_get_dma_data(cpu_dai, substream));
snd_soc_dai_set_dma_data(cpu_dai, substream, NULL);
}
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 9f779657bc86..f8a3aa6c6d4e 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -17,6 +17,7 @@
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
+#include <sound/ac97/controller.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
#include <sound/soc.h>
@@ -27,43 +28,35 @@
#include <mach/regs-ac97.h>
#include <mach/audio.h>
-static void pxa2xx_ac97_warm_reset(struct snd_ac97 *ac97)
+static void pxa2xx_ac97_warm_reset(struct ac97_controller *adrv)
{
pxa2xx_ac97_try_warm_reset();
pxa2xx_ac97_finish_reset();
}
-static void pxa2xx_ac97_cold_reset(struct snd_ac97 *ac97)
+static void pxa2xx_ac97_cold_reset(struct ac97_controller *adrv)
{
pxa2xx_ac97_try_cold_reset();
pxa2xx_ac97_finish_reset();
}
-static unsigned short pxa2xx_ac97_legacy_read(struct snd_ac97 *ac97,
- unsigned short reg)
+static int pxa2xx_ac97_read_actrl(struct ac97_controller *adrv, int slot,
+ unsigned short reg)
{
- int ret;
-
- ret = pxa2xx_ac97_read(ac97->num, reg);
- if (ret < 0)
- return 0;
- else
- return (unsigned short)(ret & 0xffff);
+ return pxa2xx_ac97_read(slot, reg);
}
-static void pxa2xx_ac97_legacy_write(struct snd_ac97 *ac97,
- unsigned short reg, unsigned short val)
+static int pxa2xx_ac97_write_actrl(struct ac97_controller *adrv, int slot,
+ unsigned short reg, unsigned short val)
{
- int ret;
-
- ret = pxa2xx_ac97_write(ac97->num, reg, val);
+ return pxa2xx_ac97_write(slot, reg, val);
}
-static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
- .read = pxa2xx_ac97_legacy_read,
- .write = pxa2xx_ac97_legacy_write,
+static struct ac97_controller_ops pxa2xx_ac97_ops = {
+ .read = pxa2xx_ac97_read_actrl,
+ .write = pxa2xx_ac97_write_actrl,
.warm_reset = pxa2xx_ac97_warm_reset,
.reset = pxa2xx_ac97_cold_reset,
};
@@ -233,6 +226,9 @@ MODULE_DEVICE_TABLE(of, pxa2xx_ac97_dt_ids);
static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
{
int ret;
+ struct ac97_controller *ctrl;
+ pxa2xx_audio_ops_t *pdata = pdev->dev.platform_data;
+ void **codecs_pdata;
if (pdev->id != -1) {
dev_err(&pdev->dev, "PXA2xx has only one AC97 port.\n");
@@ -245,10 +241,14 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
return ret;
}
- ret = snd_soc_set_ac97_ops(&pxa2xx_ac97_ops);
- if (ret != 0)
- return ret;
+ codecs_pdata = pdata ? pdata->codec_pdata : NULL;
+ ctrl = snd_ac97_controller_register(&pxa2xx_ac97_ops, &pdev->dev,
+ AC97_SLOTS_AVAILABLE_ALL,
+ codecs_pdata);
+ if (IS_ERR(ctrl))
+ return PTR_ERR(ctrl);
+ platform_set_drvdata(pdev, ctrl);
/* Punt most of the init to the SoC probe; we may need the machine
* driver to do interesting things with the clocking to get us up
* and running.
@@ -259,8 +259,10 @@ static int pxa2xx_ac97_dev_probe(struct platform_device *pdev)
static int pxa2xx_ac97_dev_remove(struct platform_device *pdev)
{
+ struct ac97_controller *ctrl = platform_get_drvdata(pdev);
+
snd_soc_unregister_component(&pdev->dev);
- snd_soc_set_ac97_ops(NULL);
+ snd_ac97_controller_unregister(ctrl);
pxa2xx_ac97_hw_remove(pdev);
return 0;
}
diff --git a/sound/soc/qcom/apq8096.c b/sound/soc/qcom/apq8096.c
index 1543e85629f8..fb45f396ab4a 100644
--- a/sound/soc/qcom/apq8096.c
+++ b/sound/soc/qcom/apq8096.c
@@ -25,13 +25,12 @@ static int apq8096_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static void apq8096_add_be_ops(struct snd_soc_card *card)
{
- struct snd_soc_dai_link *link = card->dai_link;
- int i, num_links = card->num_links;
+ struct snd_soc_dai_link *link;
+ int i;
- for (i = 0; i < num_links; i++) {
+ for_each_card_prelinks(card, i, link) {
if (link->no_pcm == 1)
link->be_hw_params_fixup = apq8096_be_hw_params_fixup;
- link++;
}
}
diff --git a/sound/soc/qcom/qdsp6/q6adm.c b/sound/soc/qcom/qdsp6/q6adm.c
index 932c3ebfd252..da242515e146 100644
--- a/sound/soc/qcom/qdsp6/q6adm.c
+++ b/sound/soc/qcom/qdsp6/q6adm.c
@@ -2,25 +2,24 @@
// Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/kernel.h>
#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/sched.h>
#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/kref.h>
-#include <linux/wait.h>
-#include <linux/soc/qcom/apr.h>
#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/soc/qcom/apr.h>
+#include <linux/wait.h>
#include <sound/asound.h>
#include "q6adm.h"
#include "q6afe.h"
#include "q6core.h"
-#include "q6dsp-errno.h"
#include "q6dsp-common.h"
+#include "q6dsp-errno.h"
#define ADM_CMD_DEVICE_OPEN_V5 0x00010326
#define ADM_CMDRSP_DEVICE_OPEN_V5 0x00010329
diff --git a/sound/soc/qcom/qdsp6/q6asm-dai.c b/sound/soc/qcom/qdsp6/q6asm-dai.c
index 9db9a2944ef2..a16c71c03058 100644
--- a/sound/soc/qcom/qdsp6/q6asm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6asm-dai.c
@@ -8,7 +8,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
-#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/pcm.h>
#include <asm/dma.h>
@@ -319,10 +318,11 @@ static int q6asm_dai_open(struct snd_pcm_substream *substream)
prtd->audio_client = q6asm_audio_client_alloc(dev,
(q6asm_cb)event_handler, prtd, stream_id,
LEGACY_PCM_MODE);
- if (!prtd->audio_client) {
+ if (IS_ERR(prtd->audio_client)) {
pr_info("%s: Could not allocate memory\n", __func__);
+ ret = PTR_ERR(prtd->audio_client);
kfree(prtd);
- return -ENOMEM;
+ return ret;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -493,7 +493,7 @@ static int q6asm_dai_pcm_new(struct snd_soc_pcm_runtime *rtd)
}
}
- return ret;
+ return 0;
}
static void q6asm_dai_pcm_free(struct snd_pcm *pcm)
diff --git a/sound/soc/qcom/qdsp6/q6asm.c b/sound/soc/qcom/qdsp6/q6asm.c
index 2b2c7233bb5f..e1cfa846a1dc 100644
--- a/sound/soc/qcom/qdsp6/q6asm.c
+++ b/sound/soc/qcom/qdsp6/q6asm.c
@@ -11,7 +11,6 @@
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
#include <uapi/sound/asound.h>
#include <linux/delay.h>
#include <linux/slab.h>
diff --git a/sound/soc/qcom/qdsp6/q6core.c b/sound/soc/qcom/qdsp6/q6core.c
index 06f03a5fe9bd..cdfc8ab6cfc0 100644
--- a/sound/soc/qcom/qdsp6/q6core.c
+++ b/sound/soc/qcom/qdsp6/q6core.c
@@ -10,7 +10,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/jiffies.h>
-#include <linux/wait.h>
#include <linux/soc/qcom/apr.h>
#include "q6core.h"
#include "q6dsp-errno.h"
@@ -105,12 +104,10 @@ static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
bytes = sizeof(*fwk) + fwk->num_services *
sizeof(fwk->svc_api_info[0]);
- core->fwk_version = kzalloc(bytes, GFP_ATOMIC);
+ core->fwk_version = kmemdup(data->payload, bytes, GFP_ATOMIC);
if (!core->fwk_version)
return -ENOMEM;
- memcpy(core->fwk_version, data->payload, bytes);
-
core->fwk_version_supported = true;
core->resp_received = true;
@@ -124,12 +121,10 @@ static int q6core_callback(struct apr_device *adev, struct apr_resp_pkt *data)
len = sizeof(*v) + v->num_services * sizeof(v->svc_api_info[0]);
- core->svc_version = kzalloc(len, GFP_ATOMIC);
+ core->svc_version = kmemdup(data->payload, len, GFP_ATOMIC);
if (!core->svc_version)
return -ENOMEM;
- memcpy(core->svc_version, data->payload, len);
-
core->get_version_supported = true;
core->resp_received = true;
diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c
index 2a781d87ee65..9effbecc571f 100644
--- a/sound/soc/qcom/sdm845.c
+++ b/sound/soc/qcom/sdm845.c
@@ -195,15 +195,14 @@ static int sdm845_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
static void sdm845_add_be_ops(struct snd_soc_card *card)
{
- struct snd_soc_dai_link *link = card->dai_link;
- int i, num_links = card->num_links;
+ struct snd_soc_dai_link *link;
+ int i;
- for (i = 0; i < num_links; i++) {
+ for_each_card_prelinks(card, i, link) {
if (link->no_pcm == 1) {
link->ops = &sdm845_be_ops;
link->be_hw_params_fixup = sdm845_be_hw_params_fixup;
}
- link++;
}
}
diff --git a/sound/soc/rockchip/rk3288_hdmi_analog.c b/sound/soc/rockchip/rk3288_hdmi_analog.c
index 929b3fe289b0..a472d5eb2950 100644
--- a/sound/soc/rockchip/rk3288_hdmi_analog.c
+++ b/sound/soc/rockchip/rk3288_hdmi_analog.c
@@ -286,7 +286,6 @@ static struct platform_driver rockchip_sound_driver = {
.probe = snd_rk_mc_probe,
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.pm = &snd_soc_pm_ops,
.of_match_table = rockchip_sound_of_match,
},
diff --git a/sound/soc/rockchip/rockchip_pcm.c b/sound/soc/rockchip/rockchip_pcm.c
index f77538319221..9e7b5fa4cf59 100644
--- a/sound/soc/rockchip/rockchip_pcm.c
+++ b/sound/soc/rockchip/rockchip_pcm.c
@@ -21,7 +21,8 @@ static const struct snd_pcm_hardware snd_rockchip_hardware = {
.info = SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME,
+ SNDRV_PCM_INFO_RESUME |
+ SNDRV_PCM_INFO_INTERLEAVED,
.period_bytes_min = 32,
.period_bytes_max = 8192,
.periods_min = 1,
diff --git a/sound/soc/samsung/tm2_wm5110.c b/sound/soc/samsung/tm2_wm5110.c
index 43332c32d7e9..dc93941e01c3 100644
--- a/sound/soc/samsung/tm2_wm5110.c
+++ b/sound/soc/samsung/tm2_wm5110.c
@@ -491,6 +491,7 @@ static int tm2_probe(struct platform_device *pdev)
struct snd_soc_card *card = &tm2_card;
struct tm2_machine_priv *priv;
struct of_phandle_args args;
+ struct snd_soc_dai_link *dai_link;
int num_codecs, ret, i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -558,18 +559,18 @@ static int tm2_probe(struct platform_device *pdev)
}
/* Initialize WM5110 - I2S and HDMI - I2S1 DAI links */
- for (i = 0; i < card->num_links; i++) {
+ for_each_card_prelinks(card, i, dai_link) {
unsigned int dai_index = 0; /* WM5110 */
- card->dai_link[i].cpu_name = NULL;
- card->dai_link[i].platform_name = NULL;
+ dai_link->cpu_name = NULL;
+ dai_link->platform_name = NULL;
if (num_codecs > 1 && i == card->num_links - 1)
dai_index = 1; /* HDMI */
- card->dai_link[i].codec_of_node = codec_dai_node[dai_index];
- card->dai_link[i].cpu_of_node = cpu_dai_node[dai_index];
- card->dai_link[i].platform_of_node = cpu_dai_node[dai_index];
+ dai_link->codec_of_node = codec_dai_node[dai_index];
+ dai_link->cpu_of_node = cpu_dai_node[dai_index];
+ dai_link->platform_of_node = cpu_dai_node[dai_index];
}
if (num_codecs > 1) {
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index c2b496398e6b..17622ceb98c0 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -319,13 +319,12 @@ static int hac_soc_platform_probe(struct platform_device *pdev)
if (ret != 0)
return ret;
- return snd_soc_register_component(&pdev->dev, &sh4_hac_component,
+ return devm_snd_soc_register_component(&pdev->dev, &sh4_hac_component,
sh4_hac_dai, ARRAY_SIZE(sh4_hac_dai));
}
static int hac_soc_platform_remove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
snd_soc_set_ac97_ops(NULL);
return 0;
}
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 051f96405346..28327dd2c6cb 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -582,7 +582,7 @@ static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
int i;
for_each_rsnd_clk(clk, adg, i)
- dev_dbg(dev, "%s : %p : %ld\n",
+ dev_dbg(dev, "%s : %pa : %ld\n",
clk_name[i], clk, clk_get_rate(clk));
dev_dbg(dev, "BRGCKR = 0x%08x, BRRA/BRRB = 0x%x/0x%x\n",
@@ -595,7 +595,7 @@ static void rsnd_adg_clk_dbg_info(struct rsnd_priv *priv, struct rsnd_adg *adg)
* by BRGCKR::BRGCKR_31
*/
for_each_rsnd_clkout(clk, adg, i)
- dev_dbg(dev, "clkout %d : %p : %ld\n", i,
+ dev_dbg(dev, "clkout %d : %pa : %ld\n", i,
clk, clk_get_rate(clk));
}
#else
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index d23c2bbff0cf..f930f51b686f 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -102,7 +102,9 @@
#include "rsnd.h"
#define RSND_RATES SNDRV_PCM_RATE_8000_192000
-#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+#define RSND_FMTS (SNDRV_PCM_FMTBIT_S8 |\
+ SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S24_LE)
static const struct of_device_id rsnd_of_match[] = {
{ .compatible = "renesas,rcar_sound-gen1", .data = (void *)RSND_GEN1 },
@@ -280,6 +282,8 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
struct device *dev = rsnd_priv_to_dev(priv);
switch (snd_pcm_format_width(runtime->format)) {
+ case 8:
+ return 16 << 16;
case 16:
return 8 << 16;
case 24:
@@ -331,7 +335,7 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
target = cmd ? cmd : ssiu;
}
- /* Non target mod or 24bit data needs normal DALIGN */
+ /* Non-target mod or non-16bit data needs normal DALIGN */
if ((snd_pcm_format_width(runtime->format) != 16) ||
(mod != target))
return 0x76543210;
@@ -367,7 +371,7 @@ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
* HW 24bit data is located as 0x******00
*
*/
- if (snd_pcm_format_width(runtime->format) == 16)
+ if (snd_pcm_format_width(runtime->format) != 24)
return 0;
for (i = 0; i < ARRAY_SIZE(playback_mods); i++) {
@@ -540,6 +544,14 @@ int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
return rdai->ssi_lane;
}
+int rsnd_rdai_width_ctrl(struct rsnd_dai *rdai, int width)
+{
+ if (width > 0)
+ rdai->chan_width = width;
+
+ return rdai->chan_width;
+}
+
struct rsnd_dai *rsnd_rdai_get(struct rsnd_priv *priv, int id)
{
if ((id < 0) || (id >= rsnd_rdai_nr(priv)))
@@ -681,6 +693,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
rdai->frm_clk_inv = 0;
break;
case SND_SOC_DAIFMT_LEFT_J:
+ case SND_SOC_DAIFMT_DSP_B:
rdai->sys_delay = 1;
rdai->data_alignment = 0;
rdai->frm_clk_inv = 1;
@@ -690,6 +703,11 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
rdai->data_alignment = 1;
rdai->frm_clk_inv = 1;
break;
+ case SND_SOC_DAIFMT_DSP_A:
+ rdai->sys_delay = 0;
+ rdai->data_alignment = 0;
+ rdai->frm_clk_inv = 1;
+ break;
}
/* set clock inversion */
@@ -720,6 +738,16 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
struct device *dev = rsnd_priv_to_dev(priv);
+ switch (slot_width) {
+ case 16:
+ case 24:
+ case 32:
+ break;
+ default:
+ /* use default */
+ slot_width = 32;
+ }
+
switch (slots) {
case 2:
case 6:
@@ -727,6 +755,7 @@ static int rsnd_soc_set_dai_tdm_slot(struct snd_soc_dai *dai,
/* TDM Extend Mode */
rsnd_rdai_channels_set(rdai, slots);
rsnd_rdai_ssi_lane_set(rdai, 1);
+ rsnd_rdai_width_set(rdai, slot_width);
break;
default:
dev_err(dev, "unsupported TDM slots (%d)\n", slots);
@@ -755,7 +784,7 @@ static unsigned int rsnd_soc_hw_rate_list[] = {
192000,
};
-static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
+static int rsnd_soc_hw_rule(struct rsnd_dai *rdai,
unsigned int *list, int list_num,
struct snd_interval *baseline, struct snd_interval *iv)
{
@@ -772,14 +801,14 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
if (!snd_interval_test(iv, list[i]))
continue;
- rate = rsnd_ssi_clk_query(priv,
+ rate = rsnd_ssi_clk_query(rdai,
baseline->min, list[i], NULL);
if (rate > 0) {
p.min = min(p.min, list[i]);
p.max = max(p.max, list[i]);
}
- rate = rsnd_ssi_clk_query(priv,
+ rate = rsnd_ssi_clk_query(rdai,
baseline->max, list[i], NULL);
if (rate > 0) {
p.min = min(p.min, list[i]);
@@ -790,17 +819,14 @@ static int rsnd_soc_hw_rule(struct rsnd_priv *priv,
return snd_interval_refine(iv, &p);
}
-static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule,
- int is_play)
+static int rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval ic;
- struct snd_soc_dai *dai = rule->private;
- struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
- struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
+ struct rsnd_dai_stream *io = rule->private;
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
/*
* possible sampling rate limitation is same as
@@ -811,34 +837,19 @@ static int __rsnd_soc_hw_rule_rate(struct snd_pcm_hw_params *params,
ic.min =
ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
- return rsnd_soc_hw_rule(priv, rsnd_soc_hw_rate_list,
+ return rsnd_soc_hw_rule(rdai, rsnd_soc_hw_rate_list,
ARRAY_SIZE(rsnd_soc_hw_rate_list),
&ic, ir);
}
-static int rsnd_soc_hw_rule_rate_playback(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_rate(params, rule, 1);
-}
-
-static int rsnd_soc_hw_rule_rate_capture(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_rate(params, rule, 0);
-}
-
-static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule,
- int is_play)
+static int rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
{
struct snd_interval *ic_ = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
struct snd_interval *ir = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
struct snd_interval ic;
- struct snd_soc_dai *dai = rule->private;
- struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
- struct rsnd_dai_stream *io = is_play ? &rdai->playback : &rdai->capture;
+ struct rsnd_dai_stream *io = rule->private;
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
/*
* possible sampling rate limitation is same as
@@ -849,23 +860,11 @@ static int __rsnd_soc_hw_rule_channels(struct snd_pcm_hw_params *params,
ic.min =
ic.max = rsnd_runtime_channel_for_ssi_with_params(io, params);
- return rsnd_soc_hw_rule(priv, rsnd_soc_hw_channels_list,
+ return rsnd_soc_hw_rule(rdai, rsnd_soc_hw_channels_list,
ARRAY_SIZE(rsnd_soc_hw_channels_list),
ir, &ic);
}
-static int rsnd_soc_hw_rule_channels_playback(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_channels(params, rule, 1);
-}
-
-static int rsnd_soc_hw_rule_channels_capture(struct snd_pcm_hw_params *params,
- struct snd_pcm_hw_rule *rule)
-{
- return __rsnd_soc_hw_rule_channels(params, rule, 0);
-}
-
static const struct snd_pcm_hardware rsnd_pcm_hardware = {
.info = SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_MMAP |
@@ -882,12 +881,10 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
- struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
struct snd_pcm_hw_constraint_list *constraint = &rdai->constraint;
struct snd_pcm_runtime *runtime = substream->runtime;
unsigned int max_channels = rsnd_rdai_channels_get(rdai);
- int ret;
int i;
rsnd_dai_stream_init(io, substream);
@@ -922,25 +919,16 @@ static int rsnd_soc_dai_startup(struct snd_pcm_substream *substream,
int is_play = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
- is_play ? rsnd_soc_hw_rule_rate_playback :
- rsnd_soc_hw_rule_rate_capture,
- dai,
+ rsnd_soc_hw_rule_rate,
+ is_play ? &rdai->playback : &rdai->capture,
SNDRV_PCM_HW_PARAM_CHANNELS, -1);
snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
- is_play ? rsnd_soc_hw_rule_channels_playback :
- rsnd_soc_hw_rule_channels_capture,
- dai,
+ rsnd_soc_hw_rule_channels,
+ is_play ? &rdai->playback : &rdai->capture,
SNDRV_PCM_HW_PARAM_RATE, -1);
}
- /*
- * call rsnd_dai_call without spinlock
- */
- ret = rsnd_dai_call(nolock_start, io, priv);
- if (ret < 0)
- rsnd_dai_call(nolock_stop, io, priv);
-
- return ret;
+ return 0;
}
static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
@@ -953,7 +941,7 @@ static void rsnd_soc_dai_shutdown(struct snd_pcm_substream *substream,
/*
* call rsnd_dai_call without spinlock
*/
- rsnd_dai_call(nolock_stop, io, priv);
+ rsnd_dai_call(cleanup, io, priv);
rsnd_dai_stream_quit(io);
}
@@ -1083,6 +1071,7 @@ static void __rsnd_dai_probe(struct rsnd_priv *priv,
rdai->capture.rdai = rdai;
rsnd_rdai_channels_set(rdai, 2); /* default 2ch */
rsnd_rdai_ssi_lane_set(rdai, 1); /* default 1lane */
+ rsnd_rdai_width_set(rdai, 32); /* default 32bit width */
for (io_i = 0;; io_i++) {
playback = of_parse_phandle(dai_np, "playback", io_i);
@@ -1274,8 +1263,15 @@ int rsnd_kctrl_accept_anytime(struct rsnd_dai_stream *io)
int rsnd_kctrl_accept_runtime(struct rsnd_dai_stream *io)
{
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ if (!runtime) {
+ dev_warn(dev, "Can't update kctrl when idle\n");
+ return 0;
+ }
- return !!runtime;
+ return 1;
}
struct rsnd_kctrl_cfg *rsnd_kctrl_init_m(struct rsnd_kctrl_cfg_m *cfg)
diff --git a/sound/soc/sh/rcar/ctu.c b/sound/soc/sh/rcar/ctu.c
index 6a55aa753003..ad702377a6c3 100644
--- a/sound/soc/sh/rcar/ctu.c
+++ b/sound/soc/sh/rcar/ctu.c
@@ -258,7 +258,7 @@ static int rsnd_ctu_hw_params(struct rsnd_mod *mod,
struct snd_pcm_hw_params *be_params;
int stream = substream->stream;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
be_params = &dpcm->hw_params;
if (params_channels(fe_params) != params_channels(be_params))
ctu->channels = params_channels(be_params);
diff --git a/sound/soc/sh/rcar/dma.c b/sound/soc/sh/rcar/dma.c
index d65ea7bc4dac..6d1947515dc8 100644
--- a/sound/soc/sh/rcar/dma.c
+++ b/sound/soc/sh/rcar/dma.c
@@ -106,9 +106,9 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv)
+static int rsnd_dmaen_cleanup(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
@@ -116,7 +116,7 @@ static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
/*
* DMAEngine release uses mutex lock.
* Thus, it shouldn't be called under spinlock.
- * Let's call it under nolock_start
+ * Let's call it under cleanup
*/
if (dmaen->chan)
dma_release_channel(dmaen->chan);
@@ -126,23 +126,22 @@ static int rsnd_dmaen_nolock_stop(struct rsnd_mod *mod,
return 0;
}
-static int rsnd_dmaen_nolock_start(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv)
+static int rsnd_dmaen_prepare(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
{
struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
struct device *dev = rsnd_priv_to_dev(priv);
- if (dmaen->chan) {
- dev_err(dev, "it already has dma channel\n");
- return -EIO;
- }
+ /* maybe suspended */
+ if (dmaen->chan)
+ return 0;
/*
* DMAEngine request uses mutex lock.
* Thus, it shouldn't be called under spinlock.
- * Let's call it under nolock_start
+ * Let's call it under prepare
*/
dmaen->chan = rsnd_dmaen_request_channel(io,
dma->mod_from,
@@ -291,8 +290,8 @@ static int rsnd_dmaen_pointer(struct rsnd_mod *mod,
static struct rsnd_mod_ops rsnd_dmaen_ops = {
.name = "audmac",
- .nolock_start = rsnd_dmaen_nolock_start,
- .nolock_stop = rsnd_dmaen_nolock_stop,
+ .prepare = rsnd_dmaen_prepare,
+ .cleanup = rsnd_dmaen_cleanup,
.start = rsnd_dmaen_start,
.stop = rsnd_dmaen_stop,
.pointer= rsnd_dmaen_pointer,
@@ -302,16 +301,26 @@ static struct rsnd_mod_ops rsnd_dmaen_ops = {
* Audio DMAC peri peri
*/
static const u8 gen2_id_table_ssiu[] = {
- 0x00, /* SSI00 */
- 0x04, /* SSI10 */
- 0x08, /* SSI20 */
- 0x0c, /* SSI3 */
- 0x0d, /* SSI4 */
- 0x0e, /* SSI5 */
- 0x0f, /* SSI6 */
- 0x10, /* SSI7 */
- 0x11, /* SSI8 */
- 0x12, /* SSI90 */
+ /* SSI00 ~ SSI07 */
+ 0x00, 0x01, 0x02, 0x03, 0x39, 0x3a, 0x3b, 0x3c,
+ /* SSI10 ~ SSI17 */
+ 0x04, 0x05, 0x06, 0x07, 0x3d, 0x3e, 0x3f, 0x40,
+ /* SSI20 ~ SSI27 */
+ 0x08, 0x09, 0x0a, 0x0b, 0x41, 0x42, 0x43, 0x44,
+ /* SSI30 ~ SSI37 */
+ 0x0c, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b,
+ /* SSI40 ~ SSI47 */
+ 0x0d, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51, 0x52,
+ /* SSI5 */
+ 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI6 */
+ 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI7 */
+ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI8 */
+ 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* SSI90 ~ SSI97 */
+ 0x12, 0x13, 0x14, 0x15, 0x53, 0x54, 0x55, 0x56,
};
static const u8 gen2_id_table_scu[] = {
0x2d, /* SCU_SRCI0 */
@@ -337,18 +346,23 @@ static u32 rsnd_dmapp_get_id(struct rsnd_dai_stream *io,
struct rsnd_mod *src = rsnd_io_to_mod_src(io);
struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
const u8 *entry = NULL;
- int id = rsnd_mod_id(mod);
+ int id = 255;
int size = 0;
if (mod == ssi) {
+ int busif = rsnd_ssi_get_busif(io);
+
entry = gen2_id_table_ssiu;
size = ARRAY_SIZE(gen2_id_table_ssiu);
+ id = (rsnd_mod_id(mod) * 8) + busif;
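+ /*
+ * Illustrative example (not from the patch itself): the table is
+ * laid out as SSIx-BUSIFy -> (x * 8) + y, so SSI1-BUSIF2 maps to
+ * entry 10, i.e. 0x06 in gen2_id_table_ssiu.
+ */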
} else if (mod == src) {
entry = gen2_id_table_scu;
size = ARRAY_SIZE(gen2_id_table_scu);
+ id = rsnd_mod_id(mod);
} else if (mod == dvc) {
entry = gen2_id_table_cmd;
size = ARRAY_SIZE(gen2_id_table_cmd);
+ id = rsnd_mod_id(mod);
}
if ((!entry) || (size <= id)) {
@@ -382,7 +396,7 @@ static void rsnd_dmapp_write(struct rsnd_dma *dma, u32 data, u32 reg)
struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
struct device *dev = rsnd_priv_to_dev(priv);
- dev_dbg(dev, "w %p : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
+ dev_dbg(dev, "w 0x%px : %08x\n", rsnd_dmapp_addr(dmac, dma, reg), data);
iowrite32(data, rsnd_dmapp_addr(dmac, dma, reg));
}
@@ -491,11 +505,11 @@ static struct rsnd_mod_ops rsnd_dmapp_ops = {
#define RDMA_SSI_I_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0x8)
#define RDMA_SSI_O_N(addr, i) (addr ##_reg - 0x00300000 + (0x40 * i) + 0xc)
-#define RDMA_SSIU_I_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
-#define RDMA_SSIU_O_N(addr, i) (addr ##_reg - 0x00441000 + (0x1000 * i))
+#define RDMA_SSIU_I_N(addr, i, j) (addr ##_reg - 0x00441000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_O_N(addr, i, j) RDMA_SSIU_I_N(addr, i, j)
-#define RDMA_SSIU_I_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
-#define RDMA_SSIU_O_P(addr, i) (addr ##_reg - 0x00141000 + (0x1000 * i))
+#define RDMA_SSIU_I_P(addr, i, j) (addr ##_reg - 0x00141000 + (0x1000 * (i)) + (((j) / 4) * 0xA000) + (((j) % 4) * 0x400))
+#define RDMA_SSIU_O_P(addr, i, j) RDMA_SSIU_I_P(addr, i, j)
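+
+/*
+ * Worked example (illustrative): for SSI0 BUSIF5, i = 0 and j = 5, so
+ * the BUSIF term adds (5 / 4) * 0xA000 + (5 % 4) * 0x400 = 0xA400 to
+ * the per-SSI base; BUSIF0-3 stay within the 0x1000 window while
+ * BUSIF4-7 sit 0xA000 above it.
+ */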
#define RDMA_SRC_I_N(addr, i) (addr ##_reg - 0x00500000 + (0x400 * i))
#define RDMA_SRC_O_N(addr, i) (addr ##_reg - 0x004fc000 + (0x400 * i))
@@ -521,6 +535,7 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
!!rsnd_io_to_mod_mix(io) ||
!!rsnd_io_to_mod_ctu(io);
int id = rsnd_mod_id(mod);
+ int busif = rsnd_ssi_get_busif(io);
struct dma_addr {
dma_addr_t out_addr;
dma_addr_t in_addr;
@@ -537,25 +552,35 @@ rsnd_gen2_dma_addr(struct rsnd_dai_stream *io,
},
/* SSI */
/* Capture */
- {{{ RDMA_SSI_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
+ {{{ RDMA_SSI_O_N(ssi, id), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
/* Playback */
- {{ 0, RDMA_SSI_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } }
+ {{ 0, RDMA_SSI_I_N(ssi, id) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) } }
},
/* SSIU */
/* Capture */
- {{{ RDMA_SSIU_O_N(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 },
- { RDMA_SSIU_O_P(ssi, id), 0 } },
+ {{{ RDMA_SSIU_O_N(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 },
+ { RDMA_SSIU_O_P(ssi, id, busif), 0 } },
/* Playback */
- {{ 0, RDMA_SSIU_I_N(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) },
- { 0, RDMA_SSIU_I_P(ssi, id) } } },
+ {{ 0, RDMA_SSIU_I_N(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) },
+ { 0, RDMA_SSIU_I_P(ssi, id, busif) } } },
};
+ /*
+ * FIXME
+ *
+ * We can't support SSI9-4/5/6/7 because their addresses fall
+ * outside of the calculation rule.
+ */
+ if ((id == 9) && (busif >= 4))
+ dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
+ id, busif);
+
/* it shouldn't happen */
if (use_cmd && !use_src)
dev_err(dev, "DVC is selected without SRC\n");
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
index 0230301fe078..1f7881cc16b2 100644
--- a/sound/soc/sh/rcar/gen.c
+++ b/sound/soc/sh/rcar/gen.c
@@ -219,12 +219,33 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
RSND_GEN_S_REG(HDMI1_SEL, 0x9e4),
/* FIXME: it needs SSI_MODE2/3 in the future */
- RSND_GEN_M_REG(SSI_BUSIF_MODE, 0x0, 0x80),
- RSND_GEN_M_REG(SSI_BUSIF_ADINR, 0x4, 0x80),
- RSND_GEN_M_REG(SSI_BUSIF_DALIGN,0x8, 0x80),
- RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
- RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
- RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_MODE, 0x0, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_ADINR, 0x4, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF0_DALIGN, 0x8, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_MODE, 0x20, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_ADINR, 0x24, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF1_DALIGN, 0x28, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_MODE, 0x40, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_ADINR, 0x44, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF2_DALIGN, 0x48, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_MODE, 0x60, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_ADINR, 0x64, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF3_DALIGN, 0x68, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_MODE, 0x500, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_ADINR, 0x504, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF4_DALIGN, 0x508, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_MODE, 0x520, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_ADINR, 0x524, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF5_DALIGN, 0x528, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_MODE, 0x540, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_ADINR, 0x544, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF6_DALIGN, 0x548, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_MODE, 0x560, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_ADINR, 0x564, 0x80),
+ RSND_GEN_M_REG(SSI_BUSIF7_DALIGN, 0x568, 0x80),
+ RSND_GEN_M_REG(SSI_MODE, 0xc, 0x80),
+ RSND_GEN_M_REG(SSI_CTRL, 0x10, 0x80),
+ RSND_GEN_M_REG(SSI_INT_ENABLE, 0x18, 0x80),
};
static const struct rsnd_regmap_field_conf conf_scu[] = {
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
index 8f7a0abfa751..4464d1d0a042 100644
--- a/sound/soc/sh/rcar/rsnd.h
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -156,9 +156,30 @@ enum rsnd_reg {
RSND_REG_SSI_MODE2,
RSND_REG_SSI_CONTROL,
RSND_REG_SSI_CTRL,
- RSND_REG_SSI_BUSIF_MODE,
- RSND_REG_SSI_BUSIF_ADINR,
- RSND_REG_SSI_BUSIF_DALIGN,
+ RSND_REG_SSI_BUSIF0_MODE,
+ RSND_REG_SSI_BUSIF0_ADINR,
+ RSND_REG_SSI_BUSIF0_DALIGN,
+ RSND_REG_SSI_BUSIF1_MODE,
+ RSND_REG_SSI_BUSIF1_ADINR,
+ RSND_REG_SSI_BUSIF1_DALIGN,
+ RSND_REG_SSI_BUSIF2_MODE,
+ RSND_REG_SSI_BUSIF2_ADINR,
+ RSND_REG_SSI_BUSIF2_DALIGN,
+ RSND_REG_SSI_BUSIF3_MODE,
+ RSND_REG_SSI_BUSIF3_ADINR,
+ RSND_REG_SSI_BUSIF3_DALIGN,
+ RSND_REG_SSI_BUSIF4_MODE,
+ RSND_REG_SSI_BUSIF4_ADINR,
+ RSND_REG_SSI_BUSIF4_DALIGN,
+ RSND_REG_SSI_BUSIF5_MODE,
+ RSND_REG_SSI_BUSIF5_ADINR,
+ RSND_REG_SSI_BUSIF5_DALIGN,
+ RSND_REG_SSI_BUSIF6_MODE,
+ RSND_REG_SSI_BUSIF6_ADINR,
+ RSND_REG_SSI_BUSIF6_DALIGN,
+ RSND_REG_SSI_BUSIF7_MODE,
+ RSND_REG_SSI_BUSIF7_ADINR,
+ RSND_REG_SSI_BUSIF7_DALIGN,
RSND_REG_SSI_INT_ENABLE,
RSND_REG_SSI_SYS_STATUS0,
RSND_REG_SSI_SYS_STATUS1,
@@ -274,15 +295,12 @@ struct rsnd_mod_ops {
int (*fallback)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
- int (*nolock_start)(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv);
- int (*nolock_stop)(struct rsnd_mod *mod,
- struct rsnd_dai_stream *io,
- struct rsnd_priv *priv);
int (*prepare)(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv);
+ int (*cleanup)(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv);
};
struct rsnd_dai_stream;
@@ -300,9 +318,8 @@ struct rsnd_mod {
/*
* status
*
- * 0xH0000CBA
+ * 0xH0000CB0
*
- * A 0: nolock_start 1: nolock_stop
* B 0: init 1: quit
* C 0: start 1: stop
*
@@ -313,9 +330,8 @@ struct rsnd_mod {
* H 0: hw_params
* H 0: pointer
* H 0: prepare
+ * H 0: cleanup
*/
-#define __rsnd_mod_shift_nolock_start 0
-#define __rsnd_mod_shift_nolock_stop 0
#define __rsnd_mod_shift_init 4
#define __rsnd_mod_shift_quit 4
#define __rsnd_mod_shift_start 8
@@ -328,11 +344,12 @@ struct rsnd_mod {
#define __rsnd_mod_shift_hw_params 28 /* always called */
#define __rsnd_mod_shift_pointer 28 /* always called */
#define __rsnd_mod_shift_prepare 28 /* always called */
+#define __rsnd_mod_shift_cleanup 28 /* always called */
#define __rsnd_mod_add_probe 0
#define __rsnd_mod_add_remove 0
-#define __rsnd_mod_add_nolock_start 1
-#define __rsnd_mod_add_nolock_stop -1
+#define __rsnd_mod_add_prepare 0
+#define __rsnd_mod_add_cleanup 0
#define __rsnd_mod_add_init 1
#define __rsnd_mod_add_quit -1
#define __rsnd_mod_add_start 1
@@ -342,10 +359,11 @@ struct rsnd_mod {
#define __rsnd_mod_add_fallback 0
#define __rsnd_mod_add_hw_params 0
#define __rsnd_mod_add_pointer 0
-#define __rsnd_mod_add_prepare 0
#define __rsnd_mod_call_probe 0
#define __rsnd_mod_call_remove 0
+#define __rsnd_mod_call_prepare 0
+#define __rsnd_mod_call_cleanup 0
#define __rsnd_mod_call_init 0
#define __rsnd_mod_call_quit 1
#define __rsnd_mod_call_start 0
@@ -355,9 +373,6 @@ struct rsnd_mod {
#define __rsnd_mod_call_fallback 0
#define __rsnd_mod_call_hw_params 0
#define __rsnd_mod_call_pointer 0
-#define __rsnd_mod_call_nolock_start 0
-#define __rsnd_mod_call_nolock_stop 1
-#define __rsnd_mod_call_prepare 0
#define rsnd_mod_to_priv(mod) ((mod)->priv)
#define rsnd_mod_name(mod) ((mod)->ops->name)
@@ -438,6 +453,7 @@ struct rsnd_dai_stream {
char name[RSND_DAI_NAME_SIZE];
struct snd_pcm_substream *substream;
struct rsnd_mod *mod[RSND_MOD_MAX];
+ struct rsnd_mod *dma;
struct rsnd_dai *rdai;
struct device *dmac_dev; /* for IPMMU */
u32 parent_ssi_status;
@@ -467,6 +483,7 @@ struct rsnd_dai {
int max_channels; /* 2ch - 16ch */
int ssi_lane; /* 1lane - 4lane */
+ int chan_width; /* 16/24/32 bit width */
unsigned int clk_master:1;
unsigned int bit_clk_inv:1;
@@ -500,6 +517,11 @@ int rsnd_rdai_channels_ctrl(struct rsnd_dai *rdai,
int rsnd_rdai_ssi_lane_ctrl(struct rsnd_dai *rdai,
int ssi_lane);
+#define rsnd_rdai_width_set(rdai, width) \
+ rsnd_rdai_width_ctrl(rdai, width)
+#define rsnd_rdai_width_get(rdai) \
+ rsnd_rdai_width_ctrl(rdai, 0)
+int rsnd_rdai_width_ctrl(struct rsnd_dai *rdai, int width);
void rsnd_dai_period_elapsed(struct rsnd_dai_stream *io);
int rsnd_dai_connect(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
@@ -692,6 +714,7 @@ void rsnd_ssi_remove(struct rsnd_priv *priv);
struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
int rsnd_ssi_is_dma_mode(struct rsnd_mod *mod);
int rsnd_ssi_use_busif(struct rsnd_dai_stream *io);
+int rsnd_ssi_get_busif(struct rsnd_dai_stream *io);
u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io);
#define RSND_SSI_HDMI_PORT0 0xf0
@@ -709,7 +732,7 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod);
void rsnd_parse_connect_ssi(struct rsnd_dai *rdai,
struct device_node *playback,
struct device_node *capture);
-unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+unsigned int rsnd_ssi_clk_query(struct rsnd_dai *rdai,
int param1, int param2, int *idx);
/*
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index beccfbac7581..cd38a43b976f 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -158,7 +158,7 @@ static int rsnd_src_hw_params(struct rsnd_mod *mod,
struct snd_soc_dpcm *dpcm;
struct snd_pcm_hw_params *be_params;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
be_params = &dpcm->hw_params;
if (params_rate(fe_params) != params_rate(be_params))
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 3f880ec66459..fcb4df23248c 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -42,7 +42,13 @@
#define DWL_24 (5 << 19) /* Data Word Length */
#define DWL_32 (6 << 19) /* Data Word Length */
+/*
+ * System word length
+ */
+#define SWL_16 (1 << 16) /* R/W System Word Length */
+#define SWL_24 (2 << 16) /* R/W System Word Length */
#define SWL_32 (3 << 16) /* R/W System Word Length */
+
#define SCKD (1 << 15) /* Serial Bit Clock Direction */
#define SWSD (1 << 14) /* Serial WS Direction */
#define SCKP (1 << 13) /* Serial Bit Clock Polarity */
@@ -72,7 +78,6 @@
struct rsnd_ssi {
struct rsnd_mod mod;
- struct rsnd_mod *dma;
u32 flags;
u32 cr_own;
@@ -145,6 +150,11 @@ int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
return use_busif;
}
+int rsnd_ssi_get_busif(struct rsnd_dai_stream *io)
+{
+ return 0; /* BUSIF0 only for now */
+}
+
static void rsnd_ssi_status_clear(struct rsnd_mod *mod)
{
rsnd_mod_write(mod, SSISR, 0);
@@ -220,14 +230,32 @@ u32 rsnd_ssi_multi_slaves_runtime(struct rsnd_dai_stream *io)
return 0;
}
-unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
+static u32 rsnd_rdai_width_to_swl(struct rsnd_dai *rdai)
+{
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int width = rsnd_rdai_width_get(rdai);
+
+ switch (width) {
+ case 32: return SWL_32;
+ case 24: return SWL_24;
+ case 16: return SWL_16;
+ }
+
+ dev_err(dev, "unsupported slot width value: %d\n", width);
+ return 0;
+}
+
+unsigned int rsnd_ssi_clk_query(struct rsnd_dai *rdai,
int param1, int param2, int *idx)
{
+ struct rsnd_priv *priv = rsnd_rdai_to_priv(rdai);
int ssi_clk_mul_table[] = {
1, 2, 4, 8, 16, 6, 12,
};
int j, ret;
unsigned int main_rate;
+ int width = rsnd_rdai_width_get(rdai);
for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
@@ -240,12 +268,7 @@ unsigned int rsnd_ssi_clk_query(struct rsnd_priv *priv,
if (j == 0)
continue;
- /*
- * this driver is assuming that
- * system word is 32bit x chan
- * see rsnd_ssi_init()
- */
- main_rate = 32 * param1 * param2 * ssi_clk_mul_table[j];
+ main_rate = width * param1 * param2 * ssi_clk_mul_table[j];
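+ /*
+ * Worked example (illustrative): a 32bit system word at 48kHz with
+ * 2 channels and the x4 multiplier makes the line above query
+ * main_rate = 32 * 48000 * 2 * 4 = 12288000 (12.288MHz).
+ */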
ret = rsnd_adg_clk_query(priv, main_rate);
if (ret < 0)
@@ -289,10 +312,15 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
return -EINVAL;
}
+ if (ssi->chan != chan) {
+ dev_err(dev, "SSI parent/child should use same chan\n");
+ return -EINVAL;
+ }
+
return 0;
}
- main_rate = rsnd_ssi_clk_query(priv, rate, chan, &idx);
+ main_rate = rsnd_ssi_clk_query(rdai, rate, chan, &idx);
if (!main_rate) {
dev_err(dev, "unsupported clock rate\n");
return -EIO;
@@ -312,9 +340,11 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod,
* SSICR : FORCE, SCKD, SWSD
* SSIWSR : CONT
*/
- ssi->cr_clk = FORCE | SWL_32 | SCKD | SWSD | CKDV(idx);
+ ssi->cr_clk = FORCE | rsnd_rdai_width_to_swl(rdai) |
+ SCKD | SWSD | CKDV(idx);
ssi->wsr = CONT;
ssi->rate = rate;
+ ssi->chan = chan;
dev_dbg(dev, "%s[%d] outputs %u Hz\n",
rsnd_mod_name(mod),
@@ -340,6 +370,7 @@ static void rsnd_ssi_master_clk_stop(struct rsnd_mod *mod,
ssi->cr_clk = 0;
ssi->rate = 0;
+ ssi->chan = 0;
rsnd_adg_ssi_clk_stop(mod);
}
@@ -357,15 +388,11 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
is_tdm = rsnd_runtime_is_ssi_tdm(io);
- /*
- * always use 32bit system word.
- * see also rsnd_ssi_master_clk_enable()
- */
- cr_own |= FORCE | SWL_32;
+ cr_own |= FORCE | rsnd_rdai_width_to_swl(rdai);
if (rdai->bit_clk_inv)
cr_own |= SCKP;
- if (rdai->frm_clk_inv ^ is_tdm)
+ if (rdai->frm_clk_inv && !is_tdm)
cr_own |= SWSP;
if (rdai->data_alignment)
cr_own |= SDTA;
@@ -373,6 +400,17 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own |= DEL;
/*
+ * TDM Mode
+ * see
+ * rsnd_ssiu_init_gen2()
+ */
+ wsr = ssi->wsr;
+ if (is_tdm) {
+ wsr |= WS_MODE;
+ cr_own |= CHNL_8;
+ }
+
+ /*
* We shouldn't exchange SWSP after running.
* This means, parent needs to care it.
*/
@@ -384,6 +422,9 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_own &= ~DWL_MASK;
switch (snd_pcm_format_width(runtime->format)) {
+ case 8:
+ cr_own |= DWL_8;
+ break;
case 16:
cr_own |= DWL_16;
break;
@@ -399,16 +440,6 @@ static void rsnd_ssi_config_init(struct rsnd_mod *mod,
cr_mode = DIEN; /* PIO : enable Data interrupt */
}
- /*
- * TDM Extend Mode
- * see
- * rsnd_ssiu_init_gen2()
- */
- wsr = ssi->wsr;
- if (is_tdm) {
- wsr |= WS_MODE;
- cr_own |= CHNL_8;
- }
init_end:
ssi->cr_own = cr_own;
ssi->cr_mode = cr_mode;
@@ -488,26 +519,16 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod,
struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
- int chan = params_channels(params);
+ struct rsnd_dai *rdai = rsnd_io_to_rdai(io);
+ unsigned int fmt_width = snd_pcm_format_width(params_format(params));
- /*
- * snd_pcm_ops::hw_params will be called *before*
- * snd_soc_dai_ops::trigger. Thus, ssi->usrcnt is 0
- * in 1st call.
- */
- if (ssi->usrcnt) {
- /*
- * Already working.
- * It will happen if SSI has parent/child connection.
- * it is error if child <-> parent SSI uses
- * different channels.
- */
- if (ssi->chan != chan)
- return -EIO;
- }
+ if (fmt_width > rdai->chan_width) {
+ struct rsnd_priv *priv = rsnd_io_to_priv(io);
+ struct device *dev = rsnd_priv_to_dev(priv);
- ssi->chan = chan;
+ dev_err(dev, "invalid combination of slot-width and format-data-width\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -873,7 +894,6 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
- struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
int ret;
/*
@@ -888,7 +908,7 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
return ret;
/* SSI probe might be called many times in MUX multi path */
- ret = rsnd_dma_attach(io, mod, &ssi->dma);
+ ret = rsnd_dma_attach(io, mod, &io->dma);
return ret;
}
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
index 016fbf5ac242..39b67643b5dc 100644
--- a/sound/soc/sh/rcar/ssiu.c
+++ b/sound/soc/sh/rcar/ssiu.c
@@ -10,9 +10,12 @@
struct rsnd_ssiu {
struct rsnd_mod mod;
+ u32 busif_status[8]; /* for BUSIF0 - BUSIF7 */
+ unsigned int usrcnt;
};
#define rsnd_ssiu_nr(priv) ((priv)->ssiu_nr)
+#define rsnd_mod_to_ssiu(_mod) container_of((_mod), struct rsnd_ssiu, mod)
#define for_each_rsnd_ssiu(pos, priv, i) \
for (i = 0; \
(i < rsnd_ssiu_nr(priv)) && \
@@ -120,6 +123,7 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
int hdmi = rsnd_ssi_hdmi_port(io);
int ret;
u32 mode = 0;
@@ -128,6 +132,8 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
if (ret < 0)
return ret;
+ ssiu->usrcnt++;
+
if (rsnd_runtime_is_ssi_tdm(io)) {
/*
* TDM Extend Mode
@@ -140,15 +146,59 @@ static int rsnd_ssiu_init_gen2(struct rsnd_mod *mod,
rsnd_mod_write(mod, SSI_MODE, mode);
if (rsnd_ssi_use_busif(io)) {
- rsnd_mod_write(mod, SSI_BUSIF_ADINR,
- rsnd_get_adinr_bit(mod, io) |
- (rsnd_io_is_play(io) ?
- rsnd_runtime_channel_after_ctu(io) :
- rsnd_runtime_channel_original(io)));
- rsnd_mod_write(mod, SSI_BUSIF_MODE,
- rsnd_get_busif_shift(io, mod) | 1);
- rsnd_mod_write(mod, SSI_BUSIF_DALIGN,
- rsnd_get_dalign(mod, io));
+ int id = rsnd_mod_id(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
+ /*
+ * FIXME
+ *
+ * We can't support SSI9-4/5/6/7 because their addresses fall
+ * outside of the calculation rule.
+ */
+ if ((id == 9) && (busif >= 4)) {
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dev_err(dev, "This driver doesn't support SSI%d-%d, so far",
+ id, busif);
+ }
+
+#define RSND_WRITE_BUSIF(i) \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_ADINR, \
+ rsnd_get_adinr_bit(mod, io) | \
+ (rsnd_io_is_play(io) ? \
+ rsnd_runtime_channel_after_ctu(io) : \
+ rsnd_runtime_channel_original(io))); \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_MODE, \
+ rsnd_get_busif_shift(io, mod) | 1); \
+ rsnd_mod_write(mod, SSI_BUSIF##i##_DALIGN, \
+ rsnd_get_dalign(mod, io))
+
+ switch (busif) {
+ case 0:
+ RSND_WRITE_BUSIF(0);
+ break;
+ case 1:
+ RSND_WRITE_BUSIF(1);
+ break;
+ case 2:
+ RSND_WRITE_BUSIF(2);
+ break;
+ case 3:
+ RSND_WRITE_BUSIF(3);
+ break;
+ case 4:
+ RSND_WRITE_BUSIF(4);
+ break;
+ case 5:
+ RSND_WRITE_BUSIF(5);
+ break;
+ case 6:
+ RSND_WRITE_BUSIF(6);
+ break;
+ case 7:
+ RSND_WRITE_BUSIF(7);
+ break;
+ }
}
if (hdmi) {
@@ -194,10 +244,12 @@ static int rsnd_ssiu_start_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ int busif = rsnd_ssi_get_busif(io);
+
if (!rsnd_ssi_use_busif(io))
return 0;
- rsnd_mod_write(mod, SSI_CTRL, 0x1);
+ rsnd_mod_bset(mod, SSI_CTRL, 1 << (busif * 4), 1 << (busif * 4));
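+ /*
+ * Note (inferred from the shift, not stated in the patch): the
+ * per-BUSIF enable bits are spaced a nibble apart in SSI_CTRL, so
+ * e.g. BUSIF2 is started by setting bit 8 via 1 << (2 * 4).
+ */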
if (rsnd_ssi_multi_slaves_runtime(io))
rsnd_mod_write(mod, SSI_CONTROL, 0x1);
@@ -209,10 +261,16 @@ static int rsnd_ssiu_stop_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io,
struct rsnd_priv *priv)
{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
if (!rsnd_ssi_use_busif(io))
return 0;
- rsnd_mod_write(mod, SSI_CTRL, 0);
+ rsnd_mod_bset(mod, SSI_CTRL, 1 << (busif * 4), 0);
+
+ if (--ssiu->usrcnt)
+ return 0;
if (rsnd_ssi_multi_slaves_runtime(io))
rsnd_mod_write(mod, SSI_CONTROL, 0);
@@ -246,6 +304,16 @@ int rsnd_ssiu_attach(struct rsnd_dai_stream *io,
return rsnd_dai_connect(mod, io, mod->type);
}
+static u32 *rsnd_ssiu_get_status(struct rsnd_dai_stream *io,
+ struct rsnd_mod *mod,
+ enum rsnd_mod_type type)
+{
+ struct rsnd_ssiu *ssiu = rsnd_mod_to_ssiu(mod);
+ int busif = rsnd_ssi_get_busif(io);
+
+ return &ssiu->busif_status[busif];
+}
+
int rsnd_ssiu_probe(struct rsnd_priv *priv)
{
struct device *dev = rsnd_priv_to_dev(priv);
@@ -269,7 +337,7 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
for_each_rsnd_ssiu(ssiu, priv, i) {
ret = rsnd_mod_init(priv, rsnd_mod_get(ssiu),
- ops, NULL, rsnd_mod_get_status,
+ ops, NULL, rsnd_ssiu_get_status,
RSND_MOD_SSIU, i);
if (ret)
return ret;
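
Because several BUSIFs can now share one SSIU, the new usrcnt field counts init calls and the stop path only lets the final shutdown (SSI_CONTROL write) run once the last user has gone, mirroring --ssiu->usrcnt above. A small sketch of that usage-counting idiom, with hypothetical start/stop hooks:

#include <stdio.h>

struct unit { unsigned int usrcnt; };

static void unit_start(struct unit *u)
{
        if (u->usrcnt++ == 0)
                printf("hw power-up (first user)\n");
}

static void unit_stop(struct unit *u)
{
        if (--u->usrcnt)
                return;                 /* other users still active */
        printf("hw shutdown (last user)\n");
}

int main(void)
{
        struct unit u = { 0 };

        unit_start(&u); unit_start(&u); /* two streams share the unit */
        unit_stop(&u);                  /* no shutdown yet */
        unit_stop(&u);                  /* last user: shutdown runs */
        return 0;
}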
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 409d082e80d1..699397a09167 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -157,7 +157,7 @@ static int soc_compr_open_fe(struct snd_compr_stream *cstream)
ret = dpcm_be_dai_startup(fe, stream);
if (ret < 0) {
/* clean up all links */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -321,7 +321,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
ret = dpcm_be_dai_shutdown(fe, stream);
/* mark FE's links ready to prune */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
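
for_each_dpcm_be() is a named wrapper around the previous list_for_each_entry() on fe->dpcm[stream].be_clients; hiding the list head and member name behind one macro keeps every call site short and makes a later layout change a one-line fix. A userspace sketch of the same wrapping, using a toy intrusive list rather than the kernel's list.h:

#include <stdio.h>

struct dpcm {
        int state;
        struct dpcm *next;
};

struct fe {
        struct dpcm *be_clients; /* head of the BE list */
};

/* Name the traversal once; callers never spell out head/member again. */
#define for_each_dpcm_be(fe, pos) \
        for ((pos) = (fe)->be_clients; (pos); (pos) = (pos)->next)

int main(void)
{
        struct dpcm b2 = { 2, NULL }, b1 = { 1, &b2 };
        struct fe fe = { &b1 };
        struct dpcm *dpcm;

        for_each_dpcm_be(&fe, dpcm)
                printf("be state %d\n", dpcm->state);
        return 0;
}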
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 473eefe8658e..6ddcf12bc030 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -52,6 +52,10 @@ EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
static DEFINE_MUTEX(client_mutex);
static LIST_HEAD(component_list);
+static LIST_HEAD(unbind_card_list);
+
+#define for_each_component(component) \
+ list_for_each_entry(component, &component_list, list)
/*
* This is a timeout to do a DAPM powerdown after a stream is closed().

@@ -62,8 +66,9 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
-/* If a DMI filed contain strings in this blacklist (e.g.
- * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
+/*
+ * If a DMI field contains strings in this blacklist (e.g.
+ * "Type2 - Board Manufacturer" or "Type1 - TBD by OEM"), it will be taken
* as invalid and dropped when setting the card long name from DMI info.
*/
static const char * const dmi_blacklist[] = {
@@ -175,8 +180,8 @@ static int dai_list_show(struct seq_file *m, void *v)
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list)
- list_for_each_entry(dai, &component->dai_list, list)
+ for_each_component(component)
+ for_each_component_dais(component, dai)
seq_printf(m, "%s\n", dai->name);
mutex_unlock(&client_mutex);
@@ -191,7 +196,7 @@ static int component_list_show(struct seq_file *m, void *v)
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list)
+ for_each_component(component)
seq_printf(m, "%s\n", component->name);
mutex_unlock(&client_mutex);
@@ -218,7 +223,7 @@ static void soc_init_card_debugfs(struct snd_soc_card *card)
&card->pop_time);
if (!card->debugfs_pop_time)
dev_warn(card->dev,
- "ASoC: Failed to create pop time debugfs file\n");
+ "ASoC: Failed to create pop time debugfs file\n");
}
static void soc_cleanup_card_debugfs(struct snd_soc_card *card)
@@ -341,7 +346,7 @@ struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->no_pcm &&
!strcmp(rtd->dai_link->name, dai_link))
return rtd->pcm->streams[stream].substream;
@@ -398,7 +403,7 @@ static void soc_remove_pcm_runtimes(struct snd_soc_card *card)
{
struct snd_soc_pcm_runtime *rtd, *_rtd;
- list_for_each_entry_safe(rtd, _rtd, &card->rtd_list, list) {
+ for_each_card_rtds_safe(card, rtd, _rtd) {
list_del(&rtd->list);
soc_free_pcm_runtime(rtd);
}
@@ -411,7 +416,7 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!strcmp(rtd->dai_link->name, dai_link))
return rtd;
}
@@ -422,7 +427,8 @@ EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
static void codec2codec_close_delayed_work(struct work_struct *work)
{
- /* Currently nothing to do for c2c links
+ /*
+ * Currently nothing to do for c2c links
* Since c2c links are internal nodes in the DAPM graph and
* don't interface with the outside world or application layer
* we don't have to do any special handling on close.
@@ -442,8 +448,9 @@ int snd_soc_suspend(struct device *dev)
if (!card->instantiated)
return 0;
- /* Due to the resume being scheduled into a workqueue we could
- * suspend before that's finished - wait for it to complete.
+ /*
+ * Due to the resume being scheduled into a workqueue we could
+ * suspend before that's finished - wait for it to complete.
*/
snd_power_wait(card->snd_card, SNDRV_CTL_POWER_D0);
@@ -451,13 +458,13 @@ int snd_soc_suspend(struct device *dev)
snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D3hot);
/* mute any active DACs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
if (rtd->dai_link->ignore_suspend)
continue;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
if (drv->ops->digital_mute && dai->playback_active)
@@ -466,7 +473,7 @@ int snd_soc_suspend(struct device *dev)
}
/* suspend all pcms */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -476,7 +483,7 @@ int snd_soc_suspend(struct device *dev)
if (card->suspend_pre)
card->suspend_pre(card);
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -487,10 +494,10 @@ int snd_soc_suspend(struct device *dev)
}
/* close any waiting streams */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -509,11 +516,14 @@ int snd_soc_suspend(struct device *dev)
snd_soc_dapm_sync(&card->dapm);
/* suspend all COMPONENTs */
- list_for_each_entry(component, &card->component_dev_list, card_list) {
- struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ for_each_card_components(card, component) {
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
- /* If there are paths active then the COMPONENT will be held with
- * bias _ON and should not be suspended. */
+ /*
+ * If there are paths active then the COMPONENT will be held
+ * with bias _ON and should not be suspended.
+ */
if (!component->suspended) {
switch (snd_soc_dapm_get_bias_level(dapm)) {
case SND_SOC_BIAS_STANDBY:
@@ -547,7 +557,7 @@ int snd_soc_suspend(struct device *dev)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -567,18 +577,21 @@ int snd_soc_suspend(struct device *dev)
}
EXPORT_SYMBOL_GPL(snd_soc_suspend);
-/* deferred resume work, so resume can complete before we finished
+/*
+ * deferred resume work, so resume can complete before we have finished
* setting our codec back up, which can be very slow on I2C
*/
static void soc_resume_deferred(struct work_struct *work)
{
struct snd_soc_card *card =
- container_of(work, struct snd_soc_card, deferred_resume_work);
+ container_of(work, struct snd_soc_card,
+ deferred_resume_work);
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_component *component;
int i;
- /* our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
+ /*
+ * our power state is still SNDRV_CTL_POWER_D3hot from suspend time,
* so userspace apps are blocked from touching us
*/
@@ -591,7 +604,7 @@ static void soc_resume_deferred(struct work_struct *work)
card->resume_pre(card);
/* resume control bus DAIs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -601,7 +614,7 @@ static void soc_resume_deferred(struct work_struct *work)
cpu_dai->driver->resume(cpu_dai);
}
- list_for_each_entry(component, &card->component_dev_list, card_list) {
+ for_each_card_components(card, component) {
if (component->suspended) {
if (component->driver->resume)
component->driver->resume(component);
@@ -609,7 +622,7 @@ static void soc_resume_deferred(struct work_struct *work)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link->ignore_suspend)
continue;
@@ -624,13 +637,13 @@ static void soc_resume_deferred(struct work_struct *work)
}
/* unmute any active DACs */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *dai;
if (rtd->dai_link->ignore_suspend)
continue;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
if (drv->ops->digital_mute && dai->playback_active)
@@ -638,7 +651,7 @@ static void soc_resume_deferred(struct work_struct *work)
}
}
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
if (rtd->dai_link->ignore_suspend)
@@ -673,16 +686,15 @@ int snd_soc_resume(struct device *dev)
return 0;
/* activate pins from sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
- struct snd_soc_dai **codec_dais = rtd->codec_dais;
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int j;
if (cpu_dai->active)
pinctrl_pm_select_default_state(cpu_dai->dev);
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = codec_dais[j];
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
if (codec_dai->active)
pinctrl_pm_select_default_state(codec_dai->dev);
}
@@ -694,8 +706,9 @@ int snd_soc_resume(struct device *dev)
* have that problem and may take a substantial amount of time to resume
* due to I/O costs and anti-pop so handle them out of line.
*/
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+
bus_control |= cpu_dai->driver->bus_control;
}
if (bus_control) {
@@ -725,7 +738,7 @@ static struct snd_soc_component *soc_find_component(
lockdep_assert_held(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (of_node) {
if (component->dev->of_node == of_node)
return component;
@@ -737,6 +750,24 @@ static struct snd_soc_component *soc_find_component(
return NULL;
}
+static int snd_soc_is_matching_component(
+ const struct snd_soc_dai_link_component *dlc,
+ struct snd_soc_component *component)
+{
+ struct device_node *component_of_node;
+
+ component_of_node = component->dev->of_node;
+ if (!component_of_node && component->dev->parent)
+ component_of_node = component->dev->parent->of_node;
+
+ if (dlc->of_node && component_of_node != dlc->of_node)
+ return 0;
+ if (dlc->name && strcmp(component->name, dlc->name))
+ return 0;
+
+ return 1;
+}
+
/**
* snd_soc_find_dai - Find a registered DAI
*
@@ -753,21 +784,14 @@ struct snd_soc_dai *snd_soc_find_dai(
{
struct snd_soc_component *component;
struct snd_soc_dai *dai;
- struct device_node *component_of_node;
lockdep_assert_held(&client_mutex);
- /* Find CPU DAI from registered DAIs*/
- list_for_each_entry(component, &component_list, list) {
- component_of_node = component->dev->of_node;
- if (!component_of_node && component->dev->parent)
- component_of_node = component->dev->parent->of_node;
-
- if (dlc->of_node && component_of_node != dlc->of_node)
- continue;
- if (dlc->name && strcmp(component->name, dlc->name))
+ /* Find CPU DAI from registered DAIs */
+ for_each_component(component) {
+ if (!snd_soc_is_matching_component(dlc, component))
continue;
- list_for_each_entry(dai, &component->dai_list, list) {
+ for_each_component_dais(component, dai) {
if (dlc->dai_name && strcmp(dai->name, dlc->dai_name)
&& (!dai->driver->name
|| strcmp(dai->driver->name, dlc->dai_name)))
@@ -781,7 +805,6 @@ struct snd_soc_dai *snd_soc_find_dai(
}
EXPORT_SYMBOL_GPL(snd_soc_find_dai);
-
/**
* snd_soc_find_dai_link - Find a DAI link
*
@@ -805,7 +828,7 @@ struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
lockdep_assert_held(&client_mutex);
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link->id != id)
continue;
@@ -828,7 +851,7 @@ static bool soc_is_dai_link_bound(struct snd_soc_card *card,
{
struct snd_soc_pcm_runtime *rtd;
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (rtd->dai_link == dai_link)
return true;
}
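
The snd_soc_is_matching_component() helper added above folds the duplicated name/of_node comparisons into a single predicate: a component matches when every key the link actually supplies agrees, and keys left NULL are simply ignored. A reduced sketch of that "optional keys" match:

#include <stdio.h>
#include <string.h>

struct key  { const char *name; const void *node; }; /* what the link asks for */
struct comp { const char *name; const void *node; }; /* what is registered */

/* Match only on the keys the caller filled in; NULL means "don't care". */
static int is_matching(const struct key *k, const struct comp *c)
{
        if (k->node && k->node != c->node)
                return 0;
        if (k->name && strcmp(k->name, c->name))
                return 0;
        return 1;
}

int main(void)
{
        struct comp c = { "snd-soc-dummy", NULL };
        struct key by_name = { "snd-soc-dummy", NULL };
        struct key by_node = { NULL, &c };

        printf("%d %d\n", is_matching(&by_name, &c), is_matching(&by_node, &c));
        return 0;          /* prints: 1 0 */
}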
@@ -844,8 +867,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
struct snd_soc_dai_link_component cpu_dai_component;
struct snd_soc_component *component;
struct snd_soc_dai **codec_dais;
- struct device_node *platform_of_node;
- const char *platform_name;
int i;
if (dai_link->ignore)
@@ -877,6 +898,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
rtd->num_codecs = dai_link->num_codecs;
/* Find CODEC from registered CODECs */
+ /* we can use for_each_rtd_codec_dai() after this */
codec_dais = rtd->codec_dais;
for (i = 0; i < rtd->num_codecs; i++) {
codec_dais[i] = snd_soc_find_dai(&codecs[i]);
@@ -891,24 +913,11 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
/* Single codec links expect codec and codec_dai in runtime data */
rtd->codec_dai = codec_dais[0];
- /* if there's no platform we match on the empty platform */
- platform_name = dai_link->platform_name;
- if (!platform_name && !dai_link->platform_of_node)
- platform_name = "snd-soc-dummy";
-
/* find one from the set of registered platforms */
- list_for_each_entry(component, &component_list, list) {
- platform_of_node = component->dev->of_node;
- if (!platform_of_node && component->dev->parent->of_node)
- platform_of_node = component->dev->parent->of_node;
-
- if (dai_link->platform_of_node) {
- if (platform_of_node != dai_link->platform_of_node)
- continue;
- } else {
- if (strcmp(component->name, platform_name))
- continue;
- }
+ for_each_component(component) {
+ if (!snd_soc_is_matching_component(dai_link->platform,
+ component))
+ continue;
snd_soc_rtdcom_add(rtd, component);
}
@@ -918,7 +927,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card,
_err_defer:
soc_free_pcm_runtime(rtd);
- return -EPROBE_DEFER;
+ return -EPROBE_DEFER;
}
static void soc_remove_component(struct snd_soc_component *component)
@@ -942,23 +951,25 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order)
{
int err;
- if (dai && dai->probed &&
- dai->driver->remove_order == order) {
- if (dai->driver->remove) {
- err = dai->driver->remove(dai);
- if (err < 0)
- dev_err(dai->dev,
- "ASoC: failed to remove %s: %d\n",
- dai->name, err);
- }
- dai->probed = 0;
+ if (!dai || !dai->probed ||
+ dai->driver->remove_order != order)
+ return;
+
+ if (dai->driver->remove) {
+ err = dai->driver->remove(dai);
+ if (err < 0)
+ dev_err(dai->dev,
+ "ASoC: failed to remove %s: %d\n",
+ dai->name, err);
}
+ dai->probed = 0;
}
static void soc_remove_link_dais(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd, int order)
{
int i;
+ struct snd_soc_dai *codec_dai;
/* unregister the rtd device */
if (rtd->dev_registered) {
@@ -967,8 +978,8 @@ static void soc_remove_link_dais(struct snd_soc_card *card,
}
/* remove the CODEC DAI */
- for (i = 0; i < rtd->num_codecs; i++)
- soc_remove_dai(rtd->codec_dais[i], order);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ soc_remove_dai(codec_dai, order);
soc_remove_dai(rtd->cpu_dai, order);
}
@@ -993,28 +1004,57 @@ static void soc_remove_dai_links(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
struct snd_soc_dai_link *link, *_link;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd)
soc_remove_link_dais(card, rtd, order);
}
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd)
soc_remove_link_components(card, rtd, order);
}
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link->dobj.type == SND_SOC_DOBJ_DAI_LINK)
dev_warn(card->dev, "Topology forgot to remove link %s?\n",
link->name);
list_del(&link->list);
- card->num_dai_links--;
}
}
+static int snd_soc_init_platform(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link)
+{
+ struct snd_soc_dai_link_component *platform = dai_link->platform;
+
+ /*
+ * FIXME
+ *
+ * this function should be removed in the future
+ */
+ /* convert Legacy platform link */
+ if (!platform) {
+ platform = devm_kzalloc(card->dev,
+ sizeof(struct snd_soc_dai_link_component),
+ GFP_KERNEL);
+ if (!platform)
+ return -ENOMEM;
+
+ dai_link->platform = platform;
+ platform->name = dai_link->platform_name;
+ platform->of_node = dai_link->platform_of_node;
+ platform->dai_name = NULL;
+ }
+
+ /* if there's no platform we match on the empty platform */
+ if (!platform->name &&
+ !platform->of_node)
+ platform->name = "snd-soc-dummy";
+
+ return 0;
+}
+
static int snd_soc_init_multicodec(struct snd_soc_card *card,
struct snd_soc_dai_link *dai_link)
{
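
snd_soc_init_platform() bridges the legacy platform_name/platform_of_node fields to the new snd_soc_dai_link_component: the descriptor is allocated lazily, the old fields are copied in, and when neither key is set the dummy platform is substituted. A userspace sketch of that lazy-convert-with-default, using calloc in place of devm_kzalloc:

#include <stdio.h>
#include <stdlib.h>

struct component_ref { const char *name; const void *of_node; };

struct dai_link {
        /* legacy fields */
        const char *platform_name;
        const void *platform_of_node;
        /* new-style descriptor, filled in on demand */
        struct component_ref *platform;
};

static int init_platform(struct dai_link *link)
{
        if (!link->platform) {
                link->platform = calloc(1, sizeof(*link->platform));
                if (!link->platform)
                        return -1;
                link->platform->name = link->platform_name;
                link->platform->of_node = link->platform_of_node;
        }
        /* no platform given at all: match the dummy one */
        if (!link->platform->name && !link->platform->of_node)
                link->platform->name = "snd-soc-dummy";
        return 0;
}

int main(void)
{
        struct dai_link link = { 0 };

        init_platform(&link);
        printf("%s\n", link.platform->name); /* snd-soc-dummy */
        free(link.platform);
        return 0;
}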
@@ -1043,9 +1083,16 @@ static int snd_soc_init_multicodec(struct snd_soc_card *card,
}
static int soc_init_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *link)
+ struct snd_soc_dai_link *link)
{
int i, ret;
+ struct snd_soc_dai_link_component *codec;
+
+ ret = snd_soc_init_platform(card, link);
+ if (ret) {
+ dev_err(card->dev, "ASoC: failed to init multiplatform\n");
+ return ret;
+ }
ret = snd_soc_init_multicodec(card, link);
if (ret) {
@@ -1053,19 +1100,19 @@ static int soc_init_dai_link(struct snd_soc_card *card,
return ret;
}
- for (i = 0; i < link->num_codecs; i++) {
+ for_each_link_codecs(link, i, codec) {
/*
* Codec must be specified by 1 of name or OF node,
* not both or neither.
*/
- if (!!link->codecs[i].name ==
- !!link->codecs[i].of_node) {
+ if (!!codec->name ==
+ !!codec->of_node) {
dev_err(card->dev, "ASoC: Neither/both codec name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
/* Codec DAI name must be specified */
- if (!link->codecs[i].dai_name) {
+ if (!codec->dai_name) {
dev_err(card->dev, "ASoC: codec_dai_name not set for %s\n",
link->name);
return -EINVAL;
@@ -1076,13 +1123,12 @@ static int soc_init_dai_link(struct snd_soc_card *card,
* Platform may be specified by either name or OF node, but
* can be left unspecified, and a dummy platform will be used.
*/
- if (link->platform_name && link->platform_of_node) {
+ if (link->platform->name && link->platform->of_node) {
dev_err(card->dev,
"ASoC: Both platform name/of_node are set for %s\n",
link->name);
return -EINVAL;
}
-
/*
* CPU device may be specified by either name or OF node, but
* can be left unspecified, and will be matched based on DAI
@@ -1111,7 +1157,8 @@ static int soc_init_dai_link(struct snd_soc_card *card,
void snd_soc_disconnect_sync(struct device *dev)
{
- struct snd_soc_component *component = snd_soc_lookup_component(dev, NULL);
+ struct snd_soc_component *component =
+ snd_soc_lookup_component(dev, NULL);
if (!component || !component->card)
return;
@@ -1142,14 +1189,14 @@ int snd_soc_add_dai_link(struct snd_soc_card *card,
}
lockdep_assert_held(&client_mutex);
- /* Notify the machine driver for extra initialization
+ /*
+ * Notify the machine driver for extra initialization
* on the link created by topology.
*/
if (dai_link->dobj.type && card->add_dai_link)
card->add_dai_link(card, dai_link);
list_add_tail(&dai_link->list, &card->dai_link_list);
- card->num_dai_links++;
return 0;
}
@@ -1178,16 +1225,16 @@ void snd_soc_remove_dai_link(struct snd_soc_card *card,
}
lockdep_assert_held(&client_mutex);
- /* Notify the machine driver for extra destruction
+ /*
+ * Notify the machine driver for extra destruction
* on the link created by topology.
*/
if (dai_link->dobj.type && card->remove_dai_link)
card->remove_dai_link(card, dai_link);
- list_for_each_entry_safe(link, _link, &card->dai_link_list, list) {
+ for_each_card_links_safe(card, link, _link) {
if (link == dai_link) {
list_del(&link->list);
- card->num_dai_links--;
return;
}
}
@@ -1239,7 +1286,8 @@ static void soc_set_name_prefix(struct snd_soc_card *card,
static int soc_probe_component(struct snd_soc_card *card,
struct snd_soc_component *component)
{
- struct snd_soc_dapm_context *dapm = snd_soc_component_get_dapm(component);
+ struct snd_soc_dapm_context *dapm =
+ snd_soc_component_get_dapm(component);
struct snd_soc_dai *dai;
int ret;
@@ -1277,7 +1325,7 @@ static int soc_probe_component(struct snd_soc_card *card,
}
}
- list_for_each_entry(dai, &component->dai_list, list) {
+ for_each_component_dais(component, dai) {
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
if (ret != 0) {
dev_err(component->dev,
@@ -1320,6 +1368,7 @@ static int soc_probe_component(struct snd_soc_card *card,
component->driver->num_dapm_routes);
list_add(&dapm->list, &card->dapm_list);
+ /* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
return 0;
@@ -1370,8 +1419,7 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd,
}
static int soc_probe_link_components(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd,
- int order)
+ struct snd_soc_pcm_runtime *rtd, int order)
{
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
@@ -1398,6 +1446,7 @@ static int soc_probe_dai(struct snd_soc_dai *dai, int order)
if (dai->driver->probe) {
int ret = dai->driver->probe(dai);
+
if (ret < 0) {
dev_err(dai->dev, "ASoC: failed to probe DAI %s: %d\n",
dai->name, ret);
@@ -1431,48 +1480,6 @@ static int soc_link_dai_pcm_new(struct snd_soc_dai **dais, int num_dais,
return 0;
}
-static int soc_link_dai_widgets(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link,
- struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dapm_widget *sink, *source;
- int ret;
-
- if (rtd->num_codecs > 1)
- dev_warn(card->dev, "ASoC: Multiple codecs not supported yet\n");
-
- /* link the DAI widgets */
- sink = codec_dai->playback_widget;
- source = cpu_dai->capture_widget;
- if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
- dai_link->num_params,
- source, sink);
- if (ret != 0) {
- dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
- sink->name, source->name, ret);
- return ret;
- }
- }
-
- sink = cpu_dai->playback_widget;
- source = codec_dai->capture_widget;
- if (sink && source) {
- ret = snd_soc_dapm_new_pcm(card, rtd, dai_link->params,
- dai_link->num_params,
- source, sink);
- if (ret != 0) {
- dev_err(card->dev, "ASoC: Can't link %s to %s: %d\n",
- sink->name, source->name, ret);
- return ret;
- }
- }
-
- return 0;
-}
-
static int soc_probe_link_dais(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd, int order)
{
@@ -1480,6 +1487,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_component *component;
+ struct snd_soc_dai *codec_dai;
int i, ret, num;
dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
@@ -1493,8 +1501,8 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
return ret;
/* probe the CODEC DAI */
- for (i = 0; i < rtd->num_codecs; i++) {
- ret = soc_probe_dai(rtd->codec_dais[i], order);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ ret = soc_probe_dai(codec_dai, order);
if (ret)
return ret;
}
@@ -1546,7 +1554,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
}
if (cpu_dai->driver->compress_new) {
- /*create compress_device"*/
+ /* create compress_device */
ret = cpu_dai->driver->compress_new(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create compress %s\n",
@@ -1560,7 +1568,7 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
ret = soc_new_pcm(rtd, num);
if (ret < 0) {
dev_err(card->dev, "ASoC: can't create pcm %s :%d\n",
- dai_link->stream_name, ret);
+ dai_link->stream_name, ret);
return ret;
}
ret = soc_link_dai_pcm_new(&cpu_dai, 1, rtd);
@@ -1573,11 +1581,6 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
} else {
INIT_DELAYED_WORK(&rtd->delayed_work,
codec2codec_close_delayed_work);
-
- /* link the DAI widgets */
- ret = soc_link_dai_widgets(card, dai_link, rtd);
- if (ret)
- return ret;
}
}
@@ -1628,8 +1631,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
int order;
int ret;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
+ for_each_comp_order(order) {
list_for_each_entry(comp, &card->aux_comp_list, card_aux_list) {
if (comp->driver->probe_order == order) {
ret = soc_probe_component(card, comp);
@@ -1651,8 +1653,7 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
struct snd_soc_component *comp, *_comp;
int order;
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
+ for_each_comp_order(order) {
list_for_each_entry_safe(comp, _comp,
&card->aux_comp_list, card_aux_list) {
@@ -1681,14 +1682,12 @@ static void soc_remove_aux_devices(struct snd_soc_card *card)
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt)
{
- struct snd_soc_dai **codec_dais = rtd->codec_dais;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
unsigned int i;
int ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = codec_dais[i];
-
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
ret = snd_soc_dai_set_fmt(codec_dai, dai_fmt);
if (ret != 0 && ret != -ENOTSUPP) {
dev_warn(codec_dai->dev,
@@ -1697,8 +1696,10 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
}
- /* Flip the polarity for the "CPU" end of a CODEC<->CODEC link */
- /* the component which has non_legacy_dai_naming is Codec */
+ /*
+ * Flip the polarity for the "CPU" end of a CODEC<->CODEC link
+ * the component which has non_legacy_dai_naming is Codec
+ */
if (cpu_dai->component->driver->non_legacy_dai_naming) {
unsigned int inv_dai_fmt;
@@ -1732,9 +1733,9 @@ int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
}
EXPORT_SYMBOL_GPL(snd_soc_runtime_set_dai_fmt);
-
#ifdef CONFIG_DMI
-/* Trim special characters, and replace '-' with '_' since '-' is used to
+/*
+ * Trim special characters, and replace '-' with '_' since '-' is used to
* separate different DMI fields in the card long name. Only number and
* alphabet characters and a few separator characters are kept.
*/
@@ -1753,7 +1754,8 @@ static void cleanup_dmi_name(char *name)
name[j] = '\0';
}
-/* Check if a DMI field is valid, i.e. not containing any string
+/*
+ * Check if a DMI field is valid, i.e. not containing any string
* in the black list.
*/
static int is_dmi_valid(const char *field)
@@ -1816,7 +1818,6 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
return 0;
}
-
snprintf(card->dmi_longname, sizeof(card->snd_card->longname),
"%s", vendor);
cleanup_dmi_name(card->dmi_longname);
@@ -1832,7 +1833,8 @@ int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour)
if (len < longname_buf_size)
cleanup_dmi_name(card->dmi_longname + len);
- /* some vendors like Lenovo may only put a self-explanatory
+ /*
+ * some vendors like Lenovo may only put a self-explanatory
* name in the product version field
*/
product_version = dmi_get_system_info(DMI_PRODUCT_VERSION);
@@ -1891,7 +1893,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
struct snd_soc_dai_link *dai_link;
int i;
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
/* does this component override FEs ? */
if (!component->driver->ignore_machine)
@@ -1903,9 +1905,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
continue;
/* machine matches, so override the rtd data */
- for (i = 0; i < card->num_links; i++) {
-
- dai_link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, dai_link) {
/* ignore this FE */
if (dai_link->dynamic) {
@@ -1917,7 +1917,11 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
card->dai_link[i].name);
/* override platform component */
- dai_link->platform_name = component->name;
+ if (snd_soc_init_platform(card, dai_link) < 0) {
+ dev_err(card->dev, "init platform error");
+ continue;
+ }
+ dai_link->platform->name = component->name;
/* convert non BE into BE */
dai_link->no_pcm = 1;
@@ -1926,7 +1930,8 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
dai_link->be_hw_params_fixup =
component->driver->be_hw_params_fixup;
- /* most BE links don't set stream name, so set it to
+ /*
+ * most BE links don't set stream name, so set it to
* dai link name if it's NULL to help bind widgets.
*/
if (!dai_link->stream_name)
@@ -1936,7 +1941,7 @@ static void soc_check_tplg_fes(struct snd_soc_card *card)
/* Inform userspace we are using alternate topology */
if (component->driver->topology_name_prefix) {
- /* topology shortname created ? */
+ /* topology shortname created? */
if (!card->topology_shortname_created) {
comp_drv = component->driver;
@@ -1965,8 +1970,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
soc_check_tplg_fes(card);
/* bind DAIs */
- for (i = 0; i < card->num_links; i++) {
- ret = soc_bind_dai_link(card, &card->dai_link[i]);
+ for_each_card_prelinks(card, i, dai_link) {
+ ret = soc_bind_dai_link(card, dai_link);
if (ret != 0)
goto base_error;
}
@@ -1979,8 +1984,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* add predefined DAI links to the list */
- for (i = 0; i < card->num_links; i++)
- snd_soc_add_dai_link(card, card->dai_link+i);
+ for_each_card_prelinks(card, i, dai_link)
+ snd_soc_add_dai_link(card, dai_link);
/* card bind complete so register a sound card */
ret = snd_card_new(card->dev, SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
@@ -2024,9 +2029,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* probe all components used by DAI links on this card */
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd) {
ret = soc_probe_link_components(card, rtd, order);
if (ret < 0) {
dev_err(card->dev,
@@ -2042,10 +2046,11 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
if (ret < 0)
goto probe_dai_err;
- /* Find new DAI links added during probing components and bind them.
+ /*
+ * Find new DAI links added during probing components and bind them.
* Components with topology may bring new DAIs and DAI links.
*/
- list_for_each_entry(dai_link, &card->dai_link_list, list) {
+ for_each_card_links(card, dai_link) {
if (soc_is_dai_link_bound(card, dai_link))
continue;
@@ -2058,9 +2063,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
/* probe all DAI links on this card */
- for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
- order++) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_comp_order(order) {
+ for_each_card_rtds(card, rtd) {
ret = soc_probe_link_dais(card, rtd, order);
if (ret < 0) {
dev_err(card->dev,
@@ -2075,7 +2079,8 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
snd_soc_dapm_connect_dai_link_widgets(card);
if (card->controls)
- snd_soc_add_card_controls(card, card->controls, card->num_controls);
+ snd_soc_add_card_controls(card, card->controls,
+ card->num_controls);
if (card->dapm_routes)
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
@@ -2181,7 +2186,7 @@ static int soc_cleanup_card_resources(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
/* make sure any delayed work runs */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
/* free the ALSA card at first; this syncs with pending operations */
@@ -2221,21 +2226,23 @@ int snd_soc_poweroff(struct device *dev)
if (!card->instantiated)
return 0;
- /* Flush out pmdown_time work - we actually do want to run it
- * now, we're shutting down so no imminent restart. */
- list_for_each_entry(rtd, &card->rtd_list, list)
+ /*
+ * Flush out pmdown_time work - we actually do want to run it
+ * now; we're shutting down, so there is no imminent restart.
+ */
+ for_each_card_rtds(card, rtd)
flush_delayed_work(&rtd->delayed_work);
snd_soc_dapm_shutdown(card);
/* deactivate pins to sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
pinctrl_pm_select_sleep_state(cpu_dai->dev);
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
pinctrl_pm_select_sleep_state(codec_dai->dev);
}
}
@@ -2315,6 +2322,7 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
for (i = 0; i < num_controls; i++) {
const struct snd_kcontrol_new *control = &controls[i];
+
err = snd_ctl_add(card, snd_soc_cnew(control, data,
control->name, prefix));
if (err < 0) {
@@ -2432,8 +2440,9 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_sysclk);
*
* Configures the CODEC master (MCLK) or system (SYSCLK) clocking.
*/
-int snd_soc_component_set_sysclk(struct snd_soc_component *component, int clk_id,
- int source, unsigned int freq, int dir)
+int snd_soc_component_set_sysclk(struct snd_soc_component *component,
+ int clk_id, int source, unsigned int freq,
+ int dir)
{
if (component->driver->set_sysclk)
return component->driver->set_sysclk(component, clk_id, source,
@@ -2501,7 +2510,7 @@ int snd_soc_component_set_pll(struct snd_soc_component *component, int pll_id,
{
if (component->driver->set_pll)
return component->driver->set_pll(component, pll_id, source,
- freq_in, freq_out);
+ freq_in, freq_out);
return -EINVAL;
}
@@ -2532,8 +2541,6 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_bclk_ratio);
*/
int snd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
- if (dai->driver == NULL)
- return -EINVAL;
if (dai->driver->ops->set_fmt == NULL)
return -ENOTSUPP;
return dai->driver->ops->set_fmt(dai, fmt);
@@ -2549,8 +2556,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_fmt);
* Generates the TDM tx and rx slot default masks for DAI.
*/
static int snd_soc_xlate_tdm_slot_mask(unsigned int slots,
- unsigned int *tx_mask,
- unsigned int *rx_mask)
+ unsigned int *tx_mask,
+ unsigned int *rx_mask)
{
if (*tx_mask || *rx_mask)
return 0;
@@ -2680,9 +2687,6 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_set_tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction)
{
- if (!dai->driver)
- return -ENOTSUPP;
-
if (dai->driver->ops->mute_stream)
return dai->driver->ops->mute_stream(dai, mute, direction);
else if (direction == SNDRV_PCM_STREAM_PLAYBACK &&
@@ -2693,6 +2697,33 @@ int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
}
EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
+static int snd_soc_bind_card(struct snd_soc_card *card)
+{
+ struct snd_soc_pcm_runtime *rtd;
+ int ret;
+
+ ret = snd_soc_instantiate_card(card);
+ if (ret != 0)
+ return ret;
+
+ /* deactivate pins to sleep state */
+ for_each_card_rtds(card, rtd) {
+ struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
+ int j;
+
+ for_each_rtd_codec_dai(rtd, j, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
+ }
+
+ if (!cpu_dai->active)
+ pinctrl_pm_select_sleep_state(cpu_dai->dev);
+ }
+
+ return ret;
+}
+
/**
* snd_soc_register_card - Register a card with the ASoC core
*
@@ -2702,13 +2733,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dai_digital_mute);
int snd_soc_register_card(struct snd_soc_card *card)
{
int i, ret;
- struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai_link *link;
if (!card->name || !card->dev)
return -EINVAL;
- for (i = 0; i < card->num_links; i++) {
- struct snd_soc_dai_link *link = &card->dai_link[i];
+ for_each_card_prelinks(card, i, link) {
ret = soc_init_dai_link(card, link);
if (ret) {
@@ -2723,7 +2753,6 @@ int snd_soc_register_card(struct snd_soc_card *card)
snd_soc_initialize_card_lists(card);
INIT_LIST_HEAD(&card->dai_link_list);
- card->num_dai_links = 0;
INIT_LIST_HEAD(&card->rtd_list);
card->num_rtd = 0;
@@ -2734,28 +2763,23 @@ int snd_soc_register_card(struct snd_soc_card *card)
mutex_init(&card->mutex);
mutex_init(&card->dapm_mutex);
- ret = snd_soc_instantiate_card(card);
- if (ret != 0)
- return ret;
-
- /* deactivate pins to sleep state */
- list_for_each_entry(rtd, &card->rtd_list, list) {
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- int j;
-
- for (j = 0; j < rtd->num_codecs; j++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[j];
- if (!codec_dai->active)
- pinctrl_pm_select_sleep_state(codec_dai->dev);
- }
+ return snd_soc_bind_card(card);
+}
+EXPORT_SYMBOL_GPL(snd_soc_register_card);
- if (!cpu_dai->active)
- pinctrl_pm_select_sleep_state(cpu_dai->dev);
+static void snd_soc_unbind_card(struct snd_soc_card *card, bool unregister)
+{
+ if (card->instantiated) {
+ card->instantiated = false;
+ snd_soc_dapm_shutdown(card);
+ soc_cleanup_card_resources(card);
+ if (!unregister)
+ list_add(&card->list, &unbind_card_list);
+ } else {
+ if (unregister)
+ list_del(&card->list);
}
-
- return ret;
}
-EXPORT_SYMBOL_GPL(snd_soc_register_card);
/**
* snd_soc_unregister_card - Unregister a card with the ASoC core
@@ -2765,12 +2789,8 @@ EXPORT_SYMBOL_GPL(snd_soc_register_card);
*/
int snd_soc_unregister_card(struct snd_soc_card *card)
{
- if (card->instantiated) {
- card->instantiated = false;
- snd_soc_dapm_shutdown(card);
- soc_cleanup_card_resources(card);
- dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
- }
+ snd_soc_unbind_card(card, true);
+ dev_dbg(card->dev, "ASoC: Unregistered card '%s'\n", card->name);
return 0;
}
@@ -2802,7 +2822,7 @@ static char *fmt_single_name(struct device *dev, int *id)
}
} else {
- /* I2C component devices are named "bus-addr" */
+ /* I2C component devices are named "bus-addr" */
if (sscanf(name, "%x-%x", &id1, &id2) == 2) {
char tmp[NAME_SIZE];
@@ -2810,7 +2830,8 @@ static char *fmt_single_name(struct device *dev, int *id)
*id = ((id1 & 0xffff) << 16) + id2;
/* sanitize component name for DAI link creation */
- snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name, name);
+ snprintf(tmp, NAME_SIZE, "%s.%s", dev->driver->name,
+ name);
strlcpy(name, tmp, NAME_SIZE);
} else
*id = 0;
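
fmt_single_name() derives a stable numeric id for I2C components named "bus-addr" by parsing both hex fields and packing them into one 32-bit value, bus number in the high half and address in the low half. A sketch of the parse-and-pack:

#include <stdio.h>

int main(void)
{
        const char *name = "1-001a";     /* typical I2C "bus-addr" device name */
        unsigned int id1, id2, id = 0;

        if (sscanf(name, "%x-%x", &id1, &id2) == 2)
                id = ((id1 & 0xffff) << 16) + id2; /* bus high, addr low */

        printf("0x%08x\n", id);          /* prints: 0x0001001a */
        return 0;
}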
@@ -2845,7 +2866,7 @@ static void snd_soc_unregister_dais(struct snd_soc_component *component)
{
struct snd_soc_dai *dai, *_dai;
- list_for_each_entry_safe(dai, _dai, &component->dai_list, list) {
+ for_each_component_dais_safe(component, dai, _dai) {
dev_dbg(component->dev, "ASoC: Unregistered DAI '%s'\n",
dai->name);
list_del(&dai->list);
@@ -2877,7 +2898,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
* component-less anymore.
*/
if (legacy_dai_naming &&
- (dai_drv->id == 0 || dai_drv->name == NULL)) {
+ (dai_drv->id == 0 || dai_drv->name == NULL)) {
dai->name = fmt_single_name(dev, &dai->id);
} else {
dai->name = fmt_multiple_name(dev, dai_drv);
@@ -2897,6 +2918,7 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
if (!dai->driver->ops)
dai->driver->ops = &null_dai_ops;
+ /* see for_each_component_dais */
list_add_tail(&dai->list, &component->dai_list);
component->num_dai++;
@@ -2910,11 +2932,10 @@ static struct snd_soc_dai *soc_add_dai(struct snd_soc_component *component,
* @component: The component the DAIs are registered for
* @dai_drv: DAI driver to use for the DAIs
* @count: Number of DAIs
- * @legacy_dai_naming: Use the legacy naming scheme and let the DAI inherit the
- * parent's name.
*/
static int snd_soc_register_dais(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv, size_t count)
+ struct snd_soc_dai_driver *dai_drv,
+ size_t count)
{
struct device *dev = component->dev;
struct snd_soc_dai *dai;
@@ -2925,8 +2946,8 @@ static int snd_soc_register_dais(struct snd_soc_component *component,
for (i = 0; i < count; i++) {
- dai = soc_add_dai(component, dai_drv + i,
- count == 1 && !component->driver->non_legacy_dai_naming);
+ dai = soc_add_dai(component, dai_drv + i, count == 1 &&
+ !component->driver->non_legacy_dai_naming);
if (dai == NULL) {
ret = -ENOMEM;
goto err;
@@ -2970,7 +2991,8 @@ int snd_soc_register_dai(struct snd_soc_component *component,
if (!dai)
return -ENOMEM;
- /* Create the DAI widgets here. After adding DAIs, topology may
+ /*
+ * Create the DAI widgets here. After adding DAIs, topology may
* also add routes that need these widgets as source or sink.
*/
ret = snd_soc_dapm_new_dai_widgets(dapm, dai);
@@ -3052,7 +3074,8 @@ static void snd_soc_component_setup_regmap(struct snd_soc_component *component)
#ifdef CONFIG_REGMAP
/**
- * snd_soc_component_init_regmap() - Initialize regmap instance for the component
+ * snd_soc_component_init_regmap() - Initialize regmap instance for the
+ * component
* @component: The component for which to initialize the regmap instance
* @regmap: The regmap instance that should be used by the component
*
@@ -3070,7 +3093,8 @@ void snd_soc_component_init_regmap(struct snd_soc_component *component,
EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap);
/**
- * snd_soc_component_exit_regmap() - De-initialize regmap instance for the component
+ * snd_soc_component_exit_regmap() - De-initialize regmap instance for the
+ * component
* @component: The component for which to de-initialize the regmap instance
*
* Calls regmap_exit() on the regmap instance associated to the component and
@@ -3094,11 +3118,13 @@ static void snd_soc_component_add(struct snd_soc_component *component)
if (!component->driver->write && !component->driver->read) {
if (!component->regmap)
- component->regmap = dev_get_regmap(component->dev, NULL);
+ component->regmap = dev_get_regmap(component->dev,
+ NULL);
if (component->regmap)
snd_soc_component_setup_regmap(component);
}
+ /* see for_each_component */
list_add(&component->list, &component_list);
INIT_LIST_HEAD(&component->dobj_list);
@@ -3116,7 +3142,7 @@ static void snd_soc_component_del_unlocked(struct snd_soc_component *component)
struct snd_soc_card *card = component->card;
if (card)
- snd_soc_unregister_card(card);
+ snd_soc_unbind_card(card, false);
list_del(&component->list);
}
@@ -3156,6 +3182,18 @@ static void convert_endianness_formats(struct snd_soc_pcm_stream *stream)
stream->formats |= endianness_format_map[i];
}
+static void snd_soc_try_rebind_card(void)
+{
+ struct snd_soc_card *card, *c;
+
+ if (!list_empty(&unbind_card_list)) {
+ list_for_each_entry_safe(card, c, &unbind_card_list, list) {
+ if (!snd_soc_bind_card(card))
+ list_del(&card->list);
+ }
+ }
+}
+
int snd_soc_add_component(struct device *dev,
struct snd_soc_component *component,
const struct snd_soc_component_driver *component_driver,
@@ -3183,6 +3221,7 @@ int snd_soc_add_component(struct device *dev,
}
snd_soc_component_add(component);
+ snd_soc_try_rebind_card();
return 0;
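
snd_soc_unbind_card() parks a card that loses a component on unbind_card_list, and snd_soc_try_rebind_card() retries binding every parked card each time a new component registers, so a card comes back automatically once its missing pieces reappear. A sketch of that retry-on-registration pattern, with a toy pending list in place of the kernel's:

#include <stdio.h>

struct card { const char *name; int missing; struct card *next; };

static struct card *pending;           /* cards waiting for a component */

static int bind_card(struct card *c)
{
        return c->missing ? -1 : 0;    /* fails while a component is absent */
}

static void try_rebind(void)
{
        struct card **pp = &pending;

        while (*pp) {
                if (!bind_card(*pp)) {
                        printf("%s bound\n", (*pp)->name);
                        *pp = (*pp)->next; /* unlink on success */
                } else {
                        pp = &(*pp)->next;
                }
        }
}

int main(void)
{
        struct card c = { "rcar-card", 1, NULL };

        pending = &c;
        try_rebind();     /* still missing: stays parked */
        c.missing = 0;    /* component shows up, as in snd_soc_add_component() */
        try_rebind();     /* now binds and leaves the list */
        return 0;
}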
@@ -3221,27 +3260,28 @@ static int __snd_soc_unregister_component(struct device *dev)
int found = 0;
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (dev != component->dev)
continue;
- snd_soc_tplg_component_remove(component, SND_SOC_TPLG_INDEX_ALL);
+ snd_soc_tplg_component_remove(component,
+ SND_SOC_TPLG_INDEX_ALL);
snd_soc_component_del_unlocked(component);
found = 1;
break;
}
mutex_unlock(&client_mutex);
- if (found) {
+ if (found)
snd_soc_component_cleanup(component);
- }
return found;
}
void snd_soc_unregister_component(struct device *dev)
{
- while (__snd_soc_unregister_component(dev));
+ while (__snd_soc_unregister_component(dev))
+ ;
}
EXPORT_SYMBOL_GPL(snd_soc_unregister_component);
@@ -3253,7 +3293,7 @@ struct snd_soc_component *snd_soc_lookup_component(struct device *dev,
ret = NULL;
mutex_lock(&client_mutex);
- list_for_each_entry(component, &component_list, list) {
+ for_each_component(component) {
if (dev != component->dev)
continue;
@@ -3653,7 +3693,7 @@ int snd_soc_get_dai_id(struct device_node *ep)
*/
ret = -ENOTSUPP;
mutex_lock(&client_mutex);
- list_for_each_entry(pos, &component_list, list) {
+ for_each_component(pos) {
struct device_node *component_of_node = pos->dev->of_node;
if (!component_of_node && pos->dev->parent)
@@ -3683,7 +3723,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
int ret = -EPROBE_DEFER;
mutex_lock(&client_mutex);
- list_for_each_entry(pos, &component_list, list) {
+ for_each_component(pos) {
component_of_node = pos->dev->of_node;
if (!component_of_node && pos->dev->parent)
component_of_node = pos->dev->parent->of_node;
@@ -3719,7 +3759,7 @@ int snd_soc_get_dai_name(struct of_phandle_args *args,
ret = 0;
/* find target DAI */
- list_for_each_entry(dai, &pos->dai_list, list) {
+ for_each_component_dais(pos, dai) {
if (id == 0)
break;
id--;
@@ -3764,10 +3804,10 @@ EXPORT_SYMBOL_GPL(snd_soc_of_get_dai_name);
*/
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link)
{
- struct snd_soc_dai_link_component *component = dai_link->codecs;
+ struct snd_soc_dai_link_component *component;
int index;
- for (index = 0; index < dai_link->num_codecs; index++, component++) {
+ for_each_link_codecs(dai_link, index, component) {
if (!component->of_node)
break;
of_node_put(component->of_node);
@@ -3819,12 +3859,10 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev,
dai_link->num_codecs = num_codecs;
/* Parse the list */
- for (index = 0, component = dai_link->codecs;
- index < dai_link->num_codecs;
- index++, component++) {
+ for_each_link_codecs(dai_link, index, component) {
ret = of_parse_phandle_with_args(of_node, name,
"#sound-dai-cells",
- index, &args);
+ index, &args);
if (ret)
goto err;
component->of_node = args.np;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 461d951917c0..a5178845065b 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -18,7 +18,6 @@
// device reopen.
#include <linux/module.h>
-#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/async.h>
#include <linux/delay.h>
@@ -364,10 +363,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
ret = PTR_ERR(data->widget);
goto err_data;
}
- if (!data->widget) {
- ret = -ENOMEM;
- goto err_data;
- }
}
break;
case snd_soc_dapm_demux:
@@ -402,10 +397,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
ret = PTR_ERR(data->widget);
goto err_data;
}
- if (!data->widget) {
- ret = -ENOMEM;
- goto err_data;
- }
snd_soc_dapm_add_path(widget->dapm, data->widget,
widget, NULL, NULL);
@@ -1026,9 +1017,10 @@ static int dapm_new_dai_link(struct snd_soc_dapm_widget *w)
struct snd_kcontrol *kcontrol;
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_card *card = dapm->card->snd_card;
+ struct snd_soc_pcm_runtime *rtd = w->priv;
/* create control for links with > 1 config */
- if (w->num_params <= 1)
+ if (rtd->dai_link->num_params <= 1)
return 0;
/* add kcontrol */
@@ -1320,14 +1312,13 @@ int dapm_clock_event(struct snd_soc_dapm_widget *w,
soc_dapm_async_complete(w->dapm);
-#ifdef CONFIG_HAVE_CLK
if (SND_SOC_DAPM_EVENT_ON(event)) {
return clk_prepare_enable(w->clk);
} else {
clk_disable_unprepare(w->clk);
return 0;
}
-#endif
+
return 0;
}
EXPORT_SYMBOL_GPL(dapm_clock_event);
@@ -1953,7 +1944,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
dapm_pre_sequence_async(&card->dapm, 0);
/* Run other bias changes in parallel */
list_for_each_entry(d, &card->dapm_list, list) {
- if (d != &card->dapm)
+ if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_pre_sequence_async, d,
&async_domain);
}
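
The dapm_power_widgets() change stops scheduling async bias work for any DAPM context that is already sitting at its target bias level, trimming pointless wakeups on sequences that change nothing. The guard reduces to a compare-before-dispatch, as in this sketch:

#include <stdio.h>

struct ctx { const char *name; int bias, target; };

static void schedule_bias_work(struct ctx *c)
{
        printf("scheduling %s: %d -> %d\n", c->name, c->bias, c->target);
}

int main(void)
{
        struct ctx ctxs[] = {
                { "codec", 0, 3 },  /* needs a change: scheduled */
                { "cpu",   3, 3 },  /* already at target: skipped */
        };

        for (int i = 0; i < 2; i++)
                if (ctxs[i].bias != ctxs[i].target)
                        schedule_bias_work(&ctxs[i]);
        return 0;
}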
@@ -1977,7 +1968,7 @@ static int dapm_power_widgets(struct snd_soc_card *card, int event)
/* Run all the bias changes in parallel */
list_for_each_entry(d, &card->dapm_list, list) {
- if (d != &card->dapm)
+ if (d != &card->dapm && d->bias_level != d->target_bias_level)
async_schedule_domain(dapm_post_sequence_async, d,
&async_domain);
}
@@ -2371,12 +2362,13 @@ static ssize_t dapm_widget_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
+ struct snd_soc_dai *codec_dai;
int i, count = 0;
mutex_lock(&rtd->card->dapm_mutex);
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_component *cmpnt = rtd->codec_dais[i]->component;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ struct snd_soc_component *cmpnt = codec_dai->component;
count += dapm_widget_show_component(cmpnt, buf + count);
}
@@ -3426,35 +3418,6 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_pin_switch);
struct snd_soc_dapm_widget *
-snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_widget *widget)
-{
- struct snd_soc_dapm_widget *w;
-
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- w = snd_soc_dapm_new_control_unlocked(dapm, widget);
- /* Do not nag about probe deferrals */
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s (%d)\n",
- widget->name, ret);
- goto out_unlock;
- }
- if (!w)
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s\n",
- widget->name);
-
-out_unlock:
- mutex_unlock(&dapm->card->dapm_mutex);
- return w;
-}
-EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
-
-struct snd_soc_dapm_widget *
snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_widget *widget)
{
@@ -3464,53 +3427,37 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
int ret;
if ((w = dapm_cnew_widget(widget)) == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
switch (w->id) {
case snd_soc_dapm_regulator_supply:
w->regulator = devm_regulator_get(dapm->dev, w->name);
if (IS_ERR(w->regulator)) {
ret = PTR_ERR(w->regulator);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
- dev_warn(w->dapm->dev,
+ dev_warn(dapm->dev,
"ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
break;
case snd_soc_dapm_pinctrl:
w->pinctrl = devm_pinctrl_get(dapm->dev);
- if (IS_ERR_OR_NULL(w->pinctrl)) {
+ if (IS_ERR(w->pinctrl)) {
ret = PTR_ERR(w->pinctrl);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
break;
case snd_soc_dapm_clock_supply:
-#ifdef CONFIG_CLKDEV_LOOKUP
w->clk = devm_clk_get(dapm->dev, w->name);
if (IS_ERR(w->clk)) {
ret = PTR_ERR(w->clk);
- if (ret == -EPROBE_DEFER)
- return ERR_PTR(ret);
- dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
- w->name, ret);
- return NULL;
+ goto request_failed;
}
-#else
- return NULL;
-#endif
break;
default:
break;
@@ -3523,7 +3470,7 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
w->name = kstrdup_const(widget->name, GFP_KERNEL);
if (w->name == NULL) {
kfree(w);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
switch (w->id) {
@@ -3600,7 +3547,37 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
/* machine layer sets up unconnected pins and insertions */
w->connected = 1;
return w;
+
+request_failed:
+ if (ret != -EPROBE_DEFER)
+ dev_err(dapm->dev, "ASoC: Failed to request %s: %d\n",
+ w->name, ret);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * snd_soc_dapm_new_control - create new dapm control
+ * @dapm: DAPM context
+ * @widget: widget template
+ *
+ * Creates new DAPM control based upon a template.
+ *
+ * Returns a widget pointer on success or an error pointer on failure
+ */
+struct snd_soc_dapm_widget *
+snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget)
+{
+ struct snd_soc_dapm_widget *w;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ w = snd_soc_dapm_new_control_unlocked(dapm, widget);
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return w;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_new_control);
/**
* snd_soc_dapm_new_controls - create new dapm controls
@@ -3625,19 +3602,6 @@ int snd_soc_dapm_new_controls(struct snd_soc_dapm_context *dapm,
w = snd_soc_dapm_new_control_unlocked(dapm, widget);
if (IS_ERR(w)) {
ret = PTR_ERR(w);
- /* Do not nag about probe deferrals */
- if (ret == -EPROBE_DEFER)
- break;
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s (%d)\n",
- widget->name, ret);
- break;
- }
- if (!w) {
- dev_err(dapm->dev,
- "ASoC: Failed to create DAPM control %s\n",
- widget->name);
- ret = -ENOMEM;
break;
}
widget++;
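
After this rework, snd_soc_dapm_new_control_unlocked() reports every failure as an error pointer instead of mixing NULL and ERR_PTR() returns, so callers need exactly one IS_ERR() check and -EPROBE_DEFER propagates without special-casing. The convention packs a small negative errno into a pointer value no valid object can occupy; a reduced sketch:

#include <stdio.h>

/* Reduced ERR_PTR()/PTR_ERR()/IS_ERR(): errno values live in the top
 * page of the address space, which no valid object can occupy. */
#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

static void *make_widget(int fail)
{
        static int widget = 42;

        return fail ? ERR_PTR(-12) : &widget; /* -ENOMEM as error pointer */
}

int main(void)
{
        void *w = make_widget(1);

        if (IS_ERR(w))                 /* one check covers every failure mode */
                printf("error %ld\n", PTR_ERR(w));
        return 0;
}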
@@ -3650,32 +3614,23 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_controls);
static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
- struct snd_soc_dapm_path *source_p, *sink_p;
+ struct snd_soc_dapm_path *path;
struct snd_soc_dai *source, *sink;
struct snd_soc_pcm_runtime *rtd = w->priv;
- const struct snd_soc_pcm_stream *config = w->params + w->params_select;
+ const struct snd_soc_pcm_stream *config;
struct snd_pcm_substream substream;
struct snd_pcm_hw_params *params = NULL;
struct snd_pcm_runtime *runtime = NULL;
unsigned int fmt;
- int ret;
+ int ret = 0;
+
+ config = rtd->dai_link->params + rtd->params_select;
if (WARN_ON(!config) ||
WARN_ON(list_empty(&w->edges[SND_SOC_DAPM_DIR_OUT]) ||
list_empty(&w->edges[SND_SOC_DAPM_DIR_IN])))
return -EINVAL;
- /* We only support a single source and sink, pick the first */
- source_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_OUT],
- struct snd_soc_dapm_path,
- list_node[SND_SOC_DAPM_DIR_OUT]);
- sink_p = list_first_entry(&w->edges[SND_SOC_DAPM_DIR_IN],
- struct snd_soc_dapm_path,
- list_node[SND_SOC_DAPM_DIR_IN]);
-
- source = source_p->source->priv;
- sink = sink_p->sink->priv;
-
/* Be a little careful as we don't want to overflow the mask array */
if (config->formats) {
fmt = ffs(config->formats) - 1;
@@ -3717,59 +3672,95 @@ static int snd_soc_dai_link_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
substream.stream = SNDRV_PCM_STREAM_CAPTURE;
- if (source->driver->ops->startup) {
- ret = source->driver->ops->startup(&substream, source);
- if (ret < 0) {
- dev_err(source->dev,
- "ASoC: startup() failed: %d\n", ret);
- goto out;
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
+
+ if (source->driver->ops->startup) {
+ ret = source->driver->ops->startup(&substream,
+ source);
+ if (ret < 0) {
+ dev_err(source->dev,
+ "ASoC: startup() failed: %d\n",
+ ret);
+ goto out;
+ }
+ source->active++;
}
- source->active++;
+ ret = soc_dai_hw_params(&substream, params, source);
+ if (ret < 0)
+ goto out;
}
- ret = soc_dai_hw_params(&substream, params, source);
- if (ret < 0)
- goto out;
substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- if (sink->driver->ops->startup) {
- ret = sink->driver->ops->startup(&substream, sink);
- if (ret < 0) {
- dev_err(sink->dev,
- "ASoC: startup() failed: %d\n", ret);
- goto out;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ if (sink->driver->ops->startup) {
+ ret = sink->driver->ops->startup(&substream,
+ sink);
+ if (ret < 0) {
+ dev_err(sink->dev,
+ "ASoC: startup() failed: %d\n",
+ ret);
+ goto out;
+ }
+ sink->active++;
}
- sink->active++;
+ ret = soc_dai_hw_params(&substream, params, sink);
+ if (ret < 0)
+ goto out;
}
- ret = soc_dai_hw_params(&substream, params, sink);
- if (ret < 0)
- goto out;
break;
case SND_SOC_DAPM_POST_PMU:
- ret = snd_soc_dai_digital_mute(sink, 0,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "ASoC: Failed to unmute: %d\n", ret);
- ret = 0;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ ret = snd_soc_dai_digital_mute(sink, 0,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret != 0 && ret != -ENOTSUPP)
+ dev_warn(sink->dev,
+ "ASoC: Failed to unmute: %d\n", ret);
+ ret = 0;
+ }
break;
case SND_SOC_DAPM_PRE_PMD:
- ret = snd_soc_dai_digital_mute(sink, 1,
- SNDRV_PCM_STREAM_PLAYBACK);
- if (ret != 0 && ret != -ENOTSUPP)
- dev_warn(sink->dev, "ASoC: Failed to mute: %d\n", ret);
- ret = 0;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ ret = snd_soc_dai_digital_mute(sink, 1,
+ SNDRV_PCM_STREAM_PLAYBACK);
+ if (ret != 0 && ret != -ENOTSUPP)
+ dev_warn(sink->dev,
+ "ASoC: Failed to mute: %d\n", ret);
+ ret = 0;
+ }
- source->active--;
- if (source->driver->ops->shutdown) {
- substream.stream = SNDRV_PCM_STREAM_CAPTURE;
- source->driver->ops->shutdown(&substream, source);
+ substream.stream = SNDRV_PCM_STREAM_CAPTURE;
+ snd_soc_dapm_widget_for_each_source_path(w, path) {
+ source = path->source->priv;
+
+ if (source->driver->ops->hw_free)
+ source->driver->ops->hw_free(&substream,
+ source);
+
+ source->active--;
+ if (source->driver->ops->shutdown)
+ source->driver->ops->shutdown(&substream,
+ source);
}
- sink->active--;
- if (sink->driver->ops->shutdown) {
- substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
- sink->driver->ops->shutdown(&substream, sink);
+ substream.stream = SNDRV_PCM_STREAM_PLAYBACK;
+ snd_soc_dapm_widget_for_each_sink_path(w, path) {
+ sink = path->sink->priv;
+
+ if (sink->driver->ops->hw_free)
+ sink->driver->ops->hw_free(&substream, sink);
+
+ sink->active--;
+ if (sink->driver->ops->shutdown)
+ sink->driver->ops->shutdown(&substream, sink);
}
break;
@@ -3788,8 +3779,9 @@ static int snd_soc_dapm_dai_link_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_pcm_runtime *rtd = w->priv;
- ucontrol->value.enumerated.item[0] = w->params_select;
+ ucontrol->value.enumerated.item[0] = rtd->params_select;
return 0;
}
@@ -3798,18 +3790,19 @@ static int snd_soc_dapm_dai_link_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_widget *w = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_pcm_runtime *rtd = w->priv;
/* Can't change the config when widget is already powered */
if (w->power)
return -EBUSY;
- if (ucontrol->value.enumerated.item[0] == w->params_select)
+ if (ucontrol->value.enumerated.item[0] == rtd->params_select)
return 0;
- if (ucontrol->value.enumerated.item[0] >= w->num_params)
+ if (ucontrol->value.enumerated.item[0] >= rtd->dai_link->num_params)
return -EINVAL;
- w->params_select = ucontrol->value.enumerated.item[0];
+ rtd->params_select = ucontrol->value.enumerated.item[0];
return 0;
}
@@ -3896,12 +3889,10 @@ outfree_w_param:
return NULL;
}
-int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
- struct snd_soc_pcm_runtime *rtd,
- const struct snd_soc_pcm_stream *params,
- unsigned int num_params,
- struct snd_soc_dapm_widget *source,
- struct snd_soc_dapm_widget *sink)
+static struct snd_soc_dapm_widget *
+snd_soc_dapm_new_dai(struct snd_soc_card *card, struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink)
{
struct snd_soc_dapm_widget template;
struct snd_soc_dapm_widget *w;
@@ -3913,7 +3904,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
link_name = devm_kasprintf(card->dev, GFP_KERNEL, "%s-%s",
source->name, sink->name);
if (!link_name)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
memset(&template, 0, sizeof(template));
template.reg = SND_SOC_NOPM;
@@ -3925,9 +3916,10 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.kcontrol_news = NULL;
/* allocate memory for control, only in case of multiple configs */
- if (num_params > 1) {
- w_param_text = devm_kcalloc(card->dev, num_params,
- sizeof(char *), GFP_KERNEL);
+ if (rtd->dai_link->num_params > 1) {
+ w_param_text = devm_kcalloc(card->dev,
+ rtd->dai_link->num_params,
+ sizeof(char *), GFP_KERNEL);
if (!w_param_text) {
ret = -ENOMEM;
goto param_fail;
@@ -3936,7 +3928,9 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
template.num_kcontrols = 1;
template.kcontrol_news =
snd_soc_dapm_alloc_kcontrol(card,
- link_name, params, num_params,
+ link_name,
+ rtd->dai_link->params,
+ rtd->dai_link->num_params,
w_param_text, &private_value);
if (!template.kcontrol_news) {
ret = -ENOMEM;
@@ -3950,37 +3944,20 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
w = snd_soc_dapm_new_control_unlocked(&card->dapm, &template);
if (IS_ERR(w)) {
ret = PTR_ERR(w);
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(card->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- link_name, ret);
- goto outfree_kcontrol_news;
- }
- if (!w) {
- dev_err(card->dev, "ASoC: Failed to create %s widget\n",
- link_name);
- ret = -ENOMEM;
goto outfree_kcontrol_news;
}
- w->params = params;
- w->num_params = num_params;
w->priv = rtd;
- ret = snd_soc_dapm_add_path(&card->dapm, source, w, NULL, NULL);
- if (ret)
- goto outfree_w;
- return snd_soc_dapm_add_path(&card->dapm, w, sink, NULL, NULL);
+ return w;
-outfree_w:
- devm_kfree(card->dev, w);
outfree_kcontrol_news:
devm_kfree(card->dev, (void *)template.kcontrol_news);
- snd_soc_dapm_free_kcontrol(card, &private_value, num_params, w_param_text);
+ snd_soc_dapm_free_kcontrol(card, &private_value,
+ rtd->dai_link->num_params, w_param_text);
param_fail:
devm_kfree(card->dev, link_name);
- return ret;
+ return ERR_PTR(ret);
}
int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
@@ -4003,21 +3980,8 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- dai->driver->playback.stream_name, ret);
- return ret;
- }
- if (!w) {
- dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
- dai->driver->playback.stream_name);
- return -ENOMEM;
- }
+ if (IS_ERR(w))
+ return PTR_ERR(w);
w->priv = dai;
dai->playback_widget = w;
@@ -4032,21 +3996,8 @@ int snd_soc_dapm_new_dai_widgets(struct snd_soc_dapm_context *dapm,
template.name);
w = snd_soc_dapm_new_control_unlocked(dapm, &template);
- if (IS_ERR(w)) {
- int ret = PTR_ERR(w);
-
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(dapm->dev,
- "ASoC: Failed to create %s widget (%d)\n",
- dai->driver->playback.stream_name, ret);
- return ret;
- }
- if (!w) {
- dev_err(dapm->dev, "ASoC: Failed to create %s widget\n",
- dai->driver->capture.stream_name);
- return -ENOMEM;
- }
+ if (IS_ERR(w))
+ return PTR_ERR(w);
w->priv = dai;
dai->capture_widget = w;
@@ -4115,34 +4066,79 @@ static void dapm_connect_dai_link_widgets(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct snd_soc_dapm_widget *sink, *source;
+ struct snd_soc_dai *codec_dai;
+ struct snd_soc_dapm_widget *playback = NULL, *capture = NULL;
+ struct snd_soc_dapm_widget *codec, *playback_cpu, *capture_cpu;
int i;
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ if (rtd->dai_link->params) {
+ playback_cpu = cpu_dai->capture_widget;
+ capture_cpu = cpu_dai->playback_widget;
+ } else {
+ playback = cpu_dai->playback_widget;
+ capture = cpu_dai->capture_widget;
+ playback_cpu = playback;
+ capture_cpu = capture;
+ }
+
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* connect BE DAI playback if widgets are valid */
- if (codec_dai->playback_widget && cpu_dai->playback_widget) {
- source = cpu_dai->playback_widget;
- sink = codec_dai->playback_widget;
+ codec = codec_dai->playback_widget;
+
+ if (playback_cpu && codec) {
+ if (!playback) {
+ playback = snd_soc_dapm_new_dai(card, rtd,
+ playback_cpu,
+ codec);
+ if (IS_ERR(playback)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(playback));
+ continue;
+ }
+
+ snd_soc_dapm_add_path(&card->dapm, playback_cpu,
+ playback, NULL, NULL);
+ }
+
dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- cpu_dai->component->name, source->name,
- codec_dai->component->name, sink->name);
+ cpu_dai->component->name, playback_cpu->name,
+ codec_dai->component->name, codec->name);
- snd_soc_dapm_add_path(&card->dapm, source, sink,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, playback, codec,
+ NULL, NULL);
}
+ }
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/* connect BE DAI capture if widgets are valid */
- if (codec_dai->capture_widget && cpu_dai->capture_widget) {
- source = codec_dai->capture_widget;
- sink = cpu_dai->capture_widget;
+ codec = codec_dai->capture_widget;
+
+ if (codec && capture_cpu) {
+ if (!capture) {
+ capture = snd_soc_dapm_new_dai(card, rtd,
+ codec,
+ capture_cpu);
+ if (IS_ERR(capture)) {
+ dev_err(rtd->dev,
+ "ASoC: Failed to create DAI %s: %ld\n",
+ codec_dai->name,
+ PTR_ERR(capture));
+ continue;
+ }
+
+ snd_soc_dapm_add_path(&card->dapm, capture,
+ capture_cpu, NULL, NULL);
+ }
+
dev_dbg(rtd->dev, "connected DAI link %s:%s -> %s:%s\n",
- codec_dai->component->name, source->name,
- cpu_dai->component->name, sink->name);
+ codec_dai->component->name, codec->name,
+ cpu_dai->component->name, capture_cpu->name);
- snd_soc_dapm_add_path(&card->dapm, source, sink,
- NULL, NULL);
+ snd_soc_dapm_add_path(&card->dapm, codec, capture,
+ NULL, NULL);
}
}
}
@@ -4192,12 +4188,12 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
struct snd_soc_pcm_runtime *rtd;
/* for each BE DAI link... */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
/*
* dynamic FE links have no fixed DAI mapping.
* CODEC<->CODEC links have no direct connection.
*/
- if (rtd->dai_link->dynamic || rtd->dai_link->params)
+ if (rtd->dai_link->dynamic)
continue;
dapm_connect_dai_link_widgets(card, rtd);
@@ -4207,11 +4203,12 @@ void snd_soc_dapm_connect_dai_link_widgets(struct snd_soc_card *card)
static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
int event)
{
+ struct snd_soc_dai *codec_dai;
int i;
soc_dapm_dai_stream_event(rtd->cpu_dai, stream, event);
- for (i = 0; i < rtd->num_codecs; i++)
- soc_dapm_dai_stream_event(rtd->codec_dais[i], stream, event);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ soc_dapm_dai_stream_event(codec_dai, stream, event);
dapm_power_widgets(rtd->card, event);
}
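
The loop conversions in this file (and below in soc-pcm.c) lean on new
iterator helpers. A sketch of their definitions, paraphrased from
include/sound/soc.h (exact layout in the tree may differ slightly):

	#define for_each_card_rtds(card, rtd) \
		list_for_each_entry(rtd, &(card)->rtd_list, list)

	#define for_each_rtd_codec_dai(rtd, i, dai) \
		for ((i) = 0; \
		     ((i) < (rtd)->num_codecs) && ((dai) = (rtd)->codec_dais[i]); \
		     (i)++)

	#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
		for (; ((--(i)) >= 0) && ((dai) = (rtd)->codec_dais[i]);)

The rollback variant walks backwards from the current index, which is why
the error paths in soc-pcm.c below first set i = rtd->num_codecs before
unwinding.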
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index 592efb370c44..f4dc3d445aae 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -373,7 +373,7 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
unsigned int val;
int ret;
@@ -418,7 +418,7 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
unsigned int rshift = mc->rshift;
int max = mc->max;
int min = mc->min;
- unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+ unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
int err = 0;
unsigned int val, val_mask, val2 = 0;
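
The 1U change here is a correctness fix, not style: when min + max has
bit 31 set, fls() returns 32, and "1 << 31" shifts a signed int into its
sign bit (undefined behavior in C) before the "- 1" is applied. With an
unsigned literal both operations are well defined. A standalone userspace
illustration, where fls_demo() stands in for the kernel's fls():

	#include <stdio.h>

	/* Stand-in for the kernel's fls(): 1-based index of highest set bit. */
	static int fls_demo(unsigned int x)
	{
		int i = 0;

		while (x) {
			x >>= 1;
			i++;
		}
		return i;
	}

	int main(void)
	{
		unsigned int v = 0x80000000u;	/* fls_demo(v) == 32 */
		unsigned int mask = (1U << (fls_demo(v) - 1)) - 1;

		printf("mask = 0x%08x\n", mask);	/* prints 0x7fffffff */
		return 0;
	}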
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e8b98bfd4cf1..03f36e534050 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -59,25 +59,26 @@ static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream)
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
lockdep_assert_held(&rtd->pcm_mutex);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active++;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->playback_active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->playback_active++;
} else {
cpu_dai->capture_active++;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->capture_active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->capture_active++;
}
cpu_dai->active++;
cpu_dai->component->active++;
- for (i = 0; i < rtd->num_codecs; i++) {
- rtd->codec_dais[i]->active++;
- rtd->codec_dais[i]->component->active++;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ codec_dai->active++;
+ codec_dai->component->active++;
}
}
@@ -94,25 +95,26 @@ void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream)
void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream)
{
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i;
lockdep_assert_held(&rtd->pcm_mutex);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
cpu_dai->playback_active--;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->playback_active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->playback_active--;
} else {
cpu_dai->capture_active--;
- for (i = 0; i < rtd->num_codecs; i++)
- rtd->codec_dais[i]->capture_active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ codec_dai->capture_active--;
}
cpu_dai->active--;
cpu_dai->component->active--;
- for (i = 0; i < rtd->num_codecs; i++) {
- rtd->codec_dais[i]->component->active--;
- rtd->codec_dais[i]->active--;
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ codec_dai->component->active--;
+ codec_dai->active--;
}
}
@@ -172,7 +174,7 @@ int dpcm_dapm_stream_event(struct snd_soc_pcm_runtime *fe, int dir,
{
struct snd_soc_dpcm *dpcm;
- list_for_each_entry(dpcm, &fe->dpcm[dir].be_clients, list_be) {
+ for_each_dpcm_be(fe, dir, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
@@ -253,6 +255,7 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
unsigned int rate, channels, sample_bits, symmetry, i;
rate = params_rate(params);
@@ -263,8 +266,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_rates ||
rtd->dai_link->symmetric_rates;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_rates;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_rates;
if (symmetry && cpu_dai->rate && cpu_dai->rate != rate) {
dev_err(rtd->dev, "ASoC: unmatched rate symmetry: %d - %d\n",
@@ -275,8 +278,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_channels ||
rtd->dai_link->symmetric_channels;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_channels;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_channels;
if (symmetry && cpu_dai->channels && cpu_dai->channels != channels) {
dev_err(rtd->dev, "ASoC: unmatched channel symmetry: %d - %d\n",
@@ -287,8 +290,8 @@ static int soc_pcm_params_symmetry(struct snd_pcm_substream *substream,
symmetry = cpu_dai->driver->symmetric_samplebits ||
rtd->dai_link->symmetric_samplebits;
- for (i = 0; i < rtd->num_codecs; i++)
- symmetry |= rtd->codec_dais[i]->driver->symmetric_samplebits;
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ symmetry |= codec_dai->driver->symmetric_samplebits;
if (symmetry && cpu_dai->sample_bits && cpu_dai->sample_bits != sample_bits) {
dev_err(rtd->dev, "ASoC: unmatched sample bits symmetry: %d - %d\n",
@@ -304,17 +307,18 @@ static bool soc_pcm_has_symmetry(struct snd_pcm_substream *substream)
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai_driver *cpu_driver = rtd->cpu_dai->driver;
struct snd_soc_dai_link *link = rtd->dai_link;
+ struct snd_soc_dai *codec_dai;
unsigned int symmetry, i;
symmetry = cpu_driver->symmetric_rates || link->symmetric_rates ||
cpu_driver->symmetric_channels || link->symmetric_channels ||
cpu_driver->symmetric_samplebits || link->symmetric_samplebits;
- for (i = 0; i < rtd->num_codecs; i++)
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
symmetry = symmetry ||
- rtd->codec_dais[i]->driver->symmetric_rates ||
- rtd->codec_dais[i]->driver->symmetric_channels ||
- rtd->codec_dais[i]->driver->symmetric_samplebits;
+ codec_dai->driver->symmetric_rates ||
+ codec_dai->driver->symmetric_channels ||
+ codec_dai->driver->symmetric_samplebits;
return symmetry;
}
@@ -342,8 +346,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
unsigned int bits = 0, cpu_bits;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->playback.sig_bits == 0) {
bits = 0;
break;
@@ -352,8 +355,7 @@ static void soc_pcm_apply_msb(struct snd_pcm_substream *substream)
}
cpu_bits = cpu_dai->driver->playback.sig_bits;
} else {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->capture.sig_bits == 0) {
bits = 0;
break;
@@ -372,6 +374,7 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
struct snd_pcm_runtime *runtime = substream->runtime;
struct snd_pcm_hardware *hw = &runtime->hw;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai;
struct snd_soc_dai_driver *cpu_dai_drv = rtd->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
@@ -388,7 +391,7 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
cpu_stream = &cpu_dai_drv->capture;
/* first calculate min/max only for CODECs in the DAI link */
- for (i = 0; i < rtd->num_codecs; i++) {
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
/*
* Skip CODECs which don't support the current stream type.
@@ -399,11 +402,11 @@ static void soc_pcm_init_runtime_hw(struct snd_pcm_substream *substream)
* bailed out on a higher level, since there would be no
* CODEC to support the transfer direction in that case.
*/
- if (!snd_soc_dai_stream_valid(rtd->codec_dais[i],
+ if (!snd_soc_dai_stream_valid(codec_dai,
substream->stream))
continue;
- codec_dai_drv = rtd->codec_dais[i]->driver;
+ codec_dai_drv = codec_dai->driver;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -482,8 +485,8 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
int i, ret = 0;
pinctrl_pm_select_default_state(cpu_dai->dev);
- for (i = 0; i < rtd->num_codecs; i++)
- pinctrl_pm_select_default_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ pinctrl_pm_select_default_state(codec_dai->dev);
for_each_rtdcom(rtd, rtdcom) {
component = rtdcom->component;
@@ -520,8 +523,7 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
}
component = NULL;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->startup) {
ret = codec_dai->driver->ops->startup(substream,
codec_dai);
@@ -588,10 +590,9 @@ static int soc_pcm_open(struct snd_pcm_substream *substream)
goto config_err;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (rtd->codec_dais[i]->active) {
- ret = soc_pcm_apply_symmetry(substream,
- rtd->codec_dais[i]);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (codec_dai->active) {
+ ret = soc_pcm_apply_symmetry(substream, codec_dai);
if (ret != 0)
goto config_err;
}
@@ -620,8 +621,7 @@ machine_err:
i = rtd->num_codecs;
codec_dai_err:
- while (--i >= 0) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai_rollback(rtd, i, codec_dai) {
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
@@ -641,9 +641,9 @@ out:
pm_runtime_put_autosuspend(component->dev);
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (!rtd->codec_dais[i]->active)
- pinctrl_pm_select_sleep_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
}
if (!cpu_dai->active)
pinctrl_pm_select_sleep_state(cpu_dai->dev);
@@ -701,8 +701,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
if (!cpu_dai->active)
cpu_dai->rate = 0;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (!codec_dai->active)
codec_dai->rate = 0;
}
@@ -712,8 +711,7 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->shutdown)
cpu_dai->driver->ops->shutdown(substream, cpu_dai);
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->shutdown)
codec_dai->driver->ops->shutdown(substream, codec_dai);
}
@@ -751,9 +749,9 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
pm_runtime_put_autosuspend(component->dev);
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (!rtd->codec_dais[i]->active)
- pinctrl_pm_select_sleep_state(rtd->codec_dais[i]->dev);
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (!codec_dai->active)
+ pinctrl_pm_select_sleep_state(codec_dai->dev);
}
if (!cpu_dai->active)
pinctrl_pm_select_sleep_state(cpu_dai->dev);
@@ -801,8 +799,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
}
}
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->prepare) {
ret = codec_dai->driver->ops->prepare(substream,
codec_dai);
@@ -834,8 +831,8 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
snd_soc_dapm_stream_event(rtd, substream->stream,
SND_SOC_DAPM_STREAM_START);
- for (i = 0; i < rtd->num_codecs; i++)
- snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
+ for_each_rtd_codec_dai(rtd, i, codec_dai)
+ snd_soc_dai_digital_mute(codec_dai, 0,
substream->stream);
snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
@@ -920,6 +917,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_component *component;
struct snd_soc_rtdcom_list *rtdcom;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
+ struct snd_soc_dai *codec_dai;
int i, ret = 0;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
@@ -932,8 +930,7 @@ static int soc_pcm_hw_params(struct snd_pcm_substream *substream,
}
}
- for (i = 0; i < rtd->num_codecs; i++) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
struct snd_pcm_hw_params codec_params;
/*
@@ -1018,8 +1015,7 @@ interface_err:
i = rtd->num_codecs;
codec_err:
- while (--i >= 0) {
- struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai_rollback(rtd, i, codec_dai) {
if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
codec_dai->rate = 0;
@@ -1052,8 +1048,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
cpu_dai->sample_bits = 0;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->active == 1) {
codec_dai->rate = 0;
codec_dai->channels = 0;
@@ -1062,10 +1057,10 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
}
/* apply codec digital mute */
- for (i = 0; i < rtd->num_codecs; i++) {
- if ((playback && rtd->codec_dais[i]->playback_active == 1) ||
- (!playback && rtd->codec_dais[i]->capture_active == 1))
- snd_soc_dai_digital_mute(rtd->codec_dais[i], 1,
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if ((playback && codec_dai->playback_active == 1) ||
+ (!playback && codec_dai->capture_active == 1))
+ snd_soc_dai_digital_mute(codec_dai, 1,
substream->stream);
}
@@ -1077,8 +1072,7 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
soc_pcm_components_hw_free(substream, NULL);
/* now free hw params for the DAIs */
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->hw_free)
codec_dai->driver->ops->hw_free(substream, codec_dai);
}
@@ -1099,8 +1093,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->trigger) {
ret = codec_dai->driver->ops->trigger(substream,
cmd, codec_dai);
@@ -1144,8 +1137,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
struct snd_soc_dai *codec_dai;
int i, ret;
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->bespoke_trigger) {
ret = codec_dai->driver->ops->bespoke_trigger(substream,
cmd, codec_dai);
@@ -1199,8 +1191,7 @@ static snd_pcm_uframes_t soc_pcm_pointer(struct snd_pcm_substream *substream)
if (cpu_dai->driver->ops->delay)
delay += cpu_dai->driver->ops->delay(substream, cpu_dai);
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->ops->delay)
codec_delay = max(codec_delay,
codec_dai->driver->ops->delay(substream,
@@ -1220,7 +1211,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
/* only add new dpcms */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
if (dpcm->be == be && dpcm->fe == fe)
return 0;
}
@@ -1261,7 +1252,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe,
be_substream = snd_soc_dpcm_get_substream(be, stream);
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -1281,7 +1272,7 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm, *d;
- list_for_each_entry_safe(dpcm, d, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_safe(fe, stream, dpcm, d) {
dev_dbg(fe->dev, "ASoC: BE %s disconnect check for %s\n",
stream ? "capture" : "playback",
dpcm->be->dai_link->name);
@@ -1310,12 +1301,13 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
struct snd_soc_dapm_widget *widget, int stream)
{
struct snd_soc_pcm_runtime *be;
+ struct snd_soc_dai *dai;
int i;
dev_dbg(card->dev, "ASoC: find BE for widget %s\n", widget->name);
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- list_for_each_entry(be, &card->rtd_list, list) {
+ for_each_card_rtds(card, be) {
if (!be->dai_link->no_pcm)
continue;
@@ -1327,15 +1319,14 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
if (be->cpu_dai->playback_widget == widget)
return be;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
if (dai->playback_widget == widget)
return be;
}
}
} else {
- list_for_each_entry(be, &card->rtd_list, list) {
+ for_each_card_rtds(card, be) {
if (!be->dai_link->no_pcm)
continue;
@@ -1347,8 +1338,7 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
if (be->cpu_dai->capture_widget == widget)
return be;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
if (dai->capture_widget == widget)
return be;
}
@@ -1388,32 +1378,31 @@ static bool dpcm_end_walk_at_be(struct snd_soc_dapm_widget *widget,
{
struct snd_soc_card *card = widget->dapm->card;
struct snd_soc_pcm_runtime *rtd;
+ struct snd_soc_dai *dai;
int i;
if (dir == SND_SOC_DAPM_DIR_OUT) {
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!rtd->dai_link->no_pcm)
continue;
if (rtd->cpu_dai->playback_widget == widget)
return true;
- for (i = 0; i < rtd->num_codecs; ++i) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
if (dai->playback_widget == widget)
return true;
}
}
} else { /* SND_SOC_DAPM_DIR_IN */
- list_for_each_entry(rtd, &card->rtd_list, list) {
+ for_each_card_rtds(card, rtd) {
if (!rtd->dai_link->no_pcm)
continue;
if (rtd->cpu_dai->capture_widget == widget)
return true;
- for (i = 0; i < rtd->num_codecs; ++i) {
- struct snd_soc_dai *dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, dai) {
if (dai->capture_widget == widget)
return true;
}
@@ -1445,10 +1434,11 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dpcm *dpcm;
struct snd_soc_dapm_widget_list *list = *list_;
struct snd_soc_dapm_widget *widget;
+ struct snd_soc_dai *dai;
int prune = 0;
/* Destroy any old FE <--> BE connections */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
unsigned int i;
/* is there a valid CPU DAI widget for this BE */
@@ -1459,8 +1449,7 @@ static int dpcm_prune_paths(struct snd_soc_pcm_runtime *fe, int stream,
continue;
/* is there a valid CODEC DAI widget for this BE */
- for (i = 0; i < dpcm->be->num_codecs; i++) {
- struct snd_soc_dai *dai = dpcm->be->codec_dais[i];
+ for_each_rtd_codec_dai(dpcm->be, i, dai) {
widget = dai_get_widget(dai, stream);
/* prune the BE if it's no longer in our active list */
@@ -1555,7 +1544,7 @@ void dpcm_clear_pending_state(struct snd_soc_pcm_runtime *fe, int stream)
{
struct snd_soc_dpcm *dpcm;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->be->dpcm[stream].runtime_update =
SND_SOC_DPCM_UPDATE_NO;
}
@@ -1566,7 +1555,7 @@ static void dpcm_be_dai_startup_unwind(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
/* disable any enabled and non active backends */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -1595,7 +1584,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
int err, count = 0;
/* only startup BE DAIs that are either sinks or sources to this FE DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -1649,7 +1638,7 @@ int dpcm_be_dai_startup(struct snd_soc_pcm_runtime *fe, int stream)
unwind:
/* disable any enabled and non active backends */
- list_for_each_entry_continue_reverse(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_rollback(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
@@ -1680,7 +1669,7 @@ static void dpcm_init_runtime_hw(struct snd_pcm_runtime *runtime,
struct snd_soc_pcm_stream *stream)
{
runtime->hw.rate_min = stream->rate_min;
- runtime->hw.rate_max = stream->rate_max;
+ runtime->hw.rate_max = min_not_zero(stream->rate_max, UINT_MAX);
runtime->hw.channels_min = stream->channels_min;
runtime->hw.channels_max = stream->channels_max;
if (runtime->hw.formats)
@@ -1695,6 +1684,7 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
{
struct snd_soc_pcm_runtime *fe = substream->private_data;
struct snd_soc_dpcm *dpcm;
+ struct snd_soc_dai *dai;
int stream = substream->stream;
if (!fe->dai_link->dpcm_merged_format)
@@ -1705,22 +1695,21 @@ static void dpcm_runtime_merge_format(struct snd_pcm_substream *substream,
* if FE wants to use it (= dpcm_merged_format)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
int i;
- for (i = 0; i < be->num_codecs; i++) {
+ for_each_rtd_codec_dai(be, i, dai) {
/*
* Skip CODECs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
*/
- if (!snd_soc_dai_stream_valid(be->codec_dais[i],
- stream))
+ if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = be->codec_dais[i]->driver;
+ codec_dai_drv = dai->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -1747,7 +1736,7 @@ static void dpcm_runtime_merge_chan(struct snd_pcm_substream *substream,
* if FE wants to use it (= dpcm_merged_chan)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
@@ -1799,12 +1788,13 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
* if FE wants to use it (= dpcm_merged_rate)
*/
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_soc_dai_driver *cpu_dai_drv = be->cpu_dai->driver;
struct snd_soc_dai_driver *codec_dai_drv;
struct snd_soc_pcm_stream *codec_stream;
struct snd_soc_pcm_stream *cpu_stream;
+ struct snd_soc_dai *dai;
int i;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -1816,16 +1806,15 @@ static void dpcm_runtime_merge_rate(struct snd_pcm_substream *substream,
*rate_max = min_not_zero(*rate_max, cpu_stream->rate_max);
*rates = snd_pcm_rate_mask_intersect(*rates, cpu_stream->rates);
- for (i = 0; i < be->num_codecs; i++) {
+ for_each_rtd_codec_dai(be, i, dai) {
/*
* Skip CODECs which don't support the current stream
* type. See soc_pcm_init_runtime_hw() for more details
*/
- if (!snd_soc_dai_stream_valid(be->codec_dais[i],
- stream))
+ if (!snd_soc_dai_stream_valid(dai, stream))
continue;
- codec_dai_drv = be->codec_dais[i]->driver;
+ codec_dai_drv = dai->driver;
if (stream == SNDRV_PCM_STREAM_PLAYBACK)
codec_stream = &codec_dai_drv->playback;
else
@@ -1902,11 +1891,12 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
}
/* apply symmetry for BE */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
struct snd_soc_pcm_runtime *rtd = be_substream->private_data;
+ struct snd_soc_dai *codec_dai;
int i;
if (rtd->dai_link->be_hw_params_fixup)
@@ -1923,10 +1913,10 @@ static int dpcm_apply_symmetry(struct snd_pcm_substream *fe_substream,
return err;
}
- for (i = 0; i < rtd->num_codecs; i++) {
- if (rtd->codec_dais[i]->active) {
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
+ if (codec_dai->active) {
err = soc_pcm_apply_symmetry(fe_substream,
- rtd->codec_dais[i]);
+ codec_dai);
if (err < 0)
return err;
}
@@ -1986,7 +1976,7 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
/* only shutdown BEs that are either sinks or sources to this FE DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2050,7 +2040,7 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
/* only hw_params backends that are either sinks or sources
* to this frontend DAI */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2119,7 +2109,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
int ret;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2170,7 +2160,7 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
unwind:
/* disable any enabled and non active backends */
- list_for_each_entry_continue_reverse(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be_rollback(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
snd_soc_dpcm_get_substream(be, stream);
@@ -2250,7 +2240,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
struct snd_soc_dpcm *dpcm;
int ret = 0;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2436,7 +2426,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
struct snd_soc_dpcm *dpcm;
int ret = 0;
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
struct snd_pcm_substream *be_substream =
@@ -2646,7 +2636,7 @@ close:
dpcm_be_dai_shutdown(fe, stream);
disconnect:
/* disconnect any non started BEs */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
if (be->dpcm[stream].state != SND_SOC_DPCM_STATE_START)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
@@ -2771,14 +2761,14 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
/* shutdown all old paths first */
- list_for_each_entry(fe, &card->rtd_list, list) {
+ for_each_card_rtds(card, fe) {
ret = soc_dpcm_fe_runtime_update(fe, 0);
if (ret)
goto out;
}
/* bring new paths up */
- list_for_each_entry(fe, &card->rtd_list, list) {
+ for_each_card_rtds(card, fe) {
ret = soc_dpcm_fe_runtime_update(fe, 1);
if (ret)
goto out;
@@ -2791,10 +2781,9 @@ out:
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
{
struct snd_soc_dpcm *dpcm;
- struct list_head *clients =
- &fe->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients;
+ struct snd_soc_dai *dai;
- list_for_each_entry(dpcm, clients, list_be) {
+ for_each_dpcm_be(fe, SNDRV_PCM_STREAM_PLAYBACK, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
int i;
@@ -2802,8 +2791,7 @@ int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute)
if (be->dai_link->ignore_suspend)
continue;
- for (i = 0; i < be->num_codecs; i++) {
- struct snd_soc_dai *dai = be->codec_dais[i];
+ for_each_rtd_codec_dai(be, i, dai) {
struct snd_soc_dai_driver *drv = dai->driver;
dev_dbg(be->dev, "ASoC: BE digital mute %s\n",
@@ -2844,7 +2832,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
ret = dpcm_fe_dai_startup(fe_substream);
if (ret < 0) {
/* clean up all links */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -2867,7 +2855,7 @@ static int dpcm_fe_dai_close(struct snd_pcm_substream *fe_substream)
ret = dpcm_fe_dai_shutdown(fe_substream);
/* mark FE's links ready to prune */
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
+ for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
dpcm_be_disconnect(fe, stream);
@@ -3041,8 +3029,7 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
playback = rtd->dai_link->dpcm_playback;
capture = rtd->dai_link->dpcm_capture;
} else {
- for (i = 0; i < rtd->num_codecs; i++) {
- codec_dai = rtd->codec_dais[i];
+ for_each_rtd_codec_dai(rtd, i, codec_dai) {
if (codec_dai->driver->playback.channels_min)
playback = 1;
if (codec_dai->driver->capture.channels_min)
@@ -3230,7 +3217,7 @@ int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
int state;
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -3257,7 +3244,7 @@ int snd_soc_dpcm_can_be_params(struct snd_soc_pcm_runtime *fe,
struct snd_soc_dpcm *dpcm;
int state;
- list_for_each_entry(dpcm, &be->dpcm[stream].fe_clients, list_fe) {
+ for_each_dpcm_fe(be, stream, dpcm) {
if (dpcm->fe == fe)
continue;
@@ -3337,7 +3324,7 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
goto out;
}
- list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) {
+ for_each_dpcm_be(fe, stream, dpcm) {
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
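
The DPCM list walks converted above use wrapper macros over the
fe_clients/be_clients list heads. Paraphrased from
include/sound/soc-dpcm.h (a sketch, not the verbatim definitions):

	#define for_each_dpcm_fe(be, stream, _dpcm) \
		list_for_each_entry(_dpcm, &(be)->dpcm[stream].fe_clients, list_fe)

	#define for_each_dpcm_be(fe, stream, _dpcm) \
		list_for_each_entry(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)

	#define for_each_dpcm_be_safe(fe, stream, _dpcm, __dpcm) \
		list_for_each_entry_safe(_dpcm, __dpcm, \
					 &(fe)->dpcm[stream].be_clients, list_be)

	#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
		list_for_each_entry_continue_reverse(_dpcm, \
				&(fe)->dpcm[stream].be_clients, list_be)

Wrapping the list-head lookups spares every call site from spelling out
the be_clients/fe_clients plumbing, as the deleted lines show.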
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 66e77e020745..045ef136903d 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -993,7 +993,7 @@ static int soc_tplg_denum_create(struct soc_tplg *tplg, unsigned int count,
kfree(se);
continue;
}
- /* fall through and create texts */
+ /* fall through */
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
@@ -1310,7 +1310,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
ec->hdr.name);
goto err_se;
}
- /* fall through to create texts */
+ /* fall through */
case SND_SOC_TPLG_CTL_ENUM:
case SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE:
case SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT:
@@ -1565,17 +1565,6 @@ widget:
widget = snd_soc_dapm_new_control_unlocked(dapm, &template);
if (IS_ERR(widget)) {
ret = PTR_ERR(widget);
- /* Do not nag about probe deferrals */
- if (ret != -EPROBE_DEFER)
- dev_err(tplg->dev,
- "ASoC: failed to create widget %s controls (%d)\n",
- w->name, ret);
- goto hdr_err;
- }
- if (widget == NULL) {
- dev_err(tplg->dev, "ASoC: failed to create widget %s controls\n",
- w->name);
- ret = -ENOMEM;
goto hdr_err;
}
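
This deletion follows the same scheme as the soc-dapm.c hunks above:
snd_soc_dapm_new_control_unlocked() is now expected to log its own
failures, stay silent on -EPROBE_DEFER, and return ERR_PTR(-ENOMEM)
rather than NULL on allocation failure (its own hunk is outside this
excerpt). Every caller then reduces to plain error propagation, e.g.:

	w = snd_soc_dapm_new_control_unlocked(dapm, &template);
	if (IS_ERR(w))
		return PTR_ERR(w);	/* already reported by the DAPM core */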
diff --git a/sound/soc/soc-utils.c b/sound/soc/soc-utils.c
index e0c93496c0cd..e3b9dd634c6d 100644
--- a/sound/soc/soc-utils.c
+++ b/sound/soc/soc-utils.c
@@ -273,13 +273,13 @@ static int dummy_dma_open(struct snd_pcm_substream *substream)
return 0;
}
-static const struct snd_pcm_ops dummy_dma_ops = {
+static const struct snd_pcm_ops snd_dummy_dma_ops = {
.open = dummy_dma_open,
.ioctl = snd_pcm_lib_ioctl,
};
static const struct snd_soc_component_driver dummy_platform = {
- .ops = &dummy_dma_ops,
+ .ops = &snd_dummy_dma_ops,
};
static const struct snd_soc_component_driver dummy_codec = {
diff --git a/sound/soc/stm/Kconfig b/sound/soc/stm/Kconfig
index 9b2681397dba..c66ffa72057e 100644
--- a/sound/soc/stm/Kconfig
+++ b/sound/soc/stm/Kconfig
@@ -3,6 +3,7 @@ menu "STMicroelectronics STM32 SOC audio support"
config SND_SOC_STM32_SAI
tristate "STM32 SAI interface (Serial Audio Interface) support"
depends on (ARCH_STM32 && OF) || COMPILE_TEST
+ depends on COMMON_CLK
depends on SND_SOC
select SND_SOC_GENERIC_DMAENGINE_PCM
select REGMAP_MMIO
diff --git a/sound/soc/stm/stm32_sai.c b/sound/soc/stm/stm32_sai.c
index f22654253c43..d597eba61992 100644
--- a/sound/soc/stm/stm32_sai.c
+++ b/sound/soc/stm/stm32_sai.c
@@ -104,7 +104,7 @@ static int stm32_sai_set_sync(struct stm32_sai_data *sai_client,
if (!pdev) {
dev_err(&sai_client->pdev->dev,
- "Device not found for node %s\n", np_provider->name);
+ "Device not found for node %pOFn\n", np_provider);
return -ENODEV;
}
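
%pOFn is the vsprintf extension for printing a device_node's name from
the node pointer itself; it replaces direct np->name dereferences as the
cached name field is phased out of struct device_node. Usage sketch:

	struct device_node *np = dev->of_node;

	dev_err(dev, "Device not found for node %pOFn\n", np);

(Bare %pOF prints the full node path instead.)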
diff --git a/sound/soc/stm/stm32_sai.h b/sound/soc/stm/stm32_sai.h
index f25422174909..08de899c766b 100644
--- a/sound/soc/stm/stm32_sai.h
+++ b/sound/soc/stm/stm32_sai.h
@@ -91,6 +91,9 @@
#define SAI_XCR1_OSR_SHIFT 26
#define SAI_XCR1_OSR BIT(SAI_XCR1_OSR_SHIFT)
+#define SAI_XCR1_MCKEN_SHIFT 27
+#define SAI_XCR1_MCKEN BIT(SAI_XCR1_MCKEN_SHIFT)
+
/******************* Bit definition for SAI_XCR2 register *******************/
#define SAI_XCR2_FTH_SHIFT 0
#define SAI_XCR2_FTH_MASK GENMASK(2, SAI_XCR2_FTH_SHIFT)
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 06fba9650ac4..ea05cc91aa05 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -17,6 +17,7 @@
*/
#include <linux/clk.h>
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_irq.h>
@@ -68,6 +69,8 @@
#define SAI_IEC60958_BLOCK_FRAMES 192
#define SAI_IEC60958_STATUS_BYTES 24
+#define SAI_MCLK_NAME_LEN 32
+
/**
* struct stm32_sai_sub_data - private data of SAI sub block (block A or B)
* @pdev: device data pointer
@@ -80,6 +83,7 @@
* @pdata: SAI block parent data pointer
* @np_sync_provider: synchronization provider node
* @sai_ck: kernel clock feeding the SAI clock generator
+ * @sai_mclk: master clock from SAI mclk provider
* @phys_addr: SAI registers physical base address
* @mclk_rate: SAI block master clock frequency (Hz). set at init
* @id: SAI sub block id corresponding to sub-block A or B
@@ -110,6 +114,7 @@ struct stm32_sai_sub_data {
struct stm32_sai_data *pdata;
struct device_node *np_sync_provider;
struct clk *sai_ck;
+ struct clk *sai_mclk;
dma_addr_t phys_addr;
unsigned int mclk_rate;
unsigned int id;
@@ -251,6 +256,176 @@ static const struct snd_kcontrol_new iec958_ctls = {
.put = snd_pcm_iec958_put,
};
+struct stm32_sai_mclk_data {
+ struct clk_hw hw;
+ unsigned long freq;
+ struct stm32_sai_sub_data *sai_data;
+};
+
+#define to_mclk_data(_hw) container_of(_hw, struct stm32_sai_mclk_data, hw)
+#define STM32_SAI_MAX_CLKS 1
+
+static int stm32_sai_get_clk_div(struct stm32_sai_sub_data *sai,
+ unsigned long input_rate,
+ unsigned long output_rate)
+{
+ int version = sai->pdata->conf->version;
+ int div;
+
+ div = DIV_ROUND_CLOSEST(input_rate, output_rate);
+ if (!div || div > SAI_XCR1_MCKDIV_MAX(version)) {
+ dev_err(&sai->pdev->dev, "Divider %d out of range\n", div);
+ return -EINVAL;
+ }
+ dev_dbg(&sai->pdev->dev, "SAI divider %d\n", div);
+
+ if (input_rate % div)
+ dev_dbg(&sai->pdev->dev,
+ "Rate not accurate. requested (%ld), actual (%ld)\n",
+ output_rate, input_rate / div);
+
+ return div;
+}
+
+static int stm32_sai_set_clk_div(struct stm32_sai_sub_data *sai,
+ unsigned int div)
+{
+ int version = sai->pdata->conf->version;
+ int ret, cr1, mask;
+
+ if (div > SAI_XCR1_MCKDIV_MAX(version)) {
+ dev_err(&sai->pdev->dev, "Divider %d out of range\n", div);
+ return -EINVAL;
+ }
+
+ mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
+ cr1 = SAI_XCR1_MCKDIV_SET(div);
+ ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
+ if (ret < 0)
+ dev_err(&sai->pdev->dev, "Failed to update CR1 register\n");
+
+ return ret;
+}
+
+static long stm32_sai_mclk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+ int div;
+
+ div = stm32_sai_get_clk_div(sai, *prate, rate);
+ if (div < 0)
+ return div;
+
+ mclk->freq = *prate / div;
+
+ return mclk->freq;
+}
+
+static unsigned long stm32_sai_mclk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+
+ return mclk->freq;
+}
+
+static int stm32_sai_mclk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+ int div;
+ int ret;
+
+ div = stm32_sai_get_clk_div(sai, parent_rate, rate);
+ if (div < 0)
+ return div;
+
+ ret = stm32_sai_set_clk_div(sai, div);
+ if (ret)
+ return ret;
+
+ mclk->freq = rate;
+
+ return 0;
+}
+
+static int stm32_sai_mclk_enable(struct clk_hw *hw)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+
+ dev_dbg(&sai->pdev->dev, "Enable master clock\n");
+
+ return regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
+ SAI_XCR1_MCKEN, SAI_XCR1_MCKEN);
+}
+
+static void stm32_sai_mclk_disable(struct clk_hw *hw)
+{
+ struct stm32_sai_mclk_data *mclk = to_mclk_data(hw);
+ struct stm32_sai_sub_data *sai = mclk->sai_data;
+
+ dev_dbg(&sai->pdev->dev, "Disable master clock\n");
+
+ regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_MCKEN, 0);
+}
+
+static const struct clk_ops mclk_ops = {
+ .enable = stm32_sai_mclk_enable,
+ .disable = stm32_sai_mclk_disable,
+ .recalc_rate = stm32_sai_mclk_recalc_rate,
+ .round_rate = stm32_sai_mclk_round_rate,
+ .set_rate = stm32_sai_mclk_set_rate,
+};
+
+static int stm32_sai_add_mclk_provider(struct stm32_sai_sub_data *sai)
+{
+ struct clk_hw *hw;
+ struct stm32_sai_mclk_data *mclk;
+ struct device *dev = &sai->pdev->dev;
+ const char *pname = __clk_get_name(sai->sai_ck);
+ char *mclk_name, *p, *s = (char *)pname;
+ int ret, i = 0;
+
+ mclk = devm_kzalloc(dev, sizeof(*mclk), GFP_KERNEL);
+ if (!mclk)
+ return -ENOMEM;
+
+ mclk_name = devm_kcalloc(dev, SAI_MCLK_NAME_LEN,
+ sizeof(char), GFP_KERNEL);
+ if (!mclk_name)
+ return -ENOMEM;
+
+ /*
+ * Forge the mclk name from the parent clock name plus an "[ab]_mclk"
+ * suffix; anything past the first "_" in the parent name is dropped.
+ */
+ p = mclk_name;
+ while (*s && *s != '_' && (i < (SAI_MCLK_NAME_LEN - 7))) {
+ *p++ = *s++;
+ i++;
+ }
+ strcat(mclk_name, STM_SAI_IS_SUB_A(sai) ? "a_mclk" : "b_mclk");
+
+ mclk->hw.init = CLK_HW_INIT(mclk_name, pname, &mclk_ops, 0);
+ mclk->sai_data = sai;
+ hw = &mclk->hw;
+
+ dev_dbg(dev, "Register master clock %s\n", mclk_name);
+ ret = devm_clk_hw_register(&sai->pdev->dev, hw);
+ if (ret) {
+ dev_err(dev, "mclk register returned %d\n", ret);
+ return ret;
+ }
+ sai->sai_mclk = hw->clk;
+
+ /* register mclk provider */
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
static irqreturn_t stm32_sai_isr(int irq, void *devid)
{
struct stm32_sai_sub_data *sai = (struct stm32_sai_sub_data *)devid;
@@ -312,15 +487,25 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int ret;
- if ((dir == SND_SOC_CLOCK_OUT) && sai->master) {
+ if (dir == SND_SOC_CLOCK_OUT) {
ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
SAI_XCR1_NODIV,
(unsigned int)~SAI_XCR1_NODIV);
if (ret < 0)
return ret;
- sai->mclk_rate = freq;
dev_dbg(cpu_dai->dev, "SAI MCLK frequency is %uHz\n", freq);
+ sai->mclk_rate = freq;
+
+ if (sai->sai_mclk) {
+ ret = clk_set_rate_exclusive(sai->sai_mclk,
+ sai->mclk_rate);
+ if (ret) {
+ dev_err(cpu_dai->dev,
+ "Could not set mclk rate\n");
+ return ret;
+ }
+ }
}
return 0;
@@ -715,15 +900,9 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
{
struct stm32_sai_sub_data *sai = snd_soc_dai_get_drvdata(cpu_dai);
int cr1, mask, div = 0;
- int sai_clk_rate, mclk_ratio, den, ret;
- int version = sai->pdata->conf->version;
+ int sai_clk_rate, mclk_ratio, den;
unsigned int rate = params_rate(params);
- if (!sai->mclk_rate) {
- dev_err(cpu_dai->dev, "Mclk rate is null\n");
- return -EINVAL;
- }
-
if (!(rate % 11025))
clk_set_parent(sai->sai_ck, sai->pdata->clk_x11k);
else
@@ -731,14 +910,22 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
sai_clk_rate = clk_get_rate(sai->sai_ck);
if (STM_SAI_IS_F4(sai->pdata)) {
- /*
- * mclk_rate = 256 * fs
- * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
- * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
+ /* mclk on (NODIV=0)
+ * mclk_rate = 256 * fs
+ * MCKDIV = 0 if sai_ck < 3/2 * mclk_rate
+ * MCKDIV = sai_ck / (2 * mclk_rate) otherwise
+ * mclk off (NODIV=1)
+ * MCKDIV ignored. sck = sai_ck
*/
- if (2 * sai_clk_rate >= 3 * sai->mclk_rate)
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- 2 * sai->mclk_rate);
+ if (!sai->mclk_rate)
+ return 0;
+
+ if (2 * sai_clk_rate >= 3 * sai->mclk_rate) {
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ 2 * sai->mclk_rate);
+ if (div < 0)
+ return div;
+ }
} else {
/*
* TDM mode :
@@ -750,8 +937,10 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
* Note: NOMCK/NODIV correspond to same bit.
*/
if (STM_SAI_PROTOCOL_IS_SPDIF(sai)) {
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- (params_rate(params) * 128));
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ rate * 128);
+ if (div < 0)
+ return div;
} else {
if (sai->mclk_rate) {
mclk_ratio = sai->mclk_rate / rate;
@@ -764,31 +953,22 @@ static int stm32_sai_configure_clock(struct snd_soc_dai *cpu_dai,
mclk_ratio);
return -EINVAL;
}
- div = DIV_ROUND_CLOSEST(sai_clk_rate,
- sai->mclk_rate);
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ sai->mclk_rate);
+ if (div < 0)
+ return div;
} else {
/* mclk-fs not set, master clock not active */
den = sai->fs_length * params_rate(params);
- div = DIV_ROUND_CLOSEST(sai_clk_rate, den);
+ div = stm32_sai_get_clk_div(sai, sai_clk_rate,
+ den);
+ if (div < 0)
+ return div;
}
}
}
- if (div > SAI_XCR1_MCKDIV_MAX(version)) {
- dev_err(cpu_dai->dev, "Divider %d out of range\n", div);
- return -EINVAL;
- }
- dev_dbg(cpu_dai->dev, "SAI clock %d, divider %d\n", sai_clk_rate, div);
-
- mask = SAI_XCR1_MCKDIV_MASK(SAI_XCR1_MCKDIV_WIDTH(version));
- cr1 = SAI_XCR1_MCKDIV_SET(div);
- ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, mask, cr1);
- if (ret < 0) {
- dev_err(cpu_dai->dev, "Failed to update CR1 register\n");
- return ret;
- }
-
- return 0;
+ return stm32_sai_set_clk_div(sai, div);
}
static int stm32_sai_hw_params(struct snd_pcm_substream *substream,
@@ -881,6 +1061,9 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
SAI_XCR1_NODIV);
clk_disable_unprepare(sai->sai_ck);
+
+ clk_rate_exclusive_put(sai->sai_mclk);
+
sai->substream = NULL;
}
@@ -903,6 +1086,8 @@ static int stm32_sai_dai_probe(struct snd_soc_dai *cpu_dai)
struct stm32_sai_sub_data *sai = dev_get_drvdata(cpu_dai->dev);
int cr1 = 0, cr1_mask;
+ sai->cpu_dai = cpu_dai;
+
sai->dma_params.addr = (dma_addr_t)(sai->phys_addr + STM_SAI_DR_REGX);
/*
* DMA supports 4, 8 or 16 burst sizes. Burst size 4 is the best choice,
@@ -1124,16 +1309,15 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
sai->sync = SAI_SYNC_NONE;
if (args.np) {
if (args.np == np) {
- dev_err(&pdev->dev, "%s sync own reference\n",
- np->name);
+ dev_err(&pdev->dev, "%pOFn sync own reference\n", np);
of_node_put(args.np);
return -EINVAL;
}
sai->np_sync_provider = of_get_parent(args.np);
if (!sai->np_sync_provider) {
- dev_err(&pdev->dev, "%s parent node not found\n",
- np->name);
+ dev_err(&pdev->dev, "%pOFn parent node not found\n",
+ np);
of_node_put(args.np);
return -ENODEV;
}
@@ -1182,6 +1366,23 @@ static int stm32_sai_sub_parse_of(struct platform_device *pdev,
return PTR_ERR(sai->sai_ck);
}
+ if (STM_SAI_IS_F4(sai->pdata))
+ return 0;
+
+ /* Register mclk provider if requested */
+ if (of_find_property(np, "#clock-cells", NULL)) {
+ ret = stm32_sai_add_mclk_provider(sai);
+ if (ret < 0)
+ return ret;
+ } else {
+ sai->sai_mclk = devm_clk_get(&pdev->dev, "MCLK");
+ if (IS_ERR(sai->sai_mclk)) {
+ if (PTR_ERR(sai->sai_mclk) != -ENOENT)
+ return PTR_ERR(sai->sai_mclk);
+ sai->sai_mclk = NULL;
+ }
+ }
+
return 0;
}
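
On the consumer side, a board using the new provider references the SAI
sub-block by phandle and treats the master clock like any other clock.
A hypothetical codec-driver fragment (device names, the "mclk" clock-names
entry, and the rate are invented for illustration):

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	/* Assumes a DT link such as:  clocks = <&sai2a>;  clock-names = "mclk"; */
	static int example_codec_probe(struct platform_device *pdev)
	{
		struct clk *mclk;
		int ret;

		mclk = devm_clk_get(&pdev->dev, "mclk");
		if (IS_ERR(mclk))
			return PTR_ERR(mclk);

		ret = clk_set_rate(mclk, 12288000);	/* 256 * 48 kHz */
		if (ret)
			return ret;

		return clk_prepare_enable(mclk);
	}

clk_set_rate() requests land in stm32_sai_mclk_round_rate()/set_rate()
above, which translate the rate into an MCKDIV divider on the parent
sai_ck.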
diff --git a/sound/soc/sunxi/Kconfig b/sound/soc/sunxi/Kconfig
index 22408bc2d6ec..66aad0d3f9c7 100644
--- a/sound/soc/sunxi/Kconfig
+++ b/sound/soc/sunxi/Kconfig
@@ -12,7 +12,7 @@ config SND_SUN4I_CODEC
config SND_SUN8I_CODEC
tristate "Allwinner SUN8I audio codec"
depends on OF
- depends on MACH_SUN8I || COMPILE_TEST
+ depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
select REGMAP_MMIO
help
This option enables the digital part of the internal audio codec for
@@ -23,11 +23,19 @@ config SND_SUN8I_CODEC
config SND_SUN8I_CODEC_ANALOG
tristate "Allwinner sun8i Codec Analog Controls Support"
depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
- select REGMAP
+ select SND_SUN8I_ADDA_PR_REGMAP
help
Say Y or M if you want to add support for the analog controls for
the codec embedded in newer Allwinner SoCs.
+config SND_SUN50I_CODEC_ANALOG
+ tristate "Allwinner sun50i Codec Analog Controls Support"
+ depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
+ select SND_SUN8I_ADDA_PR_REGMAP
+ help
+ Say Y or M if you want to add support for the analog controls for
+ the codec embedded in Allwinner A64 SoC.
+
config SND_SUN4I_I2S
tristate "Allwinner A10 I2S Support"
select SND_SOC_GENERIC_DMAENGINE_PCM
@@ -45,4 +53,9 @@ config SND_SUN4I_SPDIF
help
Say Y or M to add support for the S/PDIF audio block in the Allwinner
A10 and affiliated SoCs.
+
+config SND_SUN8I_ADDA_PR_REGMAP
+ tristate
+ select REGMAP
+
endmenu
diff --git a/sound/soc/sunxi/Makefile b/sound/soc/sunxi/Makefile
index 4a9ef67386ca..a86be340a076 100644
--- a/sound/soc/sunxi/Makefile
+++ b/sound/soc/sunxi/Makefile
@@ -3,4 +3,6 @@ obj-$(CONFIG_SND_SUN4I_CODEC) += sun4i-codec.o
obj-$(CONFIG_SND_SUN4I_I2S) += sun4i-i2s.o
obj-$(CONFIG_SND_SUN4I_SPDIF) += sun4i-spdif.o
obj-$(CONFIG_SND_SUN8I_CODEC_ANALOG) += sun8i-codec-analog.o
+obj-$(CONFIG_SND_SUN50I_CODEC_ANALOG) += sun50i-codec-analog.o
obj-$(CONFIG_SND_SUN8I_CODEC) += sun8i-codec.o
+obj-$(CONFIG_SND_SUN8I_ADDA_PR_REGMAP) += sun8i-adda-pr-regmap.o
diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
index a4aa931ebfae..d5ec1a20499d 100644
--- a/sound/soc/sunxi/sun4i-i2s.c
+++ b/sound/soc/sunxi/sun4i-i2s.c
@@ -644,40 +644,6 @@ static int sun4i_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
return 0;
}
-static int sun4i_i2s_startup(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- /* Enable the whole hardware block */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_GL_EN, SUN4I_I2S_CTRL_GL_EN);
-
- /* Enable the first output line */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_SDO_EN_MASK,
- SUN4I_I2S_CTRL_SDO_EN(0));
-
-
- return clk_prepare_enable(i2s->mod_clk);
-}
-
-static void sun4i_i2s_shutdown(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct sun4i_i2s *i2s = snd_soc_dai_get_drvdata(dai);
-
- clk_disable_unprepare(i2s->mod_clk);
-
- /* Disable our output lines */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
-
- /* Disable the whole hardware block */
- regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
- SUN4I_I2S_CTRL_GL_EN, 0);
-}
-
static int sun4i_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
@@ -695,8 +661,6 @@ static const struct snd_soc_dai_ops sun4i_i2s_dai_ops = {
.hw_params = sun4i_i2s_hw_params,
.set_fmt = sun4i_i2s_set_fmt,
.set_sysclk = sun4i_i2s_set_sysclk,
- .shutdown = sun4i_i2s_shutdown,
- .startup = sun4i_i2s_startup,
.trigger = sun4i_i2s_trigger,
};
@@ -869,6 +833,21 @@ static int sun4i_i2s_runtime_resume(struct device *dev)
goto err_disable_clk;
}
+ /* Enable the whole hardware block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN, SUN4I_I2S_CTRL_GL_EN);
+
+ /* Enable the first output line */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK,
+ SUN4I_I2S_CTRL_SDO_EN(0));
+
+ ret = clk_prepare_enable(i2s->mod_clk);
+ if (ret) {
+ dev_err(dev, "Failed to enable module clock\n");
+ goto err_disable_clk;
+ }
+
return 0;
err_disable_clk:
@@ -880,6 +859,16 @@ static int sun4i_i2s_runtime_suspend(struct device *dev)
{
struct sun4i_i2s *i2s = dev_get_drvdata(dev);
+ clk_disable_unprepare(i2s->mod_clk);
+
+ /* Disable our output lines */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_SDO_EN_MASK, 0);
+
+ /* Disable the whole hardware block */
+ regmap_update_bits(i2s->regmap, SUN4I_I2S_CTRL_REG,
+ SUN4I_I2S_CTRL_GL_EN, 0);
+
regcache_cache_only(i2s->regmap, true);
clk_disable_unprepare(i2s->bus_clk);
@@ -961,6 +950,23 @@ static const struct sun4i_i2s_quirks sun8i_h3_i2s_quirks = {
.field_rxchansel = REG_FIELD(SUN8I_I2S_RX_CHAN_SEL_REG, 0, 2),
};
+static const struct sun4i_i2s_quirks sun50i_a64_codec_i2s_quirks = {
+ .has_reset = true,
+ .reg_offset_txdata = SUN8I_I2S_FIFO_TX_REG,
+ .sun4i_i2s_regmap = &sun4i_i2s_regmap_config,
+ .has_slave_select_bit = true,
+ .field_clkdiv_mclk_en = REG_FIELD(SUN4I_I2S_CLK_DIV_REG, 7, 7),
+ .field_fmt_wss = REG_FIELD(SUN4I_I2S_FMT0_REG, 2, 3),
+ .field_fmt_sr = REG_FIELD(SUN4I_I2S_FMT0_REG, 4, 5),
+ .field_fmt_bclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 6, 6),
+ .field_fmt_lrclk = REG_FIELD(SUN4I_I2S_FMT0_REG, 7, 7),
+ .field_fmt_mode = REG_FIELD(SUN4I_I2S_FMT0_REG, 0, 1),
+ .field_txchanmap = REG_FIELD(SUN4I_I2S_TX_CHAN_MAP_REG, 0, 31),
+ .field_rxchanmap = REG_FIELD(SUN4I_I2S_RX_CHAN_MAP_REG, 0, 31),
+ .field_txchansel = REG_FIELD(SUN4I_I2S_TX_CHAN_SEL_REG, 0, 2),
+ .field_rxchansel = REG_FIELD(SUN4I_I2S_RX_CHAN_SEL_REG, 0, 2),
+};
+
static int sun4i_i2s_init_regmap_fields(struct device *dev,
struct sun4i_i2s *i2s)
{
@@ -1169,6 +1175,10 @@ static const struct of_device_id sun4i_i2s_match[] = {
.compatible = "allwinner,sun8i-h3-i2s",
.data = &sun8i_h3_i2s_quirks,
},
+ {
+ .compatible = "allwinner,sun50i-a64-codec-i2s",
+ .data = &sun50i_a64_codec_i2s_quirks,
+ },
{}
};
MODULE_DEVICE_TABLE(of, sun4i_i2s_match);
diff --git a/sound/soc/sunxi/sun50i-codec-analog.c b/sound/soc/sunxi/sun50i-codec-analog.c
new file mode 100644
index 000000000000..8f5f999df631
--- /dev/null
+++ b/sound/soc/sunxi/sun50i-codec-analog.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver supports the analog controls for the internal codec
+ * found in Allwinner's A64 SoC.
+ *
+ * Copyright (C) 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2017 Marcus Cooper <codekipper@gmail.com>
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ *
+ * Based on sun8i-codec-analog.c
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/tlv.h>
+
+#include "sun8i-adda-pr-regmap.h"
+
+/* Codec analog control register offsets and bit fields */
+#define SUN50I_ADDA_HP_CTRL 0x00
+#define SUN50I_ADDA_HP_CTRL_PA_CLK_GATE 7
+#define SUN50I_ADDA_HP_CTRL_HPPA_EN 6
+#define SUN50I_ADDA_HP_CTRL_HPVOL 0
+
+#define SUN50I_ADDA_OL_MIX_CTRL 0x01
+#define SUN50I_ADDA_OL_MIX_CTRL_MIC1 6
+#define SUN50I_ADDA_OL_MIX_CTRL_MIC2 5
+#define SUN50I_ADDA_OL_MIX_CTRL_PHONE 4
+#define SUN50I_ADDA_OL_MIX_CTRL_PHONEN 3
+#define SUN50I_ADDA_OL_MIX_CTRL_LINEINL 2
+#define SUN50I_ADDA_OL_MIX_CTRL_DACL 1
+#define SUN50I_ADDA_OL_MIX_CTRL_DACR 0
+
+#define SUN50I_ADDA_OR_MIX_CTRL 0x02
+#define SUN50I_ADDA_OR_MIX_CTRL_MIC1 6
+#define SUN50I_ADDA_OR_MIX_CTRL_MIC2 5
+#define SUN50I_ADDA_OR_MIX_CTRL_PHONE 4
+#define SUN50I_ADDA_OR_MIX_CTRL_PHONEP 3
+#define SUN50I_ADDA_OR_MIX_CTRL_LINEINR 2
+#define SUN50I_ADDA_OR_MIX_CTRL_DACR 1
+#define SUN50I_ADDA_OR_MIX_CTRL_DACL 0
+
+#define SUN50I_ADDA_LINEOUT_CTRL0 0x05
+#define SUN50I_ADDA_LINEOUT_CTRL0_LEN 7
+#define SUN50I_ADDA_LINEOUT_CTRL0_REN 6
+#define SUN50I_ADDA_LINEOUT_CTRL0_LSRC_SEL 5
+#define SUN50I_ADDA_LINEOUT_CTRL0_RSRC_SEL 4
+
+#define SUN50I_ADDA_LINEOUT_CTRL1 0x06
+#define SUN50I_ADDA_LINEOUT_CTRL1_VOL 0
+
+#define SUN50I_ADDA_MIC1_CTRL 0x07
+#define SUN50I_ADDA_MIC1_CTRL_MIC1G 4
+#define SUN50I_ADDA_MIC1_CTRL_MIC1AMPEN 3
+#define SUN50I_ADDA_MIC1_CTRL_MIC1BOOST 0
+
+#define SUN50I_ADDA_MIC2_CTRL 0x08
+#define SUN50I_ADDA_MIC2_CTRL_MIC2G 4
+#define SUN50I_ADDA_MIC2_CTRL_MIC2AMPEN 3
+#define SUN50I_ADDA_MIC2_CTRL_MIC2BOOST 0
+
+#define SUN50I_ADDA_LINEIN_CTRL 0x09
+#define SUN50I_ADDA_LINEIN_CTRL_LINEING 0
+
+#define SUN50I_ADDA_MIX_DAC_CTRL 0x0a
+#define SUN50I_ADDA_MIX_DAC_CTRL_DACAREN 7
+#define SUN50I_ADDA_MIX_DAC_CTRL_DACALEN 6
+#define SUN50I_ADDA_MIX_DAC_CTRL_RMIXEN 5
+#define SUN50I_ADDA_MIX_DAC_CTRL_LMIXEN 4
+#define SUN50I_ADDA_MIX_DAC_CTRL_RHPPAMUTE 3
+#define SUN50I_ADDA_MIX_DAC_CTRL_LHPPAMUTE 2
+#define SUN50I_ADDA_MIX_DAC_CTRL_RHPIS 1
+#define SUN50I_ADDA_MIX_DAC_CTRL_LHPIS 0
+
+#define SUN50I_ADDA_L_ADCMIX_SRC 0x0b
+#define SUN50I_ADDA_L_ADCMIX_SRC_MIC1 6
+#define SUN50I_ADDA_L_ADCMIX_SRC_MIC2 5
+#define SUN50I_ADDA_L_ADCMIX_SRC_PHONE 4
+#define SUN50I_ADDA_L_ADCMIX_SRC_PHONEN 3
+#define SUN50I_ADDA_L_ADCMIX_SRC_LINEINL 2
+#define SUN50I_ADDA_L_ADCMIX_SRC_OMIXRL 1
+#define SUN50I_ADDA_L_ADCMIX_SRC_OMIXRR 0
+
+#define SUN50I_ADDA_R_ADCMIX_SRC 0x0c
+#define SUN50I_ADDA_R_ADCMIX_SRC_MIC1 6
+#define SUN50I_ADDA_R_ADCMIX_SRC_MIC2 5
+#define SUN50I_ADDA_R_ADCMIX_SRC_PHONE 4
+#define SUN50I_ADDA_R_ADCMIX_SRC_PHONEP 3
+#define SUN50I_ADDA_R_ADCMIX_SRC_LINEINR 2
+#define SUN50I_ADDA_R_ADCMIX_SRC_OMIXR 1
+#define SUN50I_ADDA_R_ADCMIX_SRC_OMIXL 0
+
+#define SUN50I_ADDA_ADC_CTRL 0x0d
+#define SUN50I_ADDA_ADC_CTRL_ADCREN 7
+#define SUN50I_ADDA_ADC_CTRL_ADCLEN 6
+#define SUN50I_ADDA_ADC_CTRL_ADCG 0
+
+#define SUN50I_ADDA_HS_MBIAS_CTRL 0x0e
+#define SUN50I_ADDA_HS_MBIAS_CTRL_MMICBIASEN 7
+
+#define SUN50I_ADDA_JACK_MIC_CTRL 0x1d
+#define SUN50I_ADDA_JACK_MIC_CTRL_HMICBIASEN 5
+
+/* mixer controls */
+static const struct snd_kcontrol_new sun50i_a64_codec_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("DAC Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_DACL, 1, 0),
+ SOC_DAPM_DOUBLE_R("DAC Reversed Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_DACR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Playback Switch",
+ SUN50I_ADDA_OL_MIX_CTRL,
+ SUN50I_ADDA_OR_MIX_CTRL,
+ SUN50I_ADDA_OL_MIX_CTRL_MIC2, 1, 0),
+};
+
+/* ADC mixer controls */
+static const struct snd_kcontrol_new sun50i_codec_adc_mixer_controls[] = {
+ SOC_DAPM_DOUBLE_R("Mixer Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_OMIXRL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mixer Reversed Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_OMIXRR, 1, 0),
+ SOC_DAPM_DOUBLE_R("Line In Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_LINEINL, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic1 Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_MIC1, 1, 0),
+ SOC_DAPM_DOUBLE_R("Mic2 Capture Switch",
+ SUN50I_ADDA_L_ADCMIX_SRC,
+ SUN50I_ADDA_R_ADCMIX_SRC,
+ SUN50I_ADDA_L_ADCMIX_SRC_MIC2, 1, 0),
+};
+
+static const DECLARE_TLV_DB_SCALE(sun50i_codec_out_mixer_pregain_scale,
+ -450, 150, 0);
+static const DECLARE_TLV_DB_RANGE(sun50i_codec_mic_gain_scale,
+ 0, 0, TLV_DB_SCALE_ITEM(0, 0, 0),
+ 1, 7, TLV_DB_SCALE_ITEM(2400, 300, 0),
+);
+
+static const DECLARE_TLV_DB_SCALE(sun50i_codec_hp_vol_scale, -6300, 100, 1);
+
+static const DECLARE_TLV_DB_RANGE(sun50i_codec_lineout_vol_scale,
+ 0, 1, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1),
+ 2, 31, TLV_DB_SCALE_ITEM(-4350, 150, 0),
+);
+
+
+/* volume / mute controls */
+static const struct snd_kcontrol_new sun50i_a64_codec_controls[] = {
+ SOC_SINGLE_TLV("Headphone Playback Volume",
+ SUN50I_ADDA_HP_CTRL,
+ SUN50I_ADDA_HP_CTRL_HPVOL, 0x3f, 0,
+ sun50i_codec_hp_vol_scale),
+
+ SOC_DOUBLE("Headphone Playback Switch",
+ SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LHPPAMUTE,
+ SUN50I_ADDA_MIX_DAC_CTRL_RHPPAMUTE, 1, 0),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Mic1 Playback Volume", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1G,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gain */
+ SOC_SINGLE_TLV("Mic1 Boost Volume", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1BOOST, 0x7, 0,
+ sun50i_codec_mic_gain_scale),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Mic2 Playback Volume",
+ SUN50I_ADDA_MIC2_CTRL, SUN50I_ADDA_MIC2_CTRL_MIC2G,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ /* Microphone Amp boost gain */
+ SOC_SINGLE_TLV("Mic2 Boost Volume", SUN50I_ADDA_MIC2_CTRL,
+ SUN50I_ADDA_MIC2_CTRL_MIC2BOOST, 0x7, 0,
+ sun50i_codec_mic_gain_scale),
+
+ /* ADC */
+ SOC_SINGLE_TLV("ADC Gain Capture Volume", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCG, 0x7, 0,
+ sun50i_codec_out_mixer_pregain_scale),
+
+ /* Mixer pre-gain */
+ SOC_SINGLE_TLV("Line In Playback Volume", SUN50I_ADDA_LINEIN_CTRL,
+ SUN50I_ADDA_LINEIN_CTRL_LINEING,
+ 0x7, 0, sun50i_codec_out_mixer_pregain_scale),
+
+ SOC_SINGLE_TLV("Line Out Playback Volume",
+ SUN50I_ADDA_LINEOUT_CTRL1,
+ SUN50I_ADDA_LINEOUT_CTRL1_VOL, 0x1f, 0,
+ sun50i_codec_lineout_vol_scale),
+
+ SOC_DOUBLE("Line Out Playback Switch",
+ SUN50I_ADDA_LINEOUT_CTRL0,
+ SUN50I_ADDA_LINEOUT_CTRL0_LEN,
+ SUN50I_ADDA_LINEOUT_CTRL0_REN, 1, 0),
+
+};
+
+static const char * const sun50i_codec_hp_src_enum_text[] = {
+ "DAC", "Mixer",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun50i_codec_hp_src_enum,
+ SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LHPIS,
+ SUN50I_ADDA_MIX_DAC_CTRL_RHPIS,
+ sun50i_codec_hp_src_enum_text);
+
+static const struct snd_kcontrol_new sun50i_codec_hp_src[] = {
+ SOC_DAPM_ENUM("Headphone Source Playback Route",
+ sun50i_codec_hp_src_enum),
+};
+
+static const char * const sun50i_codec_lineout_src_enum_text[] = {
+ "Stereo", "Mono Differential",
+};
+
+static SOC_ENUM_DOUBLE_DECL(sun50i_codec_lineout_src_enum,
+ SUN50I_ADDA_LINEOUT_CTRL0,
+ SUN50I_ADDA_LINEOUT_CTRL0_LSRC_SEL,
+ SUN50I_ADDA_LINEOUT_CTRL0_RSRC_SEL,
+ sun50i_codec_lineout_src_enum_text);
+
+static const struct snd_kcontrol_new sun50i_codec_lineout_src[] = {
+ SOC_DAPM_ENUM("Line Out Source Playback Route",
+ sun50i_codec_lineout_src_enum),
+};
+
+static const struct snd_soc_dapm_widget sun50i_a64_codec_widgets[] = {
+ /* DAC */
+ SND_SOC_DAPM_DAC("Left DAC", NULL, SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_DACALEN, 0),
+ SND_SOC_DAPM_DAC("Right DAC", NULL, SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_DACAREN, 0),
+ /* ADC */
+ SND_SOC_DAPM_ADC("Left ADC", NULL, SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCLEN, 0),
+ SND_SOC_DAPM_ADC("Right ADC", NULL, SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCREN, 0),
+ /*
+ * Due to this component and the codec belonging to separate DAPM
+ * contexts, we need to manually link the above widgets to their
+ * stream widgets at the card level.
+ */
+
+ SND_SOC_DAPM_MUX("Headphone Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun50i_codec_hp_src),
+ SND_SOC_DAPM_OUT_DRV("Headphone Amp", SUN50I_ADDA_HP_CTRL,
+ SUN50I_ADDA_HP_CTRL_HPPA_EN, 0, NULL, 0),
+ SND_SOC_DAPM_OUTPUT("HP"),
+
+ SND_SOC_DAPM_MUX("Line Out Source Playback Route",
+ SND_SOC_NOPM, 0, 0, sun50i_codec_lineout_src),
+ SND_SOC_DAPM_OUTPUT("LINEOUT"),
+
+ /* Microphone inputs */
+ SND_SOC_DAPM_INPUT("MIC1"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("MBIAS", SUN50I_ADDA_HS_MBIAS_CTRL,
+ SUN50I_ADDA_HS_MBIAS_CTRL_MMICBIASEN,
+ 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic1 Amplifier", SUN50I_ADDA_MIC1_CTRL,
+ SUN50I_ADDA_MIC1_CTRL_MIC1AMPEN, 0, NULL, 0),
+
+ /* Microphone input */
+ SND_SOC_DAPM_INPUT("MIC2"),
+
+ /* Microphone Bias */
+ SND_SOC_DAPM_SUPPLY("HBIAS", SUN50I_ADDA_JACK_MIC_CTRL,
+ SUN50I_ADDA_JACK_MIC_CTRL_HMICBIASEN,
+ 0, NULL, 0),
+
+ /* Mic input path */
+ SND_SOC_DAPM_PGA("Mic2 Amplifier", SUN50I_ADDA_MIC2_CTRL,
+ SUN50I_ADDA_MIC2_CTRL_MIC2AMPEN, 0, NULL, 0),
+
+ /* Line input */
+ SND_SOC_DAPM_INPUT("LINEIN"),
+
+ /* Mixers */
+ SND_SOC_DAPM_MIXER("Left Mixer", SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_LMIXEN, 0,
+ sun50i_a64_codec_mixer_controls,
+ ARRAY_SIZE(sun50i_a64_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right Mixer", SUN50I_ADDA_MIX_DAC_CTRL,
+ SUN50I_ADDA_MIX_DAC_CTRL_RMIXEN, 0,
+ sun50i_a64_codec_mixer_controls,
+ ARRAY_SIZE(sun50i_a64_codec_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Left ADC Mixer", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCLEN, 0,
+ sun50i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun50i_codec_adc_mixer_controls)),
+ SND_SOC_DAPM_MIXER("Right ADC Mixer", SUN50I_ADDA_ADC_CTRL,
+ SUN50I_ADDA_ADC_CTRL_ADCREN, 0,
+ sun50i_codec_adc_mixer_controls,
+ ARRAY_SIZE(sun50i_codec_adc_mixer_controls)),
+};
+
+static const struct snd_soc_dapm_route sun50i_a64_codec_routes[] = {
+ /* Left Mixer Routes */
+ { "Left Mixer", "DAC Playback Switch", "Left DAC" },
+ { "Left Mixer", "DAC Reversed Playback Switch", "Right DAC" },
+ { "Left Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+
+ /* Right Mixer Routes */
+ { "Right Mixer", "DAC Playback Switch", "Right DAC" },
+ { "Right Mixer", "DAC Reversed Playback Switch", "Left DAC" },
+ { "Right Mixer", "Mic1 Playback Switch", "Mic1 Amplifier" },
+
+ /* Left ADC Mixer Routes */
+ { "Left ADC Mixer", "Mixer Capture Switch", "Left Mixer" },
+ { "Left ADC Mixer", "Mixer Reversed Capture Switch", "Right Mixer" },
+ { "Left ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+
+ /* Right ADC Mixer Routes */
+ { "Right ADC Mixer", "Mixer Capture Switch", "Right Mixer" },
+ { "Right ADC Mixer", "Mixer Reversed Capture Switch", "Left Mixer" },
+ { "Right ADC Mixer", "Mic1 Capture Switch", "Mic1 Amplifier" },
+
+ /* ADC Routes */
+ { "Left ADC", NULL, "Left ADC Mixer" },
+ { "Right ADC", NULL, "Right ADC Mixer" },
+
+ /* Headphone Routes */
+ { "Headphone Source Playback Route", "DAC", "Left DAC" },
+ { "Headphone Source Playback Route", "DAC", "Right DAC" },
+ { "Headphone Source Playback Route", "Mixer", "Left Mixer" },
+ { "Headphone Source Playback Route", "Mixer", "Right Mixer" },
+ { "Headphone Amp", NULL, "Headphone Source Playback Route" },
+ { "HP", NULL, "Headphone Amp" },
+
+ /* Microphone Routes */
+ { "Mic1 Amplifier", NULL, "MIC1"},
+
+ /* Microphone Routes */
+ { "Mic2 Amplifier", NULL, "MIC2"},
+ { "Left Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+ { "Right Mixer", "Mic2 Playback Switch", "Mic2 Amplifier" },
+ { "Left ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+ { "Right ADC Mixer", "Mic2 Capture Switch", "Mic2 Amplifier" },
+
+ /* Line-in Routes */
+ { "Left Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Right Mixer", "Line In Playback Switch", "LINEIN" },
+ { "Left ADC Mixer", "Line In Capture Switch", "LINEIN" },
+ { "Right ADC Mixer", "Line In Capture Switch", "LINEIN" },
+
+ /* Line-out Routes */
+ { "Line Out Source Playback Route", "Stereo", "Left Mixer" },
+ { "Line Out Source Playback Route", "Stereo", "Right Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential", "Left Mixer" },
+ { "Line Out Source Playback Route", "Mono Differential",
+ "Right Mixer" },
+ { "LINEOUT", NULL, "Line Out Source Playback Route" },
+};
+
+static const struct snd_soc_component_driver sun50i_codec_analog_cmpnt_drv = {
+ .controls = sun50i_a64_codec_controls,
+ .num_controls = ARRAY_SIZE(sun50i_a64_codec_controls),
+ .dapm_widgets = sun50i_a64_codec_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(sun50i_a64_codec_widgets),
+ .dapm_routes = sun50i_a64_codec_routes,
+ .num_dapm_routes = ARRAY_SIZE(sun50i_a64_codec_routes),
+};
+
+static const struct of_device_id sun50i_codec_analog_of_match[] = {
+ {
+ .compatible = "allwinner,sun50i-a64-codec-analog",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun50i_codec_analog_of_match);
+
+static int sun50i_codec_analog_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct regmap *regmap;
+ void __iomem *base;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ dev_err(&pdev->dev, "Failed to map the registers\n");
+ return PTR_ERR(base);
+ }
+
+ regmap = sun8i_adda_pr_regmap_init(&pdev->dev, base);
+ if (IS_ERR(regmap)) {
+ dev_err(&pdev->dev, "Failed to create regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return devm_snd_soc_register_component(&pdev->dev,
+ &sun50i_codec_analog_cmpnt_drv,
+ NULL, 0);
+}
+
+static struct platform_driver sun50i_codec_analog_driver = {
+ .driver = {
+ .name = "sun50i-codec-analog",
+ .of_match_table = sun50i_codec_analog_of_match,
+ },
+ .probe = sun50i_codec_analog_probe,
+};
+module_platform_driver(sun50i_codec_analog_driver);
+
+MODULE_DESCRIPTION("Allwinner internal codec analog controls driver for A64");
+MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sun50i-codec-analog");
diff --git a/sound/soc/sunxi/sun8i-adda-pr-regmap.c b/sound/soc/sunxi/sun8i-adda-pr-regmap.c
new file mode 100644
index 000000000000..e68ce9d2884d
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-adda-pr-regmap.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * This driver provides a regmap for accessing the analog part of the
+ * audio codec found on Allwinner A23, A31s, A33, H3 and A64 SoCs.
+ *
+ * Copyright 2016 Chen-Yu Tsai <wens@csie.org>
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ */
+
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include "sun8i-adda-pr-regmap.h"
+
+/* Analog control register access bits */
+#define ADDA_PR 0x0 /* PRCM base + 0x1c0 */
+#define ADDA_PR_RESET BIT(28)
+#define ADDA_PR_WRITE BIT(24)
+#define ADDA_PR_ADDR_SHIFT 16
+#define ADDA_PR_ADDR_MASK GENMASK(4, 0)
+#define ADDA_PR_DATA_IN_SHIFT 8
+#define ADDA_PR_DATA_IN_MASK GENMASK(7, 0)
+#define ADDA_PR_DATA_OUT_SHIFT 0
+#define ADDA_PR_DATA_OUT_MASK GENMASK(7, 0)
+
+/* regmap access bits */
+static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Read back value */
+ *val = readl(base) & ADDA_PR_DATA_OUT_MASK;
+
+ return 0;
+}
+
+static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ void __iomem *base = (void __iomem *)context;
+ u32 tmp;
+
+ /* De-assert reset */
+ writel(readl(base) | ADDA_PR_RESET, base);
+
+ /* Set register address */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
+ tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
+ writel(tmp, base);
+
+ /* Set data to write */
+ tmp = readl(base);
+ tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
+ tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
+ writel(tmp, base);
+
+ /* Set write bit to signal a write */
+ writel(readl(base) | ADDA_PR_WRITE, base);
+
+ /* Clear write bit */
+ writel(readl(base) & ~ADDA_PR_WRITE, base);
+
+ return 0;
+}
+
+static const struct regmap_config adda_pr_regmap_cfg = {
+ .name = "adda-pr",
+ .reg_bits = 5,
+ .reg_stride = 1,
+ .val_bits = 8,
+ .reg_read = adda_reg_read,
+ .reg_write = adda_reg_write,
+ .fast_io = true,
+ .max_register = 31,
+};
+
+struct regmap *sun8i_adda_pr_regmap_init(struct device *dev,
+ void __iomem *base)
+{
+ return devm_regmap_init(dev, NULL, base, &adda_pr_regmap_cfg);
+}
+EXPORT_SYMBOL_GPL(sun8i_adda_pr_regmap_init);
+
+MODULE_DESCRIPTION("Allwinner analog audio codec regmap driver");
+MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sunxi-adda-pr");
diff --git a/sound/soc/sunxi/sun8i-adda-pr-regmap.h b/sound/soc/sunxi/sun8i-adda-pr-regmap.h
new file mode 100644
index 000000000000..a5ae95dfebc1
--- /dev/null
+++ b/sound/soc/sunxi/sun8i-adda-pr-regmap.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2018 Vasily Khoruzhick <anarsoul@gmail.com>
+ */
+
+struct regmap *sun8i_adda_pr_regmap_init(struct device *dev,
+ void __iomem *base);
diff --git a/sound/soc/sunxi/sun8i-codec-analog.c b/sound/soc/sunxi/sun8i-codec-analog.c
index 485e79f292c4..916a46bbc1c8 100644
--- a/sound/soc/sunxi/sun8i-codec-analog.c
+++ b/sound/soc/sunxi/sun8i-codec-analog.c
@@ -27,6 +27,8 @@
#include <sound/soc-dapm.h>
#include <sound/tlv.h>
+#include "sun8i-adda-pr-regmap.h"
+
/* Codec analog control register offsets and bit fields */
#define SUN8I_ADDA_HP_VOLC 0x00
#define SUN8I_ADDA_HP_VOLC_PA_CLK_GATE 7
@@ -120,81 +122,6 @@
#define SUN8I_ADDA_ADC_AP_EN_ADCLEN 6
#define SUN8I_ADDA_ADC_AP_EN_ADCG 0
-/* Analog control register access bits */
-#define ADDA_PR 0x0 /* PRCM base + 0x1c0 */
-#define ADDA_PR_RESET BIT(28)
-#define ADDA_PR_WRITE BIT(24)
-#define ADDA_PR_ADDR_SHIFT 16
-#define ADDA_PR_ADDR_MASK GENMASK(4, 0)
-#define ADDA_PR_DATA_IN_SHIFT 8
-#define ADDA_PR_DATA_IN_MASK GENMASK(7, 0)
-#define ADDA_PR_DATA_OUT_SHIFT 0
-#define ADDA_PR_DATA_OUT_MASK GENMASK(7, 0)
-
-/* regmap access bits */
-static int adda_reg_read(void *context, unsigned int reg, unsigned int *val)
-{
- void __iomem *base = (void __iomem *)context;
- u32 tmp;
-
- /* De-assert reset */
- writel(readl(base) | ADDA_PR_RESET, base);
-
- /* Clear write bit */
- writel(readl(base) & ~ADDA_PR_WRITE, base);
-
- /* Set register address */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
- tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
- writel(tmp, base);
-
- /* Read back value */
- *val = readl(base) & ADDA_PR_DATA_OUT_MASK;
-
- return 0;
-}
-
-static int adda_reg_write(void *context, unsigned int reg, unsigned int val)
-{
- void __iomem *base = (void __iomem *)context;
- u32 tmp;
-
- /* De-assert reset */
- writel(readl(base) | ADDA_PR_RESET, base);
-
- /* Set register address */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_ADDR_MASK << ADDA_PR_ADDR_SHIFT);
- tmp |= (reg & ADDA_PR_ADDR_MASK) << ADDA_PR_ADDR_SHIFT;
- writel(tmp, base);
-
- /* Set data to write */
- tmp = readl(base);
- tmp &= ~(ADDA_PR_DATA_IN_MASK << ADDA_PR_DATA_IN_SHIFT);
- tmp |= (val & ADDA_PR_DATA_IN_MASK) << ADDA_PR_DATA_IN_SHIFT;
- writel(tmp, base);
-
- /* Set write bit to signal a write */
- writel(readl(base) | ADDA_PR_WRITE, base);
-
- /* Clear write bit */
- writel(readl(base) & ~ADDA_PR_WRITE, base);
-
- return 0;
-}
-
-static const struct regmap_config adda_pr_regmap_cfg = {
- .name = "adda-pr",
- .reg_bits = 5,
- .reg_stride = 1,
- .val_bits = 8,
- .reg_read = adda_reg_read,
- .reg_write = adda_reg_write,
- .fast_io = true,
- .max_register = 24,
-};
-
/* mixer controls */
static const struct snd_kcontrol_new sun8i_codec_mixer_controls[] = {
SOC_DAPM_DOUBLE_R("DAC Playback Switch",
@@ -912,7 +839,7 @@ static int sun8i_codec_analog_probe(struct platform_device *pdev)
return PTR_ERR(base);
}
- regmap = devm_regmap_init(&pdev->dev, NULL, base, &adda_pr_regmap_cfg);
+ regmap = sun8i_adda_pr_regmap_init(&pdev->dev, base);
if (IS_ERR(regmap)) {
dev_err(&pdev->dev, "Failed to create regmap\n");
return PTR_ERR(regmap);
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index fb37dd927e33..522a72fde78d 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -24,6 +24,7 @@
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
+#include <linux/log2.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
@@ -52,7 +53,6 @@
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_INV 13
#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV 9
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV 6
-#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16 (1 << 6)
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ 4
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_16 (1 << 4)
#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT 2
@@ -300,12 +300,23 @@ static u8 sun8i_codec_get_bclk_div(struct sun8i_codec *scodec,
return best_val;
}
+static int sun8i_codec_get_lrck_div(unsigned int channels,
+ unsigned int word_size)
+{
+ unsigned int div = word_size * channels;
+
+ if (div < 16 || div > 256)
+ return -EINVAL;
+
+ return ilog2(div) - 4;
+}
+
static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct sun8i_codec *scodec = snd_soc_component_get_drvdata(dai->component);
- int sample_rate;
+ int sample_rate, lrck_div;
u8 bclk_div;
/*
@@ -321,9 +332,14 @@ static int sun8i_codec_hw_params(struct snd_pcm_substream *substream,
SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK,
bclk_div << SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV);
+ lrck_div = sun8i_codec_get_lrck_div(params_channels(params),
+ params_physical_width(params));
+ if (lrck_div < 0)
+ return lrck_div;
+
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK,
- SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_16);
+ lrck_div << SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV);
sample_rate = sun8i_codec_get_hw_rate(params);
if (sample_rate < 0)
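
The new helper replaces the hardcoded LRCK divider: div = word_size * channels must land in [16, 256], and the register field encodes it as ilog2(div) - 4, so 2 channels of 16-bit samples give div = 32 and a field value of 1. Below is a standalone userspace restatement of the arithmetic; ilog2() floors, so this assumes div is a power of two, which holds for the 16/32-bit, 1/2-channel cases the driver sees.

#include <assert.h>

/* Mirror of sun8i_codec_get_lrck_div(), with an open-coded log2
 * in place of the kernel's ilog2(). */
static int lrck_div(unsigned int channels, unsigned int word_size)
{
	unsigned int div = word_size * channels;
	int log = 0;

	if (div < 16 || div > 256)
		return -1;
	while (div > 1) {
		div >>= 1;
		log++;
	}
	return log - 4;
}

int main(void)
{
	assert(lrck_div(2, 16) == 1);	/* div = 32 -> field value 1 */
	assert(lrck_div(2, 32) == 2);	/* div = 64 -> field value 2 */
	assert(lrck_div(1, 8) == -1);	/* div = 8 is below the valid range */
	return 0;
}
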
diff --git a/sound/soc/tegra/tegra_sgtl5000.c b/sound/soc/tegra/tegra_sgtl5000.c
index 45a4aa9d2a47..901457da25ec 100644
--- a/sound/soc/tegra/tegra_sgtl5000.c
+++ b/sound/soc/tegra/tegra_sgtl5000.c
@@ -149,14 +149,14 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Property 'nvidia,i2s-controller' missing/invalid\n");
ret = -EINVAL;
- goto err;
+ goto err_put_codec_of_node;
}
tegra_sgtl5000_dai.platform_of_node = tegra_sgtl5000_dai.cpu_of_node;
ret = tegra_asoc_utils_init(&machine->util_data, &pdev->dev);
if (ret)
- goto err;
+ goto err_put_cpu_of_node;
ret = snd_soc_register_card(card);
if (ret) {
@@ -169,6 +169,13 @@ static int tegra_sgtl5000_driver_probe(struct platform_device *pdev)
err_fini_utils:
tegra_asoc_utils_fini(&machine->util_data);
+err_put_cpu_of_node:
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+err_put_codec_of_node:
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
err:
return ret;
}
@@ -183,6 +190,12 @@ static int tegra_sgtl5000_driver_remove(struct platform_device *pdev)
tegra_asoc_utils_fini(&machine->util_data);
+ of_node_put(tegra_sgtl5000_dai.cpu_of_node);
+ tegra_sgtl5000_dai.cpu_of_node = NULL;
+ tegra_sgtl5000_dai.platform_of_node = NULL;
+ of_node_put(tegra_sgtl5000_dai.codec_of_node);
+ tegra_sgtl5000_dai.codec_of_node = NULL;
+
return ret;
}
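
The fix follows the standard mirrored-unwind idiom: each acquisition in probe gains a label in reverse order, so failing at step N releases exactly what steps 1..N-1 took, and remove() repeats the same puts. A generic sketch with placeholder helpers (every foo_* name is invented):

#include <linux/platform_device.h>

static int foo_get_codec_node(struct platform_device *pdev) { return 0; }
static int foo_get_cpu_node(struct platform_device *pdev) { return 0; }
static int foo_register_card(struct platform_device *pdev) { return 0; }
static void foo_put_cpu_node(struct platform_device *pdev) { }
static void foo_put_codec_node(struct platform_device *pdev) { }

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_get_codec_node(pdev);		/* acquisition 1 */
	if (ret)
		return ret;			/* nothing to unwind yet */

	ret = foo_get_cpu_node(pdev);		/* acquisition 2 */
	if (ret)
		goto err_put_codec;

	ret = foo_register_card(pdev);		/* acquisition 3 */
	if (ret)
		goto err_put_cpu;

	return 0;

err_put_cpu:
	foo_put_cpu_node(pdev);
err_put_codec:
	foo_put_codec_node(pdev);
	return ret;
}
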
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index e2ad00e3cae1..1cfca698ae4b 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -208,13 +208,12 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
if (err < 0)
return err;
- return snd_soc_register_component(&pdev->dev, &txx9aclc_ac97_component,
+ return devm_snd_soc_register_component(&pdev->dev, &txx9aclc_ac97_component,
&txx9aclc_ac97_dai, 1);
}
static int txx9aclc_ac97_dev_remove(struct platform_device *pdev)
{
- snd_soc_unregister_component(&pdev->dev);
snd_soc_set_ac97_ops(NULL);
return 0;
}
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index d55ca48de3ea..f4a72e39ffa9 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -200,6 +200,7 @@ static void usb_ep1_command_reply_dispatch (struct urb* urb)
break;
}
#ifdef CONFIG_SND_USB_CAIAQ_INPUT
+ /* fall through */
case EP1_CMD_READ_ERP:
case EP1_CMD_READ_ANALOG:
snd_usb_caiaq_input_dispatch(cdev, buf, urb->actual_length);
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index dcfc546d81b9..b737f0ec77d0 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -1175,8 +1175,7 @@ static void snd_usbmidi_output_trigger(struct snd_rawmidi_substream *substream,
if (port->ep->umidi->disconnected) {
/* gobble up remaining bytes to prevent wait in
* snd_rawmidi_drain_output */
- while (!snd_rawmidi_transmit_empty(substream))
- snd_rawmidi_transmit_ack(substream, 1);
+ snd_rawmidi_proceed(substream);
return;
}
tasklet_schedule(&port->ep->tasklet);
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index cbfb48bdea51..85ae0ff2382a 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -29,6 +29,7 @@
#include <linux/hid.h>
#include <linux/init.h>
+#include <linux/math64.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
@@ -1817,6 +1818,380 @@ static int dell_dock_mixer_init(struct usb_mixer_interface *mixer)
return 0;
}
+/* RME Class Compliant device quirks */
+
+#define SND_RME_GET_STATUS1 23
+#define SND_RME_GET_CURRENT_FREQ 17
+#define SND_RME_CLK_SYSTEM_SHIFT 16
+#define SND_RME_CLK_SYSTEM_MASK 0x1f
+#define SND_RME_CLK_AES_SHIFT 8
+#define SND_RME_CLK_SPDIF_SHIFT 12
+#define SND_RME_CLK_AES_SPDIF_MASK 0xf
+#define SND_RME_CLK_SYNC_SHIFT 6
+#define SND_RME_CLK_SYNC_MASK 0x3
+#define SND_RME_CLK_FREQMUL_SHIFT 18
+#define SND_RME_CLK_FREQMUL_MASK 0x7
+#define SND_RME_CLK_SYSTEM(x) \
+ ((x >> SND_RME_CLK_SYSTEM_SHIFT) & SND_RME_CLK_SYSTEM_MASK)
+#define SND_RME_CLK_AES(x) \
+ ((x >> SND_RME_CLK_AES_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+#define SND_RME_CLK_SPDIF(x) \
+ ((x >> SND_RME_CLK_SPDIF_SHIFT) & SND_RME_CLK_AES_SPDIF_MASK)
+#define SND_RME_CLK_SYNC(x) \
+ ((x >> SND_RME_CLK_SYNC_SHIFT) & SND_RME_CLK_SYNC_MASK)
+#define SND_RME_CLK_FREQMUL(x) \
+ ((x >> SND_RME_CLK_FREQMUL_SHIFT) & SND_RME_CLK_FREQMUL_MASK)
+#define SND_RME_CLK_AES_LOCK 0x1
+#define SND_RME_CLK_AES_SYNC 0x4
+#define SND_RME_CLK_SPDIF_LOCK 0x2
+#define SND_RME_CLK_SPDIF_SYNC 0x8
+#define SND_RME_SPDIF_IF_SHIFT 4
+#define SND_RME_SPDIF_FORMAT_SHIFT 5
+#define SND_RME_BINARY_MASK 0x1
+#define SND_RME_SPDIF_IF(x) \
+ ((x >> SND_RME_SPDIF_IF_SHIFT) & SND_RME_BINARY_MASK)
+#define SND_RME_SPDIF_FORMAT(x) \
+ ((x >> SND_RME_SPDIF_FORMAT_SHIFT) & SND_RME_BINARY_MASK)
+
+static const u32 snd_rme_rate_table[] = {
+ 32000, 44100, 48000, 50000,
+ 64000, 88200, 96000, 100000,
+ 128000, 176400, 192000, 200000,
+ 256000, 352800, 384000, 400000,
+ 512000, 705600, 768000, 800000
+};
+/* maximum number of valid AES and S/PDIF rate entries in the above table */
+#define SND_RME_RATE_IDX_AES_SPDIF_NUM 12
+
+enum snd_rme_domain {
+ SND_RME_DOMAIN_SYSTEM,
+ SND_RME_DOMAIN_AES,
+ SND_RME_DOMAIN_SPDIF
+};
+
+enum snd_rme_clock_status {
+ SND_RME_CLOCK_NOLOCK,
+ SND_RME_CLOCK_LOCK,
+ SND_RME_CLOCK_SYNC
+};
+
+static int snd_rme_read_value(struct snd_usb_audio *chip,
+ unsigned int item,
+ u32 *value)
+{
+ struct usb_device *dev = chip->dev;
+ int err;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0),
+ item,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0, 0,
+ value, sizeof(*value));
+ if (err < 0)
+ dev_err(&dev->dev,
+ "unable to issue vendor read request %d (ret = %d)",
+ item, err);
+ return err;
+}
+
+static int snd_rme_get_status1(struct snd_kcontrol *kcontrol,
+ u32 *status1)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
+ int err;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_rme_read_value(chip, SND_RME_GET_STATUS1, status1);
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_rme_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ u32 rate = 0;
+ int idx;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_SYSTEM:
+ idx = SND_RME_CLK_SYSTEM(status1);
+ if (idx < ARRAY_SIZE(snd_rme_rate_table))
+ rate = snd_rme_rate_table[idx];
+ break;
+ case SND_RME_DOMAIN_AES:
+ idx = SND_RME_CLK_AES(status1);
+ if (idx < SND_RME_RATE_IDX_AES_SPDIF_NUM)
+ rate = snd_rme_rate_table[idx];
+ break;
+ case SND_RME_DOMAIN_SPDIF:
+ idx = SND_RME_CLK_SPDIF(status1);
+ if (idx < SND_RME_RATE_IDX_AES_SPDIF_NUM)
+ rate = snd_rme_rate_table[idx];
+ break;
+ default:
+ return -EINVAL;
+ }
+ ucontrol->value.integer.value[0] = rate;
+ return 0;
+}
+
+static int snd_rme_sync_state_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int idx = SND_RME_CLOCK_NOLOCK;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_AES: /* AES */
+ if (status1 & SND_RME_CLK_AES_SYNC)
+ idx = SND_RME_CLOCK_SYNC;
+ else if (status1 & SND_RME_CLK_AES_LOCK)
+ idx = SND_RME_CLOCK_LOCK;
+ break;
+ case SND_RME_DOMAIN_SPDIF: /* SPDIF */
+ if (status1 & SND_RME_CLK_SPDIF_SYNC)
+ idx = SND_RME_CLOCK_SYNC;
+ else if (status1 & SND_RME_CLK_SPDIF_LOCK)
+ idx = SND_RME_CLOCK_LOCK;
+ break;
+ default:
+ return -EINVAL;
+ }
+ ucontrol->value.enumerated.item[0] = idx;
+ return 0;
+}
+
+static int snd_rme_spdif_if_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_SPDIF_IF(status1);
+ return 0;
+}
+
+static int snd_rme_spdif_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_SPDIF_FORMAT(status1);
+ return 0;
+}
+
+static int snd_rme_sync_source_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ u32 status1;
+ int err;
+
+ err = snd_rme_get_status1(kcontrol, &status1);
+ if (err < 0)
+ return err;
+ ucontrol->value.enumerated.item[0] = SND_RME_CLK_SYNC(status1);
+ return 0;
+}
+
+static int snd_rme_current_freq_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_list *list = snd_kcontrol_chip(kcontrol);
+ struct snd_usb_audio *chip = list->mixer->chip;
+ u32 status1;
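+	/*
+	 * 104857600000000ULL = 2^20 * 10^8. Dividing it by the raw counter
+	 * read below is assumed to yield the base rate in Hz before the
+	 * FREQMUL shift; the exact device scaling is not documented here.
+	 */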
+ const u64 num = 104857600000000ULL;
+ u32 den;
+ unsigned int freq;
+ int err;
+
+ err = snd_usb_lock_shutdown(chip);
+ if (err < 0)
+ return err;
+ err = snd_rme_read_value(chip, SND_RME_GET_STATUS1, &status1);
+ if (err < 0)
+ goto end;
+ err = snd_rme_read_value(chip, SND_RME_GET_CURRENT_FREQ, &den);
+ if (err < 0)
+ goto end;
+ freq = (den == 0) ? 0 : div64_u64(num, den);
+ freq <<= SND_RME_CLK_FREQMUL(status1);
+ ucontrol->value.integer.value[0] = freq;
+
+end:
+ snd_usb_unlock_shutdown(chip);
+ return err;
+}
+
+static int snd_rme_rate_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ switch (kcontrol->private_value) {
+ case SND_RME_DOMAIN_SYSTEM:
+ uinfo->value.integer.min = 32000;
+ uinfo->value.integer.max = 800000;
+ break;
+ case SND_RME_DOMAIN_AES:
+ case SND_RME_DOMAIN_SPDIF:
+ default:
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 200000;
+ }
+ uinfo->value.integer.step = 0;
+ return 0;
+}
+
+static int snd_rme_sync_state_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const sync_states[] = {
+ "No Lock", "Lock", "Sync"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(sync_states), sync_states);
+}
+
+static int snd_rme_spdif_if_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const spdif_if[] = {
+ "Coaxial", "Optical"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(spdif_if), spdif_if);
+}
+
+static int snd_rme_spdif_format_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const optical_type[] = {
+ "Consumer", "Professional"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(optical_type), optical_type);
+}
+
+static int snd_rme_sync_source_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ static const char *const sync_sources[] = {
+ "Internal", "AES", "SPDIF", "Internal"
+ };
+
+ return snd_ctl_enum_info(uinfo, 1,
+ ARRAY_SIZE(sync_sources), sync_sources);
+}
+
+static struct snd_kcontrol_new snd_rme_controls[] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "AES Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_AES
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "AES Sync",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_state_info,
+ .get = snd_rme_sync_state_get,
+ .private_value = SND_RME_DOMAIN_AES
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_SPDIF
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Sync",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_state_info,
+ .get = snd_rme_sync_state_get,
+ .private_value = SND_RME_DOMAIN_SPDIF
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Interface",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_spdif_if_info,
+ .get = snd_rme_spdif_if_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "SPDIF Format",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_spdif_format_info,
+ .get = snd_rme_spdif_format_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sync Source",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_sync_source_info,
+ .get = snd_rme_sync_source_get
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "System Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_rate_get,
+ .private_value = SND_RME_DOMAIN_SYSTEM
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Current Frequency",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = snd_rme_rate_info,
+ .get = snd_rme_current_freq_get
+ }
+};
+
+static int snd_rme_controls_create(struct usb_mixer_interface *mixer)
+{
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(snd_rme_controls); ++i) {
+ err = add_single_ctl_with_resume(mixer, 0,
+ NULL,
+ &snd_rme_controls[i],
+ NULL);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
{
int err = 0;
@@ -1904,6 +2279,12 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
case USB_ID(0x0bda, 0x4014): /* Dell WD15 dock */
err = dell_dock_mixer_init(mixer);
break;
+
+ case USB_ID(0x2a39, 0x3fd2): /* RME ADI-2 Pro */
+ case USB_ID(0x2a39, 0x3fd3): /* RME ADI-2 DAC */
+ case USB_ID(0x2a39, 0x3fd4): /* RME */
+ err = snd_rme_controls_create(mixer);
+ break;
}
return err;
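
The SND_RME_* macros above simply shift and mask read-only fields out of a single status word. A standalone illustration of the decode on a made-up status1 value (the field positions are the patch's own; the sample value is invented):

#include <stdint.h>
#include <stdio.h>

/* First twelve entries of snd_rme_rate_table, enough for this sample. */
static const uint32_t rate_table[] = {
	32000, 44100, 48000, 50000,
	64000, 88200, 96000, 100000,
	128000, 176400, 192000, 200000,
};

int main(void)
{
	uint32_t status1 = (2u << 16) | (1u << 12) | (2u << 6);

	/* SND_RME_CLK_SYSTEM(): bits 16..20 (the driver bounds-checks idx) */
	printf("system rate: %u Hz\n", rate_table[(status1 >> 16) & 0x1f]);
	/* SND_RME_CLK_SPDIF(): bits 12..15 */
	printf("S/PDIF rate: %u Hz\n", rate_table[(status1 >> 12) & 0xf]);
	/* SND_RME_CLK_SYNC(): bits 6..7 -> 2 = "SPDIF" sync source */
	printf("sync source: %u\n", (status1 >> 6) & 0x3);
	return 0;
}
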
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 08aa78007020..849953e5775c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -3346,19 +3346,14 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
.ifnum = 0,
.type = QUIRK_AUDIO_STANDARD_MIXER,
},
- /* Capture */
- {
- .ifnum = 1,
- .type = QUIRK_IGNORE_INTERFACE,
- },
/* Playback */
{
- .ifnum = 2,
+ .ifnum = 1,
.type = QUIRK_AUDIO_FIXED_ENDPOINT,
.data = &(const struct audioformat) {
.formats = SNDRV_PCM_FMTBIT_S16_LE,
.channels = 2,
- .iface = 2,
+ .iface = 1,
.altsetting = 1,
.altset_idx = 1,
.attributes = UAC_EP_CS_ATTR_FILL_MAX |
diff --git a/sound/x86/intel_hdmi_audio.c b/sound/x86/intel_hdmi_audio.c
index fa7dca5a68c8..83d76c345940 100644
--- a/sound/x86/intel_hdmi_audio.c
+++ b/sound/x86/intel_hdmi_audio.c
@@ -30,7 +30,6 @@
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
-#include <asm/set_memory.h>
#include <sound/core.h>
#include <sound/asoundef.h>
#include <sound/pcm.h>
@@ -1141,8 +1140,7 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
struct snd_intelhad *intelhaddata;
- unsigned long addr;
- int pages, buf_size, retval;
+ int buf_size, retval;
intelhaddata = snd_pcm_substream_chip(substream);
buf_size = params_buffer_bytes(hw_params);
@@ -1151,17 +1149,6 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
return retval;
dev_dbg(intelhaddata->dev, "%s:allocated memory = %d\n",
__func__, buf_size);
- /* mark the pages as uncached region */
- addr = (unsigned long) substream->runtime->dma_area;
- pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
- retval = set_memory_uc(addr, pages);
- if (retval) {
- dev_err(intelhaddata->dev, "set_memory_uc failed.Error:%d\n",
- retval);
- return retval;
- }
- memset(substream->runtime->dma_area, 0, buf_size);
-
return retval;
}
@@ -1171,21 +1158,11 @@ static int had_pcm_hw_params(struct snd_pcm_substream *substream,
static int had_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct snd_intelhad *intelhaddata;
- unsigned long addr;
- u32 pages;
intelhaddata = snd_pcm_substream_chip(substream);
had_do_reset(intelhaddata);
- /* mark back the pages as cached/writeback region before the free */
- if (substream->runtime->dma_area != NULL) {
- addr = (unsigned long) substream->runtime->dma_area;
- pages = (substream->runtime->dma_bytes + PAGE_SIZE - 1) /
- PAGE_SIZE;
- set_memory_wb(addr, pages);
- return snd_pcm_lib_free_pages(substream);
- }
- return 0;
+ return snd_pcm_lib_free_pages(substream);
}
/*
@@ -1860,7 +1837,7 @@ static int hdmi_lpe_audio_probe(struct platform_device *pdev)
* try to allocate 600k buffer as default which is large enough
*/
snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_DEV, NULL,
+ SNDRV_DMA_TYPE_DEV_UC, NULL,
HAD_DEFAULT_BUFFER, HAD_MAX_BUFFER);
/* create controls */
diff --git a/sound/xen/xen_snd_front_alsa.c b/sound/xen/xen_snd_front_alsa.c
index 129180e17db1..2cbd9679aca1 100644
--- a/sound/xen/xen_snd_front_alsa.c
+++ b/sound/xen/xen_snd_front_alsa.c
@@ -637,31 +637,31 @@ static int alsa_pb_fill_silence(struct snd_pcm_substream *substream,
* to know when the buffer can be transferred to the backend.
*/
-static struct snd_pcm_ops snd_drv_alsa_playback_ops = {
- .open = alsa_open,
- .close = alsa_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alsa_hw_params,
- .hw_free = alsa_hw_free,
- .prepare = alsa_prepare,
- .trigger = alsa_trigger,
- .pointer = alsa_pointer,
- .copy_user = alsa_pb_copy_user,
- .copy_kernel = alsa_pb_copy_kernel,
- .fill_silence = alsa_pb_fill_silence,
+static const struct snd_pcm_ops snd_drv_alsa_playback_ops = {
+ .open = alsa_open,
+ .close = alsa_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = alsa_hw_params,
+ .hw_free = alsa_hw_free,
+ .prepare = alsa_prepare,
+ .trigger = alsa_trigger,
+ .pointer = alsa_pointer,
+ .copy_user = alsa_pb_copy_user,
+ .copy_kernel = alsa_pb_copy_kernel,
+ .fill_silence = alsa_pb_fill_silence,
};
-static struct snd_pcm_ops snd_drv_alsa_capture_ops = {
- .open = alsa_open,
- .close = alsa_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = alsa_hw_params,
- .hw_free = alsa_hw_free,
- .prepare = alsa_prepare,
- .trigger = alsa_trigger,
- .pointer = alsa_pointer,
- .copy_user = alsa_cap_copy_user,
- .copy_kernel = alsa_cap_copy_kernel,
+static const struct snd_pcm_ops snd_drv_alsa_capture_ops = {
+ .open = alsa_open,
+ .close = alsa_close,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = alsa_hw_params,
+ .hw_free = alsa_hw_free,
+ .prepare = alsa_prepare,
+ .trigger = alsa_trigger,
+ .pointer = alsa_pointer,
+ .copy_user = alsa_cap_copy_user,
+ .copy_kernel = alsa_cap_copy_kernel,
};
static int new_pcm_instance(struct xen_snd_front_card_info *card_info,
diff --git a/tools/Makefile b/tools/Makefile
index be02c8b904db..abb358a70ad0 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -21,6 +21,7 @@ help:
@echo ' leds - LEDs tools'
@echo ' liblockdep - user-space wrapper for kernel locking-validator'
@echo ' bpf - misc BPF tools'
+ @echo ' pci - PCI tools'
@echo ' perf - Linux performance measurement and analysis tool'
@echo ' selftests - various kernel selftests'
@echo ' spi - spi tools'
@@ -59,7 +60,7 @@ acpi: FORCE
cpupower: FORCE
$(call descend,power/$@)
-cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi: FORCE
+cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi pci: FORCE
$(call descend,$@)
liblockdep: FORCE
@@ -94,7 +95,7 @@ kvm_stat: FORCE
all: acpi cgroup cpupower gpio hv firewire liblockdep \
perf selftests spi turbostat usb \
virtio vm bpf x86_energy_perf_policy \
- tmon freefall iio objtool kvm_stat wmi
+ tmon freefall iio objtool kvm_stat wmi pci
acpi_install:
$(call descend,power/$(@:_install=),install)
@@ -102,7 +103,7 @@ acpi_install:
cpupower_install:
$(call descend,power/$(@:_install=),install)
-cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install:
+cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install pci_install:
$(call descend,$(@:_install=),install)
liblockdep_install:
@@ -128,7 +129,7 @@ install: acpi_install cgroup_install cpupower_install gpio_install \
perf_install selftests_install turbostat_install usb_install \
virtio_install vm_install bpf_install x86_energy_perf_policy_install \
tmon_install freefall_install objtool_install kvm_stat_install \
- wmi_install
+ wmi_install pci_install
acpi_clean:
$(call descend,power/acpi,clean)
@@ -136,7 +137,7 @@ acpi_clean:
cpupower_clean:
$(call descend,power/cpupower,clean)
-cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean:
+cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean pci_clean:
$(call descend,$(@:_clean=),clean)
liblockdep_clean:
@@ -174,6 +175,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \
perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \
vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \
freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \
- gpio_clean objtool_clean leds_clean wmi_clean
+ gpio_clean objtool_clean leds_clean wmi_clean pci_clean
.PHONY: FORCE
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index fd23d5778ea1..8a6eff9c27f3 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -288,6 +288,7 @@ struct kvm_reinject_control {
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008
+#define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010
/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
@@ -299,7 +300,10 @@ struct kvm_vcpu_events {
__u8 injected;
__u8 nr;
__u8 has_error_code;
- __u8 pad;
+ union {
+ __u8 pad;
+ __u8 pending;
+ };
__u32 error_code;
} exception;
struct {
@@ -322,7 +326,9 @@ struct kvm_vcpu_events {
__u8 smm_inside_nmi;
__u8 latched_init;
} smi;
- __u32 reserved[9];
+ __u8 reserved[27];
+ __u8 exception_has_payload;
+ __u64 exception_payload;
};
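
Userspace is expected to read the new payload fields only when the kernel flags them valid. A hedged sketch of the consumer side, assuming vcpu_fd comes from the usual KVM_CREATE_VCPU plumbing and that the matching capability has been negotiated on the VM:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int dump_pending_exception(int vcpu_fd)
{
	struct kvm_vcpu_events ev;

	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &ev) < 0)
		return -1;

	/* Only trust the payload when the kernel marks it valid. */
	if ((ev.flags & KVM_VCPUEVENT_VALID_PAYLOAD) &&
	    ev.exception_has_payload)
		printf("exception %u pending, payload 0x%llx\n",
		       ev.exception.nr,
		       (unsigned long long)ev.exception_payload);
	return 0;
}
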
/* for KVM_GET/SET_DEBUGREGS */
diff --git a/tools/crypto/getstat.c b/tools/crypto/getstat.c
new file mode 100644
index 000000000000..24115173a483
--- /dev/null
+++ b/tools/crypto/getstat.c
@@ -0,0 +1,294 @@
+/* Heavily copied from libkcapi 2015 - 2017, Stephan Mueller <smueller@chronox.de> */
+#include <errno.h>
+#include <linux/cryptouser.h>
+#include <linux/netlink.h>
+#include <linux/rtnetlink.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+
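+/*
+ * Step over the fixed crypto_user_alg payload of a netlink reply to
+ * reach its first rtattr attribute.
+ */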
+#define CR_RTA(x) ((struct rtattr *)(((char *)(x)) + NLMSG_ALIGN(sizeof(struct crypto_user_alg))))
+
+static int get_stat(const char *drivername)
+{
+ struct {
+ struct nlmsghdr n;
+ struct crypto_user_alg cru;
+ } req;
+ struct sockaddr_nl nl;
+ int sd = 0, ret;
+ socklen_t addr_len;
+ struct iovec iov;
+ struct msghdr msg;
+ char buf[4096];
+ struct nlmsghdr *res_n = (struct nlmsghdr *)buf;
+ struct crypto_user_alg *cru_res = NULL;
+ int res_len = 0;
+ struct rtattr *tb[CRYPTOCFGA_MAX + 1];
+ struct rtattr *rta;
+ struct nlmsgerr *errmsg;
+
+ memset(&req, 0, sizeof(req));
+ memset(&buf, 0, sizeof(buf));
+ memset(&msg, 0, sizeof(msg));
+
+ req.n.nlmsg_len = NLMSG_LENGTH(sizeof(req.cru));
+ req.n.nlmsg_flags = NLM_F_REQUEST;
+ req.n.nlmsg_type = CRYPTO_MSG_GETSTAT;
+ req.n.nlmsg_seq = time(NULL);
+
+	strncpy(req.cru.cru_driver_name, drivername,
+		sizeof(req.cru.cru_driver_name) - 1);
+
+ sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);
+ if (sd < 0) {
+ fprintf(stderr, "Netlink error: cannot open netlink socket");
+ return -errno;
+ }
+ memset(&nl, 0, sizeof(nl));
+ nl.nl_family = AF_NETLINK;
+ if (bind(sd, (struct sockaddr *)&nl, sizeof(nl)) < 0) {
+ ret = -errno;
+ fprintf(stderr, "Netlink error: cannot bind netlink socket");
+ goto out;
+ }
+
+ /* sanity check that netlink socket was successfully opened */
+ addr_len = sizeof(nl);
+ if (getsockname(sd, (struct sockaddr *)&nl, &addr_len) < 0) {
+ ret = -errno;
+ printf("Netlink error: cannot getsockname");
+ goto out;
+ }
+ if (addr_len != sizeof(nl)) {
+ ret = -errno;
+ printf("Netlink error: wrong address length %d", addr_len);
+ goto out;
+ }
+ if (nl.nl_family != AF_NETLINK) {
+ ret = -errno;
+ printf("Netlink error: wrong address family %d",
+ nl.nl_family);
+ goto out;
+ }
+
+ memset(&nl, 0, sizeof(nl));
+ nl.nl_family = AF_NETLINK;
+ iov.iov_base = (void *)&req.n;
+ iov.iov_len = req.n.nlmsg_len;
+ msg.msg_name = &nl;
+ msg.msg_namelen = sizeof(nl);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ if (sendmsg(sd, &msg, 0) < 0) {
+ ret = -errno;
+ printf("Netlink error: sendmsg failed");
+ goto out;
+ }
+ memset(buf, 0, sizeof(buf));
+ iov.iov_base = buf;
+ while (1) {
+ iov.iov_len = sizeof(buf);
+ ret = recvmsg(sd, &msg, 0);
+ if (ret < 0) {
+ if (errno == EINTR || errno == EAGAIN)
+ continue;
+ ret = -errno;
+ printf("Netlink error: netlink receive error");
+ goto out;
+ }
+ if (ret == 0) {
+ ret = -errno;
+ printf("Netlink error: no data");
+ goto out;
+ }
+ if (ret > sizeof(buf)) {
+ ret = -errno;
+ printf("Netlink error: received too much data");
+ goto out;
+ }
+ break;
+ }
+
+ ret = -EFAULT;
+ res_len = res_n->nlmsg_len;
+ if (res_n->nlmsg_type == NLMSG_ERROR) {
+ errmsg = NLMSG_DATA(res_n);
+ fprintf(stderr, "Fail with %d\n", errmsg->error);
+ ret = errmsg->error;
+ goto out;
+ }
+
+ if (res_n->nlmsg_type == CRYPTO_MSG_GETSTAT) {
+ cru_res = NLMSG_DATA(res_n);
+ res_len -= NLMSG_SPACE(sizeof(*cru_res));
+ }
+ if (res_len < 0) {
+ printf("Netlink error: nlmsg len %d\n", res_len);
+ goto out;
+ }
+
+ if (!cru_res) {
+ ret = -EFAULT;
+ printf("Netlink error: no cru_res\n");
+ goto out;
+ }
+
+ rta = CR_RTA(cru_res);
+ memset(tb, 0, sizeof(struct rtattr *) * (CRYPTOCFGA_MAX + 1));
+ while (RTA_OK(rta, res_len)) {
+ if ((rta->rta_type <= CRYPTOCFGA_MAX) && (!tb[rta->rta_type]))
+ tb[rta->rta_type] = rta;
+ rta = RTA_NEXT(rta, res_len);
+ }
+ if (res_len) {
+ printf("Netlink error: unprocessed data %d",
+ res_len);
+ goto out;
+ }
+
+ if (tb[CRYPTOCFGA_STAT_HASH]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_HASH];
+ struct crypto_stat *rhash =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tHash\n\tHash: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rhash->stat_hash_cnt, rhash->stat_hash_tlen,
+ rhash->stat_hash_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_COMPRESS]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_COMPRESS];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tCompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_compress_cnt, rblk->stat_compress_tlen,
+ rblk->stat_decompress_cnt, rblk->stat_decompress_tlen,
+ rblk->stat_compress_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_ACOMP]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_ACOMP];
+ struct crypto_stat *rcomp =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tACompress\n\tCompress: %u bytes: %llu\n\tDecompress: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rcomp->stat_compress_cnt, rcomp->stat_compress_tlen,
+ rcomp->stat_decompress_cnt, rcomp->stat_decompress_tlen,
+ rcomp->stat_compress_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_AEAD]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_AEAD];
+ struct crypto_stat *raead =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tAEAD\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ raead->stat_encrypt_cnt, raead->stat_encrypt_tlen,
+ raead->stat_decrypt_cnt, raead->stat_decrypt_tlen,
+ raead->stat_aead_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_BLKCIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_BLKCIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tCipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_cipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_AKCIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_AKCIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tAkcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tSign: %u\n\tVerify: %u\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_sign_cnt, rblk->stat_verify_cnt,
+ rblk->stat_akcipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_CIPHER]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_CIPHER];
+ struct crypto_stat *rblk =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tcipher\n\tEncrypt: %u bytes: %llu\n\tDecrypt: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rblk->stat_encrypt_cnt, rblk->stat_encrypt_tlen,
+ rblk->stat_decrypt_cnt, rblk->stat_decrypt_tlen,
+ rblk->stat_cipher_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_RNG]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_RNG];
+ struct crypto_stat *rrng =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tRNG\n\tSeed: %u\n\tGenerate: %u bytes: %llu\n\tErrors: %u\n",
+ drivername,
+ rrng->stat_seed_cnt,
+ rrng->stat_generate_cnt, rrng->stat_generate_tlen,
+ rrng->stat_rng_err_cnt);
+ } else if (tb[CRYPTOCFGA_STAT_KPP]) {
+ struct rtattr *rta = tb[CRYPTOCFGA_STAT_KPP];
+ struct crypto_stat *rkpp =
+ (struct crypto_stat *)RTA_DATA(rta);
+ printf("%s\tKPP\n\tSetsecret: %u\n\tGenerate public key: %u\n\tCompute_shared_secret: %u\n\tErrors: %u\n",
+ drivername,
+ rkpp->stat_setsecret_cnt,
+ rkpp->stat_generate_public_key_cnt,
+ rkpp->stat_compute_shared_secret_cnt,
+ rkpp->stat_kpp_err_cnt);
+ } else {
+ fprintf(stderr, "%s is of an unknown algorithm\n", drivername);
+ }
+ ret = 0;
+out:
+ close(sd);
+ return ret;
+}
+
+int main(int argc, const char *argv[])
+{
+ char buf[4096];
+ FILE *procfd;
+ int i, lastspace;
+ int ret = 0; /* stays 0 if /proc/crypto lists no drivers */
+
+ procfd = fopen("/proc/crypto", "r");
+ if (!procfd) {
+ ret = errno;
+ fprintf(stderr, "Cannot open /proc/crypto %s\n", strerror(errno));
+ return ret;
+ }
+ if (argc > 1) {
+ if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
+ printf("Usage: %s [-h|--help] display this help\n", argv[0]);
+ printf("Usage: %s display all crypto statistics\n", argv[0]);
+ printf("Usage: %s drivername1 drivername2 ... = display crypto statistics about drivername1 ...\n", argv[0]);
+ return 0;
+ }
+ for (i = 1; i < argc; i++) {
+ ret = get_stat(argv[i]);
+ if (ret) {
+ fprintf(stderr, "Failed with %s\n", strerror(-ret));
+ return ret;
+ }
+ }
+ return 0;
+ }
+
+ while (fgets(buf, sizeof(buf), procfd)) {
+ if (!strncmp(buf, "driver", 6)) {
+ lastspace = 0;
+ for (i = 0; buf[i]; i++)
+ if (buf[i] == ' ')
+ lastspace = i;
+ buf[strlen(buf) - 1] = '\0';
+ ret = get_stat(buf + lastspace + 1);
+ if (ret) {
+ fprintf(stderr, "Failed with %s\n", strerror(-ret));
+ goto out;
+ }
+ }
+ }
+out:
+ fclose(procfd);
+ return ret;
+}
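
For context, the reply parsed above is the answer to a CRYPTO_MSG_GETSTAT
request sent over NETLINK_CRYPTO. A minimal sketch of the query side, assuming
the struct crypto_user_alg layout from <linux/cryptouser.h> that this series
relies on (error handling elided; needs <sys/socket.h>, <linux/netlink.h>,
<linux/cryptouser.h> and <string.h>):

    struct {
        struct nlmsghdr hdr;
        struct crypto_user_alg alg;
    } req = {
        .hdr.nlmsg_len   = NLMSG_LENGTH(sizeof(req.alg)),
        .hdr.nlmsg_type  = CRYPTO_MSG_GETSTAT,
        .hdr.nlmsg_flags = NLM_F_REQUEST,
    };
    int sd = socket(AF_NETLINK, SOCK_RAW, NETLINK_CRYPTO);

    /* select one driver; the kernel replies with the matching attributes */
    strncpy(req.alg.cru_driver_name, drivername,
            sizeof(req.alg.cru_driver_name) - 1);
    send(sd, &req, req.hdr.nlmsg_len, 0);
    /* recv() then yields the nlmsghdr + rtattr stream walked via CR_RTA() above */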
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 251be353f950..2875ce85b322 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -719,6 +719,7 @@ struct kvm_ppc_one_seg_page_size {
#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
#define KVM_PPC_1T_SEGMENTS 0x00000002
+#define KVM_PPC_NO_HASH 0x00000004
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -953,6 +954,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_NESTED_STATE 157
#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#define KVM_CAP_MSR_PLATFORM_INFO 159
+#define KVM_CAP_PPC_NESTED_HV 160
+#define KVM_CAP_HYPERV_SEND_IPI 161
+#define KVM_CAP_COALESCED_PIO 162
+#define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/pci/Build b/tools/pci/Build
new file mode 100644
index 000000000000..c375aea21790
--- /dev/null
+++ b/tools/pci/Build
@@ -0,0 +1 @@
+pcitest-y += pcitest.o
diff --git a/tools/pci/Makefile b/tools/pci/Makefile
new file mode 100644
index 000000000000..46e4c2f318c9
--- /dev/null
+++ b/tools/pci/Makefile
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+include ../scripts/Makefile.include
+
+bindir ?= /usr/bin
+
+ifeq ($(srctree),)
+srctree := $(patsubst %/,%,$(dir $(CURDIR)))
+srctree := $(patsubst %/,%,$(dir $(srctree)))
+endif
+
+# Do not use make's built-in rules
+# (this improves performance and avoids hard-to-debug behaviour)
+MAKEFLAGS += -r
+
+CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include
+
+ALL_TARGETS := pcitest pcitest.sh
+ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS))
+
+all: $(ALL_PROGRAMS)
+
+export srctree OUTPUT CC LD CFLAGS
+include $(srctree)/tools/build/Makefile.include
+
+#
+# We need the following to be outside of the kernel tree
+#
+$(OUTPUT)include/linux/: ../../include/uapi/linux/
+ mkdir -p $(OUTPUT)include/linux/ 2>/dev/null || true
+ ln -sf $(CURDIR)/../../include/uapi/linux/pcitest.h $@
+
+prepare: $(OUTPUT)include/linux/
+
+PCITEST_IN := $(OUTPUT)pcitest-in.o
+$(PCITEST_IN): prepare FORCE
+ $(Q)$(MAKE) $(build)=pcitest
+$(OUTPUT)pcitest: $(PCITEST_IN)
+ $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@
+
+clean:
+ rm -f $(ALL_PROGRAMS)
+ rm -rf $(OUTPUT)include/
+ find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete
+
+install: $(ALL_PROGRAMS)
+ install -d -m 755 $(DESTDIR)$(bindir); \
+ for program in $(ALL_PROGRAMS); do \
+ install $$program $(DESTDIR)$(bindir); \
+ done
+
+FORCE:
+
+.PHONY: all install clean FORCE prepare
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
index af146bb03b4d..ec4d51f3308b 100644
--- a/tools/pci/pcitest.c
+++ b/tools/pci/pcitest.c
@@ -23,7 +23,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
-#include <time.h>
#include <unistd.h>
#include <linux/pcitest.h>
@@ -48,17 +47,15 @@ struct pci_test {
unsigned long size;
};
-static int run_test(struct pci_test *test)
+static void run_test(struct pci_test *test)
{
long ret;
int fd;
- struct timespec start, end;
- double time;
fd = open(test->device, O_RDWR);
if (fd < 0) {
perror("can't open PCI Endpoint Test device");
- return fd;
+ return;
}
if (test->barnum >= 0 && test->barnum <= 5) {
diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
index 853b95d1e139..2011376c7ab5 100644
--- a/tools/perf/arch/powerpc/util/book3s_hv_exits.h
+++ b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
@@ -15,7 +15,6 @@
{0x400, "INST_STORAGE"}, \
{0x480, "INST_SEGMENT"}, \
{0x500, "EXTERNAL"}, \
- {0x501, "EXTERNAL_LEVEL"}, \
{0x502, "EXTERNAL_HV"}, \
{0x600, "ALIGNMENT"}, \
{0x700, "PROGRAM"}, \
diff --git a/tools/testing/nvdimm/Kbuild b/tools/testing/nvdimm/Kbuild
index 0392153a0009..778ceb651000 100644
--- a/tools/testing/nvdimm/Kbuild
+++ b/tools/testing/nvdimm/Kbuild
@@ -22,6 +22,7 @@ NVDIMM_SRC := $(DRIVERS)/nvdimm
ACPI_SRC := $(DRIVERS)/acpi/nfit
DAX_SRC := $(DRIVERS)/dax
ccflags-y := -I$(src)/$(NVDIMM_SRC)/
+ccflags-y += -I$(src)/$(ACPI_SRC)/
obj-$(CONFIG_LIBNVDIMM) += libnvdimm.o
obj-$(CONFIG_BLK_DEV_PMEM) += nd_pmem.o
diff --git a/tools/testing/nvdimm/acpi_nfit_test.c b/tools/testing/nvdimm/acpi_nfit_test.c
index 43521512e577..fec8fb1b7715 100644
--- a/tools/testing/nvdimm/acpi_nfit_test.c
+++ b/tools/testing/nvdimm/acpi_nfit_test.c
@@ -4,5 +4,13 @@
#include <linux/module.h>
#include <linux/printk.h>
#include "watermark.h"
+#include <nfit.h>
nfit_test_watermark(acpi_nfit);
+
+/* strong / override definition of nfit_intel_shutdown_status */
+void nfit_intel_shutdown_status(struct nfit_mem *nfit_mem)
+{
+ set_bit(NFIT_MEM_DIRTY_COUNT, &nfit_mem->flags);
+ nfit_mem->dirty_shutdown = 42;
+}
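
The function above relies on link-time symbol override: the test build
supplies a strong definition of nfit_intel_shutdown_status() so that a fixed
dirty-shutdown count (42) is observed instead of real hardware state. A
generic two-file sketch of the pattern (how the in-tree copy is made
overridable is not shown in this hunk, so the weak attribute below is an
illustrative assumption):

    /* default.c -- the overridable copy */
    #include <stdio.h>
    void __attribute__((weak)) get_status(void)
    {
        puts("real hardware path");  /* used when no strong copy is linked */
    }

    /* test.c -- the strong copy; the linker picks this one over the weak one */
    #include <stdio.h>
    void get_status(void)
    {
        puts("test override");       /* deterministic value for the test */
    }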
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index cffc2c5a778d..9527d47a1070 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <nd-core.h>
+#include <intel.h>
#include <nfit.h>
#include <nd.h>
#include "nfit_test.h"
@@ -148,6 +149,7 @@ static const struct nd_intel_smart smart_def = {
| ND_INTEL_SMART_ALARM_VALID
| ND_INTEL_SMART_USED_VALID
| ND_INTEL_SMART_SHUTDOWN_VALID
+ | ND_INTEL_SMART_SHUTDOWN_COUNT_VALID
| ND_INTEL_SMART_MTEMP_VALID
| ND_INTEL_SMART_CTEMP_VALID,
.health = ND_INTEL_SMART_NON_CRITICAL_HEALTH,
@@ -160,8 +162,8 @@ static const struct nd_intel_smart smart_def = {
.ait_status = 1,
.life_used = 5,
.shutdown_state = 0,
+ .shutdown_count = 42,
.vendor_size = 0,
- .shutdown_count = 100,
};
struct nfit_test_fw {
diff --git a/tools/testing/nvdimm/test/nfit_test.h b/tools/testing/nvdimm/test/nfit_test.h
index 33752e06ff8d..ade14fe3837e 100644
--- a/tools/testing/nvdimm/test/nfit_test.h
+++ b/tools/testing/nvdimm/test/nfit_test.h
@@ -117,30 +117,6 @@ struct nd_cmd_ars_err_inj_stat {
#define ND_INTEL_SMART_INJECT_FATAL (1 << 2)
#define ND_INTEL_SMART_INJECT_SHUTDOWN (1 << 3)
-struct nd_intel_smart {
- __u32 status;
- union {
- struct {
- __u32 flags;
- __u8 reserved0[4];
- __u8 health;
- __u8 spares;
- __u8 life_used;
- __u8 alarm_flags;
- __u16 media_temperature;
- __u16 ctrl_temperature;
- __u32 shutdown_count;
- __u8 ait_status;
- __u16 pmic_temperature;
- __u8 reserved1[8];
- __u8 shutdown_state;
- __u32 vendor_size;
- __u8 vendor_data[92];
- } __packed;
- __u8 data[128];
- };
-} __packed;
-
struct nd_intel_smart_threshold {
__u32 status;
union {
diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore
index 5c34752e1cff..6210ba41c29e 100644
--- a/tools/testing/selftests/kvm/.gitignore
+++ b/tools/testing/selftests/kvm/.gitignore
@@ -1,6 +1,8 @@
-cr4_cpuid_sync_test
-platform_info_test
-set_sregs_test
-sync_regs_test
-vmx_tsc_adjust_test
-state_test
+/x86_64/cr4_cpuid_sync_test
+/x86_64/evmcs_test
+/x86_64/platform_info_test
+/x86_64/set_sregs_test
+/x86_64/sync_regs_test
+/x86_64/vmx_tsc_adjust_test
+/x86_64/state_test
+/dirty_log_test
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index ec32dad3c3f0..01a219229238 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -1,26 +1,30 @@
all:
-top_srcdir = ../../../../
+top_srcdir = ../../../..
UNAME_M := $(shell uname -m)
-LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86.c lib/vmx.c
-
-TEST_GEN_PROGS_x86_64 = platform_info_test
-TEST_GEN_PROGS_x86_64 += set_sregs_test
-TEST_GEN_PROGS_x86_64 += sync_regs_test
-TEST_GEN_PROGS_x86_64 += vmx_tsc_adjust_test
-TEST_GEN_PROGS_x86_64 += cr4_cpuid_sync_test
-TEST_GEN_PROGS_x86_64 += state_test
+LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/ucall.c lib/sparsebit.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c
+LIBKVM_aarch64 = lib/aarch64/processor.c
+
+TEST_GEN_PROGS_x86_64 = x86_64/platform_info_test
+TEST_GEN_PROGS_x86_64 += x86_64/set_sregs_test
+TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
+TEST_GEN_PROGS_x86_64 += x86_64/cr4_cpuid_sync_test
+TEST_GEN_PROGS_x86_64 += x86_64/state_test
+TEST_GEN_PROGS_x86_64 += x86_64/evmcs_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
+TEST_GEN_PROGS_aarch64 += dirty_log_test
+
TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
LIBKVM += $(LIBKVM_$(UNAME_M))
INSTALL_HDR_PATH = $(top_srcdir)/usr
LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
-LINUX_TOOL_INCLUDE = $(top_srcdir)tools/include
-CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -I..
+LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include
+CFLAGS += -O2 -g -std=gnu99 -I$(LINUX_TOOL_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
LDFLAGS += -pthread
# After inclusion, $(OUTPUT) is defined and
@@ -29,7 +33,7 @@ include ../lib.mk
STATIC_LIBS := $(OUTPUT)/libkvm.a
LIBKVM_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM))
-EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS)
+EXTRA_CLEAN += $(LIBKVM_OBJ) $(STATIC_LIBS) cscope.*
x := $(shell mkdir -p $(sort $(dir $(LIBKVM_OBJ))))
$(LIBKVM_OBJ): $(OUTPUT)/%.o: %.c
@@ -41,3 +45,12 @@ $(OUTPUT)/libkvm.a: $(LIBKVM_OBJ)
all: $(STATIC_LIBS)
$(TEST_GEN_PROGS): $(STATIC_LIBS)
$(STATIC_LIBS):| khdr
+
+cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib ..
+cscope:
+ $(RM) cscope.*
+ (find $(include_paths) -name '*.h' \
+ -exec realpath --relative-base=$(PWD) {} \;; \
+ find . -name '*.c' \
+ -exec realpath --relative-base=$(PWD) {} \;) | sort -u > cscope.files
+ cscope -b
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 0c2cdc105f96..d59820cc2d39 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -5,6 +5,8 @@
* Copyright (C) 2018, Red Hat, Inc.
*/
+#define _GNU_SOURCE /* for program_invocation_name */
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
@@ -15,76 +17,78 @@
#include "test_util.h"
#include "kvm_util.h"
+#include "processor.h"
+
+#define DEBUG printf
-#define DEBUG printf
+#define VCPU_ID 1
-#define VCPU_ID 1
/* The memory slot index to track dirty pages */
-#define TEST_MEM_SLOT_INDEX 1
-/*
- * GPA offset of the testing memory slot. Must be bigger than the
- * default vm mem slot, which is DEFAULT_GUEST_PHY_PAGES.
- */
-#define TEST_MEM_OFFSET (1ULL << 30) /* 1G */
-/* Size of the testing memory slot */
-#define TEST_MEM_PAGES (1ULL << 18) /* 1G for 4K pages */
+#define TEST_MEM_SLOT_INDEX 1
+
+/* Default guest test memory offset, 1G */
+#define DEFAULT_GUEST_TEST_MEM 0x40000000
+
/* How many pages to dirty for each guest loop */
-#define TEST_PAGES_PER_LOOP 1024
+#define TEST_PAGES_PER_LOOP 1024
+
/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
-#define TEST_HOST_LOOP_N 32
+#define TEST_HOST_LOOP_N 32
+
/* Interval for each host loop (ms) */
-#define TEST_HOST_LOOP_INTERVAL 10
+#define TEST_HOST_LOOP_INTERVAL 10
+
+/*
+ * Guest/Host shared variables. Ensure addr_gva2hva() and/or
+ * sync_global_to/from_guest() are used when accessing from
+ * the host. READ/WRITE_ONCE() should also be used with anything
+ * that may change.
+ */
+static uint64_t host_page_size;
+static uint64_t guest_page_size;
+static uint64_t guest_num_pages;
+static uint64_t random_array[TEST_PAGES_PER_LOOP];
+static uint64_t iteration;
/*
- * Guest variables. We use these variables to share data between host
- * and guest. There are two copies of the variables, one in host memory
- * (which is unused) and one in guest memory. When the host wants to
- * access these variables, it needs to call addr_gva2hva() to access the
- * guest copy.
+ * GPA offset of the testing memory slot. Must be bigger than
+ * DEFAULT_GUEST_PHY_PAGES.
*/
-uint64_t guest_random_array[TEST_PAGES_PER_LOOP];
-uint64_t guest_iteration;
-uint64_t guest_page_size;
+static uint64_t guest_test_mem = DEFAULT_GUEST_TEST_MEM;
/*
- * Writes to the first byte of a random page within the testing memory
- * region continuously.
+ * Continuously write to the first 8 bytes of random pages within
+ * the testing memory region.
*/
-void guest_code(void)
+static void guest_code(void)
{
- int i = 0;
- uint64_t volatile *array = guest_random_array;
- uint64_t volatile *guest_addr;
+ int i;
while (true) {
for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
- /*
- * Write to the first 8 bytes of a random page
- * on the testing memory region.
- */
- guest_addr = (uint64_t *)
- (TEST_MEM_OFFSET +
- (array[i] % TEST_MEM_PAGES) * guest_page_size);
- *guest_addr = guest_iteration;
+ uint64_t addr = guest_test_mem;
+ addr += (READ_ONCE(random_array[i]) % guest_num_pages)
+ * guest_page_size;
+ addr &= ~(host_page_size - 1);
+ *(uint64_t *)addr = READ_ONCE(iteration);
}
+
/* Tell the host that we need more random numbers */
GUEST_SYNC(1);
}
}
-/*
- * Host variables. These variables should only be used by the host
- * rather than the guest.
- */
-bool host_quit;
+/* Host variables */
+static bool host_quit;
/* Points to the test VM memory region on which we track dirty logs */
-void *host_test_mem;
+static void *host_test_mem;
+static uint64_t host_num_pages;
/* For statistics only */
-uint64_t host_dirty_count;
-uint64_t host_clear_count;
-uint64_t host_track_next_count;
+static uint64_t host_dirty_count;
+static uint64_t host_clear_count;
+static uint64_t host_track_next_count;
/*
* We use this bitmap to track some pages that should have their dirty
@@ -93,40 +97,34 @@ uint64_t host_track_next_count;
* page bit is cleared in the latest bitmap, then the system must
* report that write in the next get dirty log call.
*/
-unsigned long *host_bmap_track;
+static unsigned long *host_bmap_track;
-void generate_random_array(uint64_t *guest_array, uint64_t size)
+static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
uint64_t i;
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
guest_array[i] = random();
- }
}
-void *vcpu_worker(void *data)
+static void *vcpu_worker(void *data)
{
int ret;
- uint64_t loops, *guest_array, pages_count = 0;
struct kvm_vm *vm = data;
+ uint64_t *guest_array;
+ uint64_t pages_count = 0;
struct kvm_run *run;
- struct guest_args args;
+ struct ucall uc;
run = vcpu_state(vm, VCPU_ID);
- /* Retrieve the guest random array pointer and cache it */
- guest_array = addr_gva2hva(vm, (vm_vaddr_t)guest_random_array);
-
- DEBUG("VCPU starts\n");
-
+ guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
while (!READ_ONCE(host_quit)) {
- /* Let the guest to dirty these random pages */
+ /* Let the guest dirty the random pages */
ret = _vcpu_run(vm, VCPU_ID);
- guest_args_read(vm, VCPU_ID, &args);
- if (run->exit_reason == KVM_EXIT_IO &&
- args.port == GUEST_PORT_SYNC) {
+ if (get_ucall(vm, VCPU_ID, &uc) == UCALL_SYNC) {
pages_count += TEST_PAGES_PER_LOOP;
generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
} else {
@@ -137,18 +135,20 @@ void *vcpu_worker(void *data)
}
}
- DEBUG("VCPU exits, dirtied %"PRIu64" pages\n", pages_count);
+ DEBUG("Dirtied %"PRIu64" pages\n", pages_count);
return NULL;
}
-void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
+static void vm_dirty_log_verify(unsigned long *bmap)
{
uint64_t page;
- uint64_t volatile *value_ptr;
+ uint64_t *value_ptr;
+ uint64_t step = host_page_size >= guest_page_size ? 1 :
+ guest_page_size / host_page_size;
- for (page = 0; page < TEST_MEM_PAGES; page++) {
- value_ptr = host_test_mem + page * getpagesize();
+ for (page = 0; page < host_num_pages; page += step) {
+ value_ptr = host_test_mem + page * host_page_size;
/* If this is a special page that we were tracking... */
if (test_and_clear_bit(page, host_bmap_track)) {
@@ -208,88 +208,117 @@ void vm_dirty_log_verify(unsigned long *bmap, uint64_t iteration)
}
}
-void help(char *name)
+static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
+ uint64_t extra_mem_pages, void *guest_code)
{
- puts("");
- printf("usage: %s [-i iterations] [-I interval] [-h]\n", name);
- puts("");
- printf(" -i: specify iteration counts (default: %"PRIu64")\n",
- TEST_HOST_LOOP_N);
- printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
- TEST_HOST_LOOP_INTERVAL);
- puts("");
- exit(0);
+ struct kvm_vm *vm;
+ uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
+
+ vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+#ifdef __x86_64__
+ vm_create_irqchip(vm);
+#endif
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+ return vm;
}
-int main(int argc, char *argv[])
+static void run_test(enum vm_guest_mode mode, unsigned long iterations,
+ unsigned long interval, bool top_offset)
{
+ unsigned int guest_pa_bits, guest_page_shift;
pthread_t vcpu_thread;
struct kvm_vm *vm;
- uint64_t volatile *psize, *iteration;
- unsigned long *bmap, iterations = TEST_HOST_LOOP_N,
- interval = TEST_HOST_LOOP_INTERVAL;
- int opt;
-
- while ((opt = getopt(argc, argv, "hi:I:")) != -1) {
- switch (opt) {
- case 'i':
- iterations = strtol(optarg, NULL, 10);
- break;
- case 'I':
- interval = strtol(optarg, NULL, 10);
- break;
- case 'h':
- default:
- help(argv[0]);
- break;
- }
+ uint64_t max_gfn;
+ unsigned long *bmap;
+
+ switch (mode) {
+ case VM_MODE_P52V48_4K:
+ guest_pa_bits = 52;
+ guest_page_shift = 12;
+ break;
+ case VM_MODE_P52V48_64K:
+ guest_pa_bits = 52;
+ guest_page_shift = 16;
+ break;
+ case VM_MODE_P40V48_4K:
+ guest_pa_bits = 40;
+ guest_page_shift = 12;
+ break;
+ case VM_MODE_P40V48_64K:
+ guest_pa_bits = 40;
+ guest_page_shift = 16;
+ break;
+ default:
+ TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
}
- TEST_ASSERT(iterations > 2, "Iteration must be bigger than zero\n");
- TEST_ASSERT(interval > 0, "Interval must be bigger than zero");
+ DEBUG("Testing guest mode: %s\n", vm_guest_mode_string(mode));
- DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
- iterations, interval);
+ max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
+ guest_page_size = (1ul << guest_page_shift);
+ /* 1G of guest page sized pages */
+ guest_num_pages = (1ul << (30 - guest_page_shift));
+ host_page_size = getpagesize();
+ host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
+ !!((guest_num_pages * guest_page_size) % host_page_size);
- srandom(time(0));
+ if (top_offset) {
+ guest_test_mem = (max_gfn - guest_num_pages) * guest_page_size;
+ guest_test_mem &= ~(host_page_size - 1);
+ }
- bmap = bitmap_alloc(TEST_MEM_PAGES);
- host_bmap_track = bitmap_alloc(TEST_MEM_PAGES);
+ DEBUG("guest test mem offset: 0x%lx\n", guest_test_mem);
- vm = vm_create_default(VCPU_ID, TEST_MEM_PAGES, guest_code);
+ bmap = bitmap_alloc(host_num_pages);
+ host_bmap_track = bitmap_alloc(host_num_pages);
+
+ vm = create_vm(mode, VCPU_ID, guest_num_pages, guest_code);
/* Add an extra memory slot for testing dirty logging */
vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
- TEST_MEM_OFFSET,
+ guest_test_mem,
TEST_MEM_SLOT_INDEX,
- TEST_MEM_PAGES,
+ guest_num_pages,
KVM_MEM_LOG_DIRTY_PAGES);
- /* Cache the HVA pointer of the region */
- host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)TEST_MEM_OFFSET);
/* Do 1:1 mapping for the dirty track memory slot */
- virt_map(vm, TEST_MEM_OFFSET, TEST_MEM_OFFSET,
- TEST_MEM_PAGES * getpagesize(), 0);
+ virt_map(vm, guest_test_mem, guest_test_mem,
+ guest_num_pages * guest_page_size, 0);
+
+ /* Cache the HVA pointer of the region */
+ host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_mem);
+#ifdef __x86_64__
vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+#endif
+#ifdef __aarch64__
+ ucall_init(vm, UCALL_MMIO, NULL);
+#endif
- /* Tell the guest about the page size on the system */
- psize = addr_gva2hva(vm, (vm_vaddr_t)&guest_page_size);
- *psize = getpagesize();
+ /* Export the shared variables to the guest */
+ sync_global_to_guest(vm, host_page_size);
+ sync_global_to_guest(vm, guest_page_size);
+ sync_global_to_guest(vm, guest_test_mem);
+ sync_global_to_guest(vm, guest_num_pages);
/* Start the iterations */
- iteration = addr_gva2hva(vm, (vm_vaddr_t)&guest_iteration);
- *iteration = 1;
+ iteration = 1;
+ sync_global_to_guest(vm, iteration);
+ host_quit = false;
+ host_dirty_count = 0;
+ host_clear_count = 0;
+ host_track_next_count = 0;
- /* Start dirtying pages */
pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);
- while (*iteration < iterations) {
+ while (iteration < iterations) {
/* Give the vcpu thread some time to dirty some pages */
usleep(interval * 1000);
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
- vm_dirty_log_verify(bmap, *iteration);
- (*iteration)++;
+ vm_dirty_log_verify(bmap);
+ iteration++;
+ sync_global_to_guest(vm, iteration);
}
/* Tell the vcpu thread to quit */
@@ -302,7 +331,118 @@ int main(int argc, char *argv[])
free(bmap);
free(host_bmap_track);
+ ucall_uninit(vm);
kvm_vm_free(vm);
+}
+
+static struct vm_guest_modes {
+ enum vm_guest_mode mode;
+ bool supported;
+ bool enabled;
+} vm_guest_modes[NUM_VM_MODES] = {
+#if defined(__x86_64__)
+ { VM_MODE_P52V48_4K, 1, 1, },
+ { VM_MODE_P52V48_64K, 0, 0, },
+ { VM_MODE_P40V48_4K, 0, 0, },
+ { VM_MODE_P40V48_64K, 0, 0, },
+#elif defined(__aarch64__)
+ { VM_MODE_P52V48_4K, 0, 0, },
+ { VM_MODE_P52V48_64K, 0, 0, },
+ { VM_MODE_P40V48_4K, 1, 1, },
+ { VM_MODE_P40V48_64K, 1, 1, },
+#endif
+};
+
+static void help(char *name)
+{
+ int i;
+
+ puts("");
+ printf("usage: %s [-h] [-i iterations] [-I interval] "
+ "[-o offset] [-t] [-m mode]\n", name);
+ puts("");
+ printf(" -i: specify iteration counts (default: %"PRIu64")\n",
+ TEST_HOST_LOOP_N);
+ printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
+ TEST_HOST_LOOP_INTERVAL);
+ printf(" -o: guest test memory offset (default: 0x%lx)\n",
+ DEFAULT_GUEST_TEST_MEM);
+ printf(" -t: map guest test memory at the top of the allowed "
+ "physical address range\n");
+ printf(" -m: specify the guest mode ID to test "
+ "(default: test all supported modes)\n"
+ " This option may be used multiple times.\n"
+ " Guest mode IDs:\n");
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ printf(" %d: %s%s\n",
+ vm_guest_modes[i].mode,
+ vm_guest_mode_string(vm_guest_modes[i].mode),
+ vm_guest_modes[i].supported ? " (supported)" : "");
+ }
+ puts("");
+ exit(0);
+}
+
+int main(int argc, char *argv[])
+{
+ unsigned long iterations = TEST_HOST_LOOP_N;
+ unsigned long interval = TEST_HOST_LOOP_INTERVAL;
+ bool mode_selected = false;
+ bool top_offset = false;
+ unsigned int mode;
+ int opt, i;
+
+ while ((opt = getopt(argc, argv, "hi:I:o:tm:")) != -1) {
+ switch (opt) {
+ case 'i':
+ iterations = strtol(optarg, NULL, 10);
+ break;
+ case 'I':
+ interval = strtol(optarg, NULL, 10);
+ break;
+ case 'o':
+ guest_test_mem = strtoull(optarg, NULL, 0);
+ break;
+ case 't':
+ top_offset = true;
+ break;
+ case 'm':
+ if (!mode_selected) {
+ for (i = 0; i < NUM_VM_MODES; ++i)
+ vm_guest_modes[i].enabled = 0;
+ mode_selected = true;
+ }
+ mode = strtoul(optarg, NULL, 10);
+ TEST_ASSERT(mode < NUM_VM_MODES,
+ "Guest mode ID %d too big", mode);
+ vm_guest_modes[mode].enabled = 1;
+ break;
+ case 'h':
+ default:
+ help(argv[0]);
+ break;
+ }
+ }
+
+ TEST_ASSERT(iterations > 2, "Iterations must be greater than two");
+ TEST_ASSERT(interval > 0, "Interval must be greater than zero");
+ TEST_ASSERT(!top_offset || guest_test_mem == DEFAULT_GUEST_TEST_MEM,
+ "Cannot use both -o [offset] and -t at the same time");
+
+ DEBUG("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
+ iterations, interval);
+
+ srandom(time(0));
+
+ for (i = 0; i < NUM_VM_MODES; ++i) {
+ if (!vm_guest_modes[i].enabled)
+ continue;
+ TEST_ASSERT(vm_guest_modes[i].supported,
+ "Guest mode ID %d (%s) not supported.",
+ vm_guest_modes[i].mode,
+ vm_guest_mode_string(vm_guest_modes[i].mode));
+ run_test(vm_guest_modes[i].mode, iterations, interval, top_offset);
+ }
return 0;
}
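
A note on the shared-variable pattern the rewrite above adopts: the same
global is compiled into both the host binary and the guest image, and
sync_global_to_guest() copies the host's value into the guest's instance,
while addr_gva2hva() gives the host a direct pointer into guest memory for
bulk data such as random_array. A condensed sketch of the handshake, using
names from the test itself:

    static uint64_t iteration;              /* one copy on each side */

    /* host side, once per verification loop */
    iteration++;
    sync_global_to_guest(vm, iteration);    /* push the host value into the guest */

    /* guest side (guest_code) */
    *(uint64_t *)addr = READ_ONCE(iteration);  /* always sees the synced value */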
diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h
new file mode 100644
index 000000000000..9ef2ab1a0c08
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/aarch64/processor.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * AArch64 processor specific defines
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
+
+#include "kvm_util.h"
+
+
+#define ARM64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define CPACR_EL1 3, 0, 1, 0, 2
+#define TCR_EL1 3, 0, 2, 0, 2
+#define MAIR_EL1 3, 0, 10, 2, 0
+#define TTBR0_EL1 3, 0, 2, 0, 0
+#define SCTLR_EL1 3, 0, 1, 0, 0
+
+/*
+ * Default MAIR
+ * index attribute
+ * DEVICE_nGnRnE 0 0000:0000
+ * DEVICE_nGnRE 1 0000:0100
+ * DEVICE_GRE 2 0000:1100
+ * NORMAL_NC 3 0100:0100
+ * NORMAL 4 1111:1111
+ * NORMAL_WT 5 1011:1011
+ */
+#define DEFAULT_MAIR_EL1 ((0x00ul << (0 * 8)) | \
+ (0x04ul << (1 * 8)) | \
+ (0x0cul << (2 * 8)) | \
+ (0x44ul << (3 * 8)) | \
+ (0xfful << (4 * 8)) | \
+ (0xbbul << (5 * 8)))
+
+static inline void get_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t *addr)
+{
+ struct kvm_one_reg reg;
+ reg.id = id;
+ reg.addr = (uint64_t)addr;
+ vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, &reg);
+}
+
+static inline void set_reg(struct kvm_vm *vm, uint32_t vcpuid, uint64_t id, uint64_t val)
+{
+ struct kvm_one_reg reg;
+ reg.id = id;
+ reg.addr = (uint64_t)&val;
+ vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, &reg);
+}
+
+#endif /* SELFTEST_KVM_PROCESSOR_H */
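
A usage sketch for the accessors above, reading and advancing the guest's
program counter (the vcpu id 0 and the +4 AArch64 instruction size are
illustrative, not taken from this patch):

    uint64_t pc;

    get_reg(vm, 0, ARM64_CORE_REG(regs.pc), &pc);     /* KVM_GET_ONE_REG */
    set_reg(vm, 0, ARM64_CORE_REG(regs.pc), pc + 4);  /* skip one instruction */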
diff --git a/tools/testing/selftests/kvm/include/evmcs.h b/tools/testing/selftests/kvm/include/evmcs.h
new file mode 100644
index 000000000000..4059014d93ea
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/evmcs.h
@@ -0,0 +1,1098 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tools/testing/selftests/kvm/include/evmcs.h
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ */
+
+#ifndef SELFTEST_KVM_EVMCS_H
+#define SELFTEST_KVM_EVMCS_H
+
+#include <stdint.h>
+#include "vmx.h"
+
+#define u16 uint16_t
+#define u32 uint32_t
+#define u64 uint64_t
+
+extern bool enable_evmcs;
+
+struct hv_vp_assist_page {
+ __u32 apic_assist;
+ __u32 reserved;
+ __u64 vtl_control[2];
+ __u64 nested_enlightenments_control[2];
+ __u32 enlighten_vmentry;
+ __u64 current_nested_vmcs;
+};
+
+struct hv_enlightened_vmcs {
+ u32 revision_id;
+ u32 abort;
+
+ u16 host_es_selector;
+ u16 host_cs_selector;
+ u16 host_ss_selector;
+ u16 host_ds_selector;
+ u16 host_fs_selector;
+ u16 host_gs_selector;
+ u16 host_tr_selector;
+
+ u64 host_ia32_pat;
+ u64 host_ia32_efer;
+
+ u64 host_cr0;
+ u64 host_cr3;
+ u64 host_cr4;
+
+ u64 host_ia32_sysenter_esp;
+ u64 host_ia32_sysenter_eip;
+ u64 host_rip;
+ u32 host_ia32_sysenter_cs;
+
+ u32 pin_based_vm_exec_control;
+ u32 vm_exit_controls;
+ u32 secondary_vm_exec_control;
+
+ u64 io_bitmap_a;
+ u64 io_bitmap_b;
+ u64 msr_bitmap;
+
+ u16 guest_es_selector;
+ u16 guest_cs_selector;
+ u16 guest_ss_selector;
+ u16 guest_ds_selector;
+ u16 guest_fs_selector;
+ u16 guest_gs_selector;
+ u16 guest_ldtr_selector;
+ u16 guest_tr_selector;
+
+ u32 guest_es_limit;
+ u32 guest_cs_limit;
+ u32 guest_ss_limit;
+ u32 guest_ds_limit;
+ u32 guest_fs_limit;
+ u32 guest_gs_limit;
+ u32 guest_ldtr_limit;
+ u32 guest_tr_limit;
+ u32 guest_gdtr_limit;
+ u32 guest_idtr_limit;
+
+ u32 guest_es_ar_bytes;
+ u32 guest_cs_ar_bytes;
+ u32 guest_ss_ar_bytes;
+ u32 guest_ds_ar_bytes;
+ u32 guest_fs_ar_bytes;
+ u32 guest_gs_ar_bytes;
+ u32 guest_ldtr_ar_bytes;
+ u32 guest_tr_ar_bytes;
+
+ u64 guest_es_base;
+ u64 guest_cs_base;
+ u64 guest_ss_base;
+ u64 guest_ds_base;
+ u64 guest_fs_base;
+ u64 guest_gs_base;
+ u64 guest_ldtr_base;
+ u64 guest_tr_base;
+ u64 guest_gdtr_base;
+ u64 guest_idtr_base;
+
+ u64 padding64_1[3];
+
+ u64 vm_exit_msr_store_addr;
+ u64 vm_exit_msr_load_addr;
+ u64 vm_entry_msr_load_addr;
+
+ u64 cr3_target_value0;
+ u64 cr3_target_value1;
+ u64 cr3_target_value2;
+ u64 cr3_target_value3;
+
+ u32 page_fault_error_code_mask;
+ u32 page_fault_error_code_match;
+
+ u32 cr3_target_count;
+ u32 vm_exit_msr_store_count;
+ u32 vm_exit_msr_load_count;
+ u32 vm_entry_msr_load_count;
+
+ u64 tsc_offset;
+ u64 virtual_apic_page_addr;
+ u64 vmcs_link_pointer;
+
+ u64 guest_ia32_debugctl;
+ u64 guest_ia32_pat;
+ u64 guest_ia32_efer;
+
+ u64 guest_pdptr0;
+ u64 guest_pdptr1;
+ u64 guest_pdptr2;
+ u64 guest_pdptr3;
+
+ u64 guest_pending_dbg_exceptions;
+ u64 guest_sysenter_esp;
+ u64 guest_sysenter_eip;
+
+ u32 guest_activity_state;
+ u32 guest_sysenter_cs;
+
+ u64 cr0_guest_host_mask;
+ u64 cr4_guest_host_mask;
+ u64 cr0_read_shadow;
+ u64 cr4_read_shadow;
+ u64 guest_cr0;
+ u64 guest_cr3;
+ u64 guest_cr4;
+ u64 guest_dr7;
+
+ u64 host_fs_base;
+ u64 host_gs_base;
+ u64 host_tr_base;
+ u64 host_gdtr_base;
+ u64 host_idtr_base;
+ u64 host_rsp;
+
+ u64 ept_pointer;
+
+ u16 virtual_processor_id;
+ u16 padding16[3];
+
+ u64 padding64_2[5];
+ u64 guest_physical_address;
+
+ u32 vm_instruction_error;
+ u32 vm_exit_reason;
+ u32 vm_exit_intr_info;
+ u32 vm_exit_intr_error_code;
+ u32 idt_vectoring_info_field;
+ u32 idt_vectoring_error_code;
+ u32 vm_exit_instruction_len;
+ u32 vmx_instruction_info;
+
+ u64 exit_qualification;
+ u64 exit_io_instruction_ecx;
+ u64 exit_io_instruction_esi;
+ u64 exit_io_instruction_edi;
+ u64 exit_io_instruction_eip;
+
+ u64 guest_linear_address;
+ u64 guest_rsp;
+ u64 guest_rflags;
+
+ u32 guest_interruptibility_info;
+ u32 cpu_based_vm_exec_control;
+ u32 exception_bitmap;
+ u32 vm_entry_controls;
+ u32 vm_entry_intr_info_field;
+ u32 vm_entry_exception_error_code;
+ u32 vm_entry_instruction_len;
+ u32 tpr_threshold;
+
+ u64 guest_rip;
+
+ u32 hv_clean_fields;
+ u32 hv_padding_32;
+ u32 hv_synthetic_controls;
+ struct {
+ u32 nested_flush_hypercall:1;
+ u32 msr_bitmap:1;
+ u32 reserved:30;
+ } hv_enlightenments_control;
+ u32 hv_vp_id;
+
+ u64 hv_vm_id;
+ u64 partition_assist_page;
+ u64 padding64_4[4];
+ u64 guest_bndcfgs;
+ u64 padding64_5[7];
+ u64 xss_exit_bitmap;
+ u64 padding64_6[7];
+};
+
+#define HV_X64_MSR_VP_ASSIST_PAGE 0x40000073
+#define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE 0x00000001
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT 12
+#define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK \
+ (~((1ull << HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+
+struct hv_enlightened_vmcs *current_evmcs;
+struct hv_vp_assist_page *current_vp_assist;
+
+static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
+{
+ u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
+ HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+ wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val);
+
+ current_vp_assist = vp_assist;
+
+ enable_evmcs = true;
+
+ return 0;
+}
+
+static inline int evmcs_vmptrld(uint64_t vmcs_pa, void *vmcs)
+{
+ current_vp_assist->current_nested_vmcs = vmcs_pa;
+ current_vp_assist->enlighten_vmentry = 1;
+
+ current_evmcs = vmcs;
+
+ return 0;
+}
+
+static inline int evmcs_vmptrst(uint64_t *value)
+{
+ *value = current_vp_assist->current_nested_vmcs &
+ ~HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+ return 0;
+}
+
+static inline int evmcs_vmread(uint64_t encoding, uint64_t *value)
+{
+ switch (encoding) {
+ case GUEST_RIP:
+ *value = current_evmcs->guest_rip;
+ break;
+ case GUEST_RSP:
+ *value = current_evmcs->guest_rsp;
+ break;
+ case GUEST_RFLAGS:
+ *value = current_evmcs->guest_rflags;
+ break;
+ case HOST_IA32_PAT:
+ *value = current_evmcs->host_ia32_pat;
+ break;
+ case HOST_IA32_EFER:
+ *value = current_evmcs->host_ia32_efer;
+ break;
+ case HOST_CR0:
+ *value = current_evmcs->host_cr0;
+ break;
+ case HOST_CR3:
+ *value = current_evmcs->host_cr3;
+ break;
+ case HOST_CR4:
+ *value = current_evmcs->host_cr4;
+ break;
+ case HOST_IA32_SYSENTER_ESP:
+ *value = current_evmcs->host_ia32_sysenter_esp;
+ break;
+ case HOST_IA32_SYSENTER_EIP:
+ *value = current_evmcs->host_ia32_sysenter_eip;
+ break;
+ case HOST_RIP:
+ *value = current_evmcs->host_rip;
+ break;
+ case IO_BITMAP_A:
+ *value = current_evmcs->io_bitmap_a;
+ break;
+ case IO_BITMAP_B:
+ *value = current_evmcs->io_bitmap_b;
+ break;
+ case MSR_BITMAP:
+ *value = current_evmcs->msr_bitmap;
+ break;
+ case GUEST_ES_BASE:
+ *value = current_evmcs->guest_es_base;
+ break;
+ case GUEST_CS_BASE:
+ *value = current_evmcs->guest_cs_base;
+ break;
+ case GUEST_SS_BASE:
+ *value = current_evmcs->guest_ss_base;
+ break;
+ case GUEST_DS_BASE:
+ *value = current_evmcs->guest_ds_base;
+ break;
+ case GUEST_FS_BASE:
+ *value = current_evmcs->guest_fs_base;
+ break;
+ case GUEST_GS_BASE:
+ *value = current_evmcs->guest_gs_base;
+ break;
+ case GUEST_LDTR_BASE:
+ *value = current_evmcs->guest_ldtr_base;
+ break;
+ case GUEST_TR_BASE:
+ *value = current_evmcs->guest_tr_base;
+ break;
+ case GUEST_GDTR_BASE:
+ *value = current_evmcs->guest_gdtr_base;
+ break;
+ case GUEST_IDTR_BASE:
+ *value = current_evmcs->guest_idtr_base;
+ break;
+ case TSC_OFFSET:
+ *value = current_evmcs->tsc_offset;
+ break;
+ case VIRTUAL_APIC_PAGE_ADDR:
+ *value = current_evmcs->virtual_apic_page_addr;
+ break;
+ case VMCS_LINK_POINTER:
+ *value = current_evmcs->vmcs_link_pointer;
+ break;
+ case GUEST_IA32_DEBUGCTL:
+ *value = current_evmcs->guest_ia32_debugctl;
+ break;
+ case GUEST_IA32_PAT:
+ *value = current_evmcs->guest_ia32_pat;
+ break;
+ case GUEST_IA32_EFER:
+ *value = current_evmcs->guest_ia32_efer;
+ break;
+ case GUEST_PDPTR0:
+ *value = current_evmcs->guest_pdptr0;
+ break;
+ case GUEST_PDPTR1:
+ *value = current_evmcs->guest_pdptr1;
+ break;
+ case GUEST_PDPTR2:
+ *value = current_evmcs->guest_pdptr2;
+ break;
+ case GUEST_PDPTR3:
+ *value = current_evmcs->guest_pdptr3;
+ break;
+ case GUEST_PENDING_DBG_EXCEPTIONS:
+ *value = current_evmcs->guest_pending_dbg_exceptions;
+ break;
+ case GUEST_SYSENTER_ESP:
+ *value = current_evmcs->guest_sysenter_esp;
+ break;
+ case GUEST_SYSENTER_EIP:
+ *value = current_evmcs->guest_sysenter_eip;
+ break;
+ case CR0_GUEST_HOST_MASK:
+ *value = current_evmcs->cr0_guest_host_mask;
+ break;
+ case CR4_GUEST_HOST_MASK:
+ *value = current_evmcs->cr4_guest_host_mask;
+ break;
+ case CR0_READ_SHADOW:
+ *value = current_evmcs->cr0_read_shadow;
+ break;
+ case CR4_READ_SHADOW:
+ *value = current_evmcs->cr4_read_shadow;
+ break;
+ case GUEST_CR0:
+ *value = current_evmcs->guest_cr0;
+ break;
+ case GUEST_CR3:
+ *value = current_evmcs->guest_cr3;
+ break;
+ case GUEST_CR4:
+ *value = current_evmcs->guest_cr4;
+ break;
+ case GUEST_DR7:
+ *value = current_evmcs->guest_dr7;
+ break;
+ case HOST_FS_BASE:
+ *value = current_evmcs->host_fs_base;
+ break;
+ case HOST_GS_BASE:
+ *value = current_evmcs->host_gs_base;
+ break;
+ case HOST_TR_BASE:
+ *value = current_evmcs->host_tr_base;
+ break;
+ case HOST_GDTR_BASE:
+ *value = current_evmcs->host_gdtr_base;
+ break;
+ case HOST_IDTR_BASE:
+ *value = current_evmcs->host_idtr_base;
+ break;
+ case HOST_RSP:
+ *value = current_evmcs->host_rsp;
+ break;
+ case EPT_POINTER:
+ *value = current_evmcs->ept_pointer;
+ break;
+ case GUEST_BNDCFGS:
+ *value = current_evmcs->guest_bndcfgs;
+ break;
+ case XSS_EXIT_BITMAP:
+ *value = current_evmcs->xss_exit_bitmap;
+ break;
+ case GUEST_PHYSICAL_ADDRESS:
+ *value = current_evmcs->guest_physical_address;
+ break;
+ case EXIT_QUALIFICATION:
+ *value = current_evmcs->exit_qualification;
+ break;
+ case GUEST_LINEAR_ADDRESS:
+ *value = current_evmcs->guest_linear_address;
+ break;
+ case VM_EXIT_MSR_STORE_ADDR:
+ *value = current_evmcs->vm_exit_msr_store_addr;
+ break;
+ case VM_EXIT_MSR_LOAD_ADDR:
+ *value = current_evmcs->vm_exit_msr_load_addr;
+ break;
+ case VM_ENTRY_MSR_LOAD_ADDR:
+ *value = current_evmcs->vm_entry_msr_load_addr;
+ break;
+ case CR3_TARGET_VALUE0:
+ *value = current_evmcs->cr3_target_value0;
+ break;
+ case CR3_TARGET_VALUE1:
+ *value = current_evmcs->cr3_target_value1;
+ break;
+ case CR3_TARGET_VALUE2:
+ *value = current_evmcs->cr3_target_value2;
+ break;
+ case CR3_TARGET_VALUE3:
+ *value = current_evmcs->cr3_target_value3;
+ break;
+ case TPR_THRESHOLD:
+ *value = current_evmcs->tpr_threshold;
+ break;
+ case GUEST_INTERRUPTIBILITY_INFO:
+ *value = current_evmcs->guest_interruptibility_info;
+ break;
+ case CPU_BASED_VM_EXEC_CONTROL:
+ *value = current_evmcs->cpu_based_vm_exec_control;
+ break;
+ case EXCEPTION_BITMAP:
+ *value = current_evmcs->exception_bitmap;
+ break;
+ case VM_ENTRY_CONTROLS:
+ *value = current_evmcs->vm_entry_controls;
+ break;
+ case VM_ENTRY_INTR_INFO_FIELD:
+ *value = current_evmcs->vm_entry_intr_info_field;
+ break;
+ case VM_ENTRY_EXCEPTION_ERROR_CODE:
+ *value = current_evmcs->vm_entry_exception_error_code;
+ break;
+ case VM_ENTRY_INSTRUCTION_LEN:
+ *value = current_evmcs->vm_entry_instruction_len;
+ break;
+ case HOST_IA32_SYSENTER_CS:
+ *value = current_evmcs->host_ia32_sysenter_cs;
+ break;
+ case PIN_BASED_VM_EXEC_CONTROL:
+ *value = current_evmcs->pin_based_vm_exec_control;
+ break;
+ case VM_EXIT_CONTROLS:
+ *value = current_evmcs->vm_exit_controls;
+ break;
+ case SECONDARY_VM_EXEC_CONTROL:
+ *value = current_evmcs->secondary_vm_exec_control;
+ break;
+ case GUEST_ES_LIMIT:
+ *value = current_evmcs->guest_es_limit;
+ break;
+ case GUEST_CS_LIMIT:
+ *value = current_evmcs->guest_cs_limit;
+ break;
+ case GUEST_SS_LIMIT:
+ *value = current_evmcs->guest_ss_limit;
+ break;
+ case GUEST_DS_LIMIT:
+ *value = current_evmcs->guest_ds_limit;
+ break;
+ case GUEST_FS_LIMIT:
+ *value = current_evmcs->guest_fs_limit;
+ break;
+ case GUEST_GS_LIMIT:
+ *value = current_evmcs->guest_gs_limit;
+ break;
+ case GUEST_LDTR_LIMIT:
+ *value = current_evmcs->guest_ldtr_limit;
+ break;
+ case GUEST_TR_LIMIT:
+ *value = current_evmcs->guest_tr_limit;
+ break;
+ case GUEST_GDTR_LIMIT:
+ *value = current_evmcs->guest_gdtr_limit;
+ break;
+ case GUEST_IDTR_LIMIT:
+ *value = current_evmcs->guest_idtr_limit;
+ break;
+ case GUEST_ES_AR_BYTES:
+ *value = current_evmcs->guest_es_ar_bytes;
+ break;
+ case GUEST_CS_AR_BYTES:
+ *value = current_evmcs->guest_cs_ar_bytes;
+ break;
+ case GUEST_SS_AR_BYTES:
+ *value = current_evmcs->guest_ss_ar_bytes;
+ break;
+ case GUEST_DS_AR_BYTES:
+ *value = current_evmcs->guest_ds_ar_bytes;
+ break;
+ case GUEST_FS_AR_BYTES:
+ *value = current_evmcs->guest_fs_ar_bytes;
+ break;
+ case GUEST_GS_AR_BYTES:
+ *value = current_evmcs->guest_gs_ar_bytes;
+ break;
+ case GUEST_LDTR_AR_BYTES:
+ *value = current_evmcs->guest_ldtr_ar_bytes;
+ break;
+ case GUEST_TR_AR_BYTES:
+ *value = current_evmcs->guest_tr_ar_bytes;
+ break;
+ case GUEST_ACTIVITY_STATE:
+ *value = current_evmcs->guest_activity_state;
+ break;
+ case GUEST_SYSENTER_CS:
+ *value = current_evmcs->guest_sysenter_cs;
+ break;
+ case VM_INSTRUCTION_ERROR:
+ *value = current_evmcs->vm_instruction_error;
+ break;
+ case VM_EXIT_REASON:
+ *value = current_evmcs->vm_exit_reason;
+ break;
+ case VM_EXIT_INTR_INFO:
+ *value = current_evmcs->vm_exit_intr_info;
+ break;
+ case VM_EXIT_INTR_ERROR_CODE:
+ *value = current_evmcs->vm_exit_intr_error_code;
+ break;
+ case IDT_VECTORING_INFO_FIELD:
+ *value = current_evmcs->idt_vectoring_info_field;
+ break;
+ case IDT_VECTORING_ERROR_CODE:
+ *value = current_evmcs->idt_vectoring_error_code;
+ break;
+ case VM_EXIT_INSTRUCTION_LEN:
+ *value = current_evmcs->vm_exit_instruction_len;
+ break;
+ case VMX_INSTRUCTION_INFO:
+ *value = current_evmcs->vmx_instruction_info;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MASK:
+ *value = current_evmcs->page_fault_error_code_mask;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MATCH:
+ *value = current_evmcs->page_fault_error_code_match;
+ break;
+ case CR3_TARGET_COUNT:
+ *value = current_evmcs->cr3_target_count;
+ break;
+ case VM_EXIT_MSR_STORE_COUNT:
+ *value = current_evmcs->vm_exit_msr_store_count;
+ break;
+ case VM_EXIT_MSR_LOAD_COUNT:
+ *value = current_evmcs->vm_exit_msr_load_count;
+ break;
+ case VM_ENTRY_MSR_LOAD_COUNT:
+ *value = current_evmcs->vm_entry_msr_load_count;
+ break;
+ case HOST_ES_SELECTOR:
+ *value = current_evmcs->host_es_selector;
+ break;
+ case HOST_CS_SELECTOR:
+ *value = current_evmcs->host_cs_selector;
+ break;
+ case HOST_SS_SELECTOR:
+ *value = current_evmcs->host_ss_selector;
+ break;
+ case HOST_DS_SELECTOR:
+ *value = current_evmcs->host_ds_selector;
+ break;
+ case HOST_FS_SELECTOR:
+ *value = current_evmcs->host_fs_selector;
+ break;
+ case HOST_GS_SELECTOR:
+ *value = current_evmcs->host_gs_selector;
+ break;
+ case HOST_TR_SELECTOR:
+ *value = current_evmcs->host_tr_selector;
+ break;
+ case GUEST_ES_SELECTOR:
+ *value = current_evmcs->guest_es_selector;
+ break;
+ case GUEST_CS_SELECTOR:
+ *value = current_evmcs->guest_cs_selector;
+ break;
+ case GUEST_SS_SELECTOR:
+ *value = current_evmcs->guest_ss_selector;
+ break;
+ case GUEST_DS_SELECTOR:
+ *value = current_evmcs->guest_ds_selector;
+ break;
+ case GUEST_FS_SELECTOR:
+ *value = current_evmcs->guest_fs_selector;
+ break;
+ case GUEST_GS_SELECTOR:
+ *value = current_evmcs->guest_gs_selector;
+ break;
+ case GUEST_LDTR_SELECTOR:
+ *value = current_evmcs->guest_ldtr_selector;
+ break;
+ case GUEST_TR_SELECTOR:
+ *value = current_evmcs->guest_tr_selector;
+ break;
+ case VIRTUAL_PROCESSOR_ID:
+ *value = current_evmcs->virtual_processor_id;
+ break;
+ default: return 1;
+ }
+
+ return 0;
+}
+
+static inline int evmcs_vmwrite(uint64_t encoding, uint64_t value)
+{
+ switch (encoding) {
+ case GUEST_RIP:
+ current_evmcs->guest_rip = value;
+ break;
+ case GUEST_RSP:
+ current_evmcs->guest_rsp = value;
+ break;
+ case GUEST_RFLAGS:
+ current_evmcs->guest_rflags = value;
+ break;
+ case HOST_IA32_PAT:
+ current_evmcs->host_ia32_pat = value;
+ break;
+ case HOST_IA32_EFER:
+ current_evmcs->host_ia32_efer = value;
+ break;
+ case HOST_CR0:
+ current_evmcs->host_cr0 = value;
+ break;
+ case HOST_CR3:
+ current_evmcs->host_cr3 = value;
+ break;
+ case HOST_CR4:
+ current_evmcs->host_cr4 = value;
+ break;
+ case HOST_IA32_SYSENTER_ESP:
+ current_evmcs->host_ia32_sysenter_esp = value;
+ break;
+ case HOST_IA32_SYSENTER_EIP:
+ current_evmcs->host_ia32_sysenter_eip = value;
+ break;
+ case HOST_RIP:
+ current_evmcs->host_rip = value;
+ break;
+ case IO_BITMAP_A:
+ current_evmcs->io_bitmap_a = value;
+ break;
+ case IO_BITMAP_B:
+ current_evmcs->io_bitmap_b = value;
+ break;
+ case MSR_BITMAP:
+ current_evmcs->msr_bitmap = value;
+ break;
+ case GUEST_ES_BASE:
+ current_evmcs->guest_es_base = value;
+ break;
+ case GUEST_CS_BASE:
+ current_evmcs->guest_cs_base = value;
+ break;
+ case GUEST_SS_BASE:
+ current_evmcs->guest_ss_base = value;
+ break;
+ case GUEST_DS_BASE:
+ current_evmcs->guest_ds_base = value;
+ break;
+ case GUEST_FS_BASE:
+ current_evmcs->guest_fs_base = value;
+ break;
+ case GUEST_GS_BASE:
+ current_evmcs->guest_gs_base = value;
+ break;
+ case GUEST_LDTR_BASE:
+ current_evmcs->guest_ldtr_base = value;
+ break;
+ case GUEST_TR_BASE:
+ current_evmcs->guest_tr_base = value;
+ break;
+ case GUEST_GDTR_BASE:
+ current_evmcs->guest_gdtr_base = value;
+ break;
+ case GUEST_IDTR_BASE:
+ current_evmcs->guest_idtr_base = value;
+ break;
+ case TSC_OFFSET:
+ current_evmcs->tsc_offset = value;
+ break;
+ case VIRTUAL_APIC_PAGE_ADDR:
+ current_evmcs->virtual_apic_page_addr = value;
+ break;
+ case VMCS_LINK_POINTER:
+ current_evmcs->vmcs_link_pointer = value;
+ break;
+ case GUEST_IA32_DEBUGCTL:
+ current_evmcs->guest_ia32_debugctl = value;
+ break;
+ case GUEST_IA32_PAT:
+ current_evmcs->guest_ia32_pat = value;
+ break;
+ case GUEST_IA32_EFER:
+ current_evmcs->guest_ia32_efer = value;
+ break;
+ case GUEST_PDPTR0:
+ current_evmcs->guest_pdptr0 = value;
+ break;
+ case GUEST_PDPTR1:
+ current_evmcs->guest_pdptr1 = value;
+ break;
+ case GUEST_PDPTR2:
+ current_evmcs->guest_pdptr2 = value;
+ break;
+ case GUEST_PDPTR3:
+ current_evmcs->guest_pdptr3 = value;
+ break;
+ case GUEST_PENDING_DBG_EXCEPTIONS:
+ current_evmcs->guest_pending_dbg_exceptions = value;
+ break;
+ case GUEST_SYSENTER_ESP:
+ current_evmcs->guest_sysenter_esp = value;
+ break;
+ case GUEST_SYSENTER_EIP:
+ current_evmcs->guest_sysenter_eip = value;
+ break;
+ case CR0_GUEST_HOST_MASK:
+ current_evmcs->cr0_guest_host_mask = value;
+ break;
+ case CR4_GUEST_HOST_MASK:
+ current_evmcs->cr4_guest_host_mask = value;
+ break;
+ case CR0_READ_SHADOW:
+ current_evmcs->cr0_read_shadow = value;
+ break;
+ case CR4_READ_SHADOW:
+ current_evmcs->cr4_read_shadow = value;
+ break;
+ case GUEST_CR0:
+ current_evmcs->guest_cr0 = value;
+ break;
+ case GUEST_CR3:
+ current_evmcs->guest_cr3 = value;
+ break;
+ case GUEST_CR4:
+ current_evmcs->guest_cr4 = value;
+ break;
+ case GUEST_DR7:
+ current_evmcs->guest_dr7 = value;
+ break;
+ case HOST_FS_BASE:
+ current_evmcs->host_fs_base = value;
+ break;
+ case HOST_GS_BASE:
+ current_evmcs->host_gs_base = value;
+ break;
+ case HOST_TR_BASE:
+ current_evmcs->host_tr_base = value;
+ break;
+ case HOST_GDTR_BASE:
+ current_evmcs->host_gdtr_base = value;
+ break;
+ case HOST_IDTR_BASE:
+ current_evmcs->host_idtr_base = value;
+ break;
+ case HOST_RSP:
+ current_evmcs->host_rsp = value;
+ break;
+ case EPT_POINTER:
+ current_evmcs->ept_pointer = value;
+ break;
+ case GUEST_BNDCFGS:
+ current_evmcs->guest_bndcfgs = value;
+ break;
+ case XSS_EXIT_BITMAP:
+ current_evmcs->xss_exit_bitmap = value;
+ break;
+ case GUEST_PHYSICAL_ADDRESS:
+ current_evmcs->guest_physical_address = value;
+ break;
+ case EXIT_QUALIFICATION:
+ current_evmcs->exit_qualification = value;
+ break;
+ case GUEST_LINEAR_ADDRESS:
+ current_evmcs->guest_linear_address = value;
+ break;
+ case VM_EXIT_MSR_STORE_ADDR:
+ current_evmcs->vm_exit_msr_store_addr = value;
+ break;
+ case VM_EXIT_MSR_LOAD_ADDR:
+ current_evmcs->vm_exit_msr_load_addr = value;
+ break;
+ case VM_ENTRY_MSR_LOAD_ADDR:
+ current_evmcs->vm_entry_msr_load_addr = value;
+ break;
+ case CR3_TARGET_VALUE0:
+ current_evmcs->cr3_target_value0 = value;
+ break;
+ case CR3_TARGET_VALUE1:
+ current_evmcs->cr3_target_value1 = value;
+ break;
+ case CR3_TARGET_VALUE2:
+ current_evmcs->cr3_target_value2 = value;
+ break;
+ case CR3_TARGET_VALUE3:
+ current_evmcs->cr3_target_value3 = value;
+ break;
+ case TPR_THRESHOLD:
+ current_evmcs->tpr_threshold = value;
+ break;
+ case GUEST_INTERRUPTIBILITY_INFO:
+ current_evmcs->guest_interruptibility_info = value;
+ break;
+ case CPU_BASED_VM_EXEC_CONTROL:
+ current_evmcs->cpu_based_vm_exec_control = value;
+ break;
+ case EXCEPTION_BITMAP:
+ current_evmcs->exception_bitmap = value;
+ break;
+ case VM_ENTRY_CONTROLS:
+ current_evmcs->vm_entry_controls = value;
+ break;
+ case VM_ENTRY_INTR_INFO_FIELD:
+ current_evmcs->vm_entry_intr_info_field = value;
+ break;
+ case VM_ENTRY_EXCEPTION_ERROR_CODE:
+ current_evmcs->vm_entry_exception_error_code = value;
+ break;
+ case VM_ENTRY_INSTRUCTION_LEN:
+ current_evmcs->vm_entry_instruction_len = value;
+ break;
+ case HOST_IA32_SYSENTER_CS:
+ current_evmcs->host_ia32_sysenter_cs = value;
+ break;
+ case PIN_BASED_VM_EXEC_CONTROL:
+ current_evmcs->pin_based_vm_exec_control = value;
+ break;
+ case VM_EXIT_CONTROLS:
+ current_evmcs->vm_exit_controls = value;
+ break;
+ case SECONDARY_VM_EXEC_CONTROL:
+ current_evmcs->secondary_vm_exec_control = value;
+ break;
+ case GUEST_ES_LIMIT:
+ current_evmcs->guest_es_limit = value;
+ break;
+ case GUEST_CS_LIMIT:
+ current_evmcs->guest_cs_limit = value;
+ break;
+ case GUEST_SS_LIMIT:
+ current_evmcs->guest_ss_limit = value;
+ break;
+ case GUEST_DS_LIMIT:
+ current_evmcs->guest_ds_limit = value;
+ break;
+ case GUEST_FS_LIMIT:
+ current_evmcs->guest_fs_limit = value;
+ break;
+ case GUEST_GS_LIMIT:
+ current_evmcs->guest_gs_limit = value;
+ break;
+ case GUEST_LDTR_LIMIT:
+ current_evmcs->guest_ldtr_limit = value;
+ break;
+ case GUEST_TR_LIMIT:
+ current_evmcs->guest_tr_limit = value;
+ break;
+ case GUEST_GDTR_LIMIT:
+ current_evmcs->guest_gdtr_limit = value;
+ break;
+ case GUEST_IDTR_LIMIT:
+ current_evmcs->guest_idtr_limit = value;
+ break;
+ case GUEST_ES_AR_BYTES:
+ current_evmcs->guest_es_ar_bytes = value;
+ break;
+ case GUEST_CS_AR_BYTES:
+ current_evmcs->guest_cs_ar_bytes = value;
+ break;
+ case GUEST_SS_AR_BYTES:
+ current_evmcs->guest_ss_ar_bytes = value;
+ break;
+ case GUEST_DS_AR_BYTES:
+ current_evmcs->guest_ds_ar_bytes = value;
+ break;
+ case GUEST_FS_AR_BYTES:
+ current_evmcs->guest_fs_ar_bytes = value;
+ break;
+ case GUEST_GS_AR_BYTES:
+ current_evmcs->guest_gs_ar_bytes = value;
+ break;
+ case GUEST_LDTR_AR_BYTES:
+ current_evmcs->guest_ldtr_ar_bytes = value;
+ break;
+ case GUEST_TR_AR_BYTES:
+ current_evmcs->guest_tr_ar_bytes = value;
+ break;
+ case GUEST_ACTIVITY_STATE:
+ current_evmcs->guest_activity_state = value;
+ break;
+ case GUEST_SYSENTER_CS:
+ current_evmcs->guest_sysenter_cs = value;
+ break;
+ case VM_INSTRUCTION_ERROR:
+ current_evmcs->vm_instruction_error = value;
+ break;
+ case VM_EXIT_REASON:
+ current_evmcs->vm_exit_reason = value;
+ break;
+ case VM_EXIT_INTR_INFO:
+ current_evmcs->vm_exit_intr_info = value;
+ break;
+ case VM_EXIT_INTR_ERROR_CODE:
+ current_evmcs->vm_exit_intr_error_code = value;
+ break;
+ case IDT_VECTORING_INFO_FIELD:
+ current_evmcs->idt_vectoring_info_field = value;
+ break;
+ case IDT_VECTORING_ERROR_CODE:
+ current_evmcs->idt_vectoring_error_code = value;
+ break;
+ case VM_EXIT_INSTRUCTION_LEN:
+ current_evmcs->vm_exit_instruction_len = value;
+ break;
+ case VMX_INSTRUCTION_INFO:
+ current_evmcs->vmx_instruction_info = value;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MASK:
+ current_evmcs->page_fault_error_code_mask = value;
+ break;
+ case PAGE_FAULT_ERROR_CODE_MATCH:
+ current_evmcs->page_fault_error_code_match = value;
+ break;
+ case CR3_TARGET_COUNT:
+ current_evmcs->cr3_target_count = value;
+ break;
+ case VM_EXIT_MSR_STORE_COUNT:
+ current_evmcs->vm_exit_msr_store_count = value;
+ break;
+ case VM_EXIT_MSR_LOAD_COUNT:
+ current_evmcs->vm_exit_msr_load_count = value;
+ break;
+ case VM_ENTRY_MSR_LOAD_COUNT:
+ current_evmcs->vm_entry_msr_load_count = value;
+ break;
+ case HOST_ES_SELECTOR:
+ current_evmcs->host_es_selector = value;
+ break;
+ case HOST_CS_SELECTOR:
+ current_evmcs->host_cs_selector = value;
+ break;
+ case HOST_SS_SELECTOR:
+ current_evmcs->host_ss_selector = value;
+ break;
+ case HOST_DS_SELECTOR:
+ current_evmcs->host_ds_selector = value;
+ break;
+ case HOST_FS_SELECTOR:
+ current_evmcs->host_fs_selector = value;
+ break;
+ case HOST_GS_SELECTOR:
+ current_evmcs->host_gs_selector = value;
+ break;
+ case HOST_TR_SELECTOR:
+ current_evmcs->host_tr_selector = value;
+ break;
+ case GUEST_ES_SELECTOR:
+ current_evmcs->guest_es_selector = value;
+ break;
+ case GUEST_CS_SELECTOR:
+ current_evmcs->guest_cs_selector = value;
+ break;
+ case GUEST_SS_SELECTOR:
+ current_evmcs->guest_ss_selector = value;
+ break;
+ case GUEST_DS_SELECTOR:
+ current_evmcs->guest_ds_selector = value;
+ break;
+ case GUEST_FS_SELECTOR:
+ current_evmcs->guest_fs_selector = value;
+ break;
+ case GUEST_GS_SELECTOR:
+ current_evmcs->guest_gs_selector = value;
+ break;
+ case GUEST_LDTR_SELECTOR:
+ current_evmcs->guest_ldtr_selector = value;
+ break;
+ case GUEST_TR_SELECTOR:
+ current_evmcs->guest_tr_selector = value;
+ break;
+ case VIRTUAL_PROCESSOR_ID:
+ current_evmcs->virtual_processor_id = value;
+ break;
+ default: return 1;
+ }
+
+ return 0;
+}
+
+static inline int evmcs_vmlaunch(void)
+{
+ int ret;
+
+ current_evmcs->hv_clean_fields = 0;
+
+ __asm__ __volatile__("push %%rbp;"
+ "push %%rcx;"
+ "push %%rdx;"
+ "push %%rsi;"
+ "push %%rdi;"
+ "push $0;"
+ "mov %%rsp, (%[host_rsp]);"
+ "lea 1f(%%rip), %%rax;"
+ "mov %%rax, (%[host_rip]);"
+ "vmlaunch;"
+ "incq (%%rsp);"
+ "1: pop %%rax;"
+ "pop %%rdi;"
+ "pop %%rsi;"
+ "pop %%rdx;"
+ "pop %%rcx;"
+ "pop %%rbp;"
+ : [ret]"=&a"(ret)
+ : [host_rsp]"r"
+ ((uint64_t)&current_evmcs->host_rsp),
+ [host_rip]"r"
+ ((uint64_t)&current_evmcs->host_rip)
+ : "memory", "cc", "rbx", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15");
+ return ret;
+}
+
+/*
+ * No guest state (e.g. GPRs) is established by this vmresume.
+ */
+static inline int evmcs_vmresume(void)
+{
+ int ret;
+
+ current_evmcs->hv_clean_fields = 0;
+
+ __asm__ __volatile__("push %%rbp;"
+ "push %%rcx;"
+ "push %%rdx;"
+ "push %%rsi;"
+ "push %%rdi;"
+ "push $0;"
+ "mov %%rsp, (%[host_rsp]);"
+ "lea 1f(%%rip), %%rax;"
+ "mov %%rax, (%[host_rip]);"
+ "vmresume;"
+ "incq (%%rsp);"
+ "1: pop %%rax;"
+ "pop %%rdi;"
+ "pop %%rsi;"
+ "pop %%rdx;"
+ "pop %%rcx;"
+ "pop %%rbp;"
+ : [ret]"=&a"(ret)
+ : [host_rsp]"r"
+ ((uint64_t)&current_evmcs->host_rsp),
+ [host_rip]"r"
+ ((uint64_t)&current_evmcs->host_rip)
+ : "memory", "cc", "rbx", "r8", "r9", "r10",
+ "r11", "r12", "r13", "r14", "r15");
+ return ret;
+}
+
+#endif /* !SELFTEST_KVM_EVMCS_H */
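
A hedged sketch of how a guest might drive the helpers above (the GPA/HVA
pairs, l2_guest_code and l2_stack_top are placeholders; page allocation and
VMXON setup are assumed to have happened via the vmx.h helpers, and
GUEST_ASSERT is the selftests' guest-side assertion):

    GUEST_ASSERT(!enable_vp_assist(vp_assist_gpa, vp_assist_hva));
    GUEST_ASSERT(!evmcs_vmptrld(evmcs_gpa, evmcs_hva));

    /* with enable_evmcs set, fields go into the eVMCS instead of vmwrite */
    evmcs_vmwrite(GUEST_RIP, (uint64_t)l2_guest_code);
    evmcs_vmwrite(GUEST_RSP, l2_stack_top);

    GUEST_ASSERT(!evmcs_vmlaunch());  /* hv_clean_fields is cleared internally */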
diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h
index 3acf9a91704c..a4e59e3b4826 100644
--- a/tools/testing/selftests/kvm/include/kvm_util.h
+++ b/tools/testing/selftests/kvm/include/kvm_util.h
@@ -7,7 +7,7 @@
*
*/
#ifndef SELFTEST_KVM_UTIL_H
-#define SELFTEST_KVM_UTIL_H 1
+#define SELFTEST_KVM_UTIL_H
#include "test_util.h"
@@ -17,12 +17,6 @@
#include "sparsebit.h"
-/*
- * Memslots can't cover the gfn starting at this gpa otherwise vCPUs can't be
- * created. Only applies to VMs using EPT.
- */
-#define KVM_DEFAULT_IDENTITY_MAP_ADDRESS 0xfffbc000ul
-
/* Callers of kvm_util only have an incomplete/opaque description of the
* structure kvm_util is using to maintain the state of a VM.
@@ -33,16 +27,23 @@ typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */
/* Minimum allocated guest virtual and physical addresses */
-#define KVM_UTIL_MIN_VADDR 0x2000
+#define KVM_UTIL_MIN_VADDR 0x2000
#define DEFAULT_GUEST_PHY_PAGES 512
#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
-#define DEFAULT_STACK_PGS 5
+#define DEFAULT_STACK_PGS 5
enum vm_guest_mode {
- VM_MODE_FLAT48PG,
+ VM_MODE_P52V48_4K,
+ VM_MODE_P52V48_64K,
+ VM_MODE_P40V48_4K,
+ VM_MODE_P40V48_64K,
+ NUM_VM_MODES,
};
+#define vm_guest_mode_string(m) vm_guest_mode_string[m]
+extern const char * const vm_guest_mode_string[];
+
enum vm_mem_backing_src_type {
VM_MEM_SRC_ANONYMOUS,
VM_MEM_SRC_ANONYMOUS_THP,
@@ -58,15 +59,15 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm);
void kvm_vm_release(struct kvm_vm *vmp);
void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log);
-int kvm_memcmp_hva_gva(void *hva,
- struct kvm_vm *vm, const vm_vaddr_t gva, size_t len);
+int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
+ size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename,
- uint32_t data_memslot, uint32_t pgd_memslot);
+ uint32_t data_memslot, uint32_t pgd_memslot);
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void vcpu_dump(FILE *stream, struct kvm_vm *vm,
- uint32_t vcpuid, uint8_t indent);
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid,
+ uint8_t indent);
void vm_create_irqchip(struct kvm_vm *vm);
@@ -75,13 +76,14 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
uint64_t guest_paddr, uint32_t slot, uint64_t npages,
uint32_t flags);
-void vcpu_ioctl(struct kvm_vm *vm,
- uint32_t vcpuid, unsigned long ioctl, void *arg);
+void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, unsigned long ioctl,
+ void *arg);
void vm_ioctl(struct kvm_vm *vm, unsigned long ioctl, void *arg);
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot);
+void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
+ int gdt_memslot);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot);
+ uint32_t data_memslot, uint32_t pgd_memslot);
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
size_t size, uint32_t pgd_memslot);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
@@ -93,56 +95,35 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state);
-void vcpu_regs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs);
-void vcpu_regs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs);
+ struct kvm_mp_state *mp_state);
+void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
+void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...);
-void vcpu_sregs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
-void vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
-int _vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs);
+void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
+void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
+int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_sregs *sregs);
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
+ struct kvm_vcpu_events *events);
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events);
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value);
+ struct kvm_vcpu_events *events);
const char *exit_reason_str(unsigned int exit_reason);
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot);
void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
- uint32_t pgd_memslot);
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
- vm_paddr_t paddr_min, uint32_t memslot);
-
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
-void vcpu_set_cpuid(
- struct kvm_vm *vm, uint32_t vcpuid, struct kvm_cpuid2 *cpuid);
-
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
-
-static inline struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_entry(uint32_t function)
-{
- return kvm_get_supported_cpuid_index(function, 0);
-}
+ uint32_t pgd_memslot);
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+ uint32_t memslot);
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot);
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_size,
void *guest_code);
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code);
-typedef void (*vmx_guest_code_t)(vm_vaddr_t vmxon_vaddr,
- vm_paddr_t vmxon_paddr,
- vm_vaddr_t vmcs_vaddr,
- vm_paddr_t vmcs_paddr);
-
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
uint64_t end);
@@ -152,43 +133,49 @@ allocate_kvm_dirty_log(struct kvm_userspace_memory_region *region);
int vm_create_device(struct kvm_vm *vm, struct kvm_create_device *cd);
-#define GUEST_PORT_SYNC 0x1000
-#define GUEST_PORT_ABORT 0x1001
-#define GUEST_PORT_DONE 0x1002
-
-static inline void __exit_to_l0(uint16_t port, uint64_t arg0, uint64_t arg1)
-{
- __asm__ __volatile__("in %[port], %%al"
- :
- : [port]"d"(port), "D"(arg0), "S"(arg1)
- : "rax");
-}
-
-/*
- * Allows to pass three arguments to the host: port is 16bit wide,
- * arg0 & arg1 are 64bit wide
- */
-#define GUEST_SYNC_ARGS(_port, _arg0, _arg1) \
- __exit_to_l0(_port, (uint64_t) (_arg0), (uint64_t) (_arg1))
-
-#define GUEST_ASSERT(_condition) do { \
- if (!(_condition)) \
- GUEST_SYNC_ARGS(GUEST_PORT_ABORT, \
- "Failed guest assert: " \
- #_condition, __LINE__); \
- } while (0)
-
-#define GUEST_SYNC(stage) GUEST_SYNC_ARGS(GUEST_PORT_SYNC, "hello", stage)
+#define sync_global_to_guest(vm, g) ({ \
+ typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ memcpy(_p, &(g), sizeof(g)); \
+})
+
+#define sync_global_from_guest(vm, g) ({ \
+ typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
+ memcpy(&(g), _p, sizeof(g)); \
+})
+
+/* ucall implementation types */
+typedef enum {
+ UCALL_PIO,
+ UCALL_MMIO,
+} ucall_type_t;
+
+/* Common ucalls */
+enum {
+ UCALL_NONE,
+ UCALL_SYNC,
+ UCALL_ABORT,
+ UCALL_DONE,
+};
-#define GUEST_DONE() GUEST_SYNC_ARGS(GUEST_PORT_DONE, 0, 0)
+#define UCALL_MAX_ARGS 6
-struct guest_args {
- uint64_t arg0;
- uint64_t arg1;
- uint16_t port;
-} __attribute__ ((packed));
+struct ucall {
+ uint64_t cmd;
+ uint64_t args[UCALL_MAX_ARGS];
+};
-void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
- struct guest_args *args);
+void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg);
+void ucall_uninit(struct kvm_vm *vm);
+void ucall(uint64_t cmd, int nargs, ...);
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc);
+
+#define GUEST_SYNC(stage) ucall(UCALL_SYNC, 2, "hello", stage)
+#define GUEST_DONE() ucall(UCALL_DONE, 0)
+#define GUEST_ASSERT(_condition) do { \
+ if (!(_condition)) \
+ ucall(UCALL_ABORT, 2, \
+ "Failed guest assert: " \
+ #_condition, __LINE__); \
+} while (0)
#endif /* SELFTEST_KVM_UTIL_H */
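The GUEST_* macros above now funnel through ucall(); a host-side consumer, sketched under the assumption of an already-created VM and vCPU, might look like:

#include "kvm_util.h"

static void run_to_done(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	for (;;) {
		vcpu_run(vm, vcpuid);
		switch (get_ucall(vm, vcpuid, &uc)) {
		case UCALL_SYNC:
			/* uc.args[1] carries the stage from GUEST_SYNC() */
			continue;
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at line %lu",
				    (const char *)uc.args[0], uc.args[1]);
			/* not reached */
		case UCALL_DONE:
			return;
		}
	}
}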
diff --git a/tools/testing/selftests/kvm/include/sparsebit.h b/tools/testing/selftests/kvm/include/sparsebit.h
index 54cfeb6568d3..31e030915c1f 100644
--- a/tools/testing/selftests/kvm/include/sparsebit.h
+++ b/tools/testing/selftests/kvm/include/sparsebit.h
@@ -15,8 +15,8 @@
* even in the case where most bits are set.
*/
-#ifndef _TEST_SPARSEBIT_H_
-#define _TEST_SPARSEBIT_H_
+#ifndef SELFTEST_KVM_SPARSEBIT_H
+#define SELFTEST_KVM_SPARSEBIT_H
#include <stdbool.h>
#include <stdint.h>
@@ -72,4 +72,4 @@ void sparsebit_validate_internal(struct sparsebit *sbit);
}
#endif
-#endif /* _TEST_SPARSEBIT_H_ */
+#endif /* SELFTEST_KVM_SPARSEBIT_H */
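The allocator changes later in this series lean on a handful of sparsebit operations; a freestanding sketch of the lookup-then-clear pattern, with free_pages tracking unused page indexes:

#include "sparsebit.h"

/* Returns the first free page index at or above min, or 0 if none;
 * mirrors how the physical page allocator consumes unused_phy_pages. */
static sparsebit_idx_t claim_page(struct sparsebit *free_pages,
				  sparsebit_idx_t min)
{
	sparsebit_idx_t pg = min;

	if (!sparsebit_is_set(free_pages, pg))
		pg = sparsebit_next_set(free_pages, pg);  /* 0 when exhausted */
	if (pg)
		sparsebit_clear(free_pages, pg);          /* mark as in use */
	return pg;
}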
diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h
index 73c3933436ec..c7dafe8bd02c 100644
--- a/tools/testing/selftests/kvm/include/test_util.h
+++ b/tools/testing/selftests/kvm/include/test_util.h
@@ -7,8 +7,8 @@
*
*/
-#ifndef TEST_UTIL_H
-#define TEST_UTIL_H 1
+#ifndef SELFTEST_KVM_TEST_UTIL_H
+#define SELFTEST_KVM_TEST_UTIL_H
#include <stdlib.h>
#include <stdarg.h>
@@ -41,4 +41,4 @@ void test_assert(bool exp, const char *exp_str,
#a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \
} while (0)
-#endif /* TEST_UTIL_H */
+#endif /* SELFTEST_KVM_TEST_UTIL_H */
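In practice the assert helper guarded here reads like this; the check itself is an arbitrary illustration:

#include "test_util.h"

static void check_page_size(unsigned int page_size)
{
	/* On failure TEST_ASSERT prints the expression, location and this
	 * formatted message, dumps a stack trace and exits the test. */
	TEST_ASSERT(page_size == 0x1000 || page_size == 0x10000,
		    "Unsupported page size: 0x%x", page_size);
}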
diff --git a/tools/testing/selftests/kvm/include/x86.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index 42c3596815b8..e2884c2b81ff 100644
--- a/tools/testing/selftests/kvm/include/x86.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/include/x86.h
+ * tools/testing/selftests/kvm/include/x86_64/processor.h
*
* Copyright (C) 2018, Google LLC.
*
@@ -7,8 +7,8 @@
*
*/
-#ifndef SELFTEST_KVM_X86_H
-#define SELFTEST_KVM_X86_H
+#ifndef SELFTEST_KVM_PROCESSOR_H
+#define SELFTEST_KVM_PROCESSOR_H
#include <assert.h>
#include <stdint.h>
@@ -305,7 +305,25 @@ static inline unsigned long get_xmm(int n)
struct kvm_x86_state;
struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid);
-void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state);
+void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_x86_state *state);
+
+struct kvm_cpuid2 *kvm_get_supported_cpuid(void);
+void vcpu_set_cpuid(struct kvm_vm *vm, uint32_t vcpuid,
+ struct kvm_cpuid2 *cpuid);
+
+struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_index(uint32_t function, uint32_t index);
+
+static inline struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_entry(uint32_t function)
+{
+ return kvm_get_supported_cpuid_index(function, 0);
+}
+
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index);
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value);
/*
* Basic CPU control in CR0
@@ -1044,4 +1062,4 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
#define MSR_VM_IGNNE 0xc0010115
#define MSR_VM_HSAVE_PA 0xc0010117
-#endif /* !SELFTEST_KVM_X86_H */
+#endif /* SELFTEST_KVM_PROCESSOR_H */
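With the CPUID and MSR helpers moved into this x86-only header, a feature probe might be sketched as follows; the TSC-deadline bit is just an example leaf/bit, assuming the usual kvm_util.h/processor.h include pair:

#include "kvm_util.h"
#include "processor.h"

static bool kvm_supports_tsc_deadline_timer(void)
{
	/* CPUID.01H:ECX[24] advertises the TSC-deadline timer */
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	return entry->ecx & (1u << 24);
}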
diff --git a/tools/testing/selftests/kvm/include/vmx.h b/tools/testing/selftests/kvm/include/x86_64/vmx.h
index b9ffe1024d3a..c9bd935b939c 100644
--- a/tools/testing/selftests/kvm/include/vmx.h
+++ b/tools/testing/selftests/kvm/include/x86_64/vmx.h
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/include/vmx.h
+ * tools/testing/selftests/kvm/include/x86_64/vmx.h
*
* Copyright (C) 2018, Google LLC.
*
@@ -11,7 +11,7 @@
#define SELFTEST_KVM_VMX_H
#include <stdint.h>
-#include "x86.h"
+#include "processor.h"
#define CPUID_VMX_BIT 5
@@ -339,6 +339,8 @@ struct vmx_msr_entry {
uint64_t value;
} __attribute__ ((aligned(16)));
+#include "evmcs.h"
+
static inline int vmxon(uint64_t phys)
{
uint8_t ret;
@@ -372,6 +374,9 @@ static inline int vmptrld(uint64_t vmcs_pa)
{
uint8_t ret;
+ if (enable_evmcs)
+ return -1;
+
__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
: [ret]"=rm"(ret)
: [pa]"m"(vmcs_pa)
@@ -385,6 +390,9 @@ static inline int vmptrst(uint64_t *value)
uint64_t tmp;
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmptrst(value);
+
__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
: [value]"=m"(tmp), [ret]"=rm"(ret)
: : "cc", "memory");
@@ -411,6 +419,9 @@ static inline int vmlaunch(void)
{
int ret;
+ if (enable_evmcs)
+ return evmcs_vmlaunch();
+
__asm__ __volatile__("push %%rbp;"
"push %%rcx;"
"push %%rdx;"
@@ -443,6 +454,9 @@ static inline int vmresume(void)
{
int ret;
+ if (enable_evmcs)
+ return evmcs_vmresume();
+
__asm__ __volatile__("push %%rbp;"
"push %%rcx;"
"push %%rdx;"
@@ -482,6 +496,9 @@ static inline int vmread(uint64_t encoding, uint64_t *value)
uint64_t tmp;
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmread(encoding, value);
+
__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
: [value]"=rm"(tmp), [ret]"=rm"(ret)
: [encoding]"r"(encoding)
@@ -506,6 +523,9 @@ static inline int vmwrite(uint64_t encoding, uint64_t value)
{
uint8_t ret;
+ if (enable_evmcs)
+ return evmcs_vmwrite(encoding, value);
+
__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
: [ret]"=rm"(ret)
: [value]"rm"(value), [encoding]"r"(encoding)
@@ -543,10 +563,19 @@ struct vmx_pages {
void *vmwrite_hva;
uint64_t vmwrite_gpa;
void *vmwrite;
+
+ void *vp_assist_hva;
+ uint64_t vp_assist_gpa;
+ void *vp_assist;
+
+ void *enlightened_vmcs_hva;
+ uint64_t enlightened_vmcs_gpa;
+ void *enlightened_vmcs;
};
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
+bool load_vmcs(struct vmx_pages *vmx);
-#endif /* !SELFTEST_KVM_VMX_H */
+#endif /* SELFTEST_KVM_VMX_H */
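Host-side, the new vmx_pages fields are filled in by vcpu_alloc_vmx(); a setup sketch, assuming vm and vcpuid come from the usual vm_create_default() flow:

#include "kvm_util.h"
#include "vmx.h"

static void setup_l1_vmx(struct kvm_vm *vm, uint32_t vcpuid)
{
	vm_vaddr_t vmx_gva;

	/* Allocates the vmxon/vmcs/msr and, with this series, the
	 * vp_assist and enlightened_vmcs pages, returning the guest
	 * virtual address of the tracking struct. */
	vcpu_alloc_vmx(vm, &vmx_gva);
	vcpu_args_set(vm, vcpuid, 1, vmx_gva);	/* hand the GVA to L1 */
}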
diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c
new file mode 100644
index 000000000000..b6022e2f116e
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AArch64 code
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000
+
+static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
+{
+ return (v + vm->page_size) & ~(vm->page_size - 1);
+}
+
+static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+
+ TEST_ASSERT(vm->pgtable_levels == 4,
+ "Mode %d does not have 4 page table levels", vm->mode);
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+
+ TEST_ASSERT(vm->pgtable_levels >= 3,
+ "Mode %d does not have >= 3 page table levels", vm->mode);
+
+ return (gva >> shift) & mask;
+}
+
+static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;
+ return (gva >> vm->page_shift) & mask;
+}
+
+static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
+{
+ uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;
+ return entry & mask;
+}
+
+static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
+{
+ unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
+ return 1 << (vm->va_bits - shift);
+}
+
+static uint64_t ptrs_per_pte(struct kvm_vm *vm)
+{
+ return 1 << (vm->page_shift - 3);
+}
+
+void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
+{
+ int rc;
+
+ if (!vm->pgd_created) {
+ vm_paddr_t paddr = vm_phy_pages_alloc(vm,
+ page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
+ KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ vm->pgd = paddr;
+ vm->pgd_created = true;
+ }
+}
+
+void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint32_t pgd_memslot, uint64_t flags)
+{
+ uint8_t attr_idx = flags & 7;
+ uint64_t *ptep;
+
+ TEST_ASSERT((vaddr % vm->page_size) == 0,
+ "Virtual address not on page boundary,\n"
+ " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
+ TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
+ (vaddr >> vm->page_shift)),
+ "Invalid virtual address, vaddr: 0x%lx", vaddr);
+ TEST_ASSERT((paddr % vm->page_size) == 0,
+ "Physical address not on page boundary,\n"
+ " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
+ TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+ "Physical address beyond beyond maximum supported,\n"
+ " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+ paddr, vm->max_gfn, vm->page_size);
+
+ ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+
+ switch (vm->pgtable_levels) {
+ case 4:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+ /* fall through */
+ case 3:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
+ if (!*ptep) {
+ *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
+ *ptep |= 3;
+ }
+ /* fall through */
+ case 2:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
+ break;
+ default:
+ TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ }
+
+ *ptep = paddr | 3;
+ *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
+}
+
+void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
+ uint32_t pgd_memslot)
+{
+ uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */
+
+ _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
+}
+
+vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
+{
+ uint64_t *ptep;
+
+ if (!vm->pgd_created)
+ goto unmapped_gva;
+
+ ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+
+ switch (vm->pgtable_levels) {
+ case 4:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ /* fall through */
+ case 3:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ /* fall through */
+ case 2:
+ ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
+ if (!ptep)
+ goto unmapped_gva;
+ break;
+ default:
+ TEST_ASSERT(false, "Page table levels must be 2, 3, or 4");
+ }
+
+ return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));
+
+unmapped_gva:
+ TEST_ASSERT(false, "No mapping for vm virtual address, "
+ "gva: 0x%lx", gva);
+}
+
+static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
+{
+#ifdef DEBUG_VM
+ static const char * const type[] = { "", "pud", "pmd", "pte" };
+ uint64_t pte, *ptep;
+
+ if (level == 4)
+ return;
+
+ for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
+ ptep = addr_gpa2hva(vm, pte);
+ if (!*ptep)
+ continue;
+ printf("%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
+ pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
+ }
+#endif
+}
+
+void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
+{
+ int level = 4 - (vm->pgtable_levels - 1);
+ uint64_t pgd, *ptep;
+
+ if (!vm->pgd_created)
+ return;
+
+ for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
+ ptep = addr_gpa2hva(vm, pgd);
+ if (!*ptep)
+ continue;
+ printf("%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
+ pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
+ }
+}
+
+struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
+ void *guest_code)
+{
+ uint64_t ptrs_per_4k_pte = 512;
+ uint64_t extra_pg_pages = (extra_mem_pages / ptrs_per_4k_pte) * 2;
+ struct kvm_vm *vm;
+
+ vm = vm_create(VM_MODE_P52V48_4K, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
+
+ kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
+ vm_vcpu_add_default(vm, vcpuid, guest_code);
+
+ return vm;
+}
+
+void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
+{
+ size_t stack_size = vm->page_size == 4096 ?
+ DEFAULT_STACK_PGS * vm->page_size :
+ vm->page_size;
+ uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
+ DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);
+
+ vm_vcpu_add(vm, vcpuid, 0, 0);
+
+ set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
+ set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+}
+
+void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
+{
+ struct kvm_vcpu_init init;
+ uint64_t sctlr_el1, tcr_el1;
+
+ memset(&init, 0, sizeof(init));
+ init.target = KVM_ARM_TARGET_GENERIC_V8;
+ vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, &init);
+
+ /*
+ * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
+ * registers, which the variable argument list macros do.
+ */
+ set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);
+
+ get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
+ get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);
+
+ switch (vm->mode) {
+ case VM_MODE_P52V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ break;
+ case VM_MODE_P52V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
+ break;
+ case VM_MODE_P40V48_4K:
+ tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
+ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
+ break;
+ case VM_MODE_P40V48_64K:
+ tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
+ tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
+ break;
+ default:
+ TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", vm->mode);
+ }
+
+ sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
+ /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */;
+ tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
+ tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;
+
+ set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
+ set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
+}
+
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+{
+ uint64_t pstate, pc;
+
+ get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
+ get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);
+
+ fprintf(stream, "%*spstate: 0x%.16llx pc: 0x%.16llx\n",
+ indent, "", pstate, pc);
+
+}
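The index helpers above are just shifted and masked slices of the VA; a standalone restatement of the arithmetic for the P52V48_4K geometry (4K pages, 48-bit VA, 4 levels):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12, levels = 4, va_bits = 48;
	const unsigned int bits = page_shift - 3;	/* 9 bits, 512 entries */
	uint64_t gva = 0x0000ffee12345000ull;		/* arbitrary example */

	unsigned int pgd_shift = (levels - 1) * bits + page_shift; /* 39 */
	uint64_t pgd_mask = (1ull << (va_bits - pgd_shift)) - 1;   /* 0x1ff */

	printf("pgd index: %llu\n",
	       (unsigned long long)((gva >> pgd_shift) & pgd_mask));
	printf("pte index: %llu\n",
	       (unsigned long long)((gva >> page_shift) & ((1ull << bits) - 1)));
	return 0;
}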
diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c
index cd01144d27c8..6398efe67885 100644
--- a/tools/testing/selftests/kvm/lib/assert.c
+++ b/tools/testing/selftests/kvm/lib/assert.c
@@ -13,7 +13,7 @@
#include <execinfo.h>
#include <sys/syscall.h>
-#include "../../kselftest.h"
+#include "kselftest.h"
/* Dumps the current stack trace to stderr. */
static void __attribute__((noinline)) test_dump_stack(void);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 6fd8c089cafc..8c06da4f03db 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -16,10 +16,8 @@
#include <sys/stat.h>
#include <linux/kernel.h>
-#define KVM_DEV_PATH "/dev/kvm"
-
#define KVM_UTIL_PGS_PER_HUGEPG 512
-#define KVM_UTIL_MIN_PADDR 0x2000
+#define KVM_UTIL_MIN_PFN 2
/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
@@ -30,7 +28,8 @@ static void *align(void *x, size_t size)
return (void *) (((size_t) x + mask) & ~mask);
}
-/* Capability
+/*
+ * Capability
*
* Input Args:
* cap - Capability
@@ -92,16 +91,23 @@ static void vm_open(struct kvm_vm *vm, int perm)
if (vm->kvm_fd < 0)
exit(KSFT_SKIP);
- /* Create VM. */
vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, NULL);
TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
"rc: %i errno: %i", vm->fd, errno);
}
-/* VM Create
+const char * const vm_guest_mode_string[] = {
+ "PA-bits:52, VA-bits:48, 4K pages",
+ "PA-bits:52, VA-bits:48, 64K pages",
+ "PA-bits:40, VA-bits:48, 4K pages",
+ "PA-bits:40, VA-bits:48, 64K pages",
+};
+
+/*
+ * VM Create
*
* Input Args:
- * mode - VM Mode (e.g. VM_MODE_FLAT48PG)
+ * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
* phy_pages - Physical memory pages
* perm - permission
*
@@ -110,7 +116,7 @@ static void vm_open(struct kvm_vm *vm, int perm)
* Return:
* Pointer to opaque structure that describes the created VM.
*
- * Creates a VM with the mode specified by mode (e.g. VM_MODE_FLAT48PG).
+ * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
* When phy_pages is non-zero, a memory region of phy_pages physical pages
* is created and mapped starting at guest physical address 0. The file
* descriptor to control the created VM is created with the permissions
@@ -121,7 +127,6 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
struct kvm_vm *vm;
int kvm_fd;
- /* Allocate memory. */
vm = calloc(1, sizeof(*vm));
TEST_ASSERT(vm != NULL, "Insufficent Memory");
@@ -130,26 +135,48 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
/* Setup mode specific traits. */
switch (vm->mode) {
- case VM_MODE_FLAT48PG:
+ case VM_MODE_P52V48_4K:
+ vm->pgtable_levels = 4;
vm->page_size = 0x1000;
vm->page_shift = 12;
-
- /* Limit to 48-bit canonical virtual addresses. */
- vm->vpages_valid = sparsebit_alloc();
- sparsebit_set_num(vm->vpages_valid,
- 0, (1ULL << (48 - 1)) >> vm->page_shift);
- sparsebit_set_num(vm->vpages_valid,
- (~((1ULL << (48 - 1)) - 1)) >> vm->page_shift,
- (1ULL << (48 - 1)) >> vm->page_shift);
-
- /* Limit physical addresses to 52-bits. */
- vm->max_gfn = ((1ULL << 52) >> vm->page_shift) - 1;
+ vm->va_bits = 48;
+ break;
+ case VM_MODE_P52V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 52;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
+ vm->va_bits = 48;
+ break;
+ case VM_MODE_P40V48_4K:
+ vm->pgtable_levels = 4;
+ vm->pa_bits = 40;
+ vm->va_bits = 48;
+ vm->page_size = 0x1000;
+ vm->page_shift = 12;
+ break;
+ case VM_MODE_P40V48_64K:
+ vm->pgtable_levels = 3;
+ vm->pa_bits = 40;
+ vm->va_bits = 48;
+ vm->page_size = 0x10000;
+ vm->page_shift = 16;
break;
-
default:
TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
}
+ /* Limit to VA-bit canonical virtual addresses. */
+ vm->vpages_valid = sparsebit_alloc();
+ sparsebit_set_num(vm->vpages_valid,
+ 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+ sparsebit_set_num(vm->vpages_valid,
+ (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
+ (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
+
+ /* Limit physical addresses to PA-bits. */
+ vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
+
/* Allocate and setup memory for guest. */
vm->vpages_mapped = sparsebit_alloc();
if (phy_pages != 0)
@@ -159,7 +186,8 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
return vm;
}
-/* VM Restart
+/*
+ * VM Restart
*
* Input Args:
* vm - VM that has been released before
@@ -186,7 +214,8 @@ void kvm_vm_restart(struct kvm_vm *vmp, int perm)
" rc: %i errno: %i\n"
" slot: %u flags: 0x%x\n"
" guest_phys_addr: 0x%lx size: 0x%lx",
- ret, errno, region->region.slot, region->region.flags,
+ ret, errno, region->region.slot,
+ region->region.flags,
region->region.guest_phys_addr,
region->region.memory_size);
}
@@ -202,7 +231,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
strerror(-ret));
}
-/* Userspace Memory Region Find
+/*
+ * Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
@@ -220,8 +250,8 @@ void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
* of the regions is returned. Null is returned only when no overlapping
* region exists.
*/
-static struct userspace_mem_region *userspace_mem_region_find(
- struct kvm_vm *vm, uint64_t start, uint64_t end)
+static struct userspace_mem_region *
+userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
struct userspace_mem_region *region;
@@ -237,7 +267,8 @@ static struct userspace_mem_region *userspace_mem_region_find(
return NULL;
}
-/* KVM Userspace Memory Region Find
+/*
+ * KVM Userspace Memory Region Find
*
* Input Args:
* vm - Virtual Machine
@@ -265,7 +296,8 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
return &region->region;
}
-/* VCPU Find
+/*
+ * VCPU Find
*
* Input Args:
* vm - Virtual Machine
@@ -280,8 +312,7 @@ kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
* returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
* for the specified vcpuid.
*/
-struct vcpu *vcpu_find(struct kvm_vm *vm,
- uint32_t vcpuid)
+struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
struct vcpu *vcpup;
@@ -293,7 +324,8 @@ struct vcpu *vcpu_find(struct kvm_vm *vm,
return NULL;
}
-/* VM VCPU Remove
+/*
+ * VM VCPU Remove
*
* Input Args:
* vm - Virtual Machine
@@ -330,11 +362,9 @@ void kvm_vm_release(struct kvm_vm *vmp)
{
int ret;
- /* Free VCPUs. */
while (vmp->vcpu_head)
vm_vcpu_rm(vmp, vmp->vcpu_head->id);
- /* Close file descriptor for the VM. */
ret = close(vmp->fd);
TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
" vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
@@ -344,7 +374,8 @@ void kvm_vm_release(struct kvm_vm *vmp)
" vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}
-/* Destroys and frees the VM pointed to by vmp.
+/*
+ * Destroys and frees the VM pointed to by vmp.
*/
void kvm_vm_free(struct kvm_vm *vmp)
{
@@ -383,7 +414,8 @@ void kvm_vm_free(struct kvm_vm *vmp)
free(vmp);
}
-/* Memory Compare, host virtual to guest virtual
+/*
+ * Memory Compare, host virtual to guest virtual
*
* Input Args:
* hva - Starting host virtual address
@@ -405,23 +437,25 @@ void kvm_vm_free(struct kvm_vm *vmp)
* a length of len, to the guest bytes starting at the guest virtual
* address given by gva.
*/
-int kvm_memcmp_hva_gva(void *hva,
- struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
+int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
size_t amt;
- /* Compare a batch of bytes until either a match is found
+ /*
+ * Compare a batch of bytes until either a match is found
* or all the bytes have been compared.
*/
for (uintptr_t offset = 0; offset < len; offset += amt) {
uintptr_t ptr1 = (uintptr_t)hva + offset;
- /* Determine host address for guest virtual address
+ /*
+ * Determine host address for guest virtual address
* at offset.
*/
uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
- /* Determine amount to compare on this pass.
+ /*
+ * Determine amount to compare on this pass.
* Don't allow the comparison to cross a page boundary.
*/
amt = len - offset;
@@ -433,7 +467,8 @@ int kvm_memcmp_hva_gva(void *hva,
assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
- /* Perform the comparison. If there is a difference
+ /*
+ * Perform the comparison. If there is a difference
* return that result to the caller, otherwise need
* to continue on looking for a mismatch.
*/
@@ -442,109 +477,15 @@ int kvm_memcmp_hva_gva(void *hva,
return ret;
}
- /* No mismatch found. Let the caller know the two memory
+ /*
+ * No mismatch found. Let the caller know the two memory
* areas are equal.
*/
return 0;
}
-/* Allocate an instance of struct kvm_cpuid2
- *
- * Input Args: None
- *
- * Output Args: None
- *
- * Return: A pointer to the allocated struct. The caller is responsible
- * for freeing this struct.
- *
- * Since kvm_cpuid2 uses a 0-length array to allow a the size of the
- * array to be decided at allocation time, allocation is slightly
- * complicated. This function uses a reasonable default length for
- * the array and performs the appropriate allocation.
- */
-static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
-{
- struct kvm_cpuid2 *cpuid;
- int nent = 100;
- size_t size;
-
- size = sizeof(*cpuid);
- size += nent * sizeof(struct kvm_cpuid_entry2);
- cpuid = malloc(size);
- if (!cpuid) {
- perror("malloc");
- abort();
- }
-
- cpuid->nent = nent;
-
- return cpuid;
-}
-
-/* KVM Supported CPUID Get
- *
- * Input Args: None
- *
- * Output Args:
- *
- * Return: The supported KVM CPUID
- *
- * Get the guest CPUID supported by KVM.
- */
-struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
-{
- static struct kvm_cpuid2 *cpuid;
- int ret;
- int kvm_fd;
-
- if (cpuid)
- return cpuid;
-
- cpuid = allocate_kvm_cpuid2();
- kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
- if (kvm_fd < 0)
- exit(KSFT_SKIP);
-
- ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
- TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
- ret, errno);
-
- close(kvm_fd);
- return cpuid;
-}
-
-/* Locate a cpuid entry.
- *
- * Input Args:
- * cpuid: The cpuid.
- * function: The function of the cpuid entry to find.
- *
- * Output Args: None
- *
- * Return: A pointer to the cpuid entry. Never returns NULL.
- */
-struct kvm_cpuid_entry2 *
-kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
-{
- struct kvm_cpuid2 *cpuid;
- struct kvm_cpuid_entry2 *entry = NULL;
- int i;
-
- cpuid = kvm_get_supported_cpuid();
- for (i = 0; i < cpuid->nent; i++) {
- if (cpuid->entries[i].function == function &&
- cpuid->entries[i].index == index) {
- entry = &cpuid->entries[i];
- break;
- }
- }
-
- TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
- function, index);
- return entry;
-}
-
-/* VM Userspace Memory Region Add
+/*
+ * VM Userspace Memory Region Add
*
* Input Args:
* vm - Virtual Machine
@@ -586,7 +527,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
guest_paddr, npages, vm->max_gfn, vm->page_size);
- /* Confirm a mem region with an overlapping address doesn't
+ /*
+ * Confirm a mem region with an overlapping address doesn't
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
@@ -677,7 +619,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
vm->userspace_mem_region_head = region;
}
-/* Memslot to region
+/*
+ * Memslot to region
*
* Input Args:
* vm - Virtual Machine
@@ -691,8 +634,8 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* on error (e.g. currently no memory region using memslot as a KVM
* memory slot ID).
*/
-static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
- uint32_t memslot)
+static struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
struct userspace_mem_region *region;
@@ -712,7 +655,8 @@ static struct userspace_mem_region *memslot2region(struct kvm_vm *vm,
return region;
}
-/* VM Memory Region Flags Set
+/*
+ * VM Memory Region Flags Set
*
* Input Args:
* vm - Virtual Machine
@@ -730,7 +674,6 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
int ret;
struct userspace_mem_region *region;
- /* Locate memory region. */
region = memslot2region(vm, slot);
region->region.flags = flags;
@@ -742,7 +685,8 @@ void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
ret, errno, slot, flags);
}
-/* VCPU mmap Size
+/*
+ * VCPU mmap Size
*
* Input Args: None
*
@@ -772,7 +716,8 @@ static int vcpu_mmap_sz(void)
return ret;
}
-/* VM VCPU Add
+/*
+ * VM VCPU Add
*
* Input Args:
* vm - Virtual Machine
@@ -785,7 +730,8 @@ static int vcpu_mmap_sz(void)
* Creates and adds to the VM specified by vm and virtual CPU with
* the ID given by vcpuid.
*/
-void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_memslot)
+void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
+ int gdt_memslot)
{
struct vcpu *vcpu;
@@ -823,7 +769,8 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot);
}
-/* VM Virtual Address Unused Gap
+/*
+ * VM Virtual Address Unused Gap
*
* Input Args:
* vm - Virtual Machine
@@ -843,14 +790,14 @@ void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, int gdt_me
* sz unallocated bytes >= vaddr_min is available.
*/
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
- vm_vaddr_t vaddr_min)
+ vm_vaddr_t vaddr_min)
{
uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
/* Determine lowest permitted virtual page index. */
uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
if ((pgidx_start * vm->page_size) < vaddr_min)
- goto no_va_found;
+ goto no_va_found;
/* Loop over section with enough valid virtual page indexes. */
if (!sparsebit_is_set_num(vm->vpages_valid,
@@ -909,7 +856,8 @@ va_found:
return pgidx_start * vm->page_size;
}
-/* VM Virtual Address Allocate
+/*
+ * VM Virtual Address Allocate
*
* Input Args:
* vm - Virtual Machine
@@ -930,13 +878,14 @@ va_found:
* a page.
*/
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
- uint32_t data_memslot, uint32_t pgd_memslot)
+ uint32_t data_memslot, uint32_t pgd_memslot)
{
uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
virt_pgd_alloc(vm, pgd_memslot);
- /* Find an unused range of virtual page addresses of at least
+ /*
+ * Find an unused range of virtual page addresses of at least
* pages in length.
*/
vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
@@ -946,7 +895,8 @@ vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
pages--, vaddr += vm->page_size) {
vm_paddr_t paddr;
- paddr = vm_phy_page_alloc(vm, KVM_UTIL_MIN_PADDR, data_memslot);
+ paddr = vm_phy_page_alloc(vm,
+ KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
virt_pg_map(vm, vaddr, paddr, pgd_memslot);
@@ -990,7 +940,8 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
}
}
-/* Address VM Physical to Host Virtual
+/*
+ * Address VM Physical to Host Virtual
*
* Input Args:
* vm - Virtual Machine
@@ -1022,7 +973,8 @@ void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
return NULL;
}
-/* Address Host Virtual to VM Physical
+/*
+ * Address Host Virtual to VM Physical
*
* Input Args:
* vm - Virtual Machine
@@ -1056,7 +1008,8 @@ vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
return -1;
}
-/* VM Create IRQ Chip
+/*
+ * VM Create IRQ Chip
*
* Input Args:
* vm - Virtual Machine
@@ -1078,7 +1031,8 @@ void vm_create_irqchip(struct kvm_vm *vm)
vm->has_irqchip = true;
}
-/* VM VCPU State
+/*
+ * VM VCPU State
*
* Input Args:
* vm - Virtual Machine
@@ -1100,7 +1054,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
return vcpu->state;
}
-/* VM VCPU Run
+/*
+ * VM VCPU Run
*
* Input Args:
* vm - Virtual Machine
@@ -1126,13 +1081,14 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
int rc;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- do {
+ do {
rc = ioctl(vcpu->fd, KVM_RUN, NULL);
} while (rc == -1 && errno == EINTR);
return rc;
}
-/* VM VCPU Set MP State
+/*
+ * VM VCPU Set MP State
*
* Input Args:
* vm - Virtual Machine
@@ -1147,7 +1103,7 @@ int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
* by mp_state.
*/
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_mp_state *mp_state)
+ struct kvm_mp_state *mp_state)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
@@ -1159,7 +1115,8 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
"rc: %i errno: %i", ret, errno);
}
-/* VM VCPU Regs Get
+/*
+ * VM VCPU Regs Get
*
* Input Args:
* vm - Virtual Machine
@@ -1173,21 +1130,20 @@ void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
* Obtains the current register state for the VCPU specified by vcpuid
* and stores it at the location given by regs.
*/
-void vcpu_regs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs)
+void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
ret, errno);
}
-/* VM VCPU Regs Set
+/*
+ * VM VCPU Regs Set
*
* Input Args:
* vm - Virtual Machine
@@ -1201,165 +1157,46 @@ void vcpu_regs_get(struct kvm_vm *vm,
* Sets the regs of the VCPU specified by vcpuid to the values
* given by regs.
*/
-void vcpu_regs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_regs *regs)
+void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Set the regs. */
ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
ret, errno);
}
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
+ struct kvm_vcpu_events *events)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
ret, errno);
}
void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
- struct kvm_vcpu_events *events)
+ struct kvm_vcpu_events *events)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Set the regs. */
ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
ret, errno);
}
-/* VCPU Get MSR
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * msr_index - Index of MSR
- *
- * Output Args: None
- *
- * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
- *
- * Get value of MSR for VCPU.
- */
-uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- struct {
- struct kvm_msrs header;
- struct kvm_msr_entry entry;
- } buffer = {};
- int r;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- buffer.header.nmsrs = 1;
- buffer.entry.index = msr_index;
- r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
- TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
- " rc: %i errno: %i", r, errno);
-
- return buffer.entry.data;
-}
-
-/* VCPU Set MSR
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * msr_index - Index of MSR
- * msr_value - New value of MSR
- *
- * Output Args: None
- *
- * Return: On success, nothing. On failure a TEST_ASSERT is produced.
- *
- * Set value of MSR for VCPU.
- */
-void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
- uint64_t msr_value)
-{
- struct vcpu *vcpu = vcpu_find(vm, vcpuid);
- struct {
- struct kvm_msrs header;
- struct kvm_msr_entry entry;
- } buffer = {};
- int r;
-
- TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- memset(&buffer, 0, sizeof(buffer));
- buffer.header.nmsrs = 1;
- buffer.entry.index = msr_index;
- buffer.entry.data = msr_value;
- r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
- TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
- " rc: %i errno: %i", r, errno);
-}
-
-/* VM VCPU Args Set
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * num - number of arguments
- * ... - arguments, each of type uint64_t
- *
- * Output Args: None
- *
- * Return: None
- *
- * Sets the first num function input arguments to the values
- * given as variable args. Each of the variable args is expected to
- * be of type uint64_t.
- */
-void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
-{
- va_list ap;
- struct kvm_regs regs;
-
- TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
- " num: %u\n",
- num);
-
- va_start(ap, num);
- vcpu_regs_get(vm, vcpuid, &regs);
-
- if (num >= 1)
- regs.rdi = va_arg(ap, uint64_t);
-
- if (num >= 2)
- regs.rsi = va_arg(ap, uint64_t);
-
- if (num >= 3)
- regs.rdx = va_arg(ap, uint64_t);
-
- if (num >= 4)
- regs.rcx = va_arg(ap, uint64_t);
-
- if (num >= 5)
- regs.r8 = va_arg(ap, uint64_t);
-
- if (num >= 6)
- regs.r9 = va_arg(ap, uint64_t);
-
- vcpu_regs_set(vm, vcpuid, &regs);
- va_end(ap);
-}
-
-/* VM VCPU System Regs Get
+/*
+ * VM VCPU System Regs Get
*
* Input Args:
* vm - Virtual Machine
@@ -1373,22 +1210,20 @@ void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
* Obtains the current system register state for the VCPU specified by
* vcpuid and stores it at the location given by sregs.
*/
-void vcpu_sregs_get(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
ret, errno);
}
-/* VM VCPU System Regs Set
+/*
+ * VM VCPU System Regs Set
*
* Input Args:
* vm - Virtual Machine
@@ -1402,27 +1237,25 @@ void vcpu_sregs_get(struct kvm_vm *vm,
* Sets the system regs of the VCPU specified by vcpuid to the values
* given by sregs.
*/
-void vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
"rc: %i errno: %i", ret, errno);
}
-int _vcpu_sregs_set(struct kvm_vm *vm,
- uint32_t vcpuid, struct kvm_sregs *sregs)
+int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
- /* Get the regs. */
return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}
-/* VCPU Ioctl
+/*
+ * VCPU Ioctl
*
* Input Args:
* vm - Virtual Machine
@@ -1434,8 +1267,8 @@ int _vcpu_sregs_set(struct kvm_vm *vm,
*
* Issues an arbitrary ioctl on a VCPU fd.
*/
-void vcpu_ioctl(struct kvm_vm *vm,
- uint32_t vcpuid, unsigned long cmd, void *arg)
+void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
+ unsigned long cmd, void *arg)
{
struct vcpu *vcpu = vcpu_find(vm, vcpuid);
int ret;
@@ -1447,7 +1280,8 @@ void vcpu_ioctl(struct kvm_vm *vm,
cmd, ret, errno, strerror(errno));
}
-/* VM Ioctl
+/*
+ * VM Ioctl
*
* Input Args:
* vm - Virtual Machine
@@ -1467,7 +1301,8 @@ void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
cmd, ret, errno, strerror(errno));
}
-/* VM Dump
+/*
+ * VM Dump
*
* Input Args:
* vm - Virtual Machine
@@ -1514,38 +1349,6 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
-/* VM VCPU Dump
- *
- * Input Args:
- * vm - Virtual Machine
- * vcpuid - VCPU ID
- * indent - Left margin indent amount
- *
- * Output Args:
- * stream - Output FILE stream
- *
- * Return: None
- *
- * Dumps the current state of the VCPU specified by vcpuid, within the VM
- * given by vm, to the FILE stream given by stream.
- */
-void vcpu_dump(FILE *stream, struct kvm_vm *vm,
- uint32_t vcpuid, uint8_t indent)
-{
- struct kvm_regs regs;
- struct kvm_sregs sregs;
-
- fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
-
- fprintf(stream, "%*sregs:\n", indent + 2, "");
- vcpu_regs_get(vm, vcpuid, &regs);
- regs_dump(stream, &regs, indent + 4);
-
- fprintf(stream, "%*ssregs:\n", indent + 2, "");
- vcpu_sregs_get(vm, vcpuid, &sregs);
- sregs_dump(stream, &sregs, indent + 4);
-}
-
/* Known KVM exit reasons */
static struct exit_reason {
unsigned int reason;
@@ -1576,7 +1379,8 @@ static struct exit_reason {
#endif
};
-/* Exit Reason String
+/*
+ * Exit Reason String
*
* Input Args:
* exit_reason - Exit reason
@@ -1602,10 +1406,12 @@ const char *exit_reason_str(unsigned int exit_reason)
return "Unknown";
}
-/* Physical Page Allocate
+/*
+ * Physically Contiguous Page Allocator
*
* Input Args:
* vm - Virtual Machine
+ * num - number of pages
* paddr_min - Physical address minimum
* memslot - Memory region to allocate page from
*
@@ -1614,47 +1420,59 @@ const char *exit_reason_str(unsigned int exit_reason)
* Return:
* Starting physical address
*
- * Within the VM specified by vm, locates an available physical page
- * at or above paddr_min. If found, the page is marked as in use
- * and its address is returned. A TEST_ASSERT failure occurs if no
- * page is available at or above paddr_min.
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
*/
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm,
- vm_paddr_t paddr_min, uint32_t memslot)
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+ vm_paddr_t paddr_min, uint32_t memslot)
{
struct userspace_mem_region *region;
- sparsebit_idx_t pg;
+ sparsebit_idx_t pg, base;
+
+ TEST_ASSERT(num > 0, "Must allocate at least one page");
TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
"not divisible by page size.\n"
" paddr_min: 0x%lx page_size: 0x%x",
paddr_min, vm->page_size);
- /* Locate memory region. */
region = memslot2region(vm, memslot);
+ base = pg = paddr_min >> vm->page_shift;
- /* Locate next available physical page at or above paddr_min. */
- pg = paddr_min >> vm->page_shift;
-
- if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
- pg = sparsebit_next_set(region->unused_phy_pages, pg);
- if (pg == 0) {
- fprintf(stderr, "No guest physical page available, "
- "paddr_min: 0x%lx page_size: 0x%x memslot: %u",
- paddr_min, vm->page_size, memslot);
- fputs("---- vm dump ----\n", stderr);
- vm_dump(stderr, vm, 2);
- abort();
+ do {
+ for (; pg < base + num; ++pg) {
+ if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+ base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+ break;
+ }
}
+ } while (pg && pg != base + num);
+
+ if (pg == 0) {
+ fprintf(stderr, "No guest physical page available, "
+ "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+ paddr_min, vm->page_size, memslot);
+ fputs("---- vm dump ----\n", stderr);
+ vm_dump(stderr, vm, 2);
+ abort();
}
- /* Specify page as in use and return its address. */
- sparsebit_clear(region->unused_phy_pages, pg);
+ for (pg = base; pg < base + num; ++pg)
+ sparsebit_clear(region->unused_phy_pages, pg);
+
+ return base * vm->page_size;
+}
- return pg * vm->page_size;
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+ uint32_t memslot)
+{
+ return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
-/* Address Guest Virtual to Host Virtual
+/*
+ * Address Guest Virtual to Host Virtual
*
* Input Args:
* vm - Virtual Machine
@@ -1669,17 +1487,3 @@ void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
-
-void guest_args_read(struct kvm_vm *vm, uint32_t vcpu_id,
- struct guest_args *args)
-{
- struct kvm_run *run = vcpu_state(vm, vcpu_id);
- struct kvm_regs regs;
-
- memset(&regs, 0, sizeof(regs));
- vcpu_regs_get(vm, vcpu_id, &regs);
-
- args->port = run->io.port;
- args->arg0 = regs.rdi;
- args->arg1 = regs.rsi;
-}
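The scan inside vm_phy_pages_alloc() above is the subtle part; a freestanding restatement over a plain bool array (sparsebit swapped out for clarity) behaves the same way:

#include <stdbool.h>
#include <stdint.h>

/* Returns the first index of num consecutive free pages at or above
 * start, or UINT64_MAX if no such run exists below nbits. */
static uint64_t find_free_run(const bool *is_free, uint64_t nbits,
			      uint64_t start, uint64_t num)
{
	uint64_t base, n;

	for (base = start; base + num <= nbits; base++) {
		for (n = 0; n < num && is_free[base + n]; n++)
			;
		if (n == num)
			return base;	/* found a contiguous run */
		base += n;		/* skip past the occupied page */
	}
	return UINT64_MAX;
}

The do/while form in the patch restarts from sparsebit_next_set() instead of stepping one page at a time, but the accept-or-skip structure is the same.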
diff --git a/tools/testing/selftests/kvm/lib/kvm_util_internal.h b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
index 542ed606b338..52701db0f253 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util_internal.h
+++ b/tools/testing/selftests/kvm/lib/kvm_util_internal.h
@@ -1,28 +1,29 @@
/*
- * tools/testing/selftests/kvm/lib/kvm_util.c
+ * tools/testing/selftests/kvm/lib/kvm_util_internal.h
*
* Copyright (C) 2018, Google LLC.
*
* This work is licensed under the terms of the GNU GPL, version 2.
*/
-#ifndef KVM_UTIL_INTERNAL_H
-#define KVM_UTIL_INTERNAL_H 1
+#ifndef SELFTEST_KVM_UTIL_INTERNAL_H
+#define SELFTEST_KVM_UTIL_INTERNAL_H
#include "sparsebit.h"
+#define KVM_DEV_PATH "/dev/kvm"
+
#ifndef BITS_PER_BYTE
-#define BITS_PER_BYTE 8
+#define BITS_PER_BYTE 8
#endif
#ifndef BITS_PER_LONG
-#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
+#define BITS_PER_LONG (BITS_PER_BYTE * sizeof(long))
#endif
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG)
-/* Concrete definition of struct kvm_vm. */
struct userspace_mem_region {
struct userspace_mem_region *next, *prev;
struct kvm_userspace_memory_region region;
@@ -45,14 +46,16 @@ struct kvm_vm {
int mode;
int kvm_fd;
int fd;
+ unsigned int pgtable_levels;
unsigned int page_size;
unsigned int page_shift;
+ unsigned int pa_bits;
+ unsigned int va_bits;
uint64_t max_gfn;
struct vcpu *vcpu_head;
struct userspace_mem_region *userspace_mem_region_head;
struct sparsebit *vpages_valid;
struct sparsebit *vpages_mapped;
-
bool has_irqchip;
bool pgd_created;
vm_paddr_t pgd;
@@ -60,13 +63,11 @@ struct kvm_vm {
vm_vaddr_t tss;
};
-struct vcpu *vcpu_find(struct kvm_vm *vm,
- uint32_t vcpuid);
-void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot);
+struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot,
+ int gdt_memslot);
void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
-void regs_dump(FILE *stream, struct kvm_regs *regs,
- uint8_t indent);
-void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
- uint8_t indent);
+void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
+void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
-#endif
+#endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
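The new pa_bits/va_bits fields drive the limits vm_create() computes; the arithmetic, spelled out for VM_MODE_P40V48_64K as a standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int pa_bits = 40, va_bits = 48, page_shift = 16;

	/* Highest usable guest frame number under a 40-bit IPA */
	uint64_t max_gfn = ((1ull << pa_bits) >> page_shift) - 1;
	/* Pages in each canonical half of the 48-bit VA space */
	uint64_t half_pages = (1ull << (va_bits - 1)) >> page_shift;

	printf("max_gfn: 0x%llx\n", (unsigned long long)max_gfn);
	printf("canonical half: 0x%llx pages\n",
	       (unsigned long long)half_pages);
	return 0;
}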
diff --git a/tools/testing/selftests/kvm/lib/ucall.c b/tools/testing/selftests/kvm/lib/ucall.c
new file mode 100644
index 000000000000..4777f9bb5194
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/ucall.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ucall support. A ucall is a "hypercall to userspace".
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+#include "kvm_util.h"
+#include "kvm_util_internal.h"
+
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+static ucall_type_t ucall_type;
+static vm_vaddr_t *ucall_exit_mmio_addr;
+
+static bool ucall_mmio_init(struct kvm_vm *vm, vm_paddr_t gpa)
+{
+ if (kvm_userspace_memory_region_find(vm, gpa, gpa + 1))
+ return false;
+
+ virt_pg_map(vm, gpa, gpa, 0);
+
+ ucall_exit_mmio_addr = (vm_vaddr_t *)gpa;
+ sync_global_to_guest(vm, ucall_exit_mmio_addr);
+
+ return true;
+}
+
+void ucall_init(struct kvm_vm *vm, ucall_type_t type, void *arg)
+{
+ ucall_type = type;
+ sync_global_to_guest(vm, ucall_type);
+
+ if (type == UCALL_PIO)
+ return;
+
+ if (type == UCALL_MMIO) {
+ vm_paddr_t gpa, start, end, step;
+ bool ret;
+
+ if (arg) {
+ gpa = (vm_paddr_t)arg;
+ ret = ucall_mmio_init(vm, gpa);
+ TEST_ASSERT(ret, "Can't set ucall mmio address to %lx", gpa);
+ return;
+ }
+
+ /*
+ * Find an address within the allowed virtual address space,
+ * that does _not_ have a KVM memory region associated with it.
+ * Identity mapping an address like this allows the guest to
+ * access it, but as KVM doesn't know what to do with it, it
+ * will assume it's something userspace handles and exit with
+ * KVM_EXIT_MMIO. Well, at least that's how it works for AArch64.
+ * Here we start with a guess that the addresses around two
+ * thirds of the VA space are unmapped and then work both down
+ * and up from there in 1/6 VA space sized steps.
+ */
+ start = 1ul << (vm->va_bits * 2 / 3);
+ end = 1ul << vm->va_bits;
+ step = 1ul << (vm->va_bits / 6);
+ for (gpa = start; gpa >= 0; gpa -= step) {
+ if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ return;
+ }
+ for (gpa = start + step; gpa < end; gpa += step) {
+ if (ucall_mmio_init(vm, gpa & ~(vm->page_size - 1)))
+ return;
+ }
+ TEST_ASSERT(false, "Can't find a ucall mmio address");
+ }
+}
+
+void ucall_uninit(struct kvm_vm *vm)
+{
+ ucall_type = 0;
+ sync_global_to_guest(vm, ucall_type);
+ ucall_exit_mmio_addr = 0;
+ sync_global_to_guest(vm, ucall_exit_mmio_addr);
+}
+
+static void ucall_pio_exit(struct ucall *uc)
+{
+#ifdef __x86_64__
+ asm volatile("in %[port], %%al"
+ : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax");
+#endif
+}
+
+static void ucall_mmio_exit(struct ucall *uc)
+{
+ *ucall_exit_mmio_addr = (vm_vaddr_t)uc;
+}
+
+void ucall(uint64_t cmd, int nargs, ...)
+{
+ struct ucall uc = {
+ .cmd = cmd,
+ };
+ va_list va;
+ int i;
+
+ nargs = nargs <= UCALL_MAX_ARGS ? nargs : UCALL_MAX_ARGS;
+
+ va_start(va, nargs);
+ for (i = 0; i < nargs; ++i)
+ uc.args[i] = va_arg(va, uint64_t);
+ va_end(va);
+
+ switch (ucall_type) {
+ case UCALL_PIO:
+ ucall_pio_exit(&uc);
+ break;
+ case UCALL_MMIO:
+ ucall_mmio_exit(&uc);
+ break;
+ };
+}
+
+uint64_t get_ucall(struct kvm_vm *vm, uint32_t vcpu_id, struct ucall *uc)
+{
+ struct kvm_run *run = vcpu_state(vm, vcpu_id);
+
+ memset(uc, 0, sizeof(*uc));
+
+#ifdef __x86_64__
+ if (ucall_type == UCALL_PIO && run->exit_reason == KVM_EXIT_IO &&
+ run->io.port == UCALL_PIO_PORT) {
+ struct kvm_regs regs;
+ vcpu_regs_get(vm, vcpu_id, &regs);
+ memcpy(uc, addr_gva2hva(vm, (vm_vaddr_t)regs.rdi), sizeof(*uc));
+ return uc->cmd;
+ }
+#endif
+ if (ucall_type == UCALL_MMIO && run->exit_reason == KVM_EXIT_MMIO &&
+ run->mmio.phys_addr == (uint64_t)ucall_exit_mmio_addr) {
+ vm_vaddr_t gva;
+ TEST_ASSERT(run->mmio.is_write && run->mmio.len == 8,
+ "Unexpected ucall exit mmio address access");
+ gva = *(vm_vaddr_t *)run->mmio.data;
+ memcpy(uc, addr_gva2hva(vm, gva), sizeof(*uc));
+ }
+
+ return uc->cmd;
+}
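From the guest's perspective a ucall is just a macro expansion; a guest_code() sketch pairing with a host-side get_ucall() loop, assuming ucall_init() has already run with UCALL_PIO (x86) or UCALL_MMIO (aarch64):

#include "kvm_util.h"

static void guest_code(void)
{
	int stage;

	for (stage = 0; stage < 3; stage++)
		GUEST_SYNC(stage);	/* ucall(UCALL_SYNC, 2, "hello", stage) */

	GUEST_ASSERT(stage == 3);	/* ucall(UCALL_ABORT, ...) on failure */
	GUEST_DONE();			/* ucall(UCALL_DONE, 0) */
}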
diff --git a/tools/testing/selftests/kvm/lib/x86.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c
index a3122f1949a8..f28127f4a3af 100644
--- a/tools/testing/selftests/kvm/lib/x86.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/lib/x86.c
+ * tools/testing/selftests/kvm/lib/x86_64/processor.c
*
* Copyright (C) 2018, Google LLC.
*
@@ -10,8 +10,8 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "kvm_util_internal.h"
-#include "x86.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
@@ -231,7 +231,7 @@ void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
int rc;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
/* If needed, create page map l4 table. */
@@ -264,7 +264,7 @@ void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
uint16_t index[4];
struct pageMapL4Entry *pml4e;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
TEST_ASSERT((vaddr % vm->page_size) == 0,
@@ -551,7 +551,7 @@ vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
struct pageTableEntry *pte;
void *hva;
- TEST_ASSERT(vm->mode == VM_MODE_FLAT48PG, "Attempt to use "
+ TEST_ASSERT(vm->mode == VM_MODE_P52V48_4K, "Attempt to use "
"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
index[0] = (gva >> 12) & 0x1ffu;
@@ -624,9 +624,9 @@ void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);
switch (vm->mode) {
- case VM_MODE_FLAT48PG:
+ case VM_MODE_P52V48_4K:
sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
- sregs.cr4 |= X86_CR4_PAE;
+ sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);
kvm_seg_set_unusable(&sregs.ldt);
@@ -672,6 +672,102 @@ void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
+/* Allocate an instance of struct kvm_cpuid2
+ *
+ * Input Args: None
+ *
+ * Output Args: None
+ *
+ * Return: A pointer to the allocated struct. The caller is responsible
+ * for freeing this struct.
+ *
+ * Since kvm_cpuid2 uses a 0-length array to allow the size of the
+ * array to be decided at allocation time, allocation is slightly
+ * complicated. This function uses a reasonable default length for
+ * the array and performs the appropriate allocation.
+ */
+static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
+{
+ struct kvm_cpuid2 *cpuid;
+ int nent = 100;
+ size_t size;
+
+ size = sizeof(*cpuid);
+ size += nent * sizeof(struct kvm_cpuid_entry2);
+ cpuid = malloc(size);
+ if (!cpuid) {
+ perror("malloc");
+ abort();
+ }
+
+ cpuid->nent = nent;
+
+ return cpuid;
+}
+
+/* KVM Supported CPUID Get
+ *
+ * Input Args: None
+ *
+ * Output Args:
+ *
+ * Return: The supported KVM CPUID
+ *
+ * Get the guest CPUID supported by KVM.
+ */
+struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
+{
+ static struct kvm_cpuid2 *cpuid;
+ int ret;
+ int kvm_fd;
+
+ if (cpuid)
+ return cpuid;
+
+ cpuid = allocate_kvm_cpuid2();
+ kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
+ if (kvm_fd < 0)
+ exit(KSFT_SKIP);
+
+ ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
+ TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
+ ret, errno);
+
+ close(kvm_fd);
+ return cpuid;
+}
+
+/* Locate a cpuid entry.
+ *
+ * Input Args:
+ * cpuid: The cpuid.
+ * function: The function of the cpuid entry to find.
+ *
+ * Output Args: None
+ *
+ * Return: A pointer to the cpuid entry. Never returns NULL.
+ */
+struct kvm_cpuid_entry2 *
+kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
+{
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *entry = NULL;
+ int i;
+
+ cpuid = kvm_get_supported_cpuid();
+ for (i = 0; i < cpuid->nent; i++) {
+ if (cpuid->entries[i].function == function &&
+ cpuid->entries[i].index == index) {
+ entry = &cpuid->entries[i];
+ break;
+ }
+ }
+
+ TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
+ function, index);
+ return entry;
+}
+
/* VM VCPU CPUID Set
*
* Input Args:
@@ -698,6 +794,7 @@ void vcpu_set_cpuid(struct kvm_vm *vm,
rc, errno);
}
+
/* Create a VM with reasonable defaults
*
* Input Args:
@@ -726,7 +823,7 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;
/* Create VM */
- vm = vm_create(VM_MODE_FLAT48PG,
+ vm = vm_create(VM_MODE_P52V48_4K,
DEFAULT_GUEST_PHY_PAGES + extra_pg_pages,
O_RDWR);
@@ -742,6 +839,154 @@ struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
return vm;
}
+/* VCPU Get MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
+ *
+ * Get value of MSR for VCPU.
+ */
+uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+
+ return buffer.entry.data;
+}
+
+/* VCPU Set MSR
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * msr_index - Index of MSR
+ * msr_value - New value of MSR
+ *
+ * Output Args: None
+ *
+ * Return: On success, nothing. On failure a TEST_ASSERT is produced.
+ *
+ * Set value of MSR for VCPU.
+ */
+void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
+ uint64_t msr_value)
+{
+ struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+ struct {
+ struct kvm_msrs header;
+ struct kvm_msr_entry entry;
+ } buffer = {};
+ int r;
+
+ TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
+ buffer.header.nmsrs = 1;
+ buffer.entry.index = msr_index;
+ buffer.entry.data = msr_value;
+ r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
+ TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
+ " rc: %i errno: %i", r, errno);
+}
+
+/* VM VCPU Args Set
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * num - number of arguments
+ * ... - arguments, each of type uint64_t
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Sets the first num function input arguments to the values
+ * given as variable args. Each of the variable args is expected to
+ * be of type uint64_t.
+ */
+void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
+{
+ va_list ap;
+ struct kvm_regs regs;
+
+ TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
+ " num: %u\n",
+ num);
+
+ va_start(ap, num);
+ vcpu_regs_get(vm, vcpuid, &regs);
+
+ if (num >= 1)
+ regs.rdi = va_arg(ap, uint64_t);
+
+ if (num >= 2)
+ regs.rsi = va_arg(ap, uint64_t);
+
+ if (num >= 3)
+ regs.rdx = va_arg(ap, uint64_t);
+
+ if (num >= 4)
+ regs.rcx = va_arg(ap, uint64_t);
+
+ if (num >= 5)
+ regs.r8 = va_arg(ap, uint64_t);
+
+ if (num >= 6)
+ regs.r9 = va_arg(ap, uint64_t);
+
+ vcpu_regs_set(vm, vcpuid, &regs);
+ va_end(ap);
+}
+
+/*
+ * VM VCPU Dump
+ *
+ * Input Args:
+ * vm - Virtual Machine
+ * vcpuid - VCPU ID
+ * indent - Left margin indent amount
+ *
+ * Output Args:
+ * stream - Output FILE stream
+ *
+ * Return: None
+ *
+ * Dumps the current state of the VCPU specified by vcpuid, within the VM
+ * given by vm, to the FILE stream given by stream.
+ */
+void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
+{
+ struct kvm_regs regs;
+ struct kvm_sregs sregs;
+
+ fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);
+
+ fprintf(stream, "%*sregs:\n", indent + 2, "");
+ vcpu_regs_get(vm, vcpuid, &regs);
+ regs_dump(stream, &regs, indent + 4);
+
+ fprintf(stream, "%*ssregs:\n", indent + 2, "");
+ vcpu_sregs_get(vm, vcpuid, &sregs);
+ sregs_dump(stream, &sregs, indent + 4);
+}
+
struct kvm_x86_state {
struct kvm_vcpu_events events;
struct kvm_mp_state mp_state;
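Several of the new helpers compose into the usual x86 test bootstrap: seed the guest's argument registers, then poke and verify an MSR. A minimal sketch (VCPU_ID and example_setup are placeholder names; the MSR index is the same fallback the TSC test further below defines):

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b /* fallback, as in vmx_tsc_adjust_test */
#endif

#define VCPU_ID 0

static void example_setup(struct kvm_vm *vm)
{
        /* The first two guest arguments land in rdi and rsi. */
        vcpu_args_set(vm, VCPU_ID, 2, 0xcafeull, 42ull);

        /* Round-trip an MSR through KVM_SET_MSRS/KVM_GET_MSRS. */
        vcpu_set_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST, 0);
        TEST_ASSERT(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_TSC_ADJUST) == 0,
                    "TSC_ADJUST readback mismatch");
}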
diff --git a/tools/testing/selftests/kvm/lib/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index b987c3c970eb..771ba6bf751c 100644
--- a/tools/testing/selftests/kvm/lib/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -1,5 +1,5 @@
/*
- * tools/testing/selftests/kvm/lib/x86.c
+ * tools/testing/selftests/kvm/lib/x86_64/vmx.c
*
* Copyright (C) 2018, Google LLC.
*
@@ -10,9 +10,11 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
+bool enable_evmcs;
+
/* Allocate memory regions for nested VMX tests.
*
* Input Args:
@@ -62,6 +64,20 @@ vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
memset(vmx->vmwrite_hva, 0, getpagesize());
+ /* Setup of a region of guest memory for the VP Assist page. */
+ vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
+ vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);
+
+ /* Setup of a region of guest memory for the enlightened VMCS. */
+ vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ vmx->enlightened_vmcs_hva =
+ addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
+ vmx->enlightened_vmcs_gpa =
+ addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);
+
*p_vmx_gva = vmx_gva;
return vmx;
}
@@ -107,18 +123,31 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx)
if (vmxon(vmx->vmxon_gpa))
return false;
- /* Load a VMCS. */
- *(uint32_t *)(vmx->vmcs) = vmcs_revision();
- if (vmclear(vmx->vmcs_gpa))
- return false;
-
- if (vmptrld(vmx->vmcs_gpa))
- return false;
+ return true;
+}
- /* Setup shadow VMCS, do not load it yet. */
- *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
- if (vmclear(vmx->shadow_vmcs_gpa))
- return false;
+bool load_vmcs(struct vmx_pages *vmx)
+{
+ if (!enable_evmcs) {
+ /* Load a VMCS. */
+ *(uint32_t *)(vmx->vmcs) = vmcs_revision();
+ if (vmclear(vmx->vmcs_gpa))
+ return false;
+
+ if (vmptrld(vmx->vmcs_gpa))
+ return false;
+
+ /* Setup shadow VMCS, do not load it yet. */
+ *(uint32_t *)(vmx->shadow_vmcs) =
+ vmcs_revision() | 0x80000000ul;
+ if (vmclear(vmx->shadow_vmcs_gpa))
+ return false;
+ } else {
+ if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
+ vmx->enlightened_vmcs))
+ return false;
+ current_evmcs->revision_id = vmcs_revision();
+ }
return true;
}
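Splitting VMCS loading out of prepare_for_vmx_operation() lets a test choose between the plain VMCS path and the enlightened one (selected via the new enable_evmcs global) after VMXON. A sketch of the resulting guest-side call order, with l2_code and the stack size as placeholders:

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define L2_STACK_WORDS 64 /* hypothetical stack size */

static void l2_code(void)
{
        vmcall(); /* immediately return to L1 */
}

static void l1_code(struct vmx_pages *vmx)
{
        unsigned long l2_stack[L2_STACK_WORDS];

        /* Step 1: VMXON only; no VMCS is loaded yet. */
        GUEST_ASSERT(prepare_for_vmx_operation(vmx));

        /* Step 2: VMCLEAR+VMPTRLD, or evmcs_vmptrld when enable_evmcs is set. */
        GUEST_ASSERT(load_vmcs(vmx));

        prepare_vmcs(vmx, l2_code, &l2_stack[L2_STACK_WORDS]);
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
}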
diff --git a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
index 11ec358bf969..d503a51fad30 100644
--- a/tools/testing/selftests/kvm/cr4_cpuid_sync_test.c
+++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c
@@ -17,7 +17,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define X86_FEATURE_XSAVE (1<<26)
#define X86_FEATURE_OSXSAVE (1<<27)
@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct kvm_sregs sregs;
struct kvm_cpuid_entry2 *entry;
+ struct ucall uc;
int rc;
entry = kvm_get_supported_cpuid_entry(1);
@@ -87,21 +88,20 @@ int main(int argc, char *argv[])
rc = _vcpu_run(vm, VCPU_ID);
if (run->exit_reason == KVM_EXIT_IO) {
- switch (run->io.port) {
- case GUEST_PORT_SYNC:
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_SYNC:
/* emulate hypervisor clearing CR4.OSXSAVE */
vcpu_sregs_get(vm, VCPU_ID, &sregs);
sregs.cr4 &= ~X86_CR4_OSXSAVE;
vcpu_sregs_set(vm, VCPU_ID, &sregs);
break;
- case GUEST_PORT_ABORT:
+ case UCALL_ABORT:
TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.",
- run->io.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
}
diff --git a/tools/testing/selftests/kvm/x86_64/evmcs_test.c b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
new file mode 100644
index 000000000000..92c2cfd1b182
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/evmcs_test.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018, Red Hat, Inc.
+ *
+ * Tests for Enlightened VMCS, including nested guest state.
+ */
+#define _GNU_SOURCE /* for program_invocation_short_name */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+
+#include "kvm_util.h"
+
+#include "vmx.h"
+
+#define VCPU_ID 5
+
+static bool have_nested_state;
+
+void l2_guest_code(void)
+{
+ GUEST_SYNC(6);
+
+ GUEST_SYNC(7);
+
+ /* Done, exit to L1 and never come back. */
+ vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ enable_vp_assist(vmx_pages->vp_assist_gpa, vmx_pages->vp_assist);
+
+ GUEST_ASSERT(vmx_pages->vmcs_gpa);
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_SYNC(3);
+ GUEST_ASSERT(load_vmcs(vmx_pages));
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+
+ GUEST_SYNC(4);
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_SYNC(5);
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_ASSERT(!vmlaunch());
+ GUEST_ASSERT(vmptrstz() == vmx_pages->enlightened_vmcs_gpa);
+ GUEST_SYNC(8);
+ GUEST_ASSERT(!vmresume());
+ GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+ GUEST_SYNC(9);
+}
+
+void guest_code(struct vmx_pages *vmx_pages)
+{
+ GUEST_SYNC(1);
+ GUEST_SYNC(2);
+
+ if (vmx_pages)
+ l1_guest_code(vmx_pages);
+
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ struct vmx_pages *vmx_pages = NULL;
+ vm_vaddr_t vmx_pages_gva = 0;
+
+ struct kvm_regs regs1, regs2;
+ struct kvm_vm *vm;
+ struct kvm_run *run;
+ struct kvm_x86_state *state;
+ struct ucall uc;
+ int stage;
+ uint16_t evmcs_ver;
+ struct kvm_enable_cap enable_evmcs_cap = {
+ .cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+ .args[0] = (unsigned long)&evmcs_ver
+ };
+
+ struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+ /* Create VM */
+ vm = vm_create_default(VCPU_ID, 0, guest_code);
+
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
+ !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
+ printf("capabilities not available, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+
+ vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+ run = vcpu_state(vm, VCPU_ID);
+
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+
+ vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+
+ for (stage = 1;; stage++) {
+ _vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Unexpected exit reason: %u (%s),\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ memset(&regs1, 0, sizeof(regs1));
+ vcpu_regs_get(vm, VCPU_ID, &regs1);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+ }
+
+ /* UCALL_SYNC is handled here. */
+ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+ uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ stage, (ulong)uc.args[1]);
+
+ state = vcpu_save_state(vm, VCPU_ID);
+ kvm_vm_release(vm);
+
+ /* Restore state in a new VM. */
+ kvm_vm_restart(vm, O_RDWR);
+ vm_vcpu_add(vm, VCPU_ID, 0, 0);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+ vcpu_load_state(vm, VCPU_ID, state);
+ run = vcpu_state(vm, VCPU_ID);
+ free(state);
+
+ memset(&regs2, 0, sizeof(regs2));
+ vcpu_regs_get(vm, VCPU_ID, &regs2);
+ TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
+ "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
+ (ulong) regs2.rdi, (ulong) regs2.rsi);
+ }
+
+done:
+ kvm_vm_free(vm);
+}
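The heart of the test is the checkpoint/restore cycle inside the loop above. Condensed into one hypothetical helper for clarity (all calls are the library functions the test itself uses):

#include <fcntl.h>
#include <stdlib.h>
#include "kvm_util.h"

#define VCPU_ID 5

static void checkpoint_restore(struct kvm_vm *vm)
{
        struct kvm_x86_state *state;

        state = vcpu_save_state(vm, VCPU_ID); /* regs, sregs, MSRs, nested state */
        kvm_vm_release(vm);                   /* close fds, keep the memory map */

        kvm_vm_restart(vm, O_RDWR);           /* recreate VM and memslots */
        vm_vcpu_add(vm, VCPU_ID, 0, 0);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        vcpu_load_state(vm, VCPU_ID, state);  /* feed everything back in */
        free(state);
}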
diff --git a/tools/testing/selftests/kvm/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
index 3764e7121265..eb3e7a838cb4 100644
--- a/tools/testing/selftests/kvm/platform_info_test.c
+++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c
@@ -19,7 +19,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define VCPU_ID 0
#define MSR_PLATFORM_INFO_MAX_TURBO_RATIO 0xff00
@@ -48,7 +48,7 @@ static void set_msr_platform_info_enabled(struct kvm_vm *vm, bool enable)
static void test_msr_platform_info_enabled(struct kvm_vm *vm)
{
struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct guest_args args;
+ struct ucall uc;
set_msr_platform_info_enabled(vm, true);
vcpu_run(vm, VCPU_ID);
@@ -56,11 +56,11 @@ static void test_msr_platform_info_enabled(struct kvm_vm *vm)
"Exit_reason other than KVM_EXIT_IO: %u (%s),\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- guest_args_read(vm, VCPU_ID, &args);
- TEST_ASSERT(args.port == GUEST_PORT_SYNC,
- "Received IO from port other than PORT_HOST_SYNC: %u\n",
- run->io.port);
- TEST_ASSERT((args.arg1 & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
+ get_ucall(vm, VCPU_ID, &uc);
+ TEST_ASSERT(uc.cmd == UCALL_SYNC,
+ "Received ucall other than UCALL_SYNC: %u\n",
+ uc.cmd);
+ TEST_ASSERT((uc.args[1] & MSR_PLATFORM_INFO_MAX_TURBO_RATIO) ==
MSR_PLATFORM_INFO_MAX_TURBO_RATIO,
"Expected MSR_PLATFORM_INFO to have max turbo ratio mask: %i.",
MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
diff --git a/tools/testing/selftests/kvm/set_sregs_test.c b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
index 881419d5746e..35640e8e95bc 100644
--- a/tools/testing/selftests/kvm/set_sregs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/set_sregs_test.c
@@ -22,7 +22,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define VCPU_ID 5
diff --git a/tools/testing/selftests/kvm/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c
index 900e3e9dfb9f..03da41f0f736 100644
--- a/tools/testing/selftests/kvm/state_test.c
+++ b/tools/testing/selftests/kvm/x86_64/state_test.c
@@ -17,7 +17,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
#define VCPU_ID 5
@@ -26,20 +26,20 @@ static bool have_nested_state;
void l2_guest_code(void)
{
- GUEST_SYNC(5);
+ GUEST_SYNC(6);
/* Exit to L1 */
vmcall();
/* L1 has now set up a shadow VMCS for us. */
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
- GUEST_SYNC(9);
+ GUEST_SYNC(10);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
- GUEST_SYNC(10);
+ GUEST_SYNC(11);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
- GUEST_SYNC(11);
+ GUEST_SYNC(12);
/* Done, exit to L1 and never come back. */
vmcall();
@@ -52,15 +52,17 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(vmx_pages->vmcs_gpa);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_SYNC(3);
+ GUEST_ASSERT(load_vmcs(vmx_pages));
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
- GUEST_SYNC(3);
+ GUEST_SYNC(4);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
prepare_vmcs(vmx_pages, l2_guest_code,
&l2_guest_stack[L2_GUEST_STACK_SIZE]);
- GUEST_SYNC(4);
+ GUEST_SYNC(5);
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
GUEST_ASSERT(!vmlaunch());
GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
@@ -72,7 +74,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(!vmresume());
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
- GUEST_SYNC(6);
+ GUEST_SYNC(7);
GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
GUEST_ASSERT(!vmresume());
@@ -85,12 +87,12 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
GUEST_ASSERT(vmlaunch());
- GUEST_SYNC(7);
+ GUEST_SYNC(8);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
vmwrite(GUEST_RIP, 0xc0ffee);
- GUEST_SYNC(8);
+ GUEST_SYNC(9);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
@@ -101,7 +103,7 @@ void l1_guest_code(struct vmx_pages *vmx_pages)
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
- GUEST_SYNC(12);
+ GUEST_SYNC(13);
GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
GUEST_ASSERT(vmlaunch());
GUEST_ASSERT(vmresume());
@@ -127,6 +129,7 @@ int main(int argc, char *argv[])
struct kvm_vm *vm;
struct kvm_run *run;
struct kvm_x86_state *state;
+ struct ucall uc;
int stage;
struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
@@ -155,23 +158,23 @@ int main(int argc, char *argv[])
memset(&regs1, 0, sizeof(regs1));
vcpu_regs_get(vm, VCPU_ID, &regs1);
- switch (run->io.port) {
- case GUEST_PORT_ABORT:
- TEST_ASSERT(false, "%s at %s:%d", (const char *) regs1.rdi,
- __FILE__, regs1.rsi);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+ __FILE__, uc.args[1]);
/* NOT REACHED */
- case GUEST_PORT_SYNC:
+ case UCALL_SYNC:
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.", run->io.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
- /* PORT_SYNC is handled here. */
- TEST_ASSERT(!strcmp((const char *)regs1.rdi, "hello") &&
- regs1.rsi == stage, "Unexpected register values vmexit #%lx, got %lx",
- stage, (ulong) regs1.rsi);
+ /* UCALL_SYNC is handled here. */
+ TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
+ uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
+ stage, (ulong)uc.args[1]);
state = vcpu_save_state(vm, VCPU_ID);
kvm_vm_release(vm);
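The GUEST_SYNC renumbering above exists because load_vmcs() adds one more sync point between stages 3 and 4. The host-side contract implied by the strcmp/stage check is that GUEST_SYNC(stage) expands to ucall(UCALL_SYNC, 2, "hello", stage); a minimal host-side validator under that assumption:

#include <string.h>
#include "test_util.h"
#include "kvm_util.h"

/* Host side: validate one sync point against the expected stage. */
static void check_sync(struct kvm_vm *vm, uint32_t vcpu_id, int stage)
{
        struct ucall uc;

        TEST_ASSERT(get_ucall(vm, vcpu_id, &uc) == UCALL_SYNC,
                    "Expected UCALL_SYNC");
        /* GUEST_SYNC(stage) is assumed to pass ("hello", stage). */
        TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
                    uc.args[1] == (uint64_t)stage,
                    "Wrong sync stage: got %lu, want %d",
                    (unsigned long)uc.args[1], stage);
}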
diff --git a/tools/testing/selftests/kvm/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
index 213343e5dff9..c8478ce9ea77 100644
--- a/tools/testing/selftests/kvm/sync_regs_test.c
+++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c
@@ -19,7 +19,7 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#define VCPU_ID 5
diff --git a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
index 49bcc68b0235..18fa64db0d7a 100644
--- a/tools/testing/selftests/kvm/vmx_tsc_adjust_test.c
+++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
@@ -1,5 +1,5 @@
/*
- * gtests/tests/vmx_tsc_adjust_test.c
+ * vmx_tsc_adjust_test
*
* Copyright (C) 2018, Google LLC.
*
@@ -22,13 +22,13 @@
#include "test_util.h"
#include "kvm_util.h"
-#include "x86.h"
+#include "processor.h"
#include "vmx.h"
#include <string.h>
#include <sys/ioctl.h>
-#include "../kselftest.h"
+#include "kselftest.h"
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
@@ -94,6 +94,7 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_vmcs(vmx_pages));
/* Prepare the VMCS for L2 execution. */
prepare_vmcs(vmx_pages, l2_guest_code,
@@ -146,26 +147,25 @@ int main(int argc, char *argv[])
for (;;) {
volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
- struct guest_args args;
+ struct ucall uc;
vcpu_run(vm, VCPU_ID);
- guest_args_read(vm, VCPU_ID, &args);
TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
"Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
run->exit_reason,
exit_reason_str(run->exit_reason));
- switch (args.port) {
- case GUEST_PORT_ABORT:
- TEST_ASSERT(false, "%s", (const char *) args.arg0);
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
/* NOT REACHED */
- case GUEST_PORT_SYNC:
- report(args.arg1);
+ case UCALL_SYNC:
+ report(uc.args[1]);
break;
- case GUEST_PORT_DONE:
+ case UCALL_DONE:
goto done;
default:
- TEST_ASSERT(false, "Unknown port 0x%x.", args.port);
+ TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
}
}
diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile
index 201b598558b9..b3ad909aefbc 100644
--- a/tools/testing/selftests/powerpc/Makefile
+++ b/tools/testing/selftests/powerpc/Makefile
@@ -28,7 +28,8 @@ SUB_DIRS = alignment \
tm \
vphn \
math \
- ptrace
+ ptrace \
+ security
endif
diff --git a/tools/testing/selftests/powerpc/include/reg.h b/tools/testing/selftests/powerpc/include/reg.h
index 7f348c059bc2..52b4710469d2 100644
--- a/tools/testing/selftests/powerpc/include/reg.h
+++ b/tools/testing/selftests/powerpc/include/reg.h
@@ -17,6 +17,7 @@
: "memory")
#define mb() asm volatile("sync" : : : "memory");
+#define barrier() asm volatile("" : : : "memory");
#define SPRN_MMCR2 769
#define SPRN_MMCRA 770
diff --git a/tools/testing/selftests/powerpc/include/utils.h b/tools/testing/selftests/powerpc/include/utils.h
index c58c370828b4..49621822d7c3 100644
--- a/tools/testing/selftests/powerpc/include/utils.h
+++ b/tools/testing/selftests/powerpc/include/utils.h
@@ -11,6 +11,7 @@
#include <stdint.h>
#include <stdbool.h>
#include <linux/auxvec.h>
+#include <linux/perf_event.h>
#include "reg.h"
/* Avoid headaches with PRI?64 - just use %ll? always */
@@ -31,6 +32,15 @@ void *get_auxv_entry(int type);
int pick_online_cpu(void);
+int read_debugfs_file(char *debugfs_file, int *result);
+int write_debugfs_file(char *debugfs_file, int result);
+void set_dscr(unsigned long val);
+int perf_event_open_counter(unsigned int type,
+ unsigned long config, int group_fd);
+int perf_event_enable(int fd);
+int perf_event_disable(int fd);
+int perf_event_reset(int fd);
+
static inline bool have_hwcap(unsigned long ftr)
{
return ((unsigned long)get_auxv_entry(AT_HWCAP) & ftr) == ftr;
@@ -80,4 +90,12 @@ do { \
#define PPC_FEATURE2_ARCH_3_00 0x00800000
#endif
+#if defined(__powerpc64__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
+#elif defined(__powerpc__)
+#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
+#else
+#error implement UCONTEXT_NIA
+#endif
+
#endif /* _SELFTESTS_POWERPC_UTILS_H */
diff --git a/tools/testing/selftests/powerpc/mm/.gitignore b/tools/testing/selftests/powerpc/mm/.gitignore
index 7d7c42ed6de9..ba919308fe30 100644
--- a/tools/testing/selftests/powerpc/mm/.gitignore
+++ b/tools/testing/selftests/powerpc/mm/.gitignore
@@ -2,4 +2,5 @@ hugetlb_vs_thp_test
subpage_prot
tempfile
prot_sao
-segv_errors \ No newline at end of file
+segv_errors
+wild_bctr \ No newline at end of file
diff --git a/tools/testing/selftests/powerpc/mm/Makefile b/tools/testing/selftests/powerpc/mm/Makefile
index 33ced6e0ad25..43d68420e363 100644
--- a/tools/testing/selftests/powerpc/mm/Makefile
+++ b/tools/testing/selftests/powerpc/mm/Makefile
@@ -2,7 +2,7 @@
noarg:
$(MAKE) -C ../
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr
TEST_GEN_FILES := tempfile
top_srcdir = ../../../../..
@@ -12,6 +12,8 @@ $(TEST_GEN_PROGS): ../harness.c
$(OUTPUT)/prot_sao: ../utils.c
+$(OUTPUT)/wild_bctr: CFLAGS += -m64
+
$(OUTPUT)/tempfile:
dd if=/dev/zero of=$@ bs=64k count=1
diff --git a/tools/testing/selftests/powerpc/mm/wild_bctr.c b/tools/testing/selftests/powerpc/mm/wild_bctr.c
new file mode 100644
index 000000000000..1b0e9e9a2ddc
--- /dev/null
+++ b/tools/testing/selftests/powerpc/mm/wild_bctr.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018, Michael Ellerman, IBM Corp.
+ *
+ * Test that an out-of-bounds branch to counter behaves as expected.
+ */
+
+#include <setjmp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+
+#define BAD_NIP 0x788c545a18000000ull
+
+static struct pt_regs signal_regs;
+static jmp_buf setjmp_env;
+
+static void save_regs(ucontext_t *ctxt)
+{
+ struct pt_regs *regs = ctxt->uc_mcontext.regs;
+
+ memcpy(&signal_regs, regs, sizeof(signal_regs));
+}
+
+static void segv_handler(int signum, siginfo_t *info, void *ctxt_v)
+{
+ save_regs(ctxt_v);
+ longjmp(setjmp_env, 1);
+}
+
+static void usr2_handler(int signum, siginfo_t *info, void *ctxt_v)
+{
+ save_regs(ctxt_v);
+}
+
+static int ok(void)
+{
+ printf("Everything is OK in here.\n");
+ return 0;
+}
+
+#define REG_POISON 0x5a5aUL
+#define POISONED_REG(n) ((REG_POISON << 48) | ((n) << 32) | (REG_POISON << 16) | (n))
+
+static inline void poison_regs(void)
+{
+ #define POISON_REG(n) \
+ "lis " __stringify(n) "," __stringify(REG_POISON) ";" \
+ "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";" \
+ "sldi " __stringify(n) "," __stringify(n) ", 32 ;" \
+ "oris " __stringify(n) "," __stringify(n) "," __stringify(REG_POISON) ";" \
+ "addi " __stringify(n) "," __stringify(n) "," __stringify(n) ";"
+
+ asm (POISON_REG(15)
+ POISON_REG(16)
+ POISON_REG(17)
+ POISON_REG(18)
+ POISON_REG(19)
+ POISON_REG(20)
+ POISON_REG(21)
+ POISON_REG(22)
+ POISON_REG(23)
+ POISON_REG(24)
+ POISON_REG(25)
+ POISON_REG(26)
+ POISON_REG(27)
+ POISON_REG(28)
+ POISON_REG(29)
+ : // inputs
+ : // outputs
+ : "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25",
+ "26", "27", "28", "29"
+ );
+ #undef POISON_REG
+}
+
+static int check_regs(void)
+{
+ unsigned long i;
+
+ for (i = 15; i <= 29; i++)
+ FAIL_IF(signal_regs.gpr[i] != POISONED_REG(i));
+
+ printf("Regs OK\n");
+ return 0;
+}
+
+static void dump_regs(void)
+{
+ for (int i = 0; i < 32; i += 4) {
+ printf("r%02d 0x%016lx r%02d 0x%016lx " \
+ "r%02d 0x%016lx r%02d 0x%016lx\n",
+ i, signal_regs.gpr[i],
+ i+1, signal_regs.gpr[i+1],
+ i+2, signal_regs.gpr[i+2],
+ i+3, signal_regs.gpr[i+3]);
+ }
+}
+
+int test_wild_bctr(void)
+{
+ int (*func_ptr)(void);
+ struct sigaction segv = {
+ .sa_sigaction = segv_handler,
+ .sa_flags = SA_SIGINFO
+ };
+ struct sigaction usr2 = {
+ .sa_sigaction = usr2_handler,
+ .sa_flags = SA_SIGINFO
+ };
+
+ FAIL_IF(sigaction(SIGSEGV, &segv, NULL));
+ FAIL_IF(sigaction(SIGUSR2, &usr2, NULL));
+
+ bzero(&signal_regs, sizeof(signal_regs));
+
+ if (setjmp(setjmp_env) == 0) {
+ func_ptr = ok;
+ func_ptr();
+
+ kill(getpid(), SIGUSR2);
+ printf("Regs before:\n");
+ dump_regs();
+ bzero(&signal_regs, sizeof(signal_regs));
+
+ poison_regs();
+
+ func_ptr = (int (*)(void))BAD_NIP;
+ func_ptr();
+
+ FAIL_IF(1); /* we didn't segv? */
+ }
+
+ FAIL_IF(signal_regs.nip != BAD_NIP);
+
+ printf("All good - took SEGV as expected branching to 0x%llx\n", BAD_NIP);
+
+ dump_regs();
+ FAIL_IF(check_regs());
+
+ return 0;
+}
+
+int main(void)
+{
+ return test_harness(test_wild_bctr, "wild_bctr");
+}
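The POISON_REG asm sequence and the POISONED_REG C macro must agree bit for bit, or check_regs() fails spuriously. A quick standalone check of the expected pattern, duplicating the two definitions from the file above (note the UL suffix so the 32-bit shift is well defined; this assumes a 64-bit long, which holds since the test builds with -m64):

#include <assert.h>
#include <stdio.h>

#define REG_POISON 0x5a5aUL
#define POISONED_REG(n) ((REG_POISON << 48) | ((n) << 32) | \
                         (REG_POISON << 16) | (n))

int main(void)
{
        /* lis/addi/sldi/oris/addi builds 0x5a5a000f5a5a000f for r15. */
        assert(POISONED_REG(15UL) == 0x5a5a000f5a5a000fUL);
        printf("r15 poison = 0x%016lx\n", POISONED_REG(15UL));
        return 0;
}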
diff --git a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
index ed3239bbfae2..ee1e9ca22f0d 100644
--- a/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
+++ b/tools/testing/selftests/powerpc/primitives/load_unaligned_zeropad.c
@@ -65,14 +65,6 @@ static int unprotect_region(void)
extern char __start___ex_table[];
extern char __stop___ex_table[];
-#if defined(__powerpc64__)
-#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.gp_regs[PT_NIP]
-#elif defined(__powerpc__)
-#define UCONTEXT_NIA(UC) (UC)->uc_mcontext.uc_regs->gregs[PT_NIP]
-#else
-#error implement UCONTEXT_NIA
-#endif
-
struct extbl_entry {
int insn;
int fixup;
diff --git a/tools/testing/selftests/powerpc/ptrace/Makefile b/tools/testing/selftests/powerpc/ptrace/Makefile
index 923d531265f8..9b35ca8e8f13 100644
--- a/tools/testing/selftests/powerpc/ptrace/Makefile
+++ b/tools/testing/selftests/powerpc/ptrace/Makefile
@@ -2,7 +2,7 @@
TEST_PROGS := ptrace-gpr ptrace-tm-gpr ptrace-tm-spd-gpr \
ptrace-tar ptrace-tm-tar ptrace-tm-spd-tar ptrace-vsx ptrace-tm-vsx \
ptrace-tm-spd-vsx ptrace-tm-spr ptrace-hwbreak ptrace-pkey core-pkey \
- perf-hwbreak
+ perf-hwbreak ptrace-syscall
top_srcdir = ../../../../..
include ../../lib.mk
diff --git a/tools/testing/selftests/powerpc/ptrace/ptrace-syscall.c b/tools/testing/selftests/powerpc/ptrace/ptrace-syscall.c
new file mode 100644
index 000000000000..3353210dcdbd
--- /dev/null
+++ b/tools/testing/selftests/powerpc/ptrace/ptrace-syscall.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A ptrace test for testing PTRACE_SYSEMU, PTRACE_SETREGS and
+ * PTRACE_GETREG. This test basically create a child process that executes
+ * syscalls and the parent process check if it is being traced appropriated.
+ *
+ * This test is heavily based on tools/testing/selftests/x86/ptrace_syscall.c
+ * test, and it was adapted to run on Powerpc by
+ * Breno Leitao <leitao@debian.org>
+ */
+#define _GNU_SOURCE
+
+#include <sys/ptrace.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/syscall.h>
+#include <sys/user.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <err.h>
+#include <string.h>
+#include <sys/auxv.h>
+#include "utils.h"
+
+/* Bitness-agnostic defines for user_regs_struct fields. */
+#define user_syscall_nr gpr[0]
+#define user_arg0 gpr[3]
+#define user_arg1 gpr[4]
+#define user_arg2 gpr[5]
+#define user_arg3 gpr[6]
+#define user_arg4 gpr[7]
+#define user_arg5 gpr[8]
+#define user_ip nip
+
+#define PTRACE_SYSEMU 0x1d
+
+static int nerrs;
+
+static void wait_trap(pid_t chld)
+{
+ siginfo_t si;
+
+ if (waitid(P_PID, chld, &si, WEXITED|WSTOPPED) != 0)
+ err(1, "waitid");
+ if (si.si_pid != chld)
+ errx(1, "got unexpected pid in event\n");
+ if (si.si_code != CLD_TRAPPED)
+ errx(1, "got unexpected event type %d\n", si.si_code);
+}
+
+static void test_ptrace_syscall_restart(void)
+{
+ int status;
+ struct pt_regs regs;
+ pid_t chld;
+
+ printf("[RUN]\tptrace-induced syscall restart\n");
+
+ chld = fork();
+ if (chld < 0)
+ err(1, "fork");
+
+ /*
+ * The child process runs four syscalls once it is being traced:
+ *
+ * 1) getpid()
+ * 2) gettid()
+ * 3) tgkill() -> sends itself SIGSTOP
+ * 4) gettid() -> where the actual testing happens
+ */
+ if (chld == 0) {
+ if (ptrace(PTRACE_TRACEME, 0, 0, 0) != 0)
+ err(1, "PTRACE_TRACEME");
+
+ pid_t pid = getpid(), tid = syscall(SYS_gettid);
+
+ printf("\tChild will make one syscall\n");
+ syscall(SYS_tgkill, pid, tid, SIGSTOP);
+
+ syscall(SYS_gettid, 10, 11, 12, 13, 14, 15);
+ _exit(0);
+ }
+ /* Parent process below */
+
+ /* Wait for SIGSTOP sent by tgkill above. */
+ if (waitpid(chld, &status, 0) != chld || !WIFSTOPPED(status))
+ err(1, "waitpid");
+
+ printf("[RUN]\tSYSEMU\n");
+ if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+ err(1, "PTRACE_SYSEMU");
+ wait_trap(chld);
+
+ if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+ err(1, "PTRACE_GETREGS");
+
+ /*
+ * Ptrace trapped prior to executing the syscall, thus r3 still has
+ * the syscall number instead of the sys_gettid() result
+ */
+ if (regs.user_syscall_nr != SYS_gettid ||
+ regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
+ regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
+ regs.user_arg4 != 14 || regs.user_arg5 != 15) {
+ printf("[FAIL]\tInitial args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+ (unsigned long)regs.user_syscall_nr,
+ (unsigned long)regs.user_arg0,
+ (unsigned long)regs.user_arg1,
+ (unsigned long)regs.user_arg2,
+ (unsigned long)regs.user_arg3,
+ (unsigned long)regs.user_arg4,
+ (unsigned long)regs.user_arg5);
+ nerrs++;
+ } else {
+ printf("[OK]\tInitial nr and args are correct\n"); }
+
+ printf("[RUN]\tRestart the syscall (ip = 0x%lx)\n",
+ (unsigned long)regs.user_ip);
+
+ /*
+ * Rewind to retry the same syscall again. This will basically test
+ * the rewind process together with PTRACE_SETREGS and PTRACE_GETREGS.
+ */
+ regs.user_ip -= 4;
+ if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
+ err(1, "PTRACE_SETREGS");
+
+ if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+ err(1, "PTRACE_SYSEMU");
+ wait_trap(chld);
+
+ if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+ err(1, "PTRACE_GETREGS");
+
+ if (regs.user_syscall_nr != SYS_gettid ||
+ regs.user_arg0 != 10 || regs.user_arg1 != 11 ||
+ regs.user_arg2 != 12 || regs.user_arg3 != 13 ||
+ regs.user_arg4 != 14 || regs.user_arg5 != 15) {
+ printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+ (unsigned long)regs.user_syscall_nr,
+ (unsigned long)regs.user_arg0,
+ (unsigned long)regs.user_arg1,
+ (unsigned long)regs.user_arg2,
+ (unsigned long)regs.user_arg3,
+ (unsigned long)regs.user_arg4,
+ (unsigned long)regs.user_arg5);
+ nerrs++;
+ } else {
+ printf("[OK]\tRestarted nr and args are correct\n");
+ }
+
+ printf("[RUN]\tChange nr and args and restart the syscall (ip = 0x%lx)\n",
+ (unsigned long)regs.user_ip);
+
+ /*
+ * Inject a new syscall (getpid) in the same place the previous
+ * syscall (gettid), rewind and re-execute.
+ */
+ regs.user_syscall_nr = SYS_getpid;
+ regs.user_arg0 = 20;
+ regs.user_arg1 = 21;
+ regs.user_arg2 = 22;
+ regs.user_arg3 = 23;
+ regs.user_arg4 = 24;
+ regs.user_arg5 = 25;
+ regs.user_ip -= 4;
+
+ if (ptrace(PTRACE_SETREGS, chld, 0, &regs) != 0)
+ err(1, "PTRACE_SETREGS");
+
+ if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
+ err(1, "PTRACE_SYSEMU");
+ wait_trap(chld);
+
+ if (ptrace(PTRACE_GETREGS, chld, 0, &regs) != 0)
+ err(1, "PTRACE_GETREGS");
+
+ /* Check that ptrace stopped at the newly injected syscall and
+ * guarantee that it hasn't executed, i.e., user_args still contain
+ * the arguments rather than the syscall return value.
+ */
+ if (regs.user_syscall_nr != SYS_getpid
+ || regs.user_arg0 != 20 || regs.user_arg1 != 21
+ || regs.user_arg2 != 22 || regs.user_arg3 != 23
+ || regs.user_arg4 != 24 || regs.user_arg5 != 25) {
+
+ printf("[FAIL]\tRestart nr or args are wrong (nr=%lu, args=%lu %lu %lu %lu %lu %lu)\n",
+ (unsigned long)regs.user_syscall_nr,
+ (unsigned long)regs.user_arg0,
+ (unsigned long)regs.user_arg1,
+ (unsigned long)regs.user_arg2,
+ (unsigned long)regs.user_arg3,
+ (unsigned long)regs.user_arg4,
+ (unsigned long)regs.user_arg5);
+ nerrs++;
+ } else {
+ printf("[OK]\tReplacement nr and args are correct\n");
+ }
+
+ if (ptrace(PTRACE_CONT, chld, 0, 0) != 0)
+ err(1, "PTRACE_CONT");
+
+ if (waitpid(chld, &status, 0) != chld)
+ err(1, "waitpid");
+
+ /* Guarantee that the process executed properly, returning 0 */
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ printf("[FAIL]\tChild failed\n");
+ nerrs++;
+ } else {
+ printf("[OK]\tChild exited cleanly\n");
+ }
+}
+
+int ptrace_syscall(void)
+{
+ test_ptrace_syscall_restart();
+
+ return nerrs;
+}
+
+int main(void)
+{
+ return test_harness(ptrace_syscall, "ptrace_syscall");
+}
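The rewind trick above relies on powerpc's fixed-width encoding: every instruction, including sc, is 4 bytes, so stepping NIP back by 4 re-executes the trapped syscall on the next PTRACE_SYSEMU. A condensed sketch of that rewind step, assuming regs was just fetched with PTRACE_GETREGS as in the test:

#include <sys/ptrace.h>
#include <err.h>

static void rewind_and_resume(pid_t chld, struct pt_regs *regs)
{
        regs->nip -= 4; /* back onto the sc instruction */
        if (ptrace(PTRACE_SETREGS, chld, 0, regs) != 0)
                err(1, "PTRACE_SETREGS");
        if (ptrace(PTRACE_SYSEMU, chld, 0, 0) != 0)
                err(1, "PTRACE_SYSEMU");
}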
diff --git a/tools/testing/selftests/powerpc/security/Makefile b/tools/testing/selftests/powerpc/security/Makefile
new file mode 100644
index 000000000000..44690f1bb26a
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+TEST_GEN_PROGS := rfi_flush
+
+CFLAGS += -I../../../../../usr/include
+
+include ../../lib.mk
+
+$(TEST_GEN_PROGS): ../harness.c ../utils.c
diff --git a/tools/testing/selftests/powerpc/security/rfi_flush.c b/tools/testing/selftests/powerpc/security/rfi_flush.c
new file mode 100644
index 000000000000..564ed45bbf73
--- /dev/null
+++ b/tools/testing/selftests/powerpc/security/rfi_flush.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2018 IBM Corporation.
+ */
+
+#define __SANE_USERSPACE_TYPES__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "utils.h"
+
+#define CACHELINE_SIZE 128
+
+struct perf_event_read {
+ __u64 nr;
+ __u64 l1d_misses;
+};
+
+static inline __u64 load(void *addr)
+{
+ __u64 tmp;
+
+ asm volatile("ld %0,0(%1)" : "=r"(tmp) : "b"(addr));
+
+ return tmp;
+}
+
+static void syscall_loop(char *p, unsigned long iterations,
+ unsigned long zero_size)
+{
+ for (unsigned long i = 0; i < iterations; i++) {
+ for (unsigned long j = 0; j < zero_size; j += CACHELINE_SIZE)
+ load(p + j);
+ getppid();
+ }
+}
+
+int rfi_flush_test(void)
+{
+ char *p;
+ int repetitions = 10;
+ int fd, passes = 0, iter, rc = 0;
+ struct perf_event_read v;
+ __u64 l1d_misses_total = 0;
+ unsigned long iterations = 100000, zero_size = 24 * 1024;
+ int rfi_flush_org, rfi_flush;
+
+ SKIP_IF(geteuid() != 0);
+
+ if (read_debugfs_file("powerpc/rfi_flush", &rfi_flush_org)) {
+ perror("Unable to read powerpc/rfi_flush debugfs file");
+ SKIP_IF(1);
+ }
+
+ rfi_flush = rfi_flush_org;
+
+ fd = perf_event_open_counter(PERF_TYPE_RAW, /* L1d miss */ 0x400f0, -1);
+ FAIL_IF(fd < 0);
+
+ p = (char *)memalign(CACHELINE_SIZE, zero_size);
+
+ FAIL_IF(perf_event_enable(fd));
+
+ set_dscr(1);
+
+ iter = repetitions;
+
+again:
+ FAIL_IF(perf_event_reset(fd));
+
+ syscall_loop(p, iterations, zero_size);
+
+ FAIL_IF(read(fd, &v, sizeof(v)) != sizeof(v));
+
+ /* Expect at least zero_size/CACHELINE_SIZE misses per iteration */
+ if (v.l1d_misses >= (iterations * zero_size / CACHELINE_SIZE) && rfi_flush)
+ passes++;
+ else if (v.l1d_misses < iterations && !rfi_flush)
+ passes++;
+
+ l1d_misses_total += v.l1d_misses;
+
+ while (--iter)
+ goto again;
+
+ if (passes < repetitions) {
+ printf("FAIL (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d failures]\n",
+ rfi_flush, l1d_misses_total, rfi_flush ? '<' : '>',
+ rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+ repetitions - passes, repetitions);
+ rc = 1;
+ } else
+ printf("PASS (L1D misses with rfi_flush=%d: %llu %c %lu) [%d/%d pass]\n",
+ rfi_flush, l1d_misses_total, rfi_flush ? '>' : '<',
+ rfi_flush ? (repetitions * iterations * zero_size / CACHELINE_SIZE) : iterations,
+ passes, repetitions);
+
+ if (rfi_flush == rfi_flush_org) {
+ rfi_flush = !rfi_flush_org;
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush) < 0) {
+ perror("error writing to powerpc/rfi_flush debugfs file");
+ return 1;
+ }
+ iter = repetitions;
+ l1d_misses_total = 0;
+ passes = 0;
+ goto again;
+ }
+
+ perf_event_disable(fd);
+ close(fd);
+
+ set_dscr(0);
+
+ if (write_debugfs_file("powerpc/rfi_flush", rfi_flush_org) < 0) {
+ perror("unable to restore original value of powerpc/rfi_flush debugfs file");
+ return 1;
+ }
+
+ return rc;
+}
+
+int main(int argc, char *argv[])
+{
+ return test_harness(rfi_flush_test, "rfi_flush_test");
+}
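The pass criterion is simple arithmetic: each syscall_loop() iteration touches zero_size / CACHELINE_SIZE = 24576 / 128 = 192 cache lines, so with the flush enabled every getppid() should cost at least 192 refills, for a threshold of 100000 * 192 = 19,200,000 L1D misses per pass. A standalone sketch of that computation:

#include <stdio.h>

int main(void)
{
        unsigned long iterations = 100000, zero_size = 24 * 1024;
        unsigned long cacheline = 128;

        /* Cache lines re-fetched per syscall when the L1D is flushed. */
        unsigned long lines = zero_size / cacheline;  /* 192 */
        unsigned long threshold = iterations * lines; /* 19200000 */

        printf("expect >= %lu misses per pass with rfi_flush=1\n", threshold);
        return 0;
}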
diff --git a/tools/testing/selftests/powerpc/tm/tm-tmspr.c b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
index 2bda81c7bf23..df1d7d4b1c89 100644
--- a/tools/testing/selftests/powerpc/tm/tm-tmspr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-tmspr.c
@@ -98,7 +98,7 @@ void texasr(void *in)
int test_tmspr()
{
- pthread_t thread;
+ pthread_t *thread;
int thread_num;
unsigned long i;
@@ -107,21 +107,28 @@ int test_tmspr()
/* To cause some context switching */
thread_num = 10 * sysconf(_SC_NPROCESSORS_ONLN);
+ thread = malloc(thread_num * sizeof(pthread_t));
+ if (thread == NULL)
+ return EXIT_FAILURE;
+
/* Test TFIAR and TFHAR */
- for (i = 0 ; i < thread_num ; i += 2){
- if (pthread_create(&thread, NULL, (void*)tfiar_tfhar, (void *)i))
+ for (i = 0; i < thread_num; i += 2) {
+ if (pthread_create(&thread[i], NULL, (void *)tfiar_tfhar,
+ (void *)i))
return EXIT_FAILURE;
}
- if (pthread_join(thread, NULL) != 0)
- return EXIT_FAILURE;
-
/* Test TEXASR */
- for (i = 0 ; i < thread_num ; i++){
- if (pthread_create(&thread, NULL, (void*)texasr, (void *)i))
+ for (i = 1; i < thread_num; i += 2) {
+ if (pthread_create(&thread[i], NULL, (void *)texasr, (void *)i))
return EXIT_FAILURE;
}
- if (pthread_join(thread, NULL) != 0)
- return EXIT_FAILURE;
+
+ for (i = 0; i < thread_num; i++) {
+ if (pthread_join(thread[i], NULL) != 0)
+ return EXIT_FAILURE;
+ }
+
+ free(thread);
if (passed)
return 0;
diff --git a/tools/testing/selftests/powerpc/tm/tm-unavailable.c b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
index 156c8e750259..09894f4ff62e 100644
--- a/tools/testing/selftests/powerpc/tm/tm-unavailable.c
+++ b/tools/testing/selftests/powerpc/tm/tm-unavailable.c
@@ -236,7 +236,8 @@ void *tm_una_ping(void *input)
}
/* Check if we were not expecting a failure and one occurred. */
- if (!expecting_failure() && is_failure(cr_)) {
+ if (!expecting_failure() && is_failure(cr_) &&
+ !failure_is_reschedule()) {
printf("\n\tUnexpected transaction failure 0x%02lx\n\t",
failure_code());
return (void *) -1;
@@ -244,9 +245,11 @@ void *tm_una_ping(void *input)
/*
* Check if TM failed due to the cause we were expecting. 0xda is a
- * TM_CAUSE_FAC_UNAV cause, otherwise it's an unexpected cause.
+ * TM_CAUSE_FAC_UNAV cause, otherwise it's an unexpected cause, unless
+ * it was caused by a reschedule.
*/
- if (is_failure(cr_) && !failure_is_unavailable()) {
+ if (is_failure(cr_) && !failure_is_unavailable() &&
+ !failure_is_reschedule()) {
printf("\n\tUnexpected failure cause 0x%02lx\n\t",
failure_code());
return (void *) -1;
diff --git a/tools/testing/selftests/powerpc/tm/tm.h b/tools/testing/selftests/powerpc/tm/tm.h
index df4204247d45..5518b1d4ef8b 100644
--- a/tools/testing/selftests/powerpc/tm/tm.h
+++ b/tools/testing/selftests/powerpc/tm/tm.h
@@ -52,6 +52,15 @@ static inline bool failure_is_unavailable(void)
return (failure_code() & TM_CAUSE_FAC_UNAV) == TM_CAUSE_FAC_UNAV;
}
+static inline bool failure_is_reschedule(void)
+{
+ if ((failure_code() & TM_CAUSE_RESCHED) == TM_CAUSE_RESCHED ||
+ (failure_code() & TM_CAUSE_KVM_RESCHED) == TM_CAUSE_KVM_RESCHED)
+ return true;
+
+ return false;
+}
+
static inline bool failure_is_nesting(void)
{
return (__builtin_get_texasru() & 0x400000);
diff --git a/tools/testing/selftests/powerpc/utils.c b/tools/testing/selftests/powerpc/utils.c
index aa8fc1e6365b..43c342845be0 100644
--- a/tools/testing/selftests/powerpc/utils.c
+++ b/tools/testing/selftests/powerpc/utils.c
@@ -10,16 +10,22 @@
#include <fcntl.h>
#include <link.h>
#include <sched.h>
+#include <signal.h>
#include <stdio.h>
+#include <stdlib.h>
#include <string.h>
+#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <unistd.h>
+#include <asm/unistd.h>
+#include <linux/limits.h>
#include "utils.h"
static char auxv[4096];
+extern unsigned int dscr_insn[];
int read_auxv(char *buf, ssize_t buf_size)
{
@@ -121,3 +127,149 @@ bool is_ppc64le(void)
return strcmp(uts.machine, "ppc64le") == 0;
}
+
+int read_debugfs_file(char *debugfs_file, int *result)
+{
+ int rc = -1, fd;
+ char path[PATH_MAX];
+ char value[16];
+
+ strcpy(path, "/sys/kernel/debug/");
+ strncat(path, debugfs_file, PATH_MAX - strlen(path) - 1);
+
+ if ((fd = open(path, O_RDONLY)) < 0)
+ return rc;
+
+ if ((rc = read(fd, value, sizeof(value))) < 0) {
+ close(fd);
+ return rc;
+ }
+
+ value[15] = 0;
+ *result = atoi(value);
+ close(fd);
+
+ return 0;
+}
+
+int write_debugfs_file(char *debugfs_file, int result)
+{
+ int rc = -1, fd;
+ char path[PATH_MAX];
+ char value[16];
+
+ strcpy(path, "/sys/kernel/debug/");
+ strncat(path, debugfs_file, PATH_MAX - strlen(path) - 1);
+
+ if ((fd = open(path, O_WRONLY)) < 0)
+ return rc;
+
+ snprintf(value, 16, "%d", result);
+
+ if ((rc = write(fd, value, strlen(value))) < 0) {
+ close(fd);
+ return rc;
+ }
+
+ close(fd);
+
+ return 0;
+}
+
+static long perf_event_open(struct perf_event_attr *hw_event, pid_t pid,
+ int cpu, int group_fd, unsigned long flags)
+{
+ return syscall(__NR_perf_event_open, hw_event, pid, cpu,
+ group_fd, flags);
+}
+
+static void perf_event_attr_init(struct perf_event_attr *event_attr,
+ unsigned int type,
+ unsigned long config)
+{
+ memset(event_attr, 0, sizeof(*event_attr));
+
+ event_attr->type = type;
+ event_attr->size = sizeof(struct perf_event_attr);
+ event_attr->config = config;
+ event_attr->read_format = PERF_FORMAT_GROUP;
+ event_attr->disabled = 1;
+ event_attr->exclude_kernel = 1;
+ event_attr->exclude_hv = 1;
+ event_attr->exclude_guest = 1;
+}
+
+int perf_event_open_counter(unsigned int type,
+ unsigned long config, int group_fd)
+{
+ int fd;
+ struct perf_event_attr event_attr;
+
+ perf_event_attr_init(&event_attr, type, config);
+
+ fd = perf_event_open(&event_attr, 0, -1, group_fd, 0);
+
+ if (fd < 0)
+ perror("perf_event_open() failed");
+
+ return fd;
+}
+
+int perf_event_enable(int fd)
+{
+ if (ioctl(fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP) == -1) {
+ perror("error while enabling perf events");
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_event_disable(int fd)
+{
+ if (ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP) == -1) {
+ perror("error disabling perf events");
+ return -1;
+ }
+
+ return 0;
+}
+
+int perf_event_reset(int fd)
+{
+ if (ioctl(fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP) == -1) {
+ perror("error resetting perf events");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void sigill_handler(int signr, siginfo_t *info, void *unused)
+{
+ static int warned = 0;
+ ucontext_t *ctx = (ucontext_t *)unused;
+ unsigned long *pc = &UCONTEXT_NIA(ctx);
+
+ if (*pc == (unsigned long)&dscr_insn) {
+ if (!warned++)
+ printf("WARNING: Skipping over dscr setup. Consider running 'ppc64_cpu --dscr=1' manually.\n");
+ *pc += 4;
+ } else {
+ printf("SIGILL at %p\n", pc);
+ abort();
+ }
+}
+
+void set_dscr(unsigned long val)
+{
+ static int init = 0;
+ struct sigaction sa;
+
+ if (!init) {
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_sigaction = sigill_handler;
+ sa.sa_flags = SA_SIGINFO;
+ if (sigaction(SIGILL, &sa, NULL))
+ perror("sigill_handler");
+ init = 1;
+ }
+
+ asm volatile("dscr_insn: mtspr %1,%0" : : "r" (val), "i" (SPRN_DSCR));
+}
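Taken together, the new utils.c helpers cover the measurement harness that rfi_flush.c builds on: flip a debugfs knob, count a raw perf event around a workload, and tolerate a SIGILL from the DSCR write. A minimal sketch of how the pieces compose (example() is a placeholder; 0x400f0 is the raw L1d-miss event code the test uses):

#include <unistd.h>
#include "utils.h"

static int example(void)
{
        int orig, fd;

        FAIL_IF(read_debugfs_file("powerpc/rfi_flush", &orig));

        fd = perf_event_open_counter(PERF_TYPE_RAW, 0x400f0, -1);
        FAIL_IF(fd < 0);

        set_dscr(1); /* SIGILL-tolerant DSCR setup */
        FAIL_IF(perf_event_enable(fd));
        FAIL_IF(perf_event_reset(fd));
        /* ... run the workload being measured, then read(fd, ...) ... */
        FAIL_IF(perf_event_disable(fd));
        set_dscr(0);
        close(fd);

        FAIL_IF(write_debugfs_file("powerpc/rfi_flush", orig));
        return 0;
}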
diff --git a/tools/usb/usbip/libsrc/usbip_host_common.c b/tools/usb/usbip/libsrc/usbip_host_common.c
index dc93fadbee96..d79c7581b175 100644
--- a/tools/usb/usbip/libsrc/usbip_host_common.c
+++ b/tools/usb/usbip/libsrc/usbip_host_common.c
@@ -43,7 +43,7 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
int size;
int fd;
int length;
- char status;
+ char status[2] = { 0 };
int value = 0;
size = snprintf(status_attr_path, sizeof(status_attr_path),
@@ -61,14 +61,14 @@ static int32_t read_attr_usbip_status(struct usbip_usb_device *udev)
return -1;
}
- length = read(fd, &status, 1);
+ length = read(fd, status, 1);
if (length < 0) {
err("error reading attribute %s", status_attr_path);
close(fd);
return -1;
}
- value = atoi(&status);
+ value = atoi(status);
return value;
}
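The underlying bug is that atoi() needs a NUL-terminated string: reading one byte into a lone char and calling atoi(&status) runs off the end of the variable. The two-byte, zero-initialized buffer keeps the terminator in place. A standalone demonstration of the fixed pattern:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char status[2] = { 0 }; /* second byte stays 0: a valid terminator */

        /* Simulate the one-byte sysfs read. */
        memcpy(status, "1", 1);

        printf("parsed %d\n", atoi(status)); /* safe: buffer holds "1\0" */
        return 0;
}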
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c
index 4204359c9fee..8159fd98680b 100644
--- a/tools/usb/usbip/libsrc/vhci_driver.c
+++ b/tools/usb/usbip/libsrc/vhci_driver.c
@@ -150,7 +150,7 @@ static int get_nports(struct udev_device *hc_device)
static int vhci_hcd_filter(const struct dirent *dirent)
{
- return strcmp(dirent->d_name, "vhci_hcd") >= 0;
+ return !strncmp(dirent->d_name, "vhci_hcd.", 9);
}
static int get_ncontrollers(void)
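The old filter used strcmp(...) >= 0, which matches any directory entry that sorts at or after "vhci_hcd", not just vhci devices; the new prefix match accepts only vhci_hcd.N platform devices. A standalone illustration (the sample names are hypothetical):

#include <stdio.h>
#include <string.h>

static int old_filter(const char *name)
{
        return strcmp(name, "vhci_hcd") >= 0; /* also matches "zram0"! */
}

static int new_filter(const char *name)
{
        return !strncmp(name, "vhci_hcd.", 9); /* only vhci_hcd.N */
}

int main(void)
{
        const char *names[] = { "vhci_hcd.0", "vhci_hcd.1", "zram0", "usbip" };

        for (int i = 0; i < 4; i++)
                printf("%-12s old=%d new=%d\n", names[i],
                       old_filter(names[i]), new_filter(names[i]));
        return 0;
}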
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 150c8a69cdaf..23774970c9df 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -120,8 +120,9 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret, cpu;
- if (type)
- return -EINVAL;
+ ret = kvm_arm_setup_stage2(kvm, type);
+ if (ret)
+ return ret;
kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
if (!kvm->arch.last_vcpu_ran)
@@ -212,6 +213,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_READONLY_MEM:
case KVM_CAP_MP_STATE:
case KVM_CAP_IMMEDIATE_EXIT:
+ case KVM_CAP_VCPU_EVENTS:
r = 1;
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
@@ -240,7 +242,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = 1;
break;
default:
- r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
+ r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
break;
}
return r;
@@ -544,7 +546,7 @@ static void update_vttbr(struct kvm *kvm)
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm->arch.pgd);
- BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
+ BUG_ON(pgd_phys & ~kvm_vttbr_baddr_mask(kvm));
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
@@ -1295,8 +1297,6 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
-
- kvm_arm_init_debug();
}
static void cpu_hyp_reset(void)
@@ -1309,16 +1309,12 @@ static void cpu_hyp_reinit(void)
{
cpu_hyp_reset();
- if (is_kernel_in_hyp_mode()) {
- /*
- * __cpu_init_stage2() is safe to call even if the PM
- * event was cancelled before the CPU was reset.
- */
- __cpu_init_stage2();
+ if (is_kernel_in_hyp_mode())
kvm_timer_init_vhe();
- } else {
+ else
cpu_init_hyp_mode(NULL);
- }
+
+ kvm_arm_init_debug();
if (vgic_present)
kvm_vgic_init_cpu_hardware();
@@ -1412,6 +1408,8 @@ static int init_common_resources(void)
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+ kvm_set_ipa_limit();
+
return 0;
}
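With kvm_arch_init_vm() forwarding the machine type to kvm_arm_setup_stage2() instead of rejecting any nonzero value, userspace can request an IPA width at VM creation. A hedged sketch of the userspace side, assuming the KVM_VM_TYPE_ARM_IPA_SIZE encoding added by the companion uapi patch in this series:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Assumed from the companion uapi patch; guard in case it is absent. */
#ifndef KVM_VM_TYPE_ARM_IPA_SIZE
#define KVM_VM_TYPE_ARM_IPA_SIZE(bits) ((bits) & 0xffUL)
#endif

int create_vm_with_40bit_ipa(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0)
                return -1;

        /* type = requested IPA width; 0 keeps the legacy default. */
        return ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(40));
}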
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 1a2c3a1c56ce..5eca48bdb1a6 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -45,7 +45,6 @@ static phys_addr_t hyp_idmap_vector;
static unsigned long io_map_base;
-#define S2_PGD_SIZE (PTRS_PER_S2_PGD * sizeof(pgd_t))
#define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
#define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
@@ -150,20 +149,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
{
- pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
- stage2_pgd_clear(pgd);
+ pud_t *pud_table __maybe_unused = stage2_pud_offset(kvm, pgd, 0UL);
+ stage2_pgd_clear(kvm, pgd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pud_free(pud_table);
+ stage2_pud_free(kvm, pud_table);
put_page(virt_to_page(pgd));
}
static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
- pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
- VM_BUG_ON(stage2_pud_huge(*pud));
- stage2_pud_clear(pud);
+ pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(kvm, pud, 0);
+ VM_BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
- stage2_pmd_free(pmd_table);
+ stage2_pmd_free(kvm, pmd_table);
put_page(virt_to_page(pud));
}
@@ -252,7 +251,7 @@ static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
}
} while (pte++, addr += PAGE_SIZE, addr != end);
- if (stage2_pte_table_empty(start_pte))
+ if (stage2_pte_table_empty(kvm, start_pte))
clear_stage2_pmd_entry(kvm, pmd, start_addr);
}
@@ -262,9 +261,9 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
phys_addr_t next, start_addr = addr;
pmd_t *pmd, *start_pmd;
- start_pmd = pmd = stage2_pmd_offset(pud, addr);
+ start_pmd = pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
pmd_t old_pmd = *pmd;
@@ -281,7 +280,7 @@ static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
}
} while (pmd++, addr = next, addr != end);
- if (stage2_pmd_table_empty(start_pmd))
+ if (stage2_pmd_table_empty(kvm, start_pmd))
clear_stage2_pud_entry(kvm, pud, start_addr);
}
@@ -291,14 +290,14 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
phys_addr_t next, start_addr = addr;
pud_t *pud, *start_pud;
- start_pud = pud = stage2_pud_offset(pgd, addr);
+ start_pud = pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud)) {
pud_t old_pud = *pud;
- stage2_pud_clear(pud);
+ stage2_pud_clear(kvm, pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
kvm_flush_dcache_pud(old_pud);
put_page(virt_to_page(pud));
@@ -308,7 +307,7 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
}
} while (pud++, addr = next, addr != end);
- if (stage2_pud_table_empty(start_pud))
+ if (stage2_pud_table_empty(kvm, start_pud))
clear_stage2_pgd_entry(kvm, pgd, start_addr);
}
@@ -332,7 +331,7 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
assert_spin_locked(&kvm->mmu_lock);
WARN_ON(size & ~PAGE_MASK);
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Make sure the page table is still active, as another thread
@@ -341,8 +340,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
*/
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (!stage2_pgd_none(*pgd))
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
unmap_stage2_puds(kvm, pgd, addr, next);
/*
* If the range is too large, release the kvm->mmu_lock
@@ -371,9 +370,9 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd))
kvm_flush_dcache_pmd(*pmd);
@@ -389,11 +388,11 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
- if (stage2_pud_huge(*pud))
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
+ if (stage2_pud_huge(kvm, *pud))
kvm_flush_dcache_pud(*pud);
else
stage2_flush_pmds(kvm, pud, addr, next);
@@ -409,10 +408,11 @@ static void stage2_flush_memslot(struct kvm *kvm,
phys_addr_t next;
pgd_t *pgd;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
- next = stage2_pgd_addr_end(addr, end);
- stage2_flush_puds(kvm, pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (!stage2_pgd_none(kvm, *pgd))
+ stage2_flush_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -897,7 +897,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
}
/* Allocate the HW PGD, making sure that each page gets its own refcount */
- pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+ pgd = alloc_pages_exact(stage2_pgd_size(kvm), GFP_KERNEL | __GFP_ZERO);
if (!pgd)
return -ENOMEM;
@@ -986,7 +986,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
spin_lock(&kvm->mmu_lock);
if (kvm->arch.pgd) {
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ unmap_stage2_range(kvm, 0, kvm_phys_size(kvm));
pgd = READ_ONCE(kvm->arch.pgd);
kvm->arch.pgd = NULL;
}
@@ -994,7 +994,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
/* Free the HW pgd, one page at a time */
if (pgd)
- free_pages_exact(pgd, S2_PGD_SIZE);
+ free_pages_exact(pgd, stage2_pgd_size(kvm));
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1003,16 +1003,16 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
pgd_t *pgd;
pud_t *pud;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
- if (WARN_ON(stage2_pgd_none(*pgd))) {
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
+ if (stage2_pgd_none(kvm, *pgd)) {
if (!cache)
return NULL;
pud = mmu_memory_cache_alloc(cache);
- stage2_pgd_populate(pgd, pud);
+ stage2_pgd_populate(kvm, pgd, pud);
get_page(virt_to_page(pgd));
}
- return stage2_pud_offset(pgd, addr);
+ return stage2_pud_offset(kvm, pgd, addr);
}
static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -1025,15 +1025,15 @@ static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
if (!pud)
return NULL;
- if (stage2_pud_none(*pud)) {
+ if (stage2_pud_none(kvm, *pud)) {
if (!cache)
return NULL;
pmd = mmu_memory_cache_alloc(cache);
- stage2_pud_populate(pud, pmd);
+ stage2_pud_populate(kvm, pud, pmd);
get_page(virt_to_page(pud));
}
- return stage2_pmd_offset(pud, addr);
+ return stage2_pmd_offset(kvm, pud, addr);
}
static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
@@ -1207,8 +1207,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
if (writable)
pte = kvm_s2pte_mkwrite(pte);
- ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
- KVM_NR_MEM_OBJS);
+ ret = mmu_topup_memory_cache(&cache,
+ kvm_mmu_cache_min_pages(kvm),
+ KVM_NR_MEM_OBJS);
if (ret)
goto out;
spin_lock(&kvm->mmu_lock);
@@ -1230,8 +1231,14 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
{
kvm_pfn_t pfn = *pfnp;
gfn_t gfn = *ipap >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(pfn);
- if (PageTransCompoundMap(pfn_to_page(pfn))) {
+ /*
+ * PageTransCompoundMap() returns true for THP and
+ * hugetlbfs. Make sure the adjustment is done only for THP
+ * pages.
+ */
+ if (!PageHuge(page) && PageTransCompoundMap(page)) {
unsigned long mask;
/*
* The address we faulted on is backed by a transparent huge
@@ -1296,19 +1303,21 @@ static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
/**
* stage2_wp_pmds - write protect PUD range
+ * @kvm: kvm instance for the VM
* @pud: pointer to pud entry
* @addr: range start address
* @end: range end address
*/
-static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_pmds(struct kvm *kvm, pud_t *pud,
+ phys_addr_t addr, phys_addr_t end)
{
pmd_t *pmd;
phys_addr_t next;
- pmd = stage2_pmd_offset(pud, addr);
+ pmd = stage2_pmd_offset(kvm, pud, addr);
do {
- next = stage2_pmd_addr_end(addr, end);
+ next = stage2_pmd_addr_end(kvm, addr, end);
if (!pmd_none(*pmd)) {
if (pmd_thp_or_huge(*pmd)) {
if (!kvm_s2pmd_readonly(pmd))
@@ -1328,18 +1337,19 @@ static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
*
* Process PUD entries; for a huge PUD we cause a panic.
*/
-static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
+static void stage2_wp_puds(struct kvm *kvm, pgd_t *pgd,
+ phys_addr_t addr, phys_addr_t end)
{
pud_t *pud;
phys_addr_t next;
- pud = stage2_pud_offset(pgd, addr);
+ pud = stage2_pud_offset(kvm, pgd, addr);
do {
- next = stage2_pud_addr_end(addr, end);
- if (!stage2_pud_none(*pud)) {
+ next = stage2_pud_addr_end(kvm, addr, end);
+ if (!stage2_pud_none(kvm, *pud)) {
/* TODO: PUD not supported, revisit later if supported */
- BUG_ON(stage2_pud_huge(*pud));
- stage2_wp_pmds(pud, addr, next);
+ BUG_ON(stage2_pud_huge(kvm, *pud));
+ stage2_wp_pmds(kvm, pud, addr, next);
}
} while (pud++, addr = next, addr != end);
}
@@ -1355,7 +1365,7 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
pgd_t *pgd;
phys_addr_t next;
- pgd = kvm->arch.pgd + stage2_pgd_index(addr);
+ pgd = kvm->arch.pgd + stage2_pgd_index(kvm, addr);
do {
/*
* Release kvm_mmu_lock periodically if the memory region is
@@ -1369,9 +1379,9 @@ static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
cond_resched_lock(&kvm->mmu_lock);
if (!READ_ONCE(kvm->arch.pgd))
break;
- next = stage2_pgd_addr_end(addr, end);
- if (stage2_pgd_present(*pgd))
- stage2_wp_puds(pgd, addr, next);
+ next = stage2_pgd_addr_end(kvm, addr, end);
+ if (stage2_pgd_present(kvm, *pgd))
+ stage2_wp_puds(kvm, pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
@@ -1514,7 +1524,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
up_read(&current->mm->mmap_sem);
/* We need minimum second+third level pages */
- ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+ ret = mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm),
KVM_NR_MEM_OBJS);
if (ret)
return ret;
@@ -1757,7 +1767,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
/* Userspace should not be able to register out-of-bounds IPAs */
- VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
+ VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
if (fault_status == FSC_ACCESS) {
handle_access_fault(vcpu, fault_ipa);
@@ -2056,7 +2066,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* space addressable by the KVM guest IPA space.
*/
if (memslot->base_gfn + memslot->npages >=
- (KVM_PHYS_SIZE >> PAGE_SHIFT))
+ (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
down_read(&current->mm->mmap_sem);
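The mmu.c hunks above thread the struct kvm pointer through every stage2_* page-table helper so that the stage 2 geometry (IPA size, PGD size, level folding) can be chosen per VM rather than fixed at build time by KVM_PHYS_SIZE and S2_PGD_SIZE; the transparent_hugepage_adjust() hunk separately ensures hugetlbfs pages are never mistaken for THP. A minimal sketch of what per-VM limit helpers of this shape could look like, assuming a hypothetical kvm->arch.phys_shift field holding the VM's IPA width in bits (the field name is illustrative, not necessarily what the series uses):

	/*
	 * Sketch only: per-VM stage 2 limits derived from an assumed
	 * kvm->arch.phys_shift field recording the VM's IPA size in bits.
	 * Kernel context (struct kvm, phys_addr_t, BIT_ULL) is assumed.
	 */
	static inline phys_addr_t kvm_phys_size_sketch(struct kvm *kvm)
	{
		return BIT_ULL(kvm->arch.phys_shift);	/* e.g. 1ULL << 40 for a 40-bit IPA */
	}

	static inline phys_addr_t kvm_phys_mask_sketch(struct kvm *kvm)
	{
		return kvm_phys_size_sketch(kvm) - 1;	/* used for range checks */
	}

With helpers of this shape, checks such as VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)) above become per-VM instead of global.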
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 12502251727e..eb2a390a6c86 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -241,13 +241,6 @@ static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
list_for_each_entry(dev, &(its)->device_list, dev_list) \
list_for_each_entry(ite, &(dev)->itt_head, ite_list)
-/*
- * We only implement 48 bits of PA at the moment, although the ITS
- * supports more. Let's be restrictive here.
- */
-#define BASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 16))
-#define CBASER_ADDRESS(x) ((x) & GENMASK_ULL(47, 12))
-
#define GIC_LPI_OFFSET 8192
#define VITS_TYPER_IDBITS 16
@@ -759,6 +752,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
{
int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
+ phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
int esz = GITS_BASER_ENTRY_SIZE(baser);
int index;
gfn_t gfn;
@@ -783,7 +777,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (id >= (l1_tbl_size / esz))
return false;
- addr = BASER_ADDRESS(baser) + id * esz;
+ addr = base + id * esz;
gfn = addr >> PAGE_SHIFT;
if (eaddr)
@@ -798,7 +792,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
/* Each 1st level entry is represented by a 64-bit value. */
if (kvm_read_guest_lock(its->dev->kvm,
- BASER_ADDRESS(baser) + index * sizeof(indirect_ptr),
+ base + index * sizeof(indirect_ptr),
&indirect_ptr, sizeof(indirect_ptr)))
return false;
@@ -808,11 +802,7 @@ static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
if (!(indirect_ptr & BIT_ULL(63)))
return false;
- /*
- * Mask the guest physical address and calculate the frame number.
- * Any address beyond our supported 48 bits of PA will be caught
- * by the actual check in the final step.
- */
+ /* Mask the guest physical address and calculate the frame number. */
indirect_ptr &= GENMASK_ULL(51, 16);
/* Find the address of the actual entry */
@@ -1304,9 +1294,6 @@ static u64 vgic_sanitise_its_baser(u64 reg)
GITS_BASER_OUTER_CACHEABILITY_SHIFT,
vgic_sanitise_outer_cacheability);
- /* Bits 15:12 contain bits 51:48 of the PA, which we don't support. */
- reg &= ~GENMASK_ULL(15, 12);
-
/* We support only one (ITS) page size: 64K */
reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
@@ -1325,11 +1312,8 @@ static u64 vgic_sanitise_its_cbaser(u64 reg)
GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
vgic_sanitise_outer_cacheability);
- /*
- * Sanitise the physical address to be 64k aligned.
- * Also limit the physical addresses to 48 bits.
- */
- reg &= ~(GENMASK_ULL(51, 48) | GENMASK_ULL(15, 12));
+ /* Sanitise the physical address to be 64k aligned. */
+ reg &= ~GENMASK_ULL(15, 12);
return reg;
}
@@ -1375,7 +1359,7 @@ static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
if (!its->enabled)
return;
- cbaser = CBASER_ADDRESS(its->cbaser);
+ cbaser = GITS_CBASER_ADDRESS(its->cbaser);
while (its->cwriter != its->creadr) {
int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
@@ -2233,7 +2217,7 @@ static int vgic_its_restore_device_tables(struct vgic_its *its)
if (!(baser & GITS_BASER_VALID))
return 0;
- l1_gpa = BASER_ADDRESS(baser);
+ l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
if (baser & GITS_BASER_INDIRECT) {
l1_esz = GITS_LVL1_ENTRY_SIZE;
@@ -2305,7 +2289,7 @@ static int vgic_its_save_collection_table(struct vgic_its *its)
{
const struct vgic_its_abi *abi = vgic_its_get_abi(its);
u64 baser = its->baser_coll_table;
- gpa_t gpa = BASER_ADDRESS(baser);
+ gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
struct its_collection *collection;
u64 val;
size_t max_size, filled = 0;
@@ -2354,7 +2338,7 @@ static int vgic_its_restore_collection_table(struct vgic_its *its)
if (!(baser & GITS_BASER_VALID))
return 0;
- gpa = BASER_ADDRESS(baser);
+ gpa = GITS_BASER_ADDR_48_to_52(baser);
max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
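The vgic-its.c hunks drop the local 48-bit BASER_ADDRESS()/CBASER_ADDRESS() masks in favour of helpers that understand the GICv3 52-bit encoding: with 64K pages, bits [15:12] of GITS_BASER carry bits [51:48] of the physical address. A sketch of that unpacking, under the stated 64K-page assumption (the helper name is illustrative; the patch itself uses GITS_BASER_ADDR_48_to_52()):

	#include <linux/bits.h>
	#include <linux/types.h>

	/*
	 * Sketch: rebuild a 52-bit PA from a 64K-page GITS_BASER value,
	 * where PA[47:16] lives in bits [47:16] and PA[51:48] in [15:12].
	 */
	static inline u64 baser_addr_52_sketch(u64 baser)
	{
		return (baser & GENMASK_ULL(47, 16)) |
		       ((baser & GENMASK_ULL(15, 12)) << 36);	/* move [15:12] up to [51:48] */
	}

This is also why vgic_sanitise_its_cbaser() stops clearing GENMASK_ULL(51, 48): those bits are now a legitimate part of the command-queue address, and only the 64K alignment bits [15:12] are still forced to zero there.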
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 6ada2432e37c..114dce9f4bf5 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -25,7 +25,7 @@
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
phys_addr_t addr, phys_addr_t alignment)
{
- if (addr & ~KVM_PHYS_MASK)
+ if (addr & ~kvm_phys_mask(kvm))
return -E2BIG;
if (!IS_ALIGNED(addr, alignment))
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index a2a175b08b17..b3d1f0985117 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -364,7 +364,6 @@ static u64 vgic_sanitise_pendbaser(u64 reg)
vgic_sanitise_outer_cacheability);
reg &= ~PENDBASER_RES0_MASK;
- reg &= ~GENMASK_ULL(51, 48);
return reg;
}
@@ -382,7 +381,6 @@ static u64 vgic_sanitise_propbaser(u64 reg)
vgic_sanitise_outer_cacheability);
reg &= ~PROPBASER_RES0_MASK;
- reg &= ~GENMASK_ULL(51, 48);
return reg;
}
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 9e65feb6fa58..3710342cf6ad 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -83,6 +83,7 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
ring->coalesced_mmio[ring->last].phys_addr = addr;
ring->coalesced_mmio[ring->last].len = len;
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+ ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
spin_unlock(&dev->kvm->ring_lock);
@@ -140,6 +141,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
int ret;
struct kvm_coalesced_mmio_dev *dev;
+ if (zone->pio != 1 && zone->pio != 0)
+ return -EINVAL;
+
dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -149,8 +153,9 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
dev->zone = *zone;
mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
- zone->size, &dev->dev);
+ ret = kvm_io_bus_register_dev(kvm,
+ zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
+ zone->addr, zone->size, &dev->dev);
if (ret < 0)
goto out_free_dev;
list_add_tail(&dev->list, &kvm->coalesced_zones);
@@ -174,7 +179,8 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
+ kvm_io_bus_unregister_dev(kvm,
+ zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
kvm_iodevice_destructor(&dev->dev);
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f986e31fa68c..786ade1843a2 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -219,7 +219,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
me = get_cpu();
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!test_bit(i, vcpu_bitmap))
+ if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
continue;
kvm_make_request(req, vcpu);
@@ -243,12 +243,10 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
cpumask_var_t cpus;
bool called;
- static unsigned long vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)]
- = {[0 ... BITS_TO_LONGS(KVM_MAX_VCPUS)-1] = ULONG_MAX};
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
- called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus);
+ called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
free_cpumask_var(cpus);
return called;
@@ -807,20 +805,25 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
* sorted array and known changed memslot position.
*/
static void update_memslots(struct kvm_memslots *slots,
- struct kvm_memory_slot *new)
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
int id = new->id;
int i = slots->id_to_index[id];
struct kvm_memory_slot *mslots = slots->memslots;
WARN_ON(mslots[i].id != id);
- if (!new->npages) {
- WARN_ON(!mslots[i].npages);
- if (mslots[i].npages)
- slots->used_slots--;
- } else {
- if (!mslots[i].npages)
- slots->used_slots++;
+ switch (change) {
+ case KVM_MR_CREATE:
+ slots->used_slots++;
+ WARN_ON(mslots[i].npages || !new->npages);
+ break;
+ case KVM_MR_DELETE:
+ slots->used_slots--;
+ WARN_ON(new->npages || !mslots[i].npages);
+ break;
+ default:
+ break;
}
while (i < KVM_MEM_SLOTS_NUM - 1 &&
@@ -1056,7 +1059,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
memset(&new.arch, 0, sizeof(new.arch));
}
- update_memslots(slots, &new);
+ update_memslots(slots, &new, change);
old_memslots = install_new_memslots(kvm, as_id, slots);
kvm_arch_commit_memory_region(kvm, mem, &old, &new, change);
@@ -1311,8 +1314,12 @@ unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
/*
- * If writable is set to false, the hva returned by this function is only
- * allowed to be read.
+ * Return the hva of a @gfn and the R/W attribute if possible.
+ *
+ * @slot: the kvm_memory_slot which contains @gfn
+ * @gfn: the gfn to be translated
+ * @writable: used to return the read/write attribute of the @slot if the hva
+ * is valid and @writable is not NULL
*/
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
gfn_t gfn, bool *writable)
@@ -2946,6 +2953,8 @@ static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
return KVM_COALESCED_MMIO_PAGE_OFFSET;
+ case KVM_CAP_COALESCED_PIO:
+ return 1;
#endif
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
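Finally, the kvm_main.c hunks make two generic cleanups: kvm_make_all_cpus_request() now passes a NULL vcpu_bitmap, which kvm_make_vcpus_request_mask() interprets as "request every vCPU", replacing a static all-ones bitmap sized for KVM_MAX_VCPUS; and update_memslots() adjusts used_slots from the explicit kvm_mr_change value instead of inferring create/delete from npages. The new KVM_CAP_COALESCED_PIO case advertises the PIO coalescing above; a sketch of probing it from userspace via the standard KVM_CHECK_EXTENSION ioctl, assuming a uapi header that defines the capability:

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* Sketch: returns nonzero when coalesced PIO zones may be registered. */
	static int has_coalesced_pio_sketch(int kvm_fd)
	{
		return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_PIO) > 0;
	}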